\documentclass[letterpaper,11pt]{article}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{enumerate}
\title{Linear Algebra Theorems}
\author{Linear Algebra and Its Applications (David C. Lay)}
\begin{document}
\maketitle
\section{Linear Equations in Linear Algebra}
\subsection{Systems of Linear Equations}
A system of linear equations has either
\begin{enumerate}
\item no solution, or
\item exactly one solution, or
\item infinitely many solutions.
\end{enumerate}
\subsection{Row Reduction and Echelon Forms}
\subsubsection{Definition}
A rectangular matrix is in \textbf{echelon form} (or \textbf{row echelon form}) if it has the following three properties:
\begin{enumerate}
\item All nonzero rows are above any rows of all zeros.
\item Each leading entry of a row is in a column to the right of the leading entry of the row above it.
\item All entries in a column below a leading entry are zeros.
\end{enumerate}
If a matrix in echelon form satisfies the following additional conditions, then it is in \textbf{reduced echelon form} (or \textbf{reduced row echelon form}).
\begin{enumerate}
\item The leading entry in each nonzero row is 1.
\item Each leading 1 is the only nonzero entry in its column.
\end{enumerate}
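For example, the matrix
\begin{equation}
\begin{bmatrix}
1 & 0 & 2 & 0 \\
0 & 1 & -3 & 0 \\
0 & 0 & 0 & 1
\end{bmatrix}
\end{equation}
is in reduced echelon form: the leading entry of each nonzero row is 1, each leading 1 lies to the right of the one above it, and each leading 1 is the only nonzero entry in its column.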
\subsubsection{Theorem 1}
\textbf{Uniqueness of the Reduced Echelon Form:} Each matrix is row equivalent to one and only one reduced echelon form.
\subsubsection{Definition}
A \textbf{pivot position} in a matrix $A$ is a location in $A$ that corresponds to a leading 1 in the reduced echelon form of $A$. A \textbf{pivot column} is a column of $A$ that contains a pivot position.
\subsubsection{Theorem 2}
\textbf{Existence and Uniqueness Theorem:} A linear system is consistent if and only if the rightmost column of the augmented matrix is \textit{not} a pivot column -- that is, if and only if an echelon form of the augmented matrix has \textit{no} row of the form
\begin{equation}
\begin{bmatrix}
0 & \cdots & 0 & b
\end{bmatrix}
\textrm{with } b \textrm{ nonzero}
\end{equation}
If a linear system is consistent, then the solution set contains either (i) a unique solution, when there are no free variables, or (ii) infinitely many solutions, when there is at least one free variable.
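For example, if row reduction of an augmented matrix produces the row
\begin{equation}
\begin{bmatrix}
0 & 0 & 0 & 4
\end{bmatrix}
\end{equation}
then the corresponding equation is $0x_1+0x_2+0x_3=4$, which has no solution, so the system is inconsistent.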
\subsection{Vector Equations}
\textbf{Algebraic Properties of $\mathbb{R}^n$:} For all $\mathbf{u}$, $\mathbf{v}$, $\mathbf{w}$ in $\mathbb{R}^n$ and all scalars $c$ and $d$:
\begin{enumerate}[(i)]
\item $\mathbf{u}+\mathbf{v}=\mathbf{v}+\mathbf{u}$
\item $(\mathbf{u}+\mathbf{v})+\mathbf{w}=\mathbf{u}+(\mathbf{v}+\mathbf{w})$
\item $\mathbf{u}+\mathbf{0}=\mathbf{0}+\mathbf{u}=\mathbf{u}$
\item $\mathbf{u}+(-\mathbf{u})=-\mathbf{u}+\mathbf{u}=\mathbf{0}$, where $-\mathbf{u}$ denotes $(-1)\mathbf{u}$
\item $c(\mathbf{u}+\mathbf{v})=c\mathbf{u}+c\mathbf{v}$
\item $(c+d)\mathbf{u}=c\mathbf{u}+d\mathbf{u}$
\item $c(d\mathbf{u})=(cd)(\mathbf{u})$
\item $1\mathbf{u}=\mathbf{u}$
\end{enumerate}
A vector equation
\begin{equation}
x_1\mathbf{a}_1+x_2\mathbf{a}_2+\cdots+x_n\mathbf{a}_n=\mathbf{b}
\end{equation}
has the same solution set as the linear system whose augmented matrix is
\begin{equation}
\label{equ:aug}
\begin{bmatrix}
\mathbf{a}_1 & \mathbf{a}_2 & \cdots & \mathbf{a}_n & \mathbf{b}
\end{bmatrix}
\end{equation}
In particular, $\mathbf{b}$ can be generated by a linear combination of $\mathbf{a}_1$, $\cdots$, $\mathbf{a}_n$ if and only if there exists a solution to the linear system corresponding to (\ref{equ:aug}).
\subsubsection{Definition}
If $\mathbf{v}_1$, $\cdots$, $\mathbf{v}_p$ are in $\mathbb{R}^n$, then the set of all linear combinations of $\mathbf{v}_1$,$\cdots$,$\mathbf{v}_p$ is denoted by Span \{$\mathbf{v}_1$,$\cdots$,$\mathbf{v}_p$\} and is called the \textbf{subset of $\mathbb{R}^n$ spanned} (or \textbf{generated}) \textbf{by} $\mathbf{v}_1$, $\cdots$, $\mathbf{v}_p$. That is, Span \{$\mathbf{v}_1$, $\cdots$, $\mathbf{v}_p$\} is the collection of all vectors that can be written in the form
\begin{equation}
c_1\mathbf{v}_1+c_2\mathbf{v}_2+\cdots+c_p\mathbf{v}_p
\end{equation}
with $c_1$, $\cdots$, $c_p$ scalars.
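For example, with $\mathbf{v}_1=\begin{bmatrix}1\\0\\0\end{bmatrix}$ and $\mathbf{v}_2=\begin{bmatrix}0\\1\\0\end{bmatrix}$ in $\mathbb{R}^3$, Span \{$\mathbf{v}_1$, $\mathbf{v}_2$\} consists of all vectors of the form
\begin{equation}
c_1\mathbf{v}_1+c_2\mathbf{v}_2=
\begin{bmatrix}
c_1 \\ c_2 \\ 0
\end{bmatrix}
\end{equation}
that is, the $x_1x_2$-plane through the origin.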
\subsection{The Matrix Equation $A\mathbf{x}=\mathbf{b}$}
\subsubsection{Definition}
If $A$ is an $m\times n$ matrix, with columns $\mathbf{a}_1$, $\cdots$, $\mathbf{a}_n$, and if $\mathbf{x}$ is in $\mathbb{R}^n$, then the product of $A$ and $\mathbf{x}$, denoted by $A\mathbf{x}$, is \textbf{the linear combination of the columns of $A$ using the corresponding entries in $\mathbf{x}$ as weights}; that is
\begin{equation}
A\mathbf{x}=
\begin{bmatrix}
\mathbf{a}_1 & \mathbf{a}_2 & \cdots & \mathbf{a}_n
\end{bmatrix}
\begin{bmatrix}
x_1 \\ x_2 \\ \vdots \\ x_n
\end{bmatrix}
=x_1\mathbf{a}_1+x_2\mathbf{a}_2+\cdots+x_n\mathbf{a}_n
\end{equation}
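As a quick numerical illustration of this definition,
\begin{equation}
\begin{bmatrix}
1 & 2 & -1 \\ 0 & 3 & 2
\end{bmatrix}
\begin{bmatrix}
2 \\ 1 \\ 1
\end{bmatrix}
=2\begin{bmatrix}1\\0\end{bmatrix}+1\begin{bmatrix}2\\3\end{bmatrix}+1\begin{bmatrix}-1\\2\end{bmatrix}
=\begin{bmatrix}3\\5\end{bmatrix}
\end{equation}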
\subsubsection{Theorem 3}
If $A$ is an $m\times n$ matrix, with columns $\mathbf{a}_1$, $\cdots$, $\mathbf{a}_n$, and if $\mathbf{b}$ is in $\mathbb{R}^m$, the matrix equation
\begin{equation}
A\mathbf{x}=\mathbf{b}
\end{equation}
has the same solution set as the vector equation
\begin{equation}
x_1\mathbf{a}_1+x_2\mathbf{a}_2+\cdots+x_n\mathbf{a}_n=\mathbf{b}
\end{equation}
which, in turn, has the same solution set as the system of linear equations whose augmented matrix is
\begin{equation}
\begin{bmatrix}
\mathbf{a}_1 & \mathbf{a}_2 & \cdots & \mathbf{a}_n & \mathbf{b}
\end{bmatrix}
\end{equation}
The equation $A\mathbf{x}=\mathbf{b}$ has a solution if and only if $\mathbf{b}$ is a linear combination of the columns of $A$.
\subsubsection{Theorem 4}
Let $A$ be an $m\times n$ matrix. Then the following statements are logically equivalent. That is, for a particular $A$, either they are all true statements or they are all false.
\begin{enumerate}[a.]
\item For each $\mathbf{b}$ in $\mathbb{R}^m$, the equation $A\mathbf{x}=\mathbf{b}$ has a solution.
\item Each $\mathbf{b}$ in $\mathbb{R}^m$ is a linear combination of the columns of $A$.
\item The columns of $A$ span $\mathbb{R}^m$.
\item $A$ has a pivot position in every row.
\end{enumerate}
\subsubsection{Theorem 5}
If $A$ is an $m\times n$ matrix, $\mathbf{u}$ and $\mathbf{v}$ are vectors in $\mathbb{R}^n$, and $c$ is a scalar, then
\begin{enumerate}[a.]
\item $A(\mathbf{u}+\mathbf{v})=A\mathbf{u}+A\mathbf{v}$;
\item $A(c\mathbf{u})=c(A\mathbf{u})$.
\end{enumerate}
\subsection{Solution Set of Linear Systems}
\subsubsection{Homogeneous Linear Systems}
The homogeneous equation $A\mathbf{x}=\mathbf{0}$ has a nontrivial solution if and only if the equation has at least one free variable.
\subsubsection{Theorem 6}
Suppose the equation $A\mathbf{x}=\mathbf{b}$ is consistent for some given $\mathbf{b}$, and let $\mathbf{p}$ be a solution. Then the solution set of $A\mathbf{x}=\mathbf{b}$ is the set of all vectors of the form $\mathbf{w}=\mathbf{p}+\mathbf{v}_h$, where $\mathbf{v}_h$ is any solution of the homogeneous equation $A\mathbf{x}=\mathbf{0}$.
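For example, if the solutions of $A\mathbf{x}=\mathbf{0}$ are the multiples $t\mathbf{v}$ of a single vector $\mathbf{v}$, then the solution set of $A\mathbf{x}=\mathbf{b}$ is
\begin{equation}
\mathbf{w}=\mathbf{p}+t\mathbf{v}
\end{equation}
the line through $\mathbf{p}$ parallel to $\mathbf{v}$; geometrically, the solution set of $A\mathbf{x}=\mathbf{b}$ is the solution set of $A\mathbf{x}=\mathbf{0}$ translated by $\mathbf{p}$.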
\subsection{Applications of Linear Systems}
\subsection{Linear Independence}
\subsubsection{Definition}
An indexed set of vectors \{$\mathbf{v}_1,\cdots,\mathbf{v}_p$\} in $\mathbb{R}^n$ is said to be \textbf{linearly independent} if the vector equation
\begin{equation}
x_1\mathbf{v}_1+x_2\mathbf{v}_2+\cdots+x_p\mathbf{v}_p=\mathbf{0}
\end{equation}
has only the trivial solution. The set \{$\mathbf{v}_1$,$\cdots$,$\mathbf{v}_p$\} is said to be \textbf{linearly dependent} if there exist weights $c_1$,$\cdots$,$c_p$, not all zero, such that
\begin{equation}
c_1\mathbf{v}_1+c_2\mathbf{v}_2+\cdots+c_p\mathbf{v}_p=\mathbf{0}
\end{equation}
\subsubsection{Linear Independence of Matrix Columns}
The columns of a matrix $A$ are linearly independent if and only if the equation $A\mathbf{x}=\mathbf{0}$ has \textit{only} the trivial solution.
\subsubsection{Sets of One or Two Vectors}
A set of two vectors \{$\mathbf{v}_1$, $\mathbf{v}_2$\} is linearly dependent if at least one of the vectors is a multiple of the other. The set is linearly independent if and only if neither of the vectors is a multiple of the other.
\subsubsection{Theorem 7}
\textbf{Characterization of Linearly Dependent Sets:} An indexed set $S=\{\mathbf{v}_1, \cdots, \mathbf{v}_p\}$ of two or more vectors is linearly dependent if and only if at least one of the vectors in $S$ is a linear combination of the others. In fact, if $S$ is linearly dependent and $\mathbf{v}_1\neq\mathbf{0}$, then some $\mathbf{v}_j$ (with $j>1$) is a linear combination of the preceding vectors, $\mathbf{v}_1,\cdots,\mathbf{v}_{j-1}$.
\subsubsection{Theorem 8}
If a set contains more vectors than there are entries in each vector, then the set is linearly dependent. That is, any set \{$\mathbf{v}_1,\cdots,\mathbf{v}_p$\} in $\mathbb{R}^n$ is linearly dependent if $p>n$.
\subsubsection{Theorem 9}
If a set $S=\{\mathbf{v}_1,\cdots,\mathbf{v}_p\}$ in $\mathbb{R}^n$ contains the zero vector, then the set is linearly dependent.
\subsection{Introduction to Linear Transformations}
\subsubsection{Definition}
A transformation (or mapping) $T$ is linear if:
\begin{enumerate}[(i)]
\item $T(\mathbf{u}+\mathbf{v})=T(\mathbf{u})+T(\mathbf{v})$ for all $\mathbf{u,v}$ in the domain of $T$;
\item $T(c\mathbf{u})=cT(\mathbf{u})$ for all $\mathbf{u}$ and all scalars $c$.
\end{enumerate}
\subsubsection{Properties of Linear Transformations}
If $T$ is a linear transformation, then
\begin{equation}
T(\mathbf{0})=\mathbf{0}
\end{equation}
and
\begin{equation}
T(c\mathbf{u}+d\mathbf{v})=cT(\mathbf{u})+dT(\mathbf{v})
\end{equation}
for all vectors $\mathbf{u,v}$ in the domain of $T$ and all scalars $c,d$.
\subsection{The Matrix of a Linear Transformation}
\subsubsection{Theorem 10}
Let $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ be a linear transformation. Then there exists a unique matrix $A$ such that
\begin{equation}
T(\mathbf{x})=A\mathbf{x}\textrm{ for all }\mathbf{x}\textrm{ in }\mathbb{R}^n
\end{equation}
In fact, $A$ is the $m\times n$ matrix whose $j$th column is the vector $T(\mathbf{e}_j)$, where $\mathbf{e}_j$ is the $j$th column of the identity matrix in $\mathbb{R}^n$:
\begin{equation}
A=
\begin{bmatrix}
T(\mathbf{e}_1) & \cdots & T(\mathbf{e}_n)
\end{bmatrix}
\end{equation}
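For example, if $T:\mathbb{R}^2\rightarrow\mathbb{R}^2$ rotates each vector counterclockwise by $90^\circ$, then $T(\mathbf{e}_1)=\begin{bmatrix}0\\1\end{bmatrix}$ and $T(\mathbf{e}_2)=\begin{bmatrix}-1\\0\end{bmatrix}$, so the standard matrix is
\begin{equation}
A=\begin{bmatrix}
0 & -1 \\ 1 & 0
\end{bmatrix}
\end{equation}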
\subsubsection{Definition}
A mapping $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ is said to be \textbf{onto} $\mathbb{R}^m$ if each $\mathbf{b}$ in $\mathbb{R}^m$ is the image of \textit{at least one} $\mathbf{x}$ in $\mathbb{R}^n$.
\subsubsection{Definition}
A mapping $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ is said to be \textbf{one-to-one} if each $\mathbf{b}$ in $\mathbb{R}^m$ is the image of \textit{at most one} $\mathbf{x}$ in $\mathbb{R}^n$.
\subsubsection{Theorem 11}
Let $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ be a linear transformation. Then $T$ is one-to-one if and only if the equation $T(\mathbf{x})=\mathbf{0}$ has only the trivial solution.
\subsubsection{Theorem 12}
Let $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ be a linear transformation and let $A$ be the standard matrix for $T$. Then:
\begin{enumerate}[(i)]
\item $T$ maps $\mathbb{R}^n$ onto $\mathbb{R}^m$ if and only if the columns of $A$ span $\mathbb{R}^m$;
\item $T$ is one-to-one if and only if the columns of $A$ are linearly independent.
\end{enumerate}
\subsection{Linear Models in Business, Science, and Engineering}
\section{Matrix Algebra}
\subsection{Matrix Operation}
\subsubsection{Theorem 1}
Let $A$, $B$, and $C$ be matrices of the same size, and let $r$ and $s$ be scalars.
\begin{enumerate}[a.]
\item $A+B=B+A$
\item $(A+B)+C=A+(B+C)$
\item $A+0=A$
\item $r(A+B)=rA+rB$
\item $(r+s)A=rA+sA$
\item $r(sA)=(rs)A$
\end{enumerate}
\subsubsection{Definition}
If $A$ is an $m\times n$ matrix, and if $B$ is an $n\times p$ matrix with columns $\mathbf{b}_1,\cdots,\mathbf{b}_p$, then the product $AB$ is the $m\times p$ matrix whose columns are $A\mathbf{b}_1,\cdots,A\mathbf{b}_p$. That is,
\begin{equation}
AB=A
\begin{bmatrix}
\mathbf{b}_1 & \mathbf{b}_2 & \cdots & \mathbf{b}_p
\end{bmatrix}=
\begin{bmatrix}
A\mathbf{b}_1 & A\mathbf{b}_2 & \cdots & A\mathbf{b}_p
\end{bmatrix}
\end{equation}
Each column of $AB$ is a linear combination of the columns of $A$ using the weights from the corresponding column of $B$.
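For example, with $A=\begin{bmatrix}1 & 2\\3 & 4\end{bmatrix}$ and $B=\begin{bmatrix}1 & 0\\2 & 1\end{bmatrix}$, the first column of $AB$ is $A\mathbf{b}_1=1\begin{bmatrix}1\\3\end{bmatrix}+2\begin{bmatrix}2\\4\end{bmatrix}=\begin{bmatrix}5\\11\end{bmatrix}$ and the second is $A\mathbf{b}_2=\begin{bmatrix}2\\4\end{bmatrix}$, so
\begin{equation}
AB=\begin{bmatrix}
5 & 2 \\ 11 & 4
\end{bmatrix}
\end{equation}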
\subsubsection{Theorem 2}
Let $A$ be an $m\times n$ matrix, and let $B$ and $C$ have sizes for which the indicated sums and products are defined.
\begin{enumerate}[a.]
\item $A(BC)=(AB)C$ (associative law of multiplication)
\item $A(B+C)=AB+AC$ (left distributive law)
\item $(B+C)A=BA+CA$ (right distributive law)
\item $r(AB)=(rA)B=A(rB)$ for any scalar $r$
\item $I_mA=A=AI_n$ (identity for matrix multiplication)
\end{enumerate}
\textbf{Warnings:}
\begin{enumerate}
\item In general, $AB\neq BA$.
\item The cancellation laws do \textit{not} hold for matrix multiplication. That is, if $AB=AC$, then it is \textit{not} true in general that $B=C$.
\item If a product $AB$ is the zero matrix, you \textit{cannot} conclude in general that either $A=0$ or $B=0$.
\end{enumerate}
\subsubsection{Theorem 3}
Let $A$ and $B$ denote matrices whose sizes are appropriate for the following sums and products.
\begin{enumerate}[a.]
\item $(A^T)^T=A$
\item $(A+B)^T=A^T+B^T$
\item For any scalar $r$, $(rA)^T=rA^T$
\item $(AB)^T=B^TA^T$
\end{enumerate}
The transpose of a product of matrices equals the product of their transposes in the \textit{reverse} order.
\subsection{The Inverse of a Matrix}
\subsubsection{Theorem 4}
Let $A=\begin{bmatrix}
a & b \\ c & d
\end{bmatrix}$. If $ad-bc\neq 0$, then $A$ is invertible and
\begin{equation}
A^{-1}=\frac{1}{ad-bc}
\begin{bmatrix}
d & -b \\ -c & a
\end{bmatrix}
\end{equation}
If $ad-bc=0$, then $A$ is not invertible.
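For example, if $A=\begin{bmatrix}2 & 5\\1 & 3\end{bmatrix}$, then $ad-bc=2\cdot 3-5\cdot 1=1\neq 0$, so
\begin{equation}
A^{-1}=\frac{1}{1}\begin{bmatrix}
3 & -5 \\ -1 & 2
\end{bmatrix}=\begin{bmatrix}
3 & -5 \\ -1 & 2
\end{bmatrix}
\end{equation}
and one can check directly that $AA^{-1}=I$.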
\subsubsection{Theorem 5}
If $A$ is an invertible $n\times n$ matrix, then for each $\mathbf{b}$ in $\mathbb{R}^n$, the equation $A\mathbf{x}=\mathbf{b}$ has the unique solution $\mathbf{x}=A^{-1}\mathbf{b}$.
\subsubsection{Theorem 6}
\begin{enumerate}[a.]
\item If $A$ is an invertible matrix, then $A^{-1}$ is invertible and
\begin{equation}
(A^{-1})^{-1}=A
\end{equation}
\item If $A$ and $B$ are $n\times n$ invertible matrices, then so is $AB$, and the inverse of $AB$ is the product of the inverses of $A$ and $B$ in the reverse order. That is,
\begin{equation}
(AB)^{-1}=B^{-1}A^{-1}
\end{equation}
\item If $A$ is an invertible matrix, then so is $A^T$, and the inverse of $A^T$ is the transpose of $A^{-1}$. That is,
\begin{equation}
(A^T)^{-1}=(A^{-1})^T
\end{equation}
\end{enumerate}
The product of $n\times n$ invertible matrices is invertible, and the inverse is the product of their inverses in the reverse order.
If an elementary row operation is performed on an $m\times n$ matrix $A$, the resulting matrix can be written as $EA$, where the $m\times m$ matrix $E$ is created by performing the same row operation on $I_m$.
Each elementary matrix $E$ is invertible. The inverse of $E$ is the elementary matrix of the same type that transforms $E$ back into $I$.
\subsubsection{Theorem 7}
An $n\times n$ matrix $A$ is invertible if and only if $A$ is row equivalent to $I_n$, and in this case, any sequence of elementary row operations that reduces $A$ to $I_n$ also transforms $I_n$ into $A^{-1}$.
\subsection{Characterizations of Invertible Matrices}
\subsubsection{Theorem 8}
\textbf{The Invertible Matrix Theorem:} Let $A$ be a square $n\times n$ matrix. Then the following statements are equivalent. That is, for a given $A$, the statements are either all true or all false.
\begin{enumerate}[a.]
\item $A$ is an invertible matrix.
\item $A$ is row equivalent to the $n\times n$ identity matrix.
\item $A$ has $n$ pivot positions.
\item The equation $A\mathbf{x}=\mathbf{0}$ has only the trivial solution.
\item The columns of $A$ form a linearly independent set.
\item The linear transformation $\mathbf{x}\mapsto A\mathbf{x}$ is one-to-one.
\item The equation $A\mathbf{x}=\mathbf{b}$ has at least one solution for each $\mathbf{b}$ in $\mathbb{R}^n$.
\item The columns of $A$ span $\mathbb{R}^n$.
\item The linear transformation $\mathbf{x}\mapsto A\mathbf{x}$ maps $\mathbb{R}^n$ onto $\mathbb{R}^n$.
\item There is an $n\times n$ matrix $C$ such that $CA=I$.
\item There is an $n\times n$ matrix $D$ such that $AD=I$.
\item $A^T$ is an invertible matrix.
\end{enumerate}
Let $A$ and $B$ be square matrices. If $AB=I$, then $A$ and $B$ are both invertible, with $B=A^{-1}$ and $A=B^{-1}$.
\subsubsection{Theorem 9}
Let $T:\mathbb{R}^n\rightarrow\mathbb{R}^n$ be a linear transformation and let $A$ be the standard matrix for $T$. Then $T$ is invertible if and only if $A$ is an invertible matrix. In that case, the linear transformation $S$ given by $S(\mathbf{x})=A^{-1}\mathbf{x}$ is the unique function satisfying $S(T(\mathbf{x}))=\mathbf{x}$ and $T(S(\mathbf{x}))=\mathbf{x}$ for all $\mathbf{x}$ in $\mathbb{R}^n$.
\subsection{Partitioned Matrices}
\subsubsection{Theorem 10}
\textbf{Column-Row Expansion of $AB$:} If $A$ is $m\times n$ and $B$ is $n\times p$, then
\begin{eqnarray}
AB=\begin{bmatrix}
\textrm{col}_1(A) & \textrm{col}_2(A) & \cdots & \textrm{col}_n(A)
\end{bmatrix}
\begin{bmatrix}
\textrm{row}_1(B) \\ \textrm{row}_2(B) \\ \vdots \\ \textrm{row}_n(B)
\end{bmatrix}\\
=\textrm{col}_1(A)\textrm{row}_1(B)+\cdots +\textrm{col}_n(A)\textrm{row}_n(B)
\end{eqnarray}
\subsection{Matrix Factorizations}
\subsection{The Leontief Input-Output Model}
\subsection{Applications to Computer Graphics}
\subsection{Subspace of $\mathbb{R}^n$}
\subsubsection{Definition}
A \textbf{subspace} of $\mathbb{R}^n$ is any set $H$ in $\mathbb{R}^n$ that has three properties:
\begin{enumerate}[a.]
\item The zero vector is in $H$.
\item For each $\mathbf{u}$ and $\mathbf{v}$ in $H$, the sum $\mathbf{u}+\mathbf{v}$ is in $H$.
\item For each $\mathbf{u}$ in $H$ and each scalar $c$, the vector $c\mathbf{u}$ is in $H$.
\end{enumerate}
\subsubsection{Definition}
The \textbf{column space} of a matrix $A$ is the set Col $A$ of all linear combinations of the columns of $A$.
\subsubsection{Definition}
The \textbf{null space} of a matrix $A$ is the set Nul $A$ of all solutions to the homogeneous equation $A\mathbf{x}=\mathbf{0}$.
\subsubsection{Theorem 12}
The null space of an $m\times n$ matrix $A$ is a subspace of $\mathbb{R}^n$. Equivalently, the set of all solutions to a system $A\mathbf{x}=\mathbf{0}$ of $m$ homogeneous linear equations in $n$ unknowns is a subspace of $\mathbb{R}^n$.
\subsubsection{Definition}
A basis for a subspace $H$ of $\mathbb{R}^n$ is a linearly independent set in $H$ that spans $H$.
\subsubsection{Theorem 13}
The pivot columns of a matrix $A$ form a basis for the column space of $A$.
\subsection{Dimension and Rank}
\subsubsection{Definition}
Suppose the set $\mathcal{B}=\{\mathbf{b}_1,\cdots,\mathbf{b}_p\}$ is a basis for a subspace $H$. For each $\mathbf{x}$ in $H$, the \textbf{coordinates of x relative to the basis} $\mathcal{B}$ are the weights $c_1,\cdots,c_p$ such that $\mathbf{x}=c_1\mathbf{b}_1+\cdots+c_p\mathbf{b}_p$, and the vector in $\mathbb{R}^p$
\begin{equation}
[\mathbf{x}]_{\mathcal{B}}=
\begin{bmatrix}
c_1 \\ \vdots \\ c_p
\end{bmatrix}
\end{equation}
is called the \textbf{coordinate vector of x (relative to $\mathcal{B}$)} or the \textbf{$\mathcal{B}$-coordinate vector of x}.
\subsubsection{Definition}
The \textbf{dimension} of a nonzero subspace $H$, denoted by dim $H$, is the number of vectors in any basis for $H$. The dimension of the zero subspace $\{\mathbf{0}\}$ is defined to be zero.
\subsubsection{Definition}
The \textbf{rank} of a matrix $A$, denoted by rank $A$, is the dimension of the column space of $A$.
\subsubsection{Theorem 14}
\textbf{The Rank Theorem:} If a matrix $A$ has $n$ columns, then rank $A$ + dim Nul $A=n$.
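For example, if $A$ is a $3\times 5$ matrix with three pivot columns, then rank $A=3$ and dim Nul $A=5-3=2$, so the homogeneous equation $A\mathbf{x}=\mathbf{0}$ has two free variables.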
\subsubsection{Theorem 15}
\textbf{The Basis Theorem:} Let $H$ be a $p$-dimensional subspace of $\mathbb{R}^n$. Any linearly independent set of exactly $p$ elements in $H$ is automatically a basis for $H$. Also, any set of $p$ elements of $H$ that spans $H$ is automatically a basis for $H$.
\subsubsection{The Invertible Matrix Theorem (continued)}
Let $A$ be an $n\times n$ matrix. Then the following statements are each equivalent to the statement that $A$ is an invertible matrix.
\begin{enumerate}[a.]
\item The columns of $A$ form a basis of $\mathbb{R}^n$.
\item Col $A=\mathbb{R}^n$
\item dim Col $A=n$
\item rank $A=n$
\item Nul $A=\{\mathbf{0}\}$
\item dim Nul $A=0$
\end{enumerate}
\section{Determinants}
\subsection{Introduction to Determinants}
\subsubsection{Definition}
For $n\geq 2$, the determinant of an $n\times n$ matrix $A=[a_{ij}]$ is the sum of $n$ terms of the form $\pm a_{1j}\textrm{det}A_{1j}$, with plus and minus signs alternating, where the entries $a_{11},a_{12},\cdots,a_{1n}$ are from the first row of $A$. In symbols,
\begin{eqnarray} \textrm{det}A=a_{11}\textrm{det}A_{11}-a_{12}\textrm{det}A_{12}+\cdots+(-1)^{1+n}a_{1n}\textrm{det}A_{1n}\\
=\sum_{j=1}^{n}(-1)^{1+j}a_{1j}\textrm{det}A_{1j}
\end{eqnarray}
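For example, expanding across the first row,
\begin{equation}
\textrm{det}\begin{bmatrix}
1 & 2 & 0 \\ 0 & 3 & 1 \\ 2 & 0 & 1
\end{bmatrix}
=1\cdot\textrm{det}\begin{bmatrix}3 & 1\\0 & 1\end{bmatrix}
-2\cdot\textrm{det}\begin{bmatrix}0 & 1\\2 & 1\end{bmatrix}
+0\cdot\textrm{det}\begin{bmatrix}0 & 3\\2 & 0\end{bmatrix}
=3-2(-2)+0=7
\end{equation}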
\subsubsection{Theorem 1}
The determinant of an $n\times n$ matrix $A$ can be computed by a cofactor expansion across any row or down any column. The expansion across the $i$th row using the cofactors $C_{ij}=(-1)^{i+j}\textrm{det}A_{ij}$ is
\begin{equation}
\textrm{det}A=a_{i1}C_{i1}+a_{i2}C_{i2}+\cdots+a_{in}C_{in}
\end{equation}
The cofactor expansion down the $j$th column is
\begin{equation}
\textrm{det}A=a_{1j}C_{1j}+a_{2j}C_{2j}+\cdots+a_{nj}C_{nj}
\end{equation}
\subsubsection{Theorem 2}
If $A$ is a triangular matrix, then det $A$ is the product of the entries on the main diagonal of $A$.
\subsection{Properties of Determinants}
\subsubsection{Theorem 3}
\textbf{Row Operations:} Let $A$ be a square matrix.
\begin{enumerate}[a.]
\item If a multiple of one row of $A$ is added to another row to produce a matrix $B$, then $\textrm{det}B$ = $\textrm{det}A$.
\item If two rows of $A$ are interchanged to produce $B$, then $\textrm{det}B$ = $-\textrm{det}A$.
\item If one row of $A$ is multiplied by $k$ to produce $B$, then $\textrm{det}B$ = $k\cdot\textrm{det}A$.
\end{enumerate}
\subsubsection{Theorem 4}
A square matrix $A$ is invertible if and only if $\textrm{det}A\neq 0$.
\subsubsection{Theorem 5}
If $A$ is an $n\times n$ matrix, then $\textrm{det}A^T=\textrm{det}A$.
\subsubsection{Theorem 6}
\textbf{Multiplicative Property:} If $A$ and $B$ are $n\times n$ matrices, then $\textrm{det}AB=(\textrm{det}A)(\textrm{det}B)$.
\subsection{Cramer's Rule, Volume, and Linear Transformations}
\subsubsection{Theorem 7}
\textbf{Cramer's Rule:} Let $A$ be an invertible $n\times n$ matrix. For any $\mathbf{b}$ in $\mathbb{R}^n$, the unique solution $\mathbf{x}$ of $A\mathbf{x}=\mathbf{b}$ has entries given by
\begin{equation}
x_i=\frac{\textrm{det}A_i(\mathbf{b})}{\textrm{det}A}\textrm{, }i=1,2,\cdots,n
\end{equation}
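For example, for the system $3x_1-2x_2=6$, $-5x_1+4x_2=8$, we have $\textrm{det}A=3\cdot 4-(-2)(-5)=2$, so
\begin{equation}
x_1=\frac{\textrm{det}\begin{bmatrix}6 & -2\\8 & 4\end{bmatrix}}{2}=\frac{40}{2}=20\textrm{, }
x_2=\frac{\textrm{det}\begin{bmatrix}3 & 6\\-5 & 8\end{bmatrix}}{2}=\frac{54}{2}=27
\end{equation}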
\subsubsection{Theorem 8}
\textbf{An Inverse Formula:} Let $A$ be an invertible $n\times n$ matrix. Then
\begin{equation}
A^{-1}=\frac{1}{\textrm{det}A}\textrm{adj}A
\end{equation}
\subsubsection{Theorem 9}
If $A$ is a $2\times 2$ matrix, the area of the parallelogram determined by the columns of $A$ is $|\textrm{det}A|$. If $A$ is a $3\times 3$ matrix, the volume of the parallelepiped determined by the columns of $A$ is $|\textrm{det}A|$.
\subsubsection{Theorem 10}
Let $T:\mathbb{R}^2\rightarrow\mathbb{R}^2$ be the linear transformation determined by a $2\times 2$ matrix $A$. If $S$ is a parallelogram in $\mathbb{R}^2$, then
\begin{equation}
\{\textrm{area of }T(S)\}=|\textrm{det}A|\cdot\{\textrm{area of }S\}
\end{equation}
If $T$ is determined by a $3\times 3$ matrix $A$, and if $S$ is a parallelepiped in $\mathbb{R}^3$, then
\begin{equation}
\{\textrm{volume of }T(S)\}=|\textrm{det}A|\cdot\{\textrm{volume of }S\}
\end{equation}
\section{Vector Spaces}
\subsection{Vector Spaces and Subspaces}
\subsubsection{Definition}
A \textbf{vector space} is a nonempty set $V$ of objects, called \textit{vectors}, on which are defined two operations, called \textit{addition} and \textit{multiplication by scalars} (real numbers), subject to the ten axioms (or rules) listed below. The axioms must hold for all vectors $\mathbf{u}$, $\mathbf{v}$, and $\mathbf{w}$ in $V$ and for all scalars $c$ and $d$.
\begin{enumerate}
\item The sum of $\mathbf{u}$ and $\mathbf{v}$, denoted $\mathbf{u}+\mathbf{v}$, is in $V$.
\item $\mathbf{u}+\mathbf{v}=\mathbf{v}+\mathbf{u}$
\item $(\mathbf{u}+\mathbf{v})+\mathbf{w}=\mathbf{u}+(\mathbf{v}+\mathbf{w})$
\item There is a \textbf{zero} vector $\mathbf{0}$ in $V$ such that $\mathbf{u}+\mathbf{0}=\mathbf{u}$.
\item For each $\mathbf{u}$ in $V$, there is a vector $-\mathbf{u}$ in $V$ such that $\mathbf{u}+(-\mathbf{u})=\mathbf{0}$.
\item The scalar multiple of $\mathbf{u}$ by $c$, denoted by $c\mathbf{u}$, is in $V$.
\item $c(\mathbf{u}+\mathbf{v})=c\mathbf{u}+c\mathbf{v}$
\item $(c+d)\mathbf{u}=c\mathbf{u}+d\mathbf{u}$
\item $c(d\mathbf{u})=(cd)\mathbf{u}$
\item $1\mathbf{u}=\mathbf{u}$
\end{enumerate}
\subsubsection{Definition}
A \textbf{subspace} of a vector space $V$ is a subset $H$ of $V$ that has three properties:
\begin{enumerate}[a.]
\item The zero vector of $V$ is in $H$.
\item $H$ is closed under vector addition. That is, for each $\mathbf{u}$ and $\mathbf{v}$ in $H$, the sum $\mathbf{u}+\mathbf{v}$ is in $H$.
\item $H$ is closed under multiplication by scalars. That is, for each $\mathbf{u}$ in $H$ and each scalar $c$, the vector $c\mathbf{u}$ is in $H$.
\end{enumerate}
\subsubsection{Theorem 1}
If $\mathbf{v}_1,\cdots,\mathbf{v}_p$ are in a vector space $V$, then Span $\{\mathbf{v}_1,\cdots,\mathbf{v}_p\}$ is a subspace of $V$.
\subsection{Null Spaces, Column Spaces, and Linear Transformation}
\subsubsection{Definition}
The \textbf{null space} of an $m\times n$ matrix $A$, written as Nul $A$, is the set of all solutions to the homogeneous equation $A\mathbf{x}=\mathbf{0}$. In set notation,
\begin{equation}
\textrm{Nul}A=\{\mathbf{x}:\mathbf{x}\textrm{ is in }\mathbb{R}^n\textrm{ and }A\mathbf{x}=\mathbf{0}\}
\end{equation}
\subsubsection{Theorem 2}
The null space of an $m\times n$ matrix $A$ is a subspace of $\mathbb{R}^n$. Equivalently, the set of all solutions to a system $A\mathbf{x}=\mathbf{0}$ of $m$ homogeneous linear equations in $n$ unknowns is a subspace of $\mathbb{R}^n$.
\subsubsection{Definition}
The \textbf{column space} of an $m\times n$ matrix $A$, written as Col $A$, is the set of all linear combinations of the columns of $A$. If $A=\begin{bmatrix}
\mathbf{a}_1 & \cdots & \mathbf{a}_n
\end{bmatrix}$, then
\begin{equation}
\textrm{Col }A=\textrm{Span}\{\mathbf{a}_1,\cdots,\mathbf{a}_n\}
\end{equation}
\subsubsection{Theorem 3}
The column space of an $m\times n$ matrix $A$ is a subspace of $\mathbb{R}^m$.
\begin{equation}
\textrm{Col }A=\{\mathbf{b}:\mathbf{b}=A\mathbf{x}\textrm{ for some }\mathbf{x}\textrm{ in }\mathbb{R}^n\}
\end{equation}
The column space of an $m\times n$ matrix $A$ is all of $\mathbb{R}^m$ if and only if the equation $A\mathbf{x}=\mathbf{b}$ has a solution for each $\mathbf{b}$ in $\mathbb{R}^m$.
\subsubsection{Definition}
A \textbf{linear transformation} $T$ from a vector space $V$ into a vector space $W$ is a rule that assigns to each vector $\mathbf{x}$ in $V$ a unique vector $T(\mathbf{x})$ in $W$, such that
\begin{enumerate}[(i)]
\item $T(\mathbf{u}+\mathbf{v})=T(\mathbf{u})+T(\mathbf{v})$ for all $\mathbf{u,v}$ in $V$, and
\item $T(c\mathbf{u})=cT(\mathbf{u})$ for all $\mathbf{u}$ in $V$ and all scalars $c$.
\end{enumerate}
\subsection{Linearly Independent Sets; Bases}
\subsubsection{Theorem 4}
An indexed set $\{\mathbf{v}_1,\dots,\mathbf{v}_p\}$ of two or more vectors, with $\mathbf{v_1}\neq\mathbf{0}$, is linearly dependent if and only if some $\mathbf{v}_j$ (with $j>1$) is a linear combination of the preceding vectors, $\mathbf{v}_1,\dots,\mathbf{v}_{j-1}$.
\subsubsection{Definition}
Let $H$ be a subspace of a vector space $V$. An indexed set of vectors $\mathcal{B}=\{\mathbf{b}_1,\dots,\mathbf{b}_p\}$ in $V$ is a \textbf{basis} for $H$ if
\begin{enumerate}[(i)]
\item $\mathcal{B}$ is a linearly independent set, and
\item the subspace spanned by $\mathcal{B}$ coincides with $H$; that is,
\begin{equation}
H=\textrm{Span}\{\mathbf{b}_1,\dots,\mathbf{b}_p\}
\end{equation}
\end{enumerate}
\subsubsection{Theorem 5}
\textbf{The Spanning Set Theorem:} Let $S=\{\mathbf{v}_1,\dots,\mathbf{v}_p\}$ be a set in $V$, and let $H=\textrm{Span}\{\mathbf{v}_1,\dots,\mathbf{v}_p\}$.
\begin{enumerate}[a.]
\item If one of the vectors in $S$--say, $\mathbf{v}_k$--is a linear combination of the remaining vectors in $S$, then the set formed from $S$ by removing $\mathbf{v}_k$ still spans $H$.
\item If $H\neq\{\mathbf{0}\}$, some subset of $S$ is a basis for $H$.
\end{enumerate}
Elementary row operations on a matrix do not affect the linear dependence relations among the columns of the matrix.
\subsubsection{Theorem 6}
The pivot columns of a matrix $A$ form a basis for Col $A$.
\subsection{Coordinate Systems}
\subsubsection{Theorem 7}
\textbf{The Unique Representation Theorem:} Let $\mathcal{B}=\{\mathbf{b}_1,\dots,\mathbf{b}_n\}$ be a basis for a vector space $V$. Then for each $\mathbf{x}$ in $V$, there exists a unique set of scalars $c_1,\dots,c_n$ such that
\begin{equation}
\mathbf{x}=c_1\mathbf{b}_1+\cdots+c_n\mathbf{b}_n
\end{equation}
\subsubsection{Definition}
Suppose $\mathcal{B}=\{\mathbf{b}_1,\dots,\mathbf{b}_n\}$ is a basis for $V$ and $\mathbf{x}$ is in $V$. The \textbf{coordinates of x relative to the basis $\mathcal{B}$} (or the \textbf{$\mathcal{B}$-coordinates of x}) are the weights $c_1,\dots,c_n$ such that $\mathbf{x}=c_1\mathbf{b}_1+\cdots+c_n\mathbf{b}_n$.
\subsubsection{Theorem 8}
Let $\mathcal{B}=\{\mathbf{b}_1,\dots,\mathbf{b}_n\}$ be a basis for a vector space $V$. Then the coordinate mapping $\mathbf{x}\mapsto[\mathbf{x}]_\mathcal{B}$ is a one-to-one linear transformation from $V$ onto $\mathbb{R}^n$.
\subsection{The Dimension of a Vector Space}
\subsubsection{Theorem 9}
If a vector space $V$ has a basis $\mathcal{B}=\{\mathbf{b}_1,\dots,\mathbf{b}_n\}$, then any set in $V$ containing more than $n$ vectors must be linearly dependent.
\subsubsection{Theorem 10}
If a vector space $V$ has a basis of $n$ vectors, then every basis of $V$ must consist of exactly $n$ vectors.
\subsubsection{Definition}
If $V$ is spanned by a finite set, then $V$ is said to be \textbf{finite-dimensional}, and the dimension of $V$, written as dim $V$, is the number of vectors in a basis for $V$. The dimension of the zero vector space $\{\mathbf{0}\}$ is defined to be zero. If $V$ is not spanned by a finite set, then $V$ is said to be \textbf{infinite-dimensional}.
\subsubsection{Theorem 11}
Let $H$ be a subspace of a finite-dimensional vector space $V$. Any linearly independent set in $H$ can be expanded, if necessary, to a basis for $H$. Also, $H$ is finite-dimensional and
\begin{equation}
\textrm{dim }H\leq\textrm{dim }V
\end{equation}
\subsubsection{Theorem 12}
\textbf{The Basis Theorem:} Let $V$ be a $p$-dimensional vector space, $p\geq 1$. Any linearly independent set of exactly $p$ elements in $V$ is automatically a basis for $V$. Any set of exactly $p$ elements that spans $V$ is automatically a basis for $V$.
The dimension of Nul $A$ is the number of free variables in the equation $A\mathbf{x}=\mathbf{0}$, and the dimension of Col $A$ is the number of pivot columns in $A$.
\subsection{Rank}
\subsubsection{Theorem 13}
If two matrices $A$ and $B$ are row equivalent, then their row spaces are the same. If $B$ is in echelon form, the nonzero rows of $B$ form a basis for the row space of $A$ as well as for that of $B$.
\subsubsection{Definition}
The \textbf{rank} of $A$ is the dimension of the column space of $A$.
\subsubsection{Theorem 14}
\textbf{The Rank Theorem:} The dimensions of the column space and the row space of an $m\times n$ matrix $A$ are equal. This common dimension, the rank of $A$, also equals the number of pivot positions in $A$ and satisfies the equation
\begin{equation}
\textrm{rank }A+\textrm{dim Nul }A=n
\end{equation}
\subsubsection{Theorem}
\textbf{The Invertible Matrix Theorem (continued):} Let $A$ be an $n\times n$ matrix. Then the following statements are each equivalent to the statement that $A$ is an invertible matrix.
\begin{enumerate}[a.]
\item The columns of $A$ form a basis of $\mathbb{R}^n$.
\item Col $A=\mathbb{R}^n$
\item dim Col $A=n$
\item rank $A=n$
\item Nul $A=\{\mathbf{0}\}$
\item dim Nul $A=0$
\end{enumerate}
\subsection{Change of Basis}
\subsubsection{Theorem 15}
Let $\mathcal{B}=\{\mathbf{b}_1,\dots,\mathbf{b}_n\}$ and $\mathcal{C}=\{\mathbf{c}_1,\dots,\mathbf{c}_n\}$ be bases of a vector space $V$. Then there is a unique $n\times n$ matrix $\underset{\mathcal{C}\longleftarrow\mathcal{B}}{P}$ such that
\begin{equation}
[\mathbf{x}]_\mathcal{C}=\underset{\mathcal{C}\longleftarrow\mathcal{B}}{P}[\mathbf{x}]_\mathcal{B}
\end{equation}
The columns of $\underset{\mathcal{C}\longleftarrow\mathcal{B}}{P}$ are the $\mathcal{C}$-coordinate vectors of the vectors in the basis $\mathcal{B}$. That is,
\begin{equation}
\underset{\mathcal{C}\longleftarrow\mathcal{B}}{P}=\begin{bmatrix}
[\mathbf{b}_1]_\mathcal{C} & [\mathbf{b}_2]_\mathcal{C} & \cdots & [\mathbf{b}_n]_\mathcal{C}
\end{bmatrix}
\end{equation}
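As a small illustration (taking $\mathcal{C}$ to be the standard basis $\{\mathbf{e}_1,\mathbf{e}_2\}$ of $\mathbb{R}^2$), let $\mathbf{b}_1=\begin{bmatrix}1\\0\end{bmatrix}$ and $\mathbf{b}_2=\begin{bmatrix}1\\1\end{bmatrix}$. Then $[\mathbf{b}_j]_\mathcal{C}=\mathbf{b}_j$, so
\begin{equation}
\underset{\mathcal{C}\longleftarrow\mathcal{B}}{P}=\begin{bmatrix}
1 & 1 \\ 0 & 1
\end{bmatrix}
\end{equation}
and the vector $\mathbf{x}=2\mathbf{b}_1+3\mathbf{b}_2$, with $[\mathbf{x}]_\mathcal{B}=\begin{bmatrix}2\\3\end{bmatrix}$, has $[\mathbf{x}]_\mathcal{C}=\begin{bmatrix}5\\3\end{bmatrix}$.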
\subsection{Application to Difference Equations}
\subsubsection{Theorem 16}
If $a_n\neq 0$ and if $\{z_k\}$ is given, the equation
\begin{equation}
y_{k+n}+a_1y_{k+n-1}+\cdots+a_{n-1}y_{k+1}+a_ny_k=z_k\textrm{ for all }k
\end{equation}
has a unique solution whenever $y_0,\cdots,y_{n-1}$ are specified.
\subsubsection{Theorem 17}
The set $H$ of all solutions of the $n$th-order homogeneous linear difference equation
\begin{equation}
y_{k+n}+a_1y_{k+n-1}+\cdots+a_{n-1}y_{k+1}+a_ny_k=0\textrm{ for all }k
\end{equation}
is an $n$-dimensional vector space.
\subsection{Application to Markov Chains}
\subsubsection{Theorem 18}
If $P$ is an $n\times n$ regular stochastic matrix, then $P$ has a unique steady-state vector $\mathbf{q}$. Further, if $\mathbf{x}_0$ is any initial state and $\mathbf{x}_{k+1}=P\mathbf{x}_k$ for $k=0,1,2,\dots$, then the Markov chain $\{\mathbf{x}_k\}$ converges to $\mathbf{q}$ as $k\rightarrow\infty$.
\section{Eigenvalues and Eigenvectors}
\subsection{Eigenvectors and Eigenvalues}
\subsubsection{Definition}
An \textbf{eigenvector} of an $n\times n$ matrix $A$ is a nonzero vector $\mathbf{x}$ such that $A\mathbf{x}=\lambda\mathbf{x}$ for some scalar $\lambda$. A scalar $\lambda$ is called an \textbf{eigenvalue} of $A$ if there is a nontrivial solution $\mathbf{x}$ of $A\mathbf{x}=\lambda\mathbf{x}$; such an $\mathbf{x}$ is called an \textit{eigenvector corresponding to $\lambda$}.
\subsubsection{Theorem 1}
The eigenvalues of a triangular matrix are the entries on its main diagonal.
\subsubsection{Theorem 2}
If $\mathbf{v}_1,\dots,\mathbf{v}_r$ are eigenvectors that correspond to distinct eigenvalues $\lambda_1,\dots,\lambda_r$ of an $n\times n$ matrix $A$, then the set $\{\mathbf{v}_1,\dots,\mathbf{v}_r\}$ is linearly independent.
\subsection{The Characteristic Equation}
\subsubsection{Theorem}
\textbf{The Invertible Matrix Theorem: (continued)} Let $A$ be an $n\times n$ matrix. Then $A$ is invertible if and only if:
\begin{enumerate}[a.]
\item The number 0 is \textit{not} an eigenvalue of $A$.
\item The determinant of $A$ is \textit{not} zero.
\end{enumerate}
\subsubsection{Theorem 3}
\textbf{Property of Determinants:} Let $A$ and $B$ be $n\times n$ matrices.
\begin{enumerate}[a.]
\item $A$ is invertible if and only if det $A\neq 0$.
\item det $AB=(\textrm{det }A)(\textrm{det }B)$.
\item det $A^T=$det $A$.
\item If $A$ is triangular, then det $A$ is the product of the entries on the main diagonal of $A$.
\item A row replacement operation on $A$ does not change the determinant. A row interchange changes the sign of the determinant. A row scaling also scales the determinant by the same scalar factor.
\end{enumerate}
\subsubsection{The Characteristic Equation}
A scalar $\lambda$ is an eigenvalue of an $n\times n$ matrix $A$ if and only if $\lambda$ satisfies the characteristic equation
\begin{equation}
\textrm{det }(A-\lambda I)=0
\end{equation}
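For example, if $A=\begin{bmatrix}1 & 2\\2 & 1\end{bmatrix}$, then
\begin{equation}
\textrm{det }(A-\lambda I)=\textrm{det}\begin{bmatrix}1-\lambda & 2\\2 & 1-\lambda\end{bmatrix}=(1-\lambda)^2-4=(\lambda-3)(\lambda+1)
\end{equation}
so the eigenvalues of $A$ are $\lambda=3$ and $\lambda=-1$.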
\subsubsection{Theorem 4}
If $n\times n$ matrices $A$ and $B$ are similar, then they have the same characteristic polynomial and hence the same eigenvalues (with the same multiplicities).
\subsection{Diagonalization}
\subsubsection{Theorem 5}
\textbf{The Diagonalization Theorem:} An $n\times n$ matrix $A$ is diagonalizable if and only if $A$ has $n$ linearly independent eigenvectors.
In fact, $A=PDP^{-1}$, with $D$ a diagonal matrix, if and only if the columns of $P$ are $n$ linearly independent eigenvectors of $A$. In this case, the diagonal entries of $D$ are eigenvalues of $A$ that correspond, respectively, to the eigenvectors in $P$.
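For example, the matrix $A=\begin{bmatrix}1 & 2\\2 & 1\end{bmatrix}$ from the previous subsection has eigenvalues $3$ and $-1$ with eigenvectors $\begin{bmatrix}1\\1\end{bmatrix}$ and $\begin{bmatrix}1\\-1\end{bmatrix}$, which are linearly independent, so
\begin{equation}
A=PDP^{-1}\textrm{, where }P=\begin{bmatrix}
1 & 1 \\ 1 & -1
\end{bmatrix}\textrm{ and }D=\begin{bmatrix}
3 & 0 \\ 0 & -1
\end{bmatrix}
\end{equation}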
\subsubsection{Theorem 6}
An $n\times n$ matrix with $n$ distinct eigenvalues is diagonalizable.
\subsubsection{Theorem 7}
Let $A$ be an $n\times n$ matrix whose distinct eigenvalues are $\lambda_1,\dots,\lambda_p$.
\begin{enumerate}[a.]
\item For $1\leq k\leq p$, the dimension of the eigenspace for $\lambda_k$ is less than or equal to the multiplicity of the eigenvalue $\lambda_k$.
\item The matrix $A$ is diagonalizable if and only if the sum of the dimensions of the distinct eigenspaces equals $n$, and this happens if and only if the dimension of the eigenspace for each $\lambda_k$ equals the multiplicity of $\lambda_k$.
\item If $A$ is diagonalizable and $\mathcal{B}_k$ is a basis for the eigenspace corresponding to $\lambda_k$ for each $k$, then the total collection of vectors in the sets $\mathcal{B}_1,\dots,\mathcal{B}_p$ forms an eigenvector basis for $\mathbb{R}^n$.
\end{enumerate}
\subsection{Eigenvectors and Linear Transformations}
\subsubsection{Theorem 8}
\textbf{Diagonal Matrix Representation:} Suppose $A=PDP^{-1}$, where $D$ is a diagonal $n\times n$ matrix. If $\mathcal{B}$ is the basis for $\mathbb{R}^n$ formed from the columns of $P$, then $D$ is the $\mathcal{B}$-matrix for the transformation $\mathbf{x}\mapsto A\mathbf{x}$.
\subsection{Complex Eigenvalues}
\subsubsection{Theorem 9}
Let $A$ be a real $2\times 2$ matrix with a complex eigenvalue $\lambda=a-bi$ ($b\neq 0$) and an associated eigenvector $\mathbf{v}$ in $\mathbb{C}^2$. Then
\begin{equation}
A=PCP^{-1}\textrm{, where }P=\begin{bmatrix}
\textrm{Re }\mathbf{v} & \textrm{Im }\mathbf{v}
\end{bmatrix}\textrm{ and }C=\begin{bmatrix}
a & -b \\ b & a
\end{bmatrix}
\end{equation}
\subsection{Discrete Dynamical Systems}
\subsection{Applications to Differential Equations}
\subsection{Iterative Estimates for Eigenvalues}
\section{Orthogonality and Least Squares}
\subsection{Inner Product, Length, and Orthogonality}
\subsubsection{Theorem 1}
Let $\mathbf{u}$, $\mathbf{v}$, and $\mathbf{w}$ be vectors in $\mathbb{R}^n$, and let $c$ be a scalar. Then
\begin{enumerate}[a.]
\item $\mathbf{u\cdot v}=\mathbf{v\cdot u}$
\item $(\mathbf{u+v})\cdot\mathbf{w}=\mathbf{u}\cdot\mathbf{w}+\mathbf{v}\cdot\mathbf{w}$
\item $(c\mathbf{u})\cdot\mathbf{v}=c(\mathbf{u}\cdot\mathbf{v})=\mathbf{u}\cdot(c\mathbf{v})$
\item $\mathbf{u}\cdot\mathbf{u}\geq 0$, and $\mathbf{u}\cdot\mathbf{u}=0$ if and only if $\mathbf{u}=\mathbf{0}$
\end{enumerate}
\subsubsection{Definition}
The \textbf{length} (or \textbf{norm}) of $\mathbf{v}$ is the nonnegative scalar $||\mathbf{v}||$ defined by
\begin{equation}
||\mathbf{v}||=\sqrt{\mathbf{v\cdot v}}=\sqrt{v^2_1+v^2_2+\cdots+v^2_n}\textrm{ and }||\mathbf{v}||^2=\mathbf{v\cdot v}
\end{equation}
\subsubsection{Definition}
For $\mathbf{u}$ and $\mathbf{v}$ in $\mathbb{R}^n$, the \textbf{distance between u and v}, written as dist($\mathbf{u,v}$), is the length of the vector $\mathbf{u}-\mathbf{v}$. That is,
\begin{equation}
\mathrm{dist}(\mathbf{u,v})=||\mathbf{u}-\mathbf{v}||
\end{equation}
\subsubsection{Definition}
Two vectors $\mathbf{u}$ and $\mathbf{v}$ in $\mathbb{R}^n$ are \textbf{orthogonal} (to each other) if $\mathbf{u\cdot v}=0$.
\subsubsection{Theorem 2}
\textbf{The Pythagorean Theorem:} Two vectors $\mathbf{u}$ and $\mathbf{v}$ are orthogonal if and only if $||\mathbf{u}+\mathbf{v}||^2=||\mathbf{u}||^2+||\mathbf{v}||^2$.
\subsubsection{Theorem 3}
Let $A$ be an $m\times n$ matrix. The orthogonal complement of the row space of $A$ is the null space of $A$, and the orthogonal complement of the column space of $A$ is the null space of $A^T$:
\begin{equation}
(\textrm{Row }A)^\bot=\textrm{Nul }A\textrm{ and }(\textrm{Col }A)^\bot=\textrm{Nul }A^T
\end{equation}
\subsection{Orthogonal Sets}
\subsubsection{Theorem 4}
If $S=\{\mathbf{u}_1,\dots,\mathbf{u}_p\}$ is an orthogonal set of nonzero vectors in $\mathbb{R}^n$, then $S$ is linearly independent and hence is a basis for the subspace spanned by $S$.
\subsubsection{Definition}
An \textbf{orthogonal basis} for a subspace $W$ of $\mathbb{R}^n$ is a basis for $W$ that is also an orthogonal set.
\subsubsection{Theorem 5}
Let $\{\mathbf{u}_1,\dots,\mathbf{u}_p\}$ be an orthogonal basis for a subspace $W$ of $\mathbb{R}^n$. For each $\mathbf{y}$ in $W$, the weights in the linear combination
\begin{equation}
\mathbf{y}=c_1\mathbf{u}_1+\cdots+c_p\mathbf{u}_p
\end{equation}
are given by
\begin{equation}
c_j=\frac{\mathbf{y}\cdot\mathbf{u}_j}{\mathbf{u}_j\cdot\mathbf{u}_j}\textrm{ }(j=1,\dots,p)
\end{equation}
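For example, $\mathbf{u}_1=\begin{bmatrix}1\\1\end{bmatrix}$ and $\mathbf{u}_2=\begin{bmatrix}1\\-1\end{bmatrix}$ form an orthogonal basis for $\mathbb{R}^2$, and for $\mathbf{y}=\begin{bmatrix}3\\1\end{bmatrix}$ the weights are
\begin{equation}
c_1=\frac{\mathbf{y}\cdot\mathbf{u}_1}{\mathbf{u}_1\cdot\mathbf{u}_1}=\frac{4}{2}=2\textrm{ and }c_2=\frac{\mathbf{y}\cdot\mathbf{u}_2}{\mathbf{u}_2\cdot\mathbf{u}_2}=\frac{2}{2}=1\textrm{, so }\mathbf{y}=2\mathbf{u}_1+\mathbf{u}_2
\end{equation}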
\subsubsection{Theorem 6}
An $m\times n$ matrix $U$ has orthonormal columns if and only if $U^TU=I$.
\subsubsection{Theorem 7}
Let $U$ be an $m\times n$ matrix with orthonormal columns, and let $\mathbf{x}$ and $\mathbf{y}$ be in $\mathbb{R}^n$. Then
\begin{enumerate}[a.]
\item $||U\mathbf{x}||=||\mathbf{x}||$
\item $(U\mathbf{x})\cdot(U\mathbf{y})=\mathbf{x}\cdot\mathbf{y}$
\item $(U\mathbf{x})\cdot(U\mathbf{y})=0$ if and only if $\mathbf{x}\cdot\mathbf{y}=0$
\end{enumerate}
\subsection{Orthogonal Projections}
\subsubsection{Theorem 8}
\textbf{The Orthogonal Decomposition Theorem:} Let $W$ be a subspace of $\mathbb{R}^n$. Then each $\mathbf{y}$ in $\mathbb{R}^n$ can be written uniquely in the form
\begin{equation}
\mathbf{y=\hat{y}+z}
\end{equation}
where $\mathbf{\hat{y}}$ is in $W$ and $\mathbf{z}$ is in $W^\bot$. In fact, if $\{\mathbf{\mathbf{u}_1,\dots,\mathbf{u}_p}\}$ is any orthogonal basis of $W$, then
\begin{equation}
\mathbf{\hat{y}=\frac{y\cdot u_1}{u_1\cdot u_1}u_1+\cdots+\frac{y\cdot u_p}{u_p\cdot u_p}u_p}
\end{equation}
\subsubsection{Theorem 9}
\textbf{The Best Approximation Theorem:} Let $W$ be a subspace of $\mathbb{R}^n$, let $\mathbf{y}$ be any vector in $\mathbb{R}^n$, and let $\mathbf{\hat{y}}$ be the orthogonal projection of $\mathbf{y}$ onto $W$. Then $\mathbf{\hat{y}}$ is the closest point in $W$ to $\mathbf{y}$, in the sense that
\begin{equation}
\mathbf{||y-\hat{y}||<||y-v||}
\end{equation}
for all $\mathbf{v}$ in $W$ distinct from $\mathbf{\hat{y}}$.
\subsection{The Gram-Schmidt Process}
\subsection{Least-Squares Problems}
\subsection{Applications to Linear Models}
\subsection{Inner Product Spaces}
\subsubsection{Definition}
An \textbf{inner product} on a vector space $V$ is a function that, to each pair of vectors $\mathbf{u}$ and $\mathbf{v}$ in $V$, associates a real number $\langle\mathbf{u,v}\rangle$ and satisfies the following axioms, for all $\mathbf{u}$, $\mathbf{v}$, $\mathbf{w}$ in $V$ and all scalars $c$:
\begin{enumerate}
\item $\langle\mathbf{u,v}\rangle=\langle\mathbf{v,u}\rangle$
\item $\langle\mathbf{u}+\mathbf{v},\mathbf{w}\rangle=\langle\mathbf{u,w}\rangle+\langle\mathbf{v,w}\rangle$
\item $\langle c\mathbf{u},\mathbf{v}\rangle=c\langle\mathbf{u,v}\rangle$
\item $\langle\mathbf{u,u}\rangle\geq 0$ and $\langle\mathbf{u,u}\rangle=0$ if and only if $\mathbf{u=0}$
\end{enumerate}
A vector space with an inner product is called an \textbf{inner product space}.
\subsubsection{The Cauchy-Schwarz Inequality}
For all $\mathbf{u,v}$ in $V$,
\begin{equation}
\mathbf{|\langle u,v\rangle|\leq ||u||\textrm{ }||v||}
\end{equation}
\subsubsection{The Triangle Inequality}
For all $\mathbf{u,v}$ in $V$,
\begin{equation}
\mathbf{||u+v||\leq ||u||+||v||}
\end{equation}
\subsection{Applications of Inner Product Spaces}
\section{Symmetric Matrices and Quadratic Forms}
\subsection{Diagonalization of Symmetric Matrices}
\subsubsection{Theorem 1}
If $A$ is symmetric, then any two eigenvectors from different eigenspaces are orthogonal.
\subsubsection{Theorem 2}
An $n\times n$ matrix $A$ is orthogonally diagonalizable if and only if $A$ is a symmetric matrix.
\subsubsection{Theorem 3}
\textbf{The Spectral Theorem for Symmetric Matrices:} An $n\times n$ symmetric matrix $A$ has the following properties:
\begin{enumerate}
\item $A$ has $n$ real eigenvalues, counting multiplicities.
\item The dimension of the eigenspace for each eigenvalue $\lambda$ equals the multiplicity of $\lambda$ as a root of the characteristic equation.
\item The eigenspaces are mutually orthogonal, in the sense that eigenvectors corresponding to different eigenvalues are orthogonal.
\item $A$ is orthogonally diagonalizable.
\end{enumerate}
\subsection{Quadratic Forms}
\subsubsection{Theorem 4}
\textbf{The Principal Axes Theorem:} Let $A$ be an $n\times n$ symmetric matrix. Then there is an orthogonal change of variable, $\mathbf{x}=P\mathbf{y}$, that transforms the quadratic form $\mathbf{x}^TA\mathbf{x}$ into a quadratic form $\mathbf{y}^TD\mathbf{y}$ with no cross-product term.
\end{document}