import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
$ \vec{a} = \left[ {\begin{array}{c} 1 \\ 2\end{array} } \right] $
$ \vec{a} = \left[ {\begin{array}{c} 1 \\ 2 \\ 3 \\ 4 \\ 5\end{array} } \right] $
a_list = [1, 2, 3] # This is a Python List
a = np.array(a_list) # This is a NumPy Array with a Python List as the Input
b = np.array([1, 2, 3]) # We can directly put the list as the input as well
# Print Python List
print(a_list)
# Print NumPy Array
print(a)
# Print NumPy Array
print(b)
a_list # Looks exactly the same as when we printed the Python List
a # Wrapped with "array()"
# Python List Multiplied - repeats the list instead of multiplying element-wise
a_list * 2
# Adding an integer to a Python List raises a TypeError
a_list + 2
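For contrast, the NumPy array `a` created above broadcasts the scalar to every element (a quick check):
print(a * 2) # [2 4 6] - element-wise multiplication
print(a + 2) # [3 4 5] - element-wise addition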
x = np.arange(start=0, stop=10, step=2) # Evenly Spaced Values in [start, stop) with a Given Step
print(x)
x = np.linspace(start=0, stop=10, num=5) # num Evenly Spaced Values over [start, stop], Endpoints Included
print(x)
random_number = np.random.normal(loc=10, scale=5) # Mean = 10, Std = 5
print(random_number)
random_numbers = np.random.normal(loc=10, scale=5, size=(3,2))
print(random_numbers)
random_numbers = np.random.randn(3, 2) # Shape of Matrix (Array) Generated from the Standard Normal Distribution
print(random_numbers)
random_numbers = 10 + np.random.randn(3, 2) * 5 # Manually Shifting the Mean, and Scaling the Standard Deviation
print(random_numbers)
random_numbers = np.random.randn(3,2)
random_numbers[:,0] = 10 + random_numbers[:,0] * 5 # First Column with Mean = 10, Std = 5
random_numbers[:,1] = 50 + random_numbers[:,1] * 10 # Second Column with Mean = 50, Std = 10
print(random_numbers)
x = 10 + np.random.randn(10000) * 10 # Generate 10,000 Normally Distributed Numbers in an Array
print('Mean: {:.2f} | Standard Deviation: {:.2f}'.format(x.mean(), x.std())) # Verify the Mean and Standard Deviation
plt.hist(x, bins=20, color='green')
plt.show();
x = np.random.uniform(low=10, high=20, size=10000) # Generate 10,000 Uniformly Distributed Numbers in an Array
print('Mean: {:.2f} | Standard Deviation: {:.2f}'.format(x.mean(), x.std())) # Verify the Mean and Standard Deviation
plt.hist(x, bins=20, color='green')
plt.show();
print(random_numbers) # 3x2 Matrix
print(random_numbers.T) # 2x3 Matrix
$ \vec{a} = \left[ {\begin{array}{c} 1 \\ 2\end{array} } \right] $
a = np.array([1, 2])
$= 2 + \left[ {\begin{array}{c} 1 \\ 2\end{array} } \right]$
$= \left[ {\begin{array}{c} 3 \\ 4\end{array} } \right]$
2 + a
$= 2 \times \left[ {\begin{array}{c} 1 \\ 2\end{array} } \right]$
$= \left[ {\begin{array}{c} 2 \\ 4\end{array} } \right]$
2 * a
a + 2
a * 2
$ \vec{a} = \left[ {\begin{array}{c} 1 \\ 2\end{array} } \right] $
$ \vec{b} = \left[ {\begin{array}{c} 5 \\ 6\end{array} } \right] $
a = np.array([1, 2])
b = np.array([5, 6])
$= \left[ {\begin{array}{c} 1 \\ 2\end{array} } \right] + \left[ {\begin{array}{c} 5 \\ 6\end{array} } \right]$
$= \left[ {\begin{array}{c} 6 \\ 8\end{array} } \right] $
a + b
a * b
b + a
b * a
$= (1 \times 5) + (2 \times 6)$
$= 5 + 12$
$= 17$
np.dot(a, b)
A = np.array([[1, 2, 3],
              [4, 5, 6]])
B = np.array([[9, 8, 7, 6],
              [8, 7, 6, 5],
              [7, 6, 5, 4]])
print('Shape of A:\n', A.shape)
print('Shape of B:\n', B.shape)
A
10 + A
10 * A
A
B
# Element-wise operations between A (2x3) and B (3x4) raise a ValueError - the shapes do not match
A + B
B + A
A * B
B * A
A + A
B + B
A * A
B * B
$A = \left[ {\begin{array}{c} 1 & 2 & 3 \\ 4 & 5 & 6\end{array} } \right]$
$B = \left[ {\begin{array}{c} 9 & 8 & 7 & 6 \\ 8 & 7 & 6 & 5 \\ 7 & 6 & 5 & 4\end{array} } \right]$
$A.B = \left[ {\begin{array}{c} 1 & 2 & 3 \\ 4 & 5 & 6\end{array} } \right] . \left[ {\begin{array}{c} 9 & 8 & 7 & 6 \\ 8 & 7 & 6 & 5 \\ 7 & 6 & 5 & 4\end{array} } \right]$
$= \left[ {\begin{array}{c} (1 \times 9) + (2 \times 8) + (3 \times 7) & ... \\ (4 \times 9) + (5 \times 8) + (6 \times 7) & ...\end{array} } \right]$
$= \left[ {\begin{array}{c} 46 & 40 & 34 & 28 \\ 118 & 103 & 88 & 73 \end{array} } \right]$
print('Matrix A:'); print(A); print('Shape:', A.shape)
print('Matrix B:'); print(B); print('Shape:', B.shape)
np.dot(A, B)
np.dot(A, B).shape
Inner Dimensions Do Not Match! `np.dot(B, A)` raises a ValueError because B is 3x4 and A is 2x3.
np.dot(B, A)
$\vec{a} = \left[ {\begin{array}{c} 1 & 2\end{array} } \right]$
$\vec{b} = \left[ {\begin{array}{c} 5 \\ 6\end{array} } \right]$
a = np.array([1,2])
b = np.array([5,
              6])
print('Vector a:\n', a)
print('Vector b:\n', b)
print('Shape of a:\n', a.shape)
print('Shape of b:\n', b.shape)
$\vec{a} = \left[ {\begin{array}{c} 1 & 2\end{array} } \right]$
$\vec{b} = \left[ {\begin{array}{c} 5 \\ 6\end{array} } \right]$
a = np.array([[1, 2]])
b = np.array([[5],
              [6]])
print('Vector a:\n', a)
print('Vector b:\n', b)
print('Shape of a:\n', a.shape)
print('Shape of b:\n', b.shape)
$\left[ {\begin{array}{c} 1 & 2\end{array} } \right] . \left[ {\begin{array}{c} 5 \\ 6\end{array} } \right]$
np.dot(a, b)
$\left[ {\begin{array}{c} 5 \\ 6\end{array} } \right] . \left[ {\begin{array}{c} 1 & 2\end{array} } \right]$
np.dot(b, a)
$3 \times \left[ {\begin{array}{c} 2 & 7 \\ 1 & 5\end{array} } \right]$
$\left[ {\begin{array}{c} 4 & 3\end{array} } \right] . \left[ {\begin{array}{c} 2 \\ 5\end{array} } \right]$
$\left[ {\begin{array}{c} 2 & 6 & -1 \\ -3 & 4 & 3 \end{array} } \right] . \left[ {\begin{array}{c} 2 & 4 \\ 5 & -2 \\ 3 & -1 \end{array} } \right]$
$\left[ {\begin{array}{c} 2 & 6 & -1 & 5 & 7\\ -3 & 4 & 3 & 2 & -7 \\ -1 & 2 & -2 & -5 & 4 \\ 3 & 2 & 8 & -4 & 3\end{array} } \right] . \left[ {\begin{array}{c} 2 & 4 & 5\\ 5 & -2 & 1 \\ 3 & -1 & -7 \\ -2 & -4 & -4 \\ 4 & 6 & 1 \end{array} } \right]$
a = np.array([[ 2, 6, -1,  5,  7],
              [-3, 4,  3,  2, -7],
              [-1, 2, -2, -5,  4],
              [ 3, 2,  8, -4,  3]])
b = np.array([[ 2,  4,  5],
              [ 5, -2,  1],
              [ 3, -1, -7],
              [-2, -4, -4],
              [ 4,  6,  1]])
np.dot(a, b)
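For reference, the first three exercise expressions above can be checked the same way (a quick sketch):
# Scalar times a Matrix
print(3 * np.array([[2, 7],
                    [1, 5]])) # [[ 6 21] [ 3 15]]
# Row Vector dot Column Vector
print(np.array([4, 3]) @ np.array([2, 5])) # 23
# 2x3 Matrix dot 3x2 Matrix
print(np.array([[ 2, 6, -1],
                [-3, 4,  3]]) @ np.array([[2,  4],
                                          [5, -2],
                                          [3, -1]])) # [[ 31  -3] [ 23 -23]]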
# Create two Arrays with 1,000,000 Elements each
a = np.random.randn(1000000)
b = np.random.randn(1000000)
print(a.shape, b.shape)
%%time
# Explicit For Loops
c = 0
for i in range(len(a)):
    c += a[i] * b[i]
print(c)
%%time
# Element-Wise Multiplication, then Summing them together
c = sum(a * b)
print(c)
%%time
# Matrix Multiplication
c = a @ b
print(c)
%%time
# Dot Product
c = np.dot(a, b)
print(c)
*(NumPy vs. PyTorch timing comparison - images not shown)*
The PyTorch GPU timing was run on a desktop computer with an NVIDIA GPU. We won't be able to do that with laptop integrated graphics unless you're a hardcore gamer with a properly set up dedicated GPU. Alternatively, we could use cloud computing services.
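For reference, here is a minimal sketch of the same dot product in PyTorch (assuming the `torch` package is installed; it falls back to the CPU when no CUDA device is available):
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu' # Use the GPU if one is available
a_t = torch.randn(1_000_000, device=device)
b_t = torch.randn(1_000_000, device=device)
c = torch.dot(a_t, b_t) # Same operation as np.dot(a, b), but on the chosen device
print(c.item())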
$\left[ {\begin{array}{c} 2 & 1 \\ 3 & 2\end{array} } \right] . \left[ {\begin{array}{c} A \\ B\end{array} } \right] = \left[ {\begin{array}{c} 13 \\ 22\end{array} } \right]$
$\left[ {\begin{array}{c} 2A + 1B \\ 3A + 2B\end{array} } \right] = \left[ {\begin{array}{c} 13 \\ 22\end{array} } \right]$
Solving for {A, B} is not the point here; the point is representing the data through vectorization.
units = np.array([[2,1], [3,2]])
cost = np.array([13,22])
np.linalg.inv(units) @ cost
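As a side note, `np.linalg.solve` returns the same solution without explicitly forming the inverse, which is generally the numerically preferred approach:
np.linalg.solve(units, cost) # Same result as inv(units) @ cost, i.e. A = 4, B = 5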
$\left[ {\begin{array}{c} 2 & 1 \\ 3 & 2\end{array} } \right] . \left[ {\begin{array}{c} A \\ B\end{array} } \right] + O = \left[ {\begin{array}{c} 12 \\ 19\end{array} } \right]$
$\left[ {\begin{array}{c} 2A + 1B \\ 3A + 2B\end{array} } \right] + O = \left[ {\begin{array}{c} 12 \\ 19\end{array} } \right]$
$\left[ {\begin{array}{c} 2A + 1B + O\\ 3A + 2B + O\end{array} } \right] = \left[ {\begin{array}{c} 12 \\ 19\end{array} } \right]$
Again, solving for {A, B, O} is not the point here; the point is representing the data through vectorization.
$\left[ {\begin{array}{c} 2 & 1\\ 3 & 2\end{array} } \right] . \left[ {\begin{array}{c} A \\ B\end{array} } \right] = \left[ {\begin{array}{c} 12-O \\ 19-O\end{array} } \right]$
or
$\left[ {\begin{array}{c} 2 & 1 & 1\\ 3 & 2 & 1\end{array} } \right] . \left[ {\begin{array}{c} A \\ B\\ O \end{array} } \right] = \left[ {\begin{array}{c} 12 \\ 19\end{array} } \right]$
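A quick sketch of the augmented representation, plugging in illustrative values A = 3, B = 4, O = 2 (the same guess used in the prediction below):
X_aug = np.array([[2, 1, 1],
                  [3, 2, 1]]) # Units Matrix with an extra Column of 1s for the Intercept
params = np.array([3, 4, 2]) # [A, B, O]
print(X_aug @ params) # [12 19] - same as the 2x2 Matrix times [A, B], plus O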
$\left[ {\begin{array}{c} 2 & 1 \\ 3 & 2\end{array} } \right] . \left[ {\begin{array}{c} A \\ B\end{array} } \right] + 2 = \left[ {\begin{array}{c} 13 \\ 18\end{array} } \right]$
$\left[ {\begin{array}{c} 2A + 1B \\ 3A + 2B\end{array} } \right] + 2 = \left[ {\begin{array}{c} 13 \\ 18\end{array} } \right]$
$\left[ {\begin{array}{c} 2A + 1B + 2\\ 3A + 2B + 2\end{array} } \right] = \left[ {\begin{array}{c} 13 \\ 18\end{array} } \right]$
$Predicted = \left[ {\begin{array}{c} 2 & 1 \\ 3 & 2\end{array} } \right] . \left[ {\begin{array}{c} 3 \\ 4\end{array} } \right] + 2$
$= \left[ {\begin{array}{c} 2\times3 + 1\times4 \\ 3\times3 + 2\times4\end{array} } \right] + 2$
$= \left[ {\begin{array}{c} 6 + 4\\ 9 + 8\end{array} } \right] + 2$
$= \left[ {\begin{array}{c} 10\\ 17\end{array} } \right] + 2$
$= \left[ {\begin{array}{c} 12 \\ 19\end{array} } \right]$
$Error = Prediction - Actual$
$Error = \left[ {\begin{array}{c} 12 \\ 19\end{array} } \right] - \left[ {\begin{array}{c} 13 \\ 18\end{array} } \right]$
$= \left[ {\begin{array}{c} -1 \\ 1\end{array} } \right]$
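The same prediction and error can be computed directly with NumPy (a small sketch; the names `X`, `w`, and `bias` are just illustrative):
X = np.array([[2, 1],
              [3, 2]]) # Units of each Item
w = np.array([3, 4]) # Guessed Prices {A, B}
bias = 2 # Guessed Fixed Cost O
actual = np.array([13, 18]) # Actual Totals Paid
predicted = X @ w + bias # [12 19]
error = predicted - actual # [-1  1]
print('Predicted:', predicted)
print('Error:', error)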
prices = pd.read_csv('./sources/prices.csv', index_col=0)
prices.head()
returns = prices.pct_change().dropna()
returns.head()
returns.describe()
corr_matrix = returns.corr()
print(corr_matrix)
eigvals, eigvecs = np.linalg.eig(corr_matrix) # Eigen-decomposition of the Correlation Matrix
idx = np.argsort(eigvals)[::-1] # Sort Eigenvalues (and their Eigenvectors) in Descending Order
eigvals = eigvals[idx]
eigvecs = eigvecs[:, idx]
var_contri = eigvals / eigvals.sum()
plt.title('Scree Plot', fontsize=16)
plt.plot(var_contri, c='blue')
plt.show();
fig = plt.figure(figsize=(10,4))
plt.title('Principal Components', fontsize=16)
colors = ['blue', 'purple', 'orange']
num = 3 # Number of Principal Components to Plot
for i in range(num):
    plt.plot(eigvecs[:,i], c=colors[i])
    plt.scatter(returns.columns, eigvecs[:,i], c=colors[i], label='PC{} {:.2f}%'.format(i+1, var_contri[i]*100))
plt.legend()
plt.show();
L = np.linalg.cholesky(returns.cov()) # Cholesky Decomposition of the Covariance Matrix
portfolios = np.array([eigvecs[:,i]/eigvecs[:,i].sum() for i in range(len(eigvals))]) # Normalize each Eigenvector so its Weights sum to 1
n_sims = 10000
z = np.random.randn(n_sims, 252, len(L)) # Independent Standard Normal Draws: Simulations x Trading Days x Assets
x = z @ L.T + returns.mean().values # Correlated Daily Returns with the Historical Means and Covariance
sim_rets = ((x+1).prod(axis=1)-1) @ portfolios.T # Compound the Daily Returns over the Year, then apply the Portfolio Weights
for i in range(len(portfolios)):
    print(f'Portfolio {i+1}')
    print('Mean Returns: {:.2f}% | Volatility: {:.2f}%'.format(sim_rets[:,i].mean()*100, sim_rets[:,i].std()*100))
plt.title('Returns Distribution') # Returns Distribution of Portfolio 1
plt.hist(sim_rets[:,0], bins=20)
plt.show();
VAR = np.quantile(sim_rets[:,0], 0.05) # 5th Percentile of the Simulated Returns of Portfolio 1
print('{:.2f}%'.format(VAR*100)) # Value at Risk % - 5% Chance to Lose More than this
CVAR = sim_rets[:,0][sim_rets[:,0] < VAR].mean() # Average of the Returns below the VaR
print('{:.2f}%'.format(CVAR*100)) # Conditional Value at Risk % - Expected Loss in the Worst 5% of Cases