Linear Algebra#

Note

To start building sophisticated models, we will also need a few tools from linear algebra. This section offers a gentle introduction to the most essential concepts, starting from scalar arithmetic and ramping up to matrix multiplication.

Scalars#

import torch
x = torch.tensor(3.0)
y = torch.tensor(2.0)

x + y, x * y, x / y, x**y
(tensor(5.), tensor(6.), tensor(1.5000), tensor(9.))
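
A scalar lives in a tensor with zero axes. As a quick sanity check (a minimal sketch reusing the x defined above), .item() extracts the underlying Python number:

# a scalar tensor has no axes; .item() recovers the Python float
x.ndim, x.item()
(0, 3.0)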

Vectors#

x = torch.arange(4.0)
x
tensor([0., 1., 2., 3.])
len(x)
4
# dot product
y = torch.ones(4, dtype=torch.float32)
torch.dot(x, y)
tensor(6.)
# Hadamard product
x * y
tensor([0., 1., 2., 3.])
torch.sum(x * y)
tensor(6.)
# l2 norm
u = torch.tensor([3.0, -4.0])
torch.norm(u)
tensor(5.)
# l1 norm
torch.abs(u).sum()
tensor(7.)
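
The norm values above can be checked directly against their definitions; a minimal sketch reusing u:

# l2 norm by hand: square root of the sum of squared entries
torch.sqrt(torch.sum(u**2))
tensor(5.)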

Matrices#

A = torch.arange(20).reshape(5, 4)
A
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18, 19]])
# transpose
A.T
tensor([[ 0,  4,  8, 12, 16],
        [ 1,  5,  9, 13, 17],
        [ 2,  6, 10, 14, 18],
        [ 3,  7, 11, 15, 19]])
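
A matrix is symmetric when it equals its own transpose. A small illustrative check (the matrix S below is made up for this example):

S = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
S == S.T
tensor([[True, True, True],
        [True, True, True],
        [True, True, True]])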
A = torch.arange(20, dtype=torch.float32).reshape(5, 4)
# Assign a copy of A to B by allocating new memory
B = A.clone()
A + B
tensor([[ 0.,  2.,  4.,  6.],
        [ 8., 10., 12., 14.],
        [16., 18., 20., 22.],
        [24., 26., 28., 30.],
        [32., 34., 36., 38.]])
# Frobenius norm
torch.norm(torch.ones((4, 9)))
tensor(6.)
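
The Frobenius norm is simply the l2 norm applied to the flattened matrix, so the value above can be reproduced by hand:

# square root of the sum of squared entries: sqrt(4 * 9) = 6
torch.sqrt((torch.ones((4, 9)) ** 2).sum())
tensor(6.)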

Matrix multiplication#

# elementwise (Hadamard) product, not matrix multiplication
A * B
tensor([[  0.,   1.,   4.,   9.],
        [ 16.,  25.,  36.,  49.],
        [ 64.,  81., 100., 121.],
        [144., 169., 196., 225.],
        [256., 289., 324., 361.]])
# matrix-vector
A.shape, x.shape, torch.mv(A, x)
(torch.Size([5, 4]), torch.Size([4]), tensor([ 14.,  38.,  62.,  86., 110.]))
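
Each entry of the matrix-vector product is the dot product between one row of A and x; a quick check on the first entry:

# first entry of mv(A, x): dot product of A's first row with x
torch.dot(A[0], x)
tensor(14.)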
# matrix-matrix
B = torch.ones(4, 3)
torch.mm(A, B)
tensor([[ 6.,  6.,  6.],
        [22., 22., 22.],
        [38., 38., 38.],
        [54., 54., 54.],
        [70., 70., 70.]])
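
The @ operator (torch.matmul) produces the same result; a minimal equivalence check:

# @ dispatches to torch.matmul and agrees with torch.mm for 2-D inputs
torch.equal(A @ B, torch.mm(A, B))
True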

Tensors#

X = torch.arange(24).reshape(2, 3, 4)
X
tensor([[[ 0,  1,  2,  3],
         [ 4,  5,  6,  7],
         [ 8,  9, 10, 11]],

        [[12, 13, 14, 15],
         [16, 17, 18, 19],
         [20, 21, 22, 23]]])
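
The shape lists the length along each axis, and the total number of elements is their product; a quick check on X:

# three axes of lengths 2, 3, 4; 2 * 3 * 4 = 24 elements in total
X.shape, X.numel()
(torch.Size([2, 3, 4]), 24)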

Reduction#

A, A.shape
(tensor([[ 0.,  1.,  2.,  3.],
         [ 4.,  5.,  6.,  7.],
         [ 8.,  9., 10., 11.],
         [12., 13., 14., 15.],
         [16., 17., 18., 19.]]),
 torch.Size([5, 4]))
# sum over axis 0 (the rows), producing one entry per column
A_sum_axis0 = A.sum(axis=0)
A_sum_axis0, A_sum_axis0.shape
(tensor([40., 45., 50., 55.]), torch.Size([4]))
# sum over axis 1 (the columns), producing one entry per row
A_sum_axis1 = A.sum(axis=1)
A_sum_axis1, A_sum_axis1.shape
(tensor([ 6., 22., 38., 54., 70.]), torch.Size([5]))
# sum over both axes, same as A.sum()
A.sum(axis=[0, 1])
tensor(190.)
A.mean(), A.sum() / A.numel()
(tensor(9.5000), tensor(9.5000))
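
The mean can also be taken along a single axis, which is equivalent to dividing the axis-wise sum by the length of that axis:

# mean over the rows equals the column sums divided by the number of rows
A.mean(axis=0), A.sum(axis=0) / A.shape[0]
(tensor([ 8.,  9., 10., 11.]), tensor([ 8.,  9., 10., 11.]))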
# when invoking cumsum, the axis must be specified; no reduction takes place
A.cumsum(axis=0)
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  6.,  8., 10.],
        [12., 15., 18., 21.],
        [24., 28., 32., 36.],
        [40., 45., 50., 55.]])
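
Because cumsum does not reduce, its last row recovers the ordinary sum along that axis:

# the final row of the cumulative sum equals A.sum(axis=0)
torch.equal(A.cumsum(axis=0)[-1], A.sum(axis=0))
True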
# non-reduction sum: keepdims=True keeps the summed axis with length 1
sum_A = A.sum(axis=1, keepdims=True)
sum_A, sum_A.shape
(tensor([[ 6.],
         [22.],
         [38.],
         [54.],
         [70.]]),
 torch.Size([5, 1]))
# shapes (5, 4) and (5, 1) are broadcastable: divide each row by its sum
A / sum_A
tensor([[0.0000, 0.1667, 0.3333, 0.5000],
        [0.1818, 0.2273, 0.2727, 0.3182],
        [0.2105, 0.2368, 0.2632, 0.2895],
        [0.2222, 0.2407, 0.2593, 0.2778],
        [0.2286, 0.2429, 0.2571, 0.2714]])
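
Since every row was divided by its own sum, each row of the result now sums to one (up to floating-point error); a quick check:

# rows of A / sum_A behave like probability vectors
torch.allclose((A / sum_A).sum(axis=1), torch.ones(5))
True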