FROM ubuntu:23.10
RUN apt-get update && apt-get install -y unzip python3 python3-pip
RUN pip install pandas==2.1.0 jupyterlab==4.0.3 matplotlib --break-system-packages
RUN pip3 install torch==2.0.1 --index-url https://download.pytorch.org/whl/cpu --break-system-packages
RUN pip3 install tensorboard==2.14.0 --break-system-packages
CMD ["python3", "-m", "jupyterlab", "--no-browser", "--ip=0.0.0.0", "--port=5440", "--allow-root", "--NotebookApp.token=''"]
docker build -t <tag_name> .  # build an image from the Dockerfile
docker run -d -p 127.0.0.1:5440:5440 -v ./nb:/nb <tag_name>  # run the container, publishing port 5440
ssh [email protected] -L localhost:5440:localhost:5440  # forward VM port 5440 to the local machine
# Connect to Jupyter Lab on the VM from the local computer's browser
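A quick way to confirm the tunnel works, as a minimal sketch using only the standard library (the URL assumes the port mapping above):
from urllib.request import urlopen
with urlopen("http://localhost:5440", timeout=5) as resp:  # local end of the SSH tunnel
    print(resp.status)  # 200 means JupyterLab is reachable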
<var>.dtype  # check data type
x = torch.tensor(3.14, dtype=torch.float16)  # variable assignment
x.element_size()  # check variable size in bytes
Example of Overflow/Underflow (Integer)
import torch  # import PyTorch
x = torch.tensor(2**31 - 1, dtype=torch.int32)
y = x + 1  # overflow: the number gets too big and wraps around to negative
y - 1  # underflow: the number gets too small and wraps around to positive
Example of Precision (Float)
Integers will overflow/underflow, but floats will saturate (lose precision)
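A minimal sketch contrasting the two behaviors in PyTorch (the literals 2**31 - 1 and 3.0e38 are chosen only to force the overflow):
import torch
i = torch.tensor(2**31 - 1, dtype=torch.int32)
print(i + 1)  # integer dtype wraps around to -2147483648
f = torch.tensor(3.0e38, dtype=torch.float32)
print(f * 2)  # float dtype saturates to inf instead of wrapping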
from sys import float_info # Python floats are 64-bit (double precision)
dir(float_info) # check commands
float_info.min, float_info.max # smallest positive (normalized) float and largest finite float
# Precision: Saturation
big = float_info.max
big == big + 1 # return True
# Precision: Infinity
small = -big
big * 2 # return inf
big * 2 / 2 # return inf
small * 3 # return -inf
big * 2 + small * 2 # return nan
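To detect these outcomes in code, a minimal sketch using the standard-library math module with the values above:
import math
from sys import float_info
big = float_info.max
math.isinf(big * 2)  # True: the product saturated to inf
math.isnan(big * 2 + -big * 2)  # True: inf + (-inf) has no defined value, so NaN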
# Precision: Digits of Precision
float_info.dig # how many decimal digits can be represented faithfully (15 for 64-bit floats)
x = 1.2345678123456782
y = 1.2345678123456783
x == y # return True -> not precise enough!
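For contrast, a minimal sketch (with made-up literals): a difference within the first 15 significant digits is always preserved, so the same comparison returns False:
a = 1.23456781234567  # 15 significant digits
b = 1.23456781234568  # differs only in the 15th digit
a == b  # return False: a 15-digit difference is still representable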
# Precision: Next Valid Float
1.0 + float_info.epsilon # the next valid float after 1
x = 1.0
y = x + float_info.epsilon
x == y  # return False
y = x + float_info.epsilon / 2
x == y  # return True
x = 0.1
y = x + float_info.epsilon / 2
x == y  # return False
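Why the last case differs, as a minimal sketch with math.nextafter (available since Python 3.9): the gap between adjacent floats depends on magnitude, and near 0.1 it is much smaller than epsilon, so adding epsilon / 2 actually moves the value:
import math
from sys import float_info
math.nextafter(1.0, 2.0) - 1.0  # equals float_info.epsilon: the gap between floats at 1.0
math.nextafter(0.1, 1.0) - 0.1  # much smaller than epsilon: the gap between floats at 0.1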
Matrices
# Matrix Creation
torch.manual_seed(42)
A = torch.rand(10, 3) # create a random 10 x 3 matrix
# Matrix Information
A.dtype # torch.float32
A.shape # torch.Size([10, 3])
A.device # device(type='cpu') "cpu" means regular system memory (RAM)
A.nelement() * A.element_size() # "about" how many bytes used
import sys; sys.getsizeof(A) # size of the Python wrapper object only, not the tensor's data
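A minimal sketch of how the byte count scales with dtype (2 bytes per element for float16, 4 for float32, 8 for float64), reusing the same 10 x 3 shape:
import torch
for dt in (torch.float16, torch.float32, torch.float64):
    t = torch.rand(10, 3).to(dt)
    print(dt, t.element_size(), t.nelement() * t.element_size())  # 60, 120, 240 bytes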
# Matrix Setting
A.to(torch.float64) # convert data type (returns a new tensor; A itself is unchanged)
if torch.cuda.is_available(): # shift to use GPU
    A = A.to("cuda")
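A common variant of the same idea, as a minimal sketch: choose the device once and reuse it, so the code runs unchanged with or without a GPU:
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
A = torch.rand(10, 3).to(device)
A.device  # device(type='cuda', index=0) if a GPU is available, otherwise cpu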
# Matrix Operations
A.T # transpose: flip along the diagonal
A.reshape(2, 15) # reshape
A.reshape(30) # reshape
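The constraint behind reshape, as a minimal sketch: the new shape must account for the same total number of elements, and -1 asks PyTorch to infer one dimension:
import torch
A = torch.rand(10, 3)   # 30 elements total
A.reshape(2, 15).shape  # ok: 2 * 15 == 30
A.reshape(-1).shape     # -1 infers the remaining dimension (torch.Size([30]))
# A.reshape(4, 8) would raise a RuntimeError because 4 * 8 != 30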
# Matrix Calculation
X = torch.rand(5,3)
X + 100 # element-wise addition
torch.sigmoid(X) # element-wise sigmoid function
# multiplication:
# rule 1: 2nd dim of 1st matrix must equal 1st dim of 2nd matrix (for every adjacent pair in the chain)
# rule 2: final rows = rows of first matrix; final cols = cols of last matrix
# matrix multiply
x = torch.rand(5,3)
y = torch.rand(3,7)
z = torch.rand(7,2)
x @ y @ z # result shape: torch.Size([5, 2]) (5 rows from x, 2 cols from z)
PyTorch Optimization