🔥 PyTorch Tensors
PyTorch tensors are similar to NumPy arrays but can run on GPUs for accelerated computing.
Mastering this concept will significantly boost your Python data science skills!
💻 Code Example:
"""PyTorch basics demo: tensors, autograd, and a linear-regression training loop."""
import torch
import torch.nn as nn
import torch.optim as optim

print("PyTorch version:", torch.__version__)

# 1. Tensor basics
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print("Shape:", x.shape, "| Dtype:", x.dtype)
# Matrix multiply with the transpose (x @ x.T), not a matrix power —
# the original label "Matrix power" was misleading.
print("x @ x.T:\n", x @ x.T)

# 2. Autograd — automatic gradient computation
a = torch.tensor(3.0, requires_grad=True)
b = torch.tensor(4.0, requires_grad=True)
loss = a**2 + 2*b + a*b  # f(a, b) = a² + 2b + ab
loss.backward()
# Analytic check: df/da = 2a + b = 10, df/db = 2 + a = 5
print(f"\nGradient df/da={a.grad:.1f}, df/db={b.grad:.1f}")

# 3. Build a simple linear regression model on synthetic data y = 3.5x + 2 + noise
torch.manual_seed(42)  # reproducible data and initial parameters
X = torch.rand(100, 1) * 10
y = 3.5 * X + 2.0 + torch.randn(100, 1)

model = nn.Linear(1, 1)
optim_ = optim.SGD(model.parameters(), lr=0.01)
loss_fn = nn.MSELoss()

# 4. Training loop (full-batch gradient descent)
for epoch in range(200):
    y_pred = model(X)
    loss = loss_fn(y_pred, y)
    optim_.zero_grad()  # clear gradients accumulated by the previous step
    loss.backward()
    optim_.step()

w = model.weight.item()
b_val = model.bias.item()
print(f"\nLearned → weight: {w:.4f} | bias: {b_val:.4f}")
print("Expected → weight: 3.5000 | bias: 2.0000")

# 5. Inference — torch.no_grad() skips autograd graph construction
new_x = torch.tensor([[7.5]])
with torch.no_grad():
    pred = model(new_x).item()
print(f"\nPrediction for x=7.5: {pred:.2f}")
Keep exploring and happy coding! 💻