PythonPlaza - Python & AI

PyTorch

One of the most popular libraries for creating deep learning and machine learning models is PyTorch. It was created by Meta AI, and because of its adaptability, speed, and user-friendly design, it has gained particular popularity in research and production.

Fundamentally, PyTorch is a library for numerical calculation utilizing tensors, which are multi-dimensional arrays that resemble those in NumPy but can also operate on GPUs for quicker computing. Anything from a single integer (a scalar) to a vector, matrix, or higher-dimensional data like pictures or videos can be represented by a tensor. Because of this, PyTorch is especially well-suited to managing the kind of data needed in deep learning applications.

PyTorch's dynamic computing graph, sometimes referred to as "define-by-run," is one of its distinguishing characteristics. The computational graph had to be defined in its entirety before it could be executed in earlier frameworks such as TensorFlow (particularly version 1.x). In contrast, PyTorch creates the graph as operations are carried out. Because you can naturally use regular Python control flow (loops, conditionals, etc.), debugging and model experimentation become much simpler. One of the reasons PyTorch is preferred in academic research and rapid prototyping is its flexibility.



import numpy
import torch

Code Example 1:
# Create tensors
# 1-D tensor (vector) from a Python list; dtype is inferred as int64.
x = torch.tensor([1, 2, 3])


print(x)  # tensor([1, 2, 3])

#Output:


Code Example 2:
# 2-D tensor (2x2 matrix) from a nested list.
y = torch.tensor([[1, 2], [3, 4]])

print(y)  # tensor([[1, 2],
          #         [3, 4]])

#Output:


Code Example 3:
# 2x3 tensor filled with zeros (default dtype float32).
x = torch.zeros(2, 3)     # all zeros

print(x)

#Output:


Code Example 4:
# 2x3 tensor filled with ones (default dtype float32).
x = torch.ones(2, 3)      # all ones

print(x)

#Output:


Code Example 5:
# 2x3 tensor of uniform random values drawn from [0, 1).
x = torch.rand(2, 3)      # random values (0-1)

print(x)

#Output:


Code Example 6:
# BUG FIX: the result of torch.arange was never assigned, so print(x)
# showed a stale value from the previous example.
x = torch.arange(0, 10)    # range: tensor([0, 1, ..., 9])

print(x)

#Output:


Code Example 7:
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5, 6])

# Arithmetic (element-wise)
print(a + b)  # tensor([5, 7, 9])

#Output:

print(a * b)  # element-wise product: tensor([ 4, 10, 18])

# Matrix multiplication (true 2x2 matrix product, not element-wise)
m1 = torch.tensor([[1, 2], [3, 4]])
m2 = torch.tensor([[5, 6], [7, 8]])

print(torch.matmul(m1, m2))  # tensor([[19, 22],
                             #         [43, 50]])

#Output:


Code Example 8:
Indexing & Slicing
x = torch.tensor([[1, 2, 3], [4, 5, 6]])

print(x[0])      # first row -> tensor([1, 2, 3])

#Output:


print(x[:, 1])   # second column -> tensor([2, 5])

#Output:



# Fancy indexing: passing a list of column indices selects those columns
# for every row, returning a new tensor (a copy, not a view).
x = torch.tensor([[1, 2, 3], [4, 5, 6]])

print(x[:, [1,2]])   # All rows, 2nd and 3rd columns

#Output:

tensor([[2, 3],
        [5, 6]])


print(x[:, [0,2]])   # All rows, 1st and 3rd column

#Output:

tensor([[1, 3],
        [4, 6]])

print(x[:, [0,1,2]])   # All rows, 1st 2nd and 3rd column

#Output:

tensor([[1, 2, 3],
        [4, 5, 6]])


Let's solve some Neural Network use cases with PyTorch.

UseCase 1: Using PyTorch and a Feedforward Neural Network (FNN), determine credit scoring for loan approval or rejection. The independent variables are income, credit score, debt ratio, and employment history. Output: approve/reject loan

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np

# ---------------------------------------------------
# STEP 1: Create Sample Dataset
# ---------------------------------------------------

# Toy loan dataset; loan_status is the label (1 = approved, 0 = rejected).
data = {
    'income': [50000, 30000, 80000, 45000, 100000, 25000, 70000, 40000],
    'credit_score': [700, 550, 750, 620, 800, 500, 720, 580],
    'debt_ratio': [0.20, 0.50, 0.15, 0.35, 0.10, 0.60, 0.25, 0.45],
    'employment_history': [5, 1, 8, 3, 10, 0, 6, 2],
    'loan_status': [1, 0, 1, 0, 1, 0, 1, 0],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Define Features and Labels
# ---------------------------------------------------

X = df[['income', 'credit_score', 'debt_ratio', 'employment_history']].values
y = df['loan_status'].values

# ---------------------------------------------------
# STEP 3: Normalize Features
# ---------------------------------------------------

# Standardize to zero mean / unit variance so all features share a scale.
scaler = StandardScaler()
X = scaler.fit_transform(X)

# ---------------------------------------------------
# STEP 4: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Convert to tensors; labels reshaped to (N, 1) to match the model output.
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 5: Build Feedforward Neural Network
# ---------------------------------------------------

class LoanApprovalNN(nn.Module):
    """Feedforward binary classifier: 4 loan features -> approval probability.

    Architecture: 4 -> 8 -> 4 -> 1 with ReLU hidden activations and a
    final sigmoid so the output lies in (0, 1) for use with BCELoss.
    """

    def __init__(self):
        super(LoanApprovalNN, self).__init__()

        self.fc1 = nn.Linear(4, 8)
        self.relu1 = nn.ReLU()

        self.fc2 = nn.Linear(8, 4)
        self.relu2 = nn.ReLU()

        self.fc3 = nn.Linear(4, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: (batch, 4) float tensor; returns (batch, 1) probabilities.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.sigmoid(self.fc3(x))

model = LoanApprovalNN()

# ---------------------------------------------------
# STEP 6: Loss Function and Optimizer
# ---------------------------------------------------

criterion = nn.BCELoss()  # expects probabilities in (0, 1)
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 7: Train the Model
# ---------------------------------------------------

epochs = 200

for epoch in range(epochs):
    # Forward pass
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # Backward pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 20 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

# ---------------------------------------------------
# STEP 8: Evaluate the Model
# ---------------------------------------------------

with torch.no_grad():
    predictions = model(X_test)
    predicted_classes = (predictions >= 0.5).float()
    accuracy = (predicted_classes == y_test).sum().item() / y_test.size(0)

print("\nModel Accuracy:", accuracy)

# ---------------------------------------------------
# STEP 9: Predict New Loan Application
# ---------------------------------------------------

new_applicant = np.array([[60000, 680, 0.30, 4]])

# Normalize using the SAME scaler fitted on the training data.
new_applicant = scaler.transform(new_applicant)
new_applicant_tensor = torch.FloatTensor(new_applicant)

with torch.no_grad():
    prediction = model(new_applicant_tensor)

if prediction.item() >= 0.5:
    print("\nLoan Status: APPROVED")
else:
    print("\nLoan Status: REJECTED")

print("Approval Probability:", prediction.item())


UseCase 2: Using PyTorch and a Feedforward Neural Network (FNN), predict the insurance premium. The independent variables are age, BMI, smoking status, exercise level, and medical conditions. Output: insurance premium

import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# ---------------------------------------------------
# STEP 1: Create Sample Dataset
# ---------------------------------------------------

# Toy insurance dataset; insurance_premium is a continuous target.
data = {
    'age': [25, 45, 30, 50, 35, 28, 60, 40],
    'bmi': [22.5, 30.2, 27.8, 35.1, 26.4, 24.3, 38.5, 29.0],
    'smoking_status': [0, 1, 0, 1, 0, 0, 1, 1],  # 0 = No, 1 = Yes
    'exercise_level': [4, 1, 3, 0, 5, 4, 0, 2],  # Hours/week
    'medical_conditions': [0, 2, 1, 3, 0, 0, 4, 2],
    'insurance_premium': [2000, 8500, 3200, 12000, 2800, 2200, 15000, 7800],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Define Features and Target
# ---------------------------------------------------

X = df[['age', 'bmi', 'smoking_status',
        'exercise_level', 'medical_conditions']].values

y = df['insurance_premium'].values

# ---------------------------------------------------
# STEP 3: Normalize Features
# ---------------------------------------------------

scaler = StandardScaler()
X = scaler.fit_transform(X)

# ---------------------------------------------------
# STEP 4: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Convert to tensors; targets reshaped to (N, 1) to match the model output.
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 5: Build Feedforward Neural Network
# ---------------------------------------------------

class InsurancePremiumNN(nn.Module):
    """Feedforward regressor: 5 customer features -> premium amount.

    Architecture: 5 -> 16 -> 8 -> 1 with ReLU hidden activations.
    No output activation: the premium is an unbounded continuous value,
    to be trained with MSELoss.
    """

    def __init__(self):
        super(InsurancePremiumNN, self).__init__()

        self.fc1 = nn.Linear(5, 16)
        self.relu1 = nn.ReLU()

        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        self.fc3 = nn.Linear(8, 1)

    def forward(self, x):
        # x: (batch, 5) float tensor; returns (batch, 1) predictions.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.fc3(x)

model = InsurancePremiumNN()

# ---------------------------------------------------
# STEP 6: Define Loss Function and Optimizer
# ---------------------------------------------------

criterion = nn.MSELoss()  # mean squared error for regression
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 7: Train the Model
# ---------------------------------------------------

epochs = 500

for epoch in range(epochs):
    # Forward pass
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # Backpropagation
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.2f}')

# ---------------------------------------------------
# STEP 8: Evaluate the Model
# ---------------------------------------------------

with torch.no_grad():
    predictions = model(X_test)
    mse = criterion(predictions, y_test)
    rmse = torch.sqrt(mse)  # RMSE is in the same units as the premium

print("\nTest RMSE:", rmse.item())

# ---------------------------------------------------
# STEP 9: Predict Insurance Premium
# ---------------------------------------------------

# Example:
# age = 40
# bmi = 28
# smoking = yes(1)
# exercise = 2 hours/week
# medical_conditions = 1

new_customer = np.array([[40, 28, 1, 2, 1]])

# Normalize using the SAME scaler fitted on the training data.
new_customer = scaler.transform(new_customer)
new_customer_tensor = torch.FloatTensor(new_customer)

with torch.no_grad():
    predicted_premium = model(new_customer_tensor)

print("\nPredicted Insurance Premium: $",
      round(predicted_premium.item(), 2))
UseCase 3: Using PyTorch and a Feedforward Neural Network (FNN), predict customer churn. The independent variables are monthly usage, complaints, subscription duration, and payment delays. Output: churn (yes/no)

import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# ---------------------------------------------------
# STEP 1: Create Sample Dataset
# ---------------------------------------------------

# Toy churn dataset; churn is the label (1 = churned, 0 = stayed).
data = {
    'monthly_usage': [120, 50, 200, 80, 150, 40, 220, 60],
    'complaints': [1, 5, 0, 3, 1, 6, 0, 4],
    'subscription_duration': [24, 6, 36, 12, 30, 3, 48, 8],
    'payment_delays': [0, 4, 0, 2, 1, 5, 0, 3],
    'churn': [0, 1, 0, 1, 0, 1, 0, 1],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Define Features and Labels
# ---------------------------------------------------

X = df[['monthly_usage',
        'complaints',
        'subscription_duration',
        'payment_delays']].values

y = df['churn'].values

# ---------------------------------------------------
# STEP 3: Normalize Features
# ---------------------------------------------------

scaler = StandardScaler()
X = scaler.fit_transform(X)

# ---------------------------------------------------
# STEP 4: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# ---------------------------------------------------
# STEP 5: Convert Data to PyTorch Tensors
# ---------------------------------------------------

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

# Labels reshaped to (N, 1) so they line up with the model output.
y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 6: Build Feedforward Neural Network
# ---------------------------------------------------

class CustomerChurnNN(nn.Module):
    """Feedforward binary classifier: 4 usage features -> churn probability.

    Architecture: 4 -> 16 -> 8 -> 1 with ReLU hidden activations and a
    final sigmoid so the output lies in (0, 1) for use with BCELoss.
    """

    def __init__(self):
        super(CustomerChurnNN, self).__init__()

        # First hidden layer
        self.fc1 = nn.Linear(4, 16)
        self.relu1 = nn.ReLU()

        # Second hidden layer
        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        # Output layer
        self.fc3 = nn.Linear(8, 1)

        # Sigmoid activation for binary classification
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: (batch, 4) float tensor; returns (batch, 1) probabilities.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.sigmoid(self.fc3(x))

model = CustomerChurnNN()

# ---------------------------------------------------
# STEP 7: Define Loss Function and Optimizer
# ---------------------------------------------------

criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 8: Train the Model
# ---------------------------------------------------

epochs = 100

for epoch in range(epochs):
    # Forward pass
    outputs = model(X_train)

    # Compute loss
    loss = criterion(outputs, y_train)

    # Backpropagation
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Print progress
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

# ---------------------------------------------------
# STEP 9: Evaluate Model
# ---------------------------------------------------

with torch.no_grad():
    predictions = model(X_test)
    predicted_classes = (predictions >= 0.5).float()
    accuracy = (predicted_classes == y_test).sum().item() / y_test.size(0)

print("\nTest Accuracy:", accuracy)

# ---------------------------------------------------
# STEP 10: Predict New Customer
# ---------------------------------------------------

# Example:
# monthly_usage = 70
# complaints = 4
# subscription_duration = 8
# payment_delays = 3

new_customer = np.array([[70, 4, 8, 3]])

# Normalize using the SAME scaler fitted on the training data.
new_customer = scaler.transform(new_customer)

# Convert to tensor
new_customer_tensor = torch.FloatTensor(new_customer)

with torch.no_grad():
    prediction = model(new_customer_tensor)

probability = prediction.item()

if probability >= 0.5:
    print("\nPrediction: CUSTOMER WILL CHURN")
else:
    print("\nPrediction: CUSTOMER WILL STAY")

print("Churn Probability:", probability)

UseCase 4: Build a Feedforward Neural Network (FNN) in Pytorch for spam detection using your features:
Inputs (features):
num_links (number of links), num_caps (number of capital letters), email_length, ip_address (you'll need to convert this to numeric form). Output: spam (1) or not spam (0)

 
import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

import ipaddress

# ---------------------------------------------------
# STEP 1: Create Sample Dataset
# ---------------------------------------------------

# Toy email dataset; spam is the label (1 = spam, 0 = not spam).
data = {
    'num_links': [10, 1, 7, 0, 15, 2, 12, 1],
    'num_caps': [50, 5, 40, 2, 70, 3, 60, 4],
    'email_length': [500, 120, 450, 100, 700, 150, 650, 130],
    'ip_address': [
        '192.168.1.1',
        '10.0.0.2',
        '172.16.0.5',
        '10.0.0.3',
        '203.0.113.7',
        '10.0.0.4',
        '198.51.100.2',
        '10.0.0.5',
    ],
    'spam': [1, 0, 1, 0, 1, 0, 1, 0],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Convert IP Address to Numeric
# ---------------------------------------------------

def ip_to_int(ip):
    """Convert a dotted-quad IPv4 (or IPv6) address string to its integer value."""
    return int(ipaddress.ip_address(ip))

# Replace the IP strings with their integer encodings so they can be scaled.
df['ip_address'] = df['ip_address'].apply(ip_to_int)

# ---------------------------------------------------
# STEP 3: Define Features and Labels
# ---------------------------------------------------

X = df[['num_links',
        'num_caps',
        'email_length',
        'ip_address']].values

y = df['spam'].values

# ---------------------------------------------------
# STEP 4: Normalize Features
# ---------------------------------------------------

scaler = StandardScaler()
X = scaler.fit_transform(X)

# ---------------------------------------------------
# STEP 5: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# ---------------------------------------------------
# STEP 6: Convert to PyTorch Tensors
# ---------------------------------------------------

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 7: Build Feedforward Neural Network
# ---------------------------------------------------

class SpamDetectionNN(nn.Module):
    """Feedforward binary classifier: 4 email features -> spam probability.

    Architecture: 4 -> 16 -> 8 -> 1 with ReLU hidden activations and a
    final sigmoid so the output lies in (0, 1) for use with BCELoss.
    """

    def __init__(self):
        super(SpamDetectionNN, self).__init__()

        # First hidden layer
        self.fc1 = nn.Linear(4, 16)
        self.relu1 = nn.ReLU()

        # Second hidden layer
        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        # Output layer
        self.fc3 = nn.Linear(8, 1)

        # Sigmoid activation
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: (batch, 4) float tensor; returns (batch, 1) probabilities.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.sigmoid(self.fc3(x))

model = SpamDetectionNN()

# ---------------------------------------------------
# STEP 8: Define Loss Function and Optimizer
# ---------------------------------------------------

criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 9: Train the Model
# ---------------------------------------------------

epochs = 100

for epoch in range(epochs):
    # Forward pass
    outputs = model(X_train)

    # Compute loss
    loss = criterion(outputs, y_train)

    # Backpropagation
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Print training progress
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

# ---------------------------------------------------
# STEP 10: Evaluate the Model
# ---------------------------------------------------

with torch.no_grad():
    predictions = model(X_test)
    predicted_classes = (predictions >= 0.5).float()
    accuracy = (predicted_classes == y_test).sum().item() / y_test.size(0)

print("\nTest Accuracy:", accuracy)

# ---------------------------------------------------
# STEP 11: Predict New Email
# ---------------------------------------------------

# Example Email:
# num_links = 8
# num_caps = 45
# email_length = 400
# ip_address = 203.0.113.10

new_email = np.array([[
    8,
    45,
    400,
    ip_to_int('203.0.113.10'),
]])

# Normalize using the SAME scaler fitted on the training data.
new_email = scaler.transform(new_email)

# Convert to tensor
new_email_tensor = torch.FloatTensor(new_email)

with torch.no_grad():
    prediction = model(new_email_tensor)

probability = prediction.item()

if probability >= 0.5:
    print("\nPrediction: SPAM EMAIL")
else:
    print("\nPrediction: NOT SPAM")

print("Spam Probability:", probability)

UseCase 5: Build a Feedforward Neural Network (FNN) in Pytorch for house price prediction
Inputs:
school_rating area_size no_of_bedrooms age crime_rate
Output:
price (continuous)

 
import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# ---------------------------------------------------
# STEP 1: Create Sample Dataset
# ---------------------------------------------------

# Toy housing dataset; price is a continuous target.
data = {
    'school_rating': [8, 5, 9, 6, 7, 4, 10, 3],
    'area_size': [2500, 1400, 3200, 1800, 2200, 1200, 4000, 1000],
    'no_of_bedrooms': [4, 2, 5, 3, 4, 2, 6, 1],
    'age': [5, 20, 2, 15, 8, 30, 1, 40],
    'crime_rate': [2, 7, 1, 5, 3, 8, 1, 9],
    'price': [550000, 250000, 750000, 350000,
              500000, 200000, 950000, 150000],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Define Features and Target
# ---------------------------------------------------

X = df[['school_rating',
        'area_size',
        'no_of_bedrooms',
        'age',
        'crime_rate']].values

y = df['price'].values

# ---------------------------------------------------
# STEP 3: Normalize Features
# ---------------------------------------------------

scaler = StandardScaler()
X = scaler.fit_transform(X)

# ---------------------------------------------------
# STEP 4: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# ---------------------------------------------------
# STEP 5: Convert Data to PyTorch Tensors
# ---------------------------------------------------

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

# Targets reshaped to (N, 1) so they line up with the model output.
y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 6: Build Feedforward Neural Network
# ---------------------------------------------------

class HousePriceNN(nn.Module):
    """Feedforward regressor: 5 house features -> price.

    Architecture: 5 -> 16 -> 8 -> 1 with ReLU hidden activations.
    No output activation: the price is an unbounded continuous value,
    to be trained with MSELoss.
    """

    def __init__(self):
        super(HousePriceNN, self).__init__()

        # Hidden Layer 1
        self.fc1 = nn.Linear(5, 16)
        self.relu1 = nn.ReLU()

        # Hidden Layer 2
        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        # Output Layer
        self.fc3 = nn.Linear(8, 1)

    def forward(self, x):
        # x: (batch, 5) float tensor; returns (batch, 1) predictions.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.fc3(x)

model = HousePriceNN()

# ---------------------------------------------------
# STEP 7: Define Loss Function and Optimizer
# ---------------------------------------------------

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 8: Train the Model
# ---------------------------------------------------

epochs = 500

for epoch in range(epochs):
    # Forward pass
    outputs = model(X_train)

    # Compute loss
    loss = criterion(outputs, y_train)

    # Backpropagation
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Print progress
    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.2f}')

# ---------------------------------------------------
# STEP 9: Evaluate the Model
# ---------------------------------------------------

with torch.no_grad():
    predictions = model(X_test)
    mse = criterion(predictions, y_test)
    rmse = torch.sqrt(mse)  # RMSE is in the same units as the price

print("\nTest RMSE:", rmse.item())

# ---------------------------------------------------
# STEP 10: Predict New House Price
# ---------------------------------------------------

# Example House:
# school_rating = 8
# area_size = 2800
# bedrooms = 4
# age = 6
# crime_rate = 2

new_house = np.array([[8, 2800, 4, 6, 2]])

# Normalize using the SAME scaler fitted on the training data.
new_house = scaler.transform(new_house)

# Convert to tensor
new_house_tensor = torch.FloatTensor(new_house)

with torch.no_grad():
    predicted_price = model(new_house_tensor)

print("\nPredicted House Price: $",
      round(predicted_price.item(), 2))

UseCase 6: A Feedforward Neural Network (FNN) using Pytorch, predict the probability of disease.
Inputs:
age blood_pressure cholesterol glucose_level
Output:
disease_probability (0 to 1)

 
import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# ---------------------------------------------------
# STEP 1: Create Sample Dataset
# ---------------------------------------------------

# Toy patient dataset. NOTE: despite its name, 'disease_probability'
# holds binary 0/1 labels; the model outputs the probability.
data = {
    'age': [25, 45, 60, 35, 50, 70, 40, 55],
    'blood_pressure': [120, 145, 160, 130, 150, 170, 135, 155],
    'cholesterol': [180, 240, 280, 200, 260, 300, 210, 270],
    'glucose_level': [90, 140, 180, 100, 160, 200, 110, 170],
    'disease_probability': [0, 1, 1, 0, 1, 1, 0, 1],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Define Features and Labels
# ---------------------------------------------------

X = df[['age',
        'blood_pressure',
        'cholesterol',
        'glucose_level']].values

y = df['disease_probability'].values

# ---------------------------------------------------
# STEP 3: Normalize Features
# ---------------------------------------------------

scaler = StandardScaler()
X = scaler.fit_transform(X)

# ---------------------------------------------------
# STEP 4: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# ---------------------------------------------------
# STEP 5: Convert Data to PyTorch Tensors
# ---------------------------------------------------

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 6: Build Feedforward Neural Network
# ---------------------------------------------------

class DiseasePredictionNN(nn.Module):
    """Feedforward binary classifier: 4 vitals -> disease probability.

    Architecture: 4 -> 16 -> 8 -> 1 with ReLU hidden activations and a
    final sigmoid so the output lies in (0, 1) for use with BCELoss.
    """

    def __init__(self):
        super(DiseasePredictionNN, self).__init__()

        # Hidden Layer 1
        self.fc1 = nn.Linear(4, 16)
        self.relu1 = nn.ReLU()

        # Hidden Layer 2
        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        # Output Layer
        self.fc3 = nn.Linear(8, 1)

        # Sigmoid activation for probability output
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: (batch, 4) float tensor; returns (batch, 1) probabilities.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.sigmoid(self.fc3(x))

model = DiseasePredictionNN()

# ---------------------------------------------------
# STEP 7: Define Loss Function and Optimizer
# ---------------------------------------------------

criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 8: Train the Model
# ---------------------------------------------------

epochs = 100

for epoch in range(epochs):
    # Forward pass
    outputs = model(X_train)

    # Compute loss
    loss = criterion(outputs, y_train)

    # Backpropagation
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Print progress
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

# ---------------------------------------------------
# STEP 9: Evaluate the Model
# ---------------------------------------------------

with torch.no_grad():
    predictions = model(X_test)
    predicted_classes = (predictions >= 0.5).float()
    accuracy = (predicted_classes == y_test).sum().item() / y_test.size(0)

print("\nTest Accuracy:", accuracy)

# ---------------------------------------------------
# STEP 10: Predict Disease Probability
# ---------------------------------------------------

# Example Patient:
# age = 52
# blood_pressure = 148
# cholesterol = 255
# glucose_level = 165

new_patient = np.array([[52, 148, 255, 165]])

# Normalize using the SAME scaler fitted on the training data.
new_patient = scaler.transform(new_patient)

# Convert to tensor
new_patient_tensor = torch.FloatTensor(new_patient)

with torch.no_grad():
    prediction = model(new_patient_tensor)

probability = prediction.item()

print("\nDisease Probability:",
      round(probability, 4))

if probability >= 0.5:
    print("Prediction: HIGH DISEASE RISK")
else:
    print("Prediction: LOW DISEASE RISK")
 

UseCase 7: Build a Feedforward Neural Network (FNN) in Pytorch to predict the probability that a transaction is fraudulent,
Inputs:
transaction_amount (numeric) location_mismatch (0 = normal, 1 = suspicious) device_type (categorical → must be encoded) transaction_time (hour of day, e.g., 0–23)
Output:
fraud_probability (0 to 1)

 
import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer

# ---------------------------------------------------
# STEP 1: Sample Dataset
# ---------------------------------------------------

# Toy transaction dataset; fraud is the label (1 = fraudulent).
data = {
    'transaction_amount': [50, 2000, 30, 5000, 120, 7000, 80, 3000],
    'location_mismatch': [0, 1, 0, 1, 0, 1, 0, 1],
    'device_type': ['mobile', 'desktop', 'mobile', 'desktop',
                    'mobile', 'desktop', 'tablet', 'desktop'],
    'transaction_time': [10, 23, 14, 2, 9, 1, 16, 22],
    'fraud': [0, 1, 0, 1, 0, 1, 0, 1],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Encode Categorical Variable (device_type)
# ---------------------------------------------------

# One-hot encode device_type (drop first level to avoid collinearity)
# and standardize the numeric columns in a single transformer.
preprocessor = ColumnTransformer(
    transformers=[
        ('cat', OneHotEncoder(drop='first'), ['device_type']),
        ('num', StandardScaler(),
         ['transaction_amount', 'location_mismatch', 'transaction_time']),
    ]
)

X = df[['device_type',
        'transaction_amount',
        'location_mismatch',
        'transaction_time']]

y = df['fraud'].values

X = preprocessor.fit_transform(X)

# BUG FIX: ColumnTransformer returns a dense ndarray when the combined
# output density is >= sparse_threshold, in which case the original
# unconditional X.toarray() raised AttributeError. Densify only if sparse.
if hasattr(X, "toarray"):
    X = X.toarray()

# ---------------------------------------------------
# STEP 3: Train-Test Split
# ---------------------------------------------------

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# ---------------------------------------------------
# STEP 4: Convert to PyTorch Tensors
# ---------------------------------------------------

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 5: Build Feedforward Neural Network
# ---------------------------------------------------

class FraudDetectionNN(nn.Module):
    """Feedforward binary classifier: transaction features -> fraud probability.

    Architecture: input_dim -> 16 -> 8 -> 1 with ReLU hidden activations
    and a final sigmoid for use with BCELoss. input_dim is a parameter
    because one-hot encoding makes the feature count data-dependent.
    """

    def __init__(self, input_dim):
        super(FraudDetectionNN, self).__init__()

        self.fc1 = nn.Linear(input_dim, 16)
        self.relu1 = nn.ReLU()

        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        self.fc3 = nn.Linear(8, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: (batch, input_dim) float tensor; returns (batch, 1) probabilities.
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.sigmoid(self.fc3(x))

# Feature count depends on the one-hot encoding, so read it from the data.
model = FraudDetectionNN(X_train.shape[1])

# ---------------------------------------------------
# STEP 6: Loss Function & Optimizer
# ---------------------------------------------------

criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 7: Training Loop
# ---------------------------------------------------

epochs = 100

for epoch in range(epochs):
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# ---------------------------------------------------
# STEP 8: Evaluation
# ---------------------------------------------------

with torch.no_grad():
    preds = model(X_test)
    predicted_class = (preds >= 0.5).float()
    accuracy = (predicted_class == y_test).sum().item() / y_test.size(0)

print("\nTest Accuracy:", accuracy)

# ---------------------------------------------------
# STEP 9: Predict New Transaction
# ---------------------------------------------------

# Example transaction:
# amount = 4000
# location mismatch = 1
# device = "desktop"
# time = 23

new_data = pd.DataFrame([{
    'device_type': 'desktop',
    'transaction_amount': 4000,
    'location_mismatch': 1,
    'transaction_time': 23,
}])

# Transform with the fitted preprocessor; densify only if the result is
# sparse (ColumnTransformer may already return a dense ndarray).
new_data = preprocessor.transform(new_data)
if hasattr(new_data, "toarray"):
    new_data = new_data.toarray()

new_tensor = torch.FloatTensor(new_data)

with torch.no_grad():
    fraud_prob = model(new_tensor).item()

print("\nFraud Probability:", round(fraud_prob, 4))

if fraud_prob >= 0.5:
    print("Prediction: FRAUDULENT TRANSACTION")
else:
    print("Prediction: LEGITIMATE TRANSACTION")

 

UseCase 8: Build a Feedforward Neural Network (FNN) in Pytorch to predict the probability of purchase
Inputs:
clicks (number of clicks) browsing_time (e.g., seconds or minutes) ad_impressions demographics (categorical → must be encoded)
Output:
purchase_probability (0 to 1)

import torch
import torch.nn as nn
import torch.optim as optim

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer

# ---------------------------------------------------
# STEP 1: Sample Dataset
# ---------------------------------------------------

# Eight synthetic user sessions: behavioural counters, an age-band label,
# and the binary purchase outcome used as the training target.
data = {
    'clicks':         [1, 5, 2, 10, 3, 8, 1, 6],
    'browsing_time':  [30, 200, 45, 400, 60, 300, 25, 250],
    'ad_impressions': [10, 50, 20, 80, 30, 70, 15, 60],
    'demographics':   ['young', 'adult', 'young', 'adult',
                       'senior', 'adult', 'young', 'senior'],
    'purchase':       [0, 1, 0, 1, 0, 1, 0, 1],
}

df = pd.DataFrame(data)

# ---------------------------------------------------
# STEP 2: Encode Categorical + Scale Numeric Features
# ---------------------------------------------------

preprocessor = ColumnTransformer(
    transformers=[
        # Drop the first category to avoid the dummy-variable trap.
        ('cat', OneHotEncoder(drop='first'), ['demographics']),
        ('num', StandardScaler(), ['clicks', 'browsing_time', 'ad_impressions'])
    ]
)

X = df[['demographics', 'clicks', 'browsing_time', 'ad_impressions']]
y = df['purchase'].values

X = preprocessor.fit_transform(X)
# NOTE(review): ColumnTransformer returns a dense ndarray when the combined
# output density exceeds its sparse_threshold; .toarray() on an ndarray
# raises AttributeError. Guard so both sparse and dense outputs work.
if hasattr(X, "toarray"):
    X = X.toarray()

# ---------------------------------------------------
# STEP 3: Train-Test Split
# ---------------------------------------------------

# Hold out 20% of the rows for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
X, y,
test_size=0.2,
random_state=42
)

# ---------------------------------------------------
# STEP 4: Convert to PyTorch Tensors
# ---------------------------------------------------

# Features become float32 tensors; targets are reshaped to column vectors
# (N, 1) to match the network's (N, 1) output, as required by BCELoss.
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)

y_train = torch.FloatTensor(y_train).view(-1, 1)
y_test = torch.FloatTensor(y_test).view(-1, 1)

# ---------------------------------------------------
# STEP 5: Define Feedforward Neural Network
# ---------------------------------------------------

class PurchasePredictionNN(nn.Module):
    """Feedforward binary classifier: input_dim -> 16 -> 8 -> 1 (sigmoid).

    The sigmoid head maps the final logit to a purchase probability in (0, 1).
    """

    def __init__(self, input_dim):
        super(PurchasePredictionNN, self).__init__()

        # Two ReLU hidden layers followed by a sigmoid output layer.
        self.fc1 = nn.Linear(input_dim, 16)
        self.relu1 = nn.ReLU()

        self.fc2 = nn.Linear(16, 8)
        self.relu2 = nn.ReLU()

        self.fc3 = nn.Linear(8, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return purchase probabilities of shape (N, 1) for input (N, input_dim)."""
        x = self.relu1(self.fc1(x))
        x = self.relu2(self.fc2(x))
        return self.sigmoid(self.fc3(x))

# Build the model; input width = number of columns after preprocessing.
model = PurchasePredictionNN(X_train.shape[1])

# ---------------------------------------------------
# STEP 6: Loss Function & Optimizer
# ---------------------------------------------------

# Binary cross-entropy pairs with the sigmoid output layer of the network.
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# ---------------------------------------------------
# STEP 7: Training Loop
# ---------------------------------------------------

# Number of full passes over the training set.
epochs = 100

for epoch in range(epochs):

    # Forward pass on the whole training set (full-batch training).
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # Backpropagation and parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Progress report every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# ---------------------------------------------------
# STEP 8: Evaluation
# ---------------------------------------------------

with torch.no_grad():  # no gradients needed during inference

    preds = model(X_test)

    # Threshold the sigmoid probabilities at 0.5 to get hard class labels.
    predicted_class = (preds >= 0.5).float()

    accuracy = (predicted_class == y_test).sum().item() / y_test.size(0)

print("\nTest Accuracy:", accuracy)

# ---------------------------------------------------
# STEP 9: Predict New User Purchase Probability
# ---------------------------------------------------

# Example user:
# clicks = 7, browsing_time = 280, ad_impressions = 65, demographics = "adult"
new_user = pd.DataFrame([{
    'demographics': 'adult',
    'clicks': 7,
    'browsing_time': 280,
    'ad_impressions': 65
}])

# Apply the SAME fitted preprocessor that was used on the training data.
new_user = preprocessor.transform(new_user)
# Guard: ColumnTransformer may return either a sparse matrix or an ndarray.
if hasattr(new_user, "toarray"):
    new_user = new_user.toarray()
new_tensor = torch.FloatTensor(new_user)

with torch.no_grad():
    purchase_prob = model(new_tensor).item()

print("\nPurchase Probability:", round(purchase_prob, 4))

if purchase_prob >= 0.5:
    print("Prediction: USER WILL BUY")
else:
    print("Prediction: USER WILL NOT BUY")
 

UseCase 9: Build a Feedforward Neural Network (FNN) in PyTorch to predict the probability of attrition.
Inputs:
salary, job_satisfaction, overtime (0 = no, 1 = yes), tenure (years in company)
Output:
attrition_probability (0 to 1)

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# --------------------------------------------------
# Sample Dataset
# --------------------------------------------------
# Features:
# salary
# job_satisfaction
# overtime (0 or 1)
# tenure
#
# Target:
# attrition_probability (0 to 1)
# --------------------------------------------------

# Feature rows: [salary, job_satisfaction, overtime, tenure] per employee.
X = torch.tensor(
    [[50000, 3, 0, 2],
     [75000, 4, 0, 5],
     [40000, 2, 1, 1],
     [90000, 5, 0, 8],
     [35000, 1, 1, 1],
     [60000, 3, 1, 4],
     [85000, 4, 0, 7],
     [30000, 1, 1, 0.5]],
    dtype=torch.float32,
)

# Supervised targets: attrition probability for each employee.
y = torch.tensor(
    [[0.65], [0.10], [0.85], [0.05], [0.95], [0.50], [0.15], [0.98]],
    dtype=torch.float32,
)

# --------------------------------------------------
# Standardize each feature column to zero mean / unit variance.
# The stats are kept so new inputs can be scaled identically later.
# --------------------------------------------------
X_mean = X.mean(dim=0)
X_std = X.std(dim=0)
X = (X - X_mean) / X_std

# --------------------------------------------------
# Custom Dataset
# --------------------------------------------------
class AttritionDataset(Dataset):
    """Map-style dataset over pre-built feature/target pairs."""

    def __init__(self, features, targets):
        # Keep references only; no copying or transformation here.
        self.features, self.targets = features, targets

    def __len__(self):
        # One sample per target row.
        return len(self.targets)

    def __getitem__(self, idx):
        # Yield one (features, target) pair for the DataLoader.
        return self.features[idx], self.targets[idx]

# Wrap tensors in a Dataset and serve shuffled mini-batches of 4 rows.
dataset = AttritionDataset(X, y)
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

# --------------------------------------------------
# Feedforward Neural Network
# --------------------------------------------------
class AttritionFNN(nn.Module):
    def __init__(self):
        super(AttritionFNN, self).__init__()

        self.network = nn.Sequential(
            nn.Linear(4, 16),   # 4 input features
            nn.ReLU(),

            nn.Linear(16, 8),
            nn.ReLU(),

            nn.Linear(8, 1),
            nn.Sigmoid()        # Output probability between 0 and 1
        )

    def forward(self, x):
        return self.network(x)

model = AttritionFNN()

# --------------------------------------------------
# Loss and Optimizer
# --------------------------------------------------
# BCELoss matches the sigmoid output; targets here are probabilities in [0, 1].
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# --------------------------------------------------
# Training Loop
# --------------------------------------------------
# Number of full passes over the dataset.
epochs = 200

# One epoch = one full pass over the shuffled mini-batches.
for epoch in range(epochs):

    for batch_X, batch_y in dataloader:

        optimizer.zero_grad()                         # clear stale gradients
        loss = criterion(model(batch_X), batch_y)     # forward pass + loss
        loss.backward()                               # backpropagate
        optimizer.step()                              # Adam update

    # Log once every 20 epochs; 'loss' holds the last batch's value.
    if (epoch + 1) % 20 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# --------------------------------------------------
# Prediction Example
# --------------------------------------------------
# New employee: salary = 45000, job_satisfaction = 2, overtime = 1,
# tenure = 1.5 years.
raw_input = torch.tensor([[45000, 2, 1, 1.5]], dtype=torch.float32)

# Scale with the training-set statistics so the input matches the model.
new_employee = (raw_input - X_mean) / X_std

model.eval()  # switch to inference mode
with torch.no_grad():
    probability = model(new_employee)

print("\nPredicted Attrition Probability:", probability.item())
 

UseCase 10: Build a Feedforward Neural Network (FNN) in PyTorch to predict demand.
Inputs:
price, promotion (e.g., 0 = no promo, 1 = promo intensity), season_indicator (e.g., 0 = winter, 1 = spring, 2 = summer, 3 = fall), competitor_pricing
Output:
demand_estimate (continuous value)


import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# --------------------------------------------------
# Sample Dataset
# --------------------------------------------------
# Features:
# price
# promotion
# season_indicator
# competitor_pricing
#
# Target:
# demand_estimate (continuous value)
# --------------------------------------------------

# Feature rows: [price, promotion, season_indicator, competitor_pricing].
X = torch.tensor(
    [[10.0, 0.0, 0.0, 11.0],
     [12.0, 1.0, 1.0, 13.0],
     [8.0,  1.0, 2.0, 9.0],
     [15.0, 0.0, 3.0, 14.0],
     [7.0,  1.0, 2.0, 8.0],
     [20.0, 0.0, 0.0, 19.0],
     [9.0,  1.0, 1.0, 10.0],
     [14.0, 0.0, 3.0, 15.0]],
    dtype=torch.float32,
)

# Regression targets: observed demand for each scenario.
y = torch.tensor(
    [[120.0], [180.0], [250.0], [90.0], [300.0], [60.0], [220.0], [100.0]],
    dtype=torch.float32,
)

# --------------------------------------------------
# Standardize each feature column to zero mean / unit variance.
# The stats are kept so new inputs can be scaled identically later.
# --------------------------------------------------
X_mean = X.mean(dim=0)
X_std = X.std(dim=0)
X = (X - X_mean) / X_std

# --------------------------------------------------
# Custom Dataset
# --------------------------------------------------
class DemandDataset(Dataset):
    """Map-style dataset over pre-built feature/target pairs."""

    def __init__(self, features, targets):
        # Keep references only; no copying or transformation here.
        self.features, self.targets = features, targets

    def __len__(self):
        # One sample per target row.
        return len(self.targets)

    def __getitem__(self, idx):
        # Yield one (features, target) pair for the DataLoader.
        return self.features[idx], self.targets[idx]

# Wrap tensors in a Dataset and serve shuffled mini-batches of 4 rows.
dataset = DemandDataset(X, y)
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

# --------------------------------------------------
# Feedforward Neural Network
# --------------------------------------------------
class DemandFNN(nn.Module):
    """Regression net 4 -> 16 -> 8 -> 1; the raw linear output is the demand."""

    def __init__(self):
        super(DemandFNN, self).__init__()

        # Assemble the layer stack as a list, then wrap it in nn.Sequential.
        layers = [
            nn.Linear(4, 16), nn.ReLU(),
            nn.Linear(16, 8), nn.ReLU(),
            nn.Linear(8, 1),  # no final activation: unbounded regression output
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        # The sequential stack performs the whole forward pass.
        return self.network(x)

model = DemandFNN()

# --------------------------------------------------
# Loss and Optimizer
# --------------------------------------------------
# Mean squared error: standard loss for continuous regression targets.
criterion = nn.MSELoss()   # Regression loss
optimizer = optim.Adam(model.parameters(), lr=0.001)

# --------------------------------------------------
# Training Loop
# --------------------------------------------------
# Number of full passes over the dataset.
epochs = 300

# One epoch = one full pass over the shuffled mini-batches.
for epoch in range(epochs):

    for batch_X, batch_y in dataloader:

        optimizer.zero_grad()                         # clear stale gradients
        loss = criterion(model(batch_X), batch_y)     # forward pass + loss
        loss.backward()                               # backpropagate
        optimizer.step()                              # Adam update

    # Log once every 50 epochs; 'loss' holds the last batch's value.
    if (epoch + 1) % 50 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# --------------------------------------------------
# Prediction Example
# --------------------------------------------------
# New scenario: price = 11, promotion = 1, season_indicator = 2 (summer),
# competitor_pricing = 12.
scenario = torch.tensor([[11.0, 1.0, 2.0, 12.0]], dtype=torch.float32)

# Scale with the training-set statistics so the input matches the model.
new_data = (scenario - X_mean) / X_std

model.eval()  # switch to inference mode
with torch.no_grad():
    demand_prediction = model(new_data)

print("\nPredicted Demand Estimate:", demand_prediction.item())
 


About Us  | Contact Us | Sitemap  | Privacy Policy