|
|
TensorFlow is an open-source library by Google used for machine learning (ML), deep learning (neural networks), and numerical computation with tensors. TensorFlow is very useful for building models that can learn patterns from data, such as image recognition and text classification. A tensor is just a multi-dimensional array.
Code Example 1:
import numpy
# Build tensors of increasing rank with tf.constant.
import tensorflow as tf

# Rank-0 tensor (a single number)
scalar = tf.constant(5)
# Rank-1 tensor (a list of numbers)
vector = tf.constant([1, 2, 3])
# Rank-2 tensor (a table of numbers)
matrix = tf.constant([[1, 2], [3, 4]])

print(scalar)
print(vector)
print(matrix)
#Output:
tf.Tensor(5, shape=(), dtype=int32)
tf.Tensor([1 2 3], shape=(3,), dtype=int32)
tf.Tensor(
[[1 2]
[3 4]], shape=(2, 2), dtype=int32)
Code Example 2:
import numpy
# Element-wise matrix addition: each entry of the result is the
# sum of the matching entries of the two operands.
first = tf.constant([[1, 2],
                     [3, 4]])
second = tf.constant([[5, 6],
                      [7, 8]])
summed = tf.add(first, second)
print(summed)
#Output:
tf.Tensor(
[[ 6  8]
 [10 12]], shape=(2, 2), dtype=int32)
Code Example 3:
# Matrix multiplication (row-by-column dot products) via tf.matmul.
lhs = tf.constant([[1, 2],
                   [3, 4]])
rhs = tf.constant([[5, 6],
                   [7, 8]])
product = tf.matmul(lhs, rhs)
print(product)
#Output:
tf.Tensor(
[[19 22]
[43 50]], shape=(2, 2), dtype=int32)
Code Example 4:
# Build a 2x3 matrix filled with integer zeros (dtype forced to int32).
zeros_2x3 = tf.zeros((2, 3), dtype=tf.int32)
print(zeros_2x3)
#Output:
tf.Tensor(
[[0 0 0]
 [0 0 0]], shape=(2, 3), dtype=int32)
Code Example 5:
# Create a 2x3 matrix filled with 1's.
# Default dtype is float32 when none is specified.
# (Fixed: the original used a C-style "//" comment, which is a
# SyntaxError in Python.)
matrix = tf.ones((2, 3))
print(matrix)
#Output:
tf.Tensor(
[[1. 1. 1.]
[1. 1. 1.]], shape=(2, 3), dtype=float32)
Code Example 6:
# Identity matrix: 1s on the main diagonal, 0s everywhere else.
eye_3x3 = tf.eye(3)
print(eye_3x3)
#Output:
tf.Tensor(
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]], shape=(3, 3), dtype=float32)
Code Example 7:
# Transpose: rows become columns and columns become rows,
# so a 2x3 matrix becomes a 3x2 one.
source = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
print(tf.transpose(source))
#Output:
tf.Tensor(
[[1 4]
 [2 5]
 [3 6]], shape=(3, 2), dtype=int32)
Code Example 8:
# Draw a 3x3 matrix of random values from a normal (Gaussian)
# distribution: mean=0 sets the centre, stddev=1 sets the spread.
noise = tf.random.normal((3, 3), mean=0, stddev=1)
print(noise)
#Output:
tf.Tensor(
[[ 0.12 -0.45 1.03]
[-1.22 0.67 -0.30]
[ 0.55 -0.88 0.09]], shape=(3, 3), dtype=float32)
Code Example 9:
# Get the 1st column.
matrix = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
# ":"   -> take all rows
# "0:1" -> take columns from index 0 up to (but not including) 1,
#          which keeps the result two-dimensional.
# (Fixed: the original used C-style "//" comments, which are
# SyntaxErrors in Python.)
result = matrix[:, 0:1]
print(result)
#Output:
[[1]
[4]]
Code Example 10:
# Slice out columns 2 and 3 (indices 1 and 2) for every row.
grid = tf.constant([[1, 2, 3],
                    [4, 5, 6]])
print(grid[:, 1:3])
#Output:
[[2 3]
 [5 6]]
Code Example 11:
# Row indexing: matrix[1] selects the entire 2nd row (index 1).
table = tf.constant([
    [10, 20, 30],
    [40, 50, 60],
    [70, 80, 90],
])
print(table[1])
#Output:
[40 50 60]
Let's solve some neural network use cases with TensorFlow.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

# Use case: predict loan default (binary classification) from income,
# credit score, and loan amount using a scaled logistic regression.

# -----------------------------------
# 1. Load data from Excel
# -----------------------------------
data = pd.read_excel("loan_data.xlsx")
df = pd.DataFrame(data)
print("Dataset Preview:")
print(data.head())

# -----------------------------------
# 2. Define features and target
# -----------------------------------
X = df[["Income", "CreditScore", "LoanAmount"]]
y = df["Default"]

# -----------------------------------
# 3. Split into training and testing
# -----------------------------------
# stratify=y keeps the default/non-default ratio equal in both splits.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42, stratify=y
)

# -----------------------------------
# 4. Train the Logistic Regression model
#    (the original comment said "Linear Regression", but the estimator
#    is LogisticRegression)
# -----------------------------------
# The pipeline fits the scaler on training data only, avoiding leakage.
model = Pipeline([
    ("scaler", StandardScaler()),
    ("logreg", LogisticRegression())
])
model.fit(X_train, y_train)

# -----------------------------------
# 5. Evaluate the model
# -----------------------------------
# FIX: y_pred was used below but never defined in the original.
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
print("\nConfusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred))

# -----------------------------------
# 6. Predict default for a new customer
# -----------------------------------
# Use a DataFrame with the same column names the pipeline was fitted on
# (a bare list triggers a feature-name mismatch warning).
new_customer = pd.DataFrame(
    [[4500, 620, 16000]],  # Income, CreditScore, LoanAmount
    columns=["Income", "CreditScore", "LoanAmount"]
)
default_prediction = model.predict(new_customer)
default_probability = model.predict_proba(new_customer)[0][1]
print("Default Prediction:", default_prediction[0])
print("Probability of Default:", default_probability)
import numpy as np  # FIX: np.array is used in step 9 but numpy was never imported
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# Use case: predict medical-insurance charges (regression) with a
# small feed-forward neural network.

# -----------------------------
# 1. Create sample dataset
# -----------------------------
data = pd.DataFrame({
    "age": [25, 32, 47, 51, 62, 23, 36, 40, 55, 29],
    "bmi": [22.0, 28.5, 31.2, 29.0, 35.1, 24.3, 27.8, 30.5, 33.0, 26.1],
    "smoker": [0, 1, 1, 0, 1, 0, 0, 1, 1, 0],      # 0 = no, 1 = yes
    "exercise": [3, 1, 1, 2, 0, 3, 2, 1, 0, 2],    # 0 = low, 3 = high
    "conditions": [0, 1, 2, 1, 3, 0, 1, 1, 2, 0],  # number of diseases
    "charges": [2000, 12000, 25000, 18000, 32000, 2200, 8000, 15000, 28000, 6000]
})

# -----------------------------
# 2. Split features and target
# -----------------------------
X = data[["age", "bmi", "smoker", "exercise", "conditions"]]
y = data["charges"]

# -----------------------------
# 3. Train-test split
# -----------------------------
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# -----------------------------
# 4. Feature scaling
# -----------------------------
# Fit the scaler on training data only, then apply the same transform
# to the test set (avoids data leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# -----------------------------
# 5. Build neural network
# -----------------------------
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(5,)),  # Hidden layer 1
    tf.keras.layers.Dense(8, activation="relu"),                     # Hidden layer 2
    tf.keras.layers.Dense(1)  # Output layer: no activation -> linear, for regression
])

# -----------------------------
# 6. Compile model
# -----------------------------
model.compile(
    optimizer="adam",
    loss="mse",       # mean squared error: standard regression loss
    metrics=["mae"]   # mean absolute error: easier to interpret
)

# -----------------------------
# 7. Train model
# -----------------------------
model.fit(X_train, y_train, epochs=150, verbose=0)

# -----------------------------
# 8. Evaluate model
# -----------------------------
loss, mae = model.evaluate(X_test, y_test)
print("Test MAE:", mae)

# -----------------------------
# 9. Predict new case
# -----------------------------
sample = np.array([[45, 30.0, 1, 2, 1]])  # age, bmi, smoker, exercise, conditions
sample = scaler.transform(sample)  # reuse the already-fitted scaler
prediction = model.predict(sample)
print("Predicted insurance cost:", prediction[0][0])
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# Use case: customer-churn prediction (binary classification) with a
# small feed-forward neural network.
# (Fixed: the step headings such as "2. Train-Test Split" were bare
# prose lines — SyntaxErrors in Python — and the final if/else bodies
# were unindented; both are repaired here.)

# 1. Create the sample dataset
data = {
    "monthly_usage": [10, 50, 30, 80, 90, 20, 60, 70, 15, 85],
    "complaints": [5, 1, 3, 0, 0, 4, 2, 1, 5, 0],
    "subscription_duration": [2, 24, 12, 36, 48, 6, 18, 30, 3, 40],
    "payment_delays": [4, 0, 2, 0, 0, 3, 1, 0, 5, 0],
    "churn": [1, 0, 1, 0, 0, 1, 0, 0, 1, 0]  # 1 = churned, 0 = stayed
}
df = pd.DataFrame(data)
print(df)

# 2. Train-Test Split
X = df.drop("churn", axis=1)
y = df["churn"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# 3. Feature Scaling (fit on training data only to avoid leakage)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# 4. Build the FNN Model
model = Sequential([
    Dense(8, activation='relu', input_shape=(4,)),  # Hidden Layer 1
    Dense(4, activation='relu'),                    # Hidden Layer 2
    Dense(1, activation='sigmoid')                  # Output Layer (churn probability)
])

# 5. Compile the Model
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# 6. Train the Model
history = model.fit(
    X_train, y_train,
    epochs=50,
    batch_size=2,
    validation_split=0.2,
    verbose=1
)

# 7. Evaluate the Model
loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)

# 8. Predictions
predictions = model.predict(X_test)
predicted_classes = (predictions > 0.5).astype(int)  # threshold probabilities at 0.5
print("Predictions:\n", predicted_classes)
print("Actual:\n", y_test.values)

# 9. Predict New Customer
new_customer = [[40, 2, 10, 1]]  # monthly_usage, complaints, duration, delays
new_customer_scaled = scaler.transform(new_customer)
prediction = model.predict(new_customer_scaled)
if prediction[0][0] > 0.5:
    print("Customer will churn")
else:
    print("Customer will stay")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# ==========================================================
# Use case: spam e-mail detection (binary classification)
# with a small feed-forward neural network.
# ==========================================================
# --------------------------
# Example dataset (dummy)
# --------------------------
# Features: [links, caps, length, ip_numeric]
# NOTE(review): ip_numeric appears to be an IP address packed into one
# integer — scaling it as a continuous feature is questionable; confirm.
X = np.array([
[5, 20, 500, 3232235777],
[0, 2, 120, 3232235778],
[10, 50, 1000, 3232235779],
[1, 5, 200, 3232235780],
])
# Labels: 1 = spam, 0 = not spam
y = np.array([1, 0, 1, 0])
# --------------------------
# Train-test split
# --------------------------
# test_size=0.2 on 4 samples leaves a single test example.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# --------------------------
# Feature scaling
# --------------------------
# Fit the scaler on training data only, then apply the same
# transform to the test set (avoids data leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
keras.layers.Dense(16, activation='relu', input_shape=(4,)),
keras.layers.Dense(8, activation='relu'),
keras.layers.Dense(1, activation='sigmoid') # binary output
])
# --------------------------
# Compile model
# --------------------------
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy']
)
# --------------------------
# Train model
# --------------------------
model.fit(
X_train, y_train,
epochs=20,
batch_size=2,
validation_split=0.2
)
# --------------------------
# Evaluate
# --------------------------
loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)
# --------------------------
# Predict new email
# --------------------------
# Scale the new sample with the already-fitted scaler before predicting.
new_email = np.array([[3, 10, 400, 3232235781]])
new_email = scaler.transform(new_email)
prediction = model.predict(new_email)
print("Spam probability:", prediction[0][0])
print("Spam" if prediction[0][0] > 0.5 else "Not Spam")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# ==========================================================
# Use case: house-price prediction (regression) with a
# feed-forward neural network.
# ==========================================================
# --------------------------
# Example dataset (dummy)
# --------------------------
# Features:
# [school_rating, area_size, bedrooms, age, crime_rate]
X = np.array([
[9, 2000, 3, 10, 2.5],
[6, 1500, 2, 20, 5.0],
[10, 3000, 4, 5, 1.5],
[5, 1200, 2, 30, 6.5],
[7, 1800, 3, 15, 3.0],
[10, 3500, 5, 2, 1.0],
[4, 1000, 1, 40, 7.5],
])
# Target: house price (in $1000s)
y = np.array([430, 240, 680, 170, 330, 880, 130])
# --------------------------
# Train-test split
# --------------------------
# test_size=0.2 on 7 samples leaves 2 test examples.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# --------------------------
# Feature scaling
# --------------------------
# Fit the scaler on training data only, then apply the same
# transform to the test set (avoids data leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
keras.layers.Dense(64, activation='relu', input_shape=(5,)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(16, activation='relu'),
# No activation on the last layer -> linear output, as required
# for regression.
keras.layers.Dense(1) # regression output
])
# --------------------------
# Compile model
# --------------------------
model.compile(
optimizer='adam',
loss='mse',
metrics=['mae']
)
# --------------------------
# Train model
# --------------------------
model.fit(
X_train, y_train,
epochs=60,
batch_size=2,
validation_split=0.2
)
# --------------------------
# Evaluate model
# --------------------------
loss, mae = model.evaluate(X_test, y_test)
print("Test MAE:", mae)
# --------------------------
# Predict new house price
# --------------------------
# Example:
# school_rating=8, area=2200, bedrooms=3, age=8, crime_rate=2.8
# Scale the new sample with the already-fitted scaler before predicting.
new_house = np.array([[8, 2200, 3, 8, 2.8]])
new_house = scaler.transform(new_house)
prediction = model.predict(new_house)
print("Predicted price (in $1000s):", prediction[0][0])
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Use case: disease-risk prediction (binary classification) with a
# small feed-forward neural network.
# (Fixed: the bodies of the final if/else were unindented, which is an
# IndentationError in Python.)

# --------------------------
# Example dataset (dummy)
# --------------------------
# Features: [age, blood_pressure, cholesterol, glucose]
X = np.array([
    [25, 120, 180, 90],
    [45, 140, 220, 110],
    [60, 150, 250, 130],
    [30, 130, 200, 100],
    [50, 145, 240, 120],
    [35, 135, 210, 105],
    [65, 160, 260, 140],
])
# Labels: 1 = disease, 0 = no disease
y = np.array([0, 1, 1, 0, 1, 0, 1])

# --------------------------
# Train-test split
# --------------------------
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# --------------------------
# Feature scaling (fit on training data only to avoid leakage)
# --------------------------
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(4,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')  # probability output
])

# --------------------------
# Compile model
# --------------------------
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# --------------------------
# Train model
# --------------------------
model.fit(
    X_train, y_train,
    epochs=50,
    batch_size=2,
    validation_split=0.2
)

# --------------------------
# Evaluate model
# --------------------------
loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)

# --------------------------
# Predict disease probability
# --------------------------
# Example: age=55, bp=150, cholesterol=230, glucose=125
new_patient = np.array([[55, 150, 230, 125]])
new_patient = scaler.transform(new_patient)  # reuse the fitted scaler
prediction = model.predict(new_patient)
print("Disease Probability:", prediction[0][0])
if prediction[0][0] > 0.5:
    print("High Risk")
else:
    print("Low Risk")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# NOTE(review): this example is a verbatim duplicate of the preceding
# disease-prediction script — consider removing one copy.
# Use case: disease-risk prediction (binary classification) with a
# small feed-forward neural network.
# (Fixed: the bodies of the final if/else were unindented, which is an
# IndentationError in Python.)

# --------------------------
# Example dataset (dummy)
# --------------------------
# Features: [age, blood_pressure, cholesterol, glucose]
X = np.array([
    [25, 120, 180, 90],
    [45, 140, 220, 110],
    [60, 150, 250, 130],
    [30, 130, 200, 100],
    [50, 145, 240, 120],
    [35, 135, 210, 105],
    [65, 160, 260, 140],
])
# Labels: 1 = disease, 0 = no disease
y = np.array([0, 1, 1, 0, 1, 0, 1])

# --------------------------
# Train-test split
# --------------------------
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# --------------------------
# Feature scaling (fit on training data only to avoid leakage)
# --------------------------
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(4,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')  # probability output
])

# --------------------------
# Compile model
# --------------------------
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# --------------------------
# Train model
# --------------------------
model.fit(
    X_train, y_train,
    epochs=50,
    batch_size=2,
    validation_split=0.2
)

# --------------------------
# Evaluate model
# --------------------------
loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)

# --------------------------
# Predict disease probability
# --------------------------
# Example: age=55, bp=150, cholesterol=230, glucose=125
new_patient = np.array([[55, 150, 230, 125]])
new_patient = scaler.transform(new_patient)  # reuse the fitted scaler
prediction = model.predict(new_patient)
print("Disease Probability:", prediction[0][0])
if prediction[0][0] > 0.5:
    print("High Risk")
else:
    print("Low Risk")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Use case: purchase-conversion prediction (binary classification)
# with a small feed-forward neural network.
# (Fixed: the bodies of the final if/else were unindented, which is an
# IndentationError in Python.)

# --------------------------
# Example dataset (dummy)
# --------------------------
# demographics: 0=young, 1=adult, 2=senior
# Features: [clicks, browsing_time, impressions, demographics]
# NOTE(review): demographics is a category fed in as an ordinal integer;
# one-hot encoding may be more appropriate — confirm.
X = np.array([
    [5, 300, 20, 0],
    [2, 100, 15, 1],
    [10, 600, 30, 0],
    [1, 50, 10, 2],
    [7, 400, 25, 1],
    [12, 800, 40, 0],
    [3, 120, 12, 2],
])
# Labels: 1 = purchase, 0 = no purchase
y = np.array([1, 0, 1, 0, 1, 1, 0])

# --------------------------
# Train-test split
# --------------------------
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# --------------------------
# Feature scaling (fit on training data only to avoid leakage)
# --------------------------
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(4,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')  # probability output
])

# --------------------------
# Compile model
# --------------------------
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# --------------------------
# Train model
# --------------------------
model.fit(
    X_train, y_train,
    epochs=50,
    batch_size=2,
    validation_split=0.2
)

# --------------------------
# Evaluate model
# --------------------------
loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)

# --------------------------
# Predict purchase probability
# --------------------------
# Example: clicks=6, time=350, impressions=22, demographic=adult(1)
new_user = np.array([[6, 350, 22, 1]])
new_user = scaler.transform(new_user)  # reuse the fitted scaler
prediction = model.predict(new_user)
print("Purchase Probability:", prediction[0][0])
if prediction[0][0] > 0.5:
    print("Likely to Convert")
else:
    print("Unlikely to Convert")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Use case: employee-attrition prediction (binary classification)
# with a small feed-forward neural network.
# (Fixed: the bodies of the final if/else were unindented, which is an
# IndentationError in Python.)

# --------------------------
# Example dataset (dummy)
# --------------------------
# Features: [salary, job_satisfaction, overtime, tenure]
X = np.array([
    [50000, 4, 0, 5],
    [80000, 3, 1, 2],
    [120000, 5, 0, 10],
    [45000, 2, 1, 1],
    [70000, 3, 0, 4],
    [30000, 1, 1, 1],
    [95000, 4, 0, 8],
])
# Labels: 1 = leave, 0 = stay
y = np.array([0, 1, 0, 1, 0, 1, 0])

# --------------------------
# Train-test split
# --------------------------
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# --------------------------
# Feature scaling (fit on training data only to avoid leakage)
# --------------------------
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(4,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')  # probability output
])

# --------------------------
# Compile model
# --------------------------
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# --------------------------
# Train model
# --------------------------
model.fit(
    X_train, y_train,
    epochs=50,
    batch_size=2,
    validation_split=0.2
)

# --------------------------
# Evaluate model
# --------------------------
loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)

# --------------------------
# Predict attrition risk
# --------------------------
# Example employee:
# salary=60000, satisfaction=3, overtime=1, tenure=3
new_employee = np.array([[60000, 3, 1, 3]])
new_employee = scaler.transform(new_employee)  # reuse the fitted scaler
prediction = model.predict(new_employee)
print("Attrition Probability:", prediction[0][0])
if prediction[0][0] > 0.5:
    print("High Risk of Leaving")
else:
    print("Likely to Stay")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# ==========================================================
# Use case: product-demand forecasting (regression) with a
# feed-forward neural network.
# ==========================================================
# --------------------------
# Example dataset (dummy)
# --------------------------
# Features: [price, promotion, season, competitor_price]
# NOTE(review): season looks like a categorical code (0-3) fed in as an
# ordinal integer; one-hot encoding may be more appropriate — confirm.
X = np.array([
[10, 1, 2, 12],
[15, 0, 1, 14],
[8, 1, 3, 10],
[20, 0, 0, 18],
[12, 1, 2, 11],
[25, 0, 1, 22],
[9, 1, 3, 9],
[18, 0, 2, 17],
])
# Target: demand (units sold)
y = np.array([500, 300, 650, 200, 550, 150, 700, 250])
# --------------------------
# Train-test split
# --------------------------
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# --------------------------
# Feature scaling
# --------------------------
# Fit the scaler on training data only, then apply the same
# transform to the test set (avoids data leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# --------------------------
# Build FNN model
# --------------------------
model = keras.Sequential([
keras.layers.Dense(64, activation='relu', input_shape=(4,)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(16, activation='relu'),
# No activation on the last layer -> linear output, as required
# for regression.
keras.layers.Dense(1) # regression output
])
# --------------------------
# Compile model
# --------------------------
model.compile(
optimizer='adam',
loss='mse',
metrics=['mae']
)
# --------------------------
# Train model
# --------------------------
model.fit(
X_train, y_train,
epochs=80,
batch_size=2,
validation_split=0.2
)
# --------------------------
# Evaluate model
# --------------------------
loss, mae = model.evaluate(X_test, y_test)
print("Test MAE:", mae)
# --------------------------
# Predict demand
# --------------------------
# Example:
# price=14, promotion=1, season=2, competitor_price=13
# Scale the new sample with the already-fitted scaler before predicting.
new_data = np.array([[14, 1, 2, 13]])
new_data = scaler.transform(new_data)
prediction = model.predict(new_data)
print("Predicted Demand:", prediction[0][0])