ALL THE PROJECTS ARE HERE

"Discover cutting-edge AI, ML, and web projects—your gateway to innovation and limitless possibilities!"

AI Projects

Face Landmark Detection

Click below to read more.

Object Detection

Click below to read more.

Image Segmentation

COMING SOON...

ML Projects

Heart Disease Prediction

Click below to read more.

Prediction of Wine Type

Click below to read more.

Parkinson’s Disease Prediction using Machine Learning

COMING SOON...

Web Projects

Portfolio Website

Learn to build websites that adapt to any screen size. Click below to read more.

E-commerce Website (FoodMart-1)

An online food store built as a progressive web app, so it works offline and feels like a native app. Click below to read more.

Educational Website

COMING SOON...

Face Landmark Detection - Full Project

Detailed information about the face landmark detection project...


import cv2
import dlib
# Load the pre-trained Haar Cascade classifier for face detection (OpenCV)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Initialize Dlib's face detector and shape predictor (for landmarks)
detector = dlib.get_frontal_face_detector()
# Download the model from Dlib's site
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')  

# Open the webcam or use a video file
cap = cv2.VideoCapture(0)  # For webcam input, use 0. For video files, use 'video_file_path'

while True:
    ret, frame = cap.read()
    if not ret:
        break
    
    # Convert the frame to grayscale (needed for Haar Cascade)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces using Haar Cascade
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    # Detect faces using Dlib's detector
    dlib_faces = detector(gray)

    # Draw rectangles around faces using Haar Cascade
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    
    # Draw rectangles around faces and landmarks using Dlib
    for face in dlib_faces:
        # Draw the bounding box of the face
        x, y, w, h = (face.left(), face.top(), face.width(), face.height())
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        
        # Get the landmarks
        landmarks = predictor(gray, face)
        
        # Draw the landmarks (points on the face)
        for n in range(0, 68):  # For 68 facial landmarks
            x, y = landmarks.part(n).x, landmarks.part(n).y
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

    # Display the frame with face rectangles and landmarks
    cv2.imshow("Face Detection", frame)

    # Press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the video capture object and close any OpenCV windows
cap.release()
cv2.destroyAllWindows()
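
The 68-point landmark model is not bundled with Dlib and has to be downloaded separately. Below is a minimal sketch of one way to fetch and decompress it before running the script above; the download URL points at Dlib's public file listing and is an assumption, so verify it if the file has moved.

import bz2
import os
import urllib.request

# Assumed location of the 68-point landmark model on dlib.net (verify if it has moved)
MODEL_URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
MODEL_PATH = "shape_predictor_68_face_landmarks.dat"

if not os.path.exists(MODEL_PATH):
    # Download the compressed model file
    compressed_path = MODEL_PATH + ".bz2"
    urllib.request.urlretrieve(MODEL_URL, compressed_path)

    # Decompress it next to the script so dlib.shape_predictor() can load it
    with bz2.open(compressed_path, "rb") as src, open(MODEL_PATH, "wb") as dst:
        dst.write(src.read())
    os.remove(compressed_path)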



Object Detection - Full Project

Detailed information about the object detection project...


import cv2
import numpy as np

# Load YOLO
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# Load COCO names (class labels)
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]

# Initialize the webcam or use a video file
cap = cv2.VideoCapture(0)  # 0 for webcam; replace with video path for a video file

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Prepare the frame (resize, normalize, and pass it to the YOLO model)
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    layer_outputs = net.forward(output_layers)

    # Initialize lists for detected objects
    class_ids = []
    confidences = []
    boxes = []

    # Loop through all detected objects in the image
    for output in layer_outputs:
        for detection in output:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            
            if confidence > 0.5:  # Filter out weak predictions
                # Object's bounding box
                center_x = int(detection[0] * frame.shape[1])
                center_y = int(detection[1] * frame.shape[0])
                w = int(detection[2] * frame.shape[1])
                h = int(detection[3] * frame.shape[0])

                # Rectangle coordinates
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)

                # Append the object details
                class_ids.append(class_id)
                confidences.append(float(confidence))
                boxes.append([x, y, w, h])

    # Apply non-maximum suppression to eliminate redundant overlapping boxes
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

    # Draw the boxes and labels
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = str(round(confidences[i], 2))

            # Draw the rectangle and label
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(frame, f"{label} {confidence}", (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # Display the frame with the detected objects
    cv2.imshow("Object Detection", frame)

    # Press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the webcam and close OpenCV windows
cap.release()
cv2.destroyAllWindows()
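
YOLOv3 expects three files on disk before the script will run: yolov3.weights, yolov3.cfg, and coco.names. A minimal sketch of one way to fetch them is below; the URLs point at the original Darknet author's site and repository and are assumptions, so check them before relying on this (the weights file is roughly 240 MB).

import os
import urllib.request

# Assumed source locations from the Darknet project; verify before relying on them
FILES = {
    "yolov3.cfg": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg",
    "coco.names": "https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names",
    "yolov3.weights": "https://pjreddie.com/media/files/yolov3.weights",
}

for filename, url in FILES.items():
    if not os.path.exists(filename):
        print(f"Downloading {filename} ...")
        urllib.request.urlretrieve(url, filename)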



Image Segmentation - Full Project

COMING SOON...



Heart Disease Prediction - Full Project

Source code:


import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns

# dataset
disease_df = pd.read_csv("framingham.csv")
disease_df.drop(['education'], inplace = True, axis = 1)
disease_df.rename(columns ={'male':'Sex_male'}, inplace = True)
# removing NaN / NULL values
disease_df.dropna(axis = 0, inplace = True)
print(disease_df.head(), disease_df.shape)
print(disease_df.TenYearCHD.value_counts())
X = np.asarray(disease_df[['age', 'Sex_male', 'cigsPerDay', 
                           'totChol', 'sysBP', 'glucose']])
y = np.asarray(disease_df['TenYearCHD'])

# normalization of the dataset
X = preprocessing.StandardScaler().fit(X).transform(X)

# Train-and-test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( 
        X, y, test_size = 0.3, random_state = 4)

print ('Train set:', X_train.shape,  y_train.shape)
print ('Test set:', X_test.shape,  y_test.shape)
# counting no. of patients affected with CHD
plt.figure(figsize=(7, 5))
sns.countplot(x='TenYearCHD', data=disease_df,
             palette="BuGn_r")
plt.show()
# Plot the TenYearCHD values across the dataset
disease_df['TenYearCHD'].plot()
plt.show()
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
# Evaluation and accuracy
from sklearn.metrics import accuracy_score
print('Accuracy of the model is =', 
      accuracy_score(y_test, y_pred))
# Confusion matrix 
from sklearn.metrics import confusion_matrix, classification_report
cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data = cm, 
                           columns = ['Predicted:0', 'Predicted:1'], 
                           index =['Actual:0', 'Actual:1'])

plt.figure(figsize = (8, 5))
sns.heatmap(conf_matrix, annot = True, fmt = 'd', cmap = "Greens")

plt.show()
print('Classification report:')
print (classification_report(y_test, y_pred))
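
Once the model is trained, the same six features can be used to score a new patient. The sketch below is a minimal, self-contained example: it rebuilds the preprocessing and the logistic regression in a scikit-learn pipeline so the scaling at prediction time matches training, and the patient values are invented purely for illustration.

import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Rebuild the training data exactly as above
disease_df = pd.read_csv("framingham.csv")
disease_df.drop(['education'], inplace=True, axis=1)
disease_df.rename(columns={'male': 'Sex_male'}, inplace=True)
disease_df.dropna(axis=0, inplace=True)

features = ['age', 'Sex_male', 'cigsPerDay', 'totChol', 'sysBP', 'glucose']
X = disease_df[features]
y = disease_df['TenYearCHD']

# A pipeline keeps the scaler and the classifier together
pipe = make_pipeline(StandardScaler(), LogisticRegression())
pipe.fit(X, y)

# Hypothetical patient: 55-year-old male, 10 cigarettes/day, cholesterol 240,
# systolic BP 140, glucose 85 (values invented for illustration only)
patient = pd.DataFrame([[55, 1, 10, 240, 140, 85]], columns=features)
print("Predicted 10-year CHD risk:", pipe.predict_proba(patient)[0, 1])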

Prediction of Wine Type using Deep Learning - Full Project

Source code:


# Import Required Libraries 
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Read in white wine data
white = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv", sep =';')

# Read in red wine data
red = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv", sep =';')
# First rows of `red`
red.head()
# Last rows of `white`
white.tail()
# Take a sample of five rows of `red`
red.sample(5)
# Describe `white`
white.describe()
# Double check for null values in `red`
pd.isnull(red)
# Create Histogram
fig, ax = plt.subplots(1, 2)

ax[0].hist(red.alcohol, 10, facecolor ='red',
            alpha = 0.5, label ="Red wine")

ax[1].hist(white.alcohol, 10, facecolor ='white',
        ec ="black", lw = 0.5, alpha = 0.5,
        label ="White wine")

fig.subplots_adjust(left = 0, right = 1, bottom = 0, 
            top = 0.5, hspace = 0.05, wspace = 1)

ax[0].set_ylim([0, 1000])
ax[0].set_xlabel("Alcohol in % Vol")
ax[0].set_ylabel("Frequency")
ax[1].set_ylim([0, 1000])
ax[1].set_xlabel("Alcohol in % Vol")
ax[1].set_ylabel("Frequency")

fig.suptitle("Distribution of Alcohol in % Vol")
plt.show()
# Add `type` column to `red` with value one
red['type'] = 1

# Add `type` column to `white` with value zero
white['type'] = 0

# Concatenate `red` and `white`
wines = pd.concat([red, white], ignore_index=True)

# Import `train_test_split` from `sklearn.model_selection`
from sklearn.model_selection import train_test_split
# Use .iloc for position based indexing
X = wines.iloc[:, 0:11]
y = np.ravel(wines.type)

# Splitting the data set for training and validating 
X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size = 0.34, random_state = 45)

# Import `Sequential` from `keras.models`
from keras.models import Sequential

# Import `Dense` from `keras.layers`
from keras.layers import Dense

# Initialize the constructor
model = Sequential()

# Add an input layer
model.add(Dense(12, activation ='relu', input_shape =(11, )))

# Add one hidden layer
model.add(Dense(9, activation ='relu'))

# Add an output layer
model.add(Dense(1, activation ='sigmoid'))

# Model output shape
model.output_shape

# Model summary
model.summary()

# Model config
model.get_config()

# List all weight tensors
model.get_weights()
model.compile(loss ='binary_crossentropy', 
  optimizer ='adam', metrics =['accuracy'])
# Training Model
model.fit(X_train, y_train, epochs = 3,
           batch_size = 1, verbose = 1)
 
# Predicting the Value
y_pred = model.predict(X_test)
print(y_pred)
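
The network's output layer is a sigmoid, so y_pred holds probabilities rather than class labels. A minimal evaluation sketch, continuing from the y_pred and y_test variables defined above, thresholds those probabilities at 0.5 and compares them with the held-out labels.

import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

# Threshold the sigmoid outputs at 0.5 to obtain hard class labels
y_pred_classes = (y_pred > 0.5).astype(int).ravel()

print("Accuracy:", accuracy_score(y_test, y_pred_classes))
print("Confusion matrix:")
print(confusion_matrix(y_test, y_pred_classes))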


Parkinson’s Disease Prediction using Machine Learning - Full Project

COMING SOON...




Portfolio Website - Full Project

Detailed information about the responsive portfolio website project...



Download Project ZIP

E-commerce Website (FoodMart-1) - Full Project

Detailed information about the FoodMart-1 e-commerce progressive web app project...


Download Project ZIP

Educational Website - Full Project

COMING SOON...

