Write-ups and practical assignments for Machine Learning

Practical Assignment :-

1. Linear Regression (LR) Program

# Import the packages
import matplotlib.pyplot as plt
import pandas as pd

# Read the dataset.
# Assumes hours.csv holds the feature column(s) first and the target
# (risk score) as the LAST column — TODO confirm against the CSV.
dataset = pd.read_csv("hours.csv")

# Split into features and target.
# BUG FIX: the original took y from column index 1, contradicting its
# own "last Column" comment; iloc[:, -1] works for any feature count.
x = dataset.iloc[:, :-1].values  # all columns except the last (features)
y = dataset.iloc[:, -1].values   # last column (target)

# Import Linear Regression and create the model object
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()

# Fit (train) the model on the whole dataset
regressor.fit(x, y)

# Score: R^2 on the training data, expressed as a percentage.
# NOTE(review): this is a training score, not a held-out test score.
Accuracy = regressor.score(x, y) * 100
print('Accuracy')
print(Accuracy)

# Sample prediction for 10 hours
y_pred = regressor.predict([[10]])
print(y_pred)

# Input from user (raises ValueError on non-integer input)
hours = int(input("Enter the no of hours"))

# Apply the fitted line manually: y = coef * x + intercept
# (equivalent to regressor.predict([[hours]]))
eq = regressor.coef_ * hours + regressor.intercept_
print("Risk Score", eq[0])

# Plot the raw points and the fitted regression line
plt.plot(x, y, 'o')
plt.plot(x, regressor.predict(x))
plt.show()

2. K-Nearest Neighbors (KNN) Program:-

#import the packages
import pandas as pd
import numpy as np

#Read dataset (assumes kdata.csv has 2 feature columns and the class
#label in column index 2 -- TODO confirm against the CSV)
dataset=pd.read_csv("kdata.csv")
X=dataset.iloc[:,:-1].values
y=dataset.iloc[:,2].values

#import KNeighborsClassifier and create object of it
from sklearn.neighbors import KNeighborsClassifier

#Creating model with k=3 (default weights='uniform': each of the 3
#nearest neighbors contributes one equal vote)
classifier=KNeighborsClassifier(n_neighbors=3)
# Training model
classifier.fit(X,y)

#predict the class for the point (6,6)
X_test=np.array([6,6])
# Predictions for test data (wrapped in a list: predict expects a 2-D array)
y_pred=classifier.predict([X_test])
print(y_pred)

# Rebuild the classifier, still with the 3 nearest neighbors, but with
# weights='distance': closer neighbors now have MORE influence on the
# vote than farther ones (instead of all neighbors voting equally).
classifier=KNeighborsClassifier(n_neighbors=3,weights='distance')
classifier.fit(X,y)

#predict the class for the point (6,2)
X_test=np.array([6,2])
y_pred=classifier.predict([X_test])
print(y_pred)

3. K-Means Clustering Program:-

# Import packages
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Create the dataset of 8 two-dimensional points using a DataFrame
df=pd.DataFrame({'X':[0.1,0.15,0.08,0.16,0.2,0.25,0.24,0.3],
                 'y':[0.6,0.71,0.9,0.85,0.3,0.5,0.1,0.2]})
f1 = df['X'].values
f2 = df['y'].values
# Pair the coordinates into an (8, 2) array of points
X = np.array(list(zip(f1, f2)))
print(X)

# Initial centroid points: m1 = (0.1, 0.6), m2 = (0.3, 0.2)
C_x=np.array([0.1,0.3])
C_y=np.array([0.6,0.2])
centroids=C_x,C_y

# Plot the given points (black)
colmap = {1: 'r', 2: 'b'}
plt.scatter(f1, f2, color='k')
plt.show()

# Plot the two initial centroids (red and blue)
plt.scatter(C_x[0],C_y[0], color=colmap[1])
plt.scatter(C_x[1],C_y[1], color=colmap[2])
plt.show()

C = np.array(list((C_x, C_y)), dtype=np.float32)
print (C)

# Plot the given points together with the centroid markers
plt.scatter(f1, f2, c='#050505')
plt.scatter(C_x[0], C_y[0], marker='*', s=200, c='r')
plt.scatter(C_x[1], C_y[1], marker='*', s=200, c='b')
plt.show()

# Import KMeans class and create object of it (random_state fixed for
# reproducible cluster assignments)
from sklearn.cluster import KMeans
model=KMeans(n_clusters=2,random_state=0)
model.fit(X)
labels=model.labels_
print(labels)

# Population of the cluster labelled 1.
# BUG FIX: the original counted with a manual loop and then printed
# count-1, an off-by-one that under-reported the population by one.
count = int(np.count_nonzero(labels == 1))
print('No of population around cluster 2:', count)

# Fitted (updated) centroids after K-Means converges
new_centroids = model.cluster_centers_

print('Previous value of m1 and m2 is:')
print('M1==',centroids[0])
# BUG FIX: the second line was mislabelled 'M1==' in the original
print('M2==',centroids[1])

print('updated value of m1 and m2 is:')
print('M1==',new_centroids[0])
# BUG FIX: the second line was mislabelled 'M1==' in the original
print('M2==',new_centroids[1])

#Output
#[[0.1  0.6 ]
# [0.15 0.71]
# [0.08 0.9 ]
# [0.16 0.85]
# [0.2  0.3 ]
# [0.25 0.5 ]
# [0.24 0.1 ]
# [0.3  0.2 ]]
  

#[[0.1 0.3]
# [0.6 0.2]]
 

#[1 1 1 1 0 0 0 0]
#No of population around cluster 2: 3
#Previous value of m1 and m2 is:
#M1== [0.1 0.3]
#M1== [0.6 0.2]
#updated value of m1 and m2 is:
#M1== [0.2475 0.275 ]
#M1== [0.1225 0.765 ]

4. Decision Tree Classifier Program:-

# Import packages
import pandas as pd
import numpy as np

# Read dataset (assumes tree1.csv has 6 columns with the class label
# in column index 5 -- TODO confirm against the CSV)
dataset=pd.read_csv("tree1.csv")

x=dataset.iloc[:,:-1]
y=dataset.iloc[:,5]

# Label-encode every categorical feature column into integers
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
x=x.apply(le.fit_transform)
print(x)

# Sample encoded input used below: 1 1 0 0

# Import Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
# Create decision tree classifier object
# (renamed from 'regressor': it is a classifier, not a regressor)
classifier=DecisionTreeClassifier()
# Train model on columns 1..4 (column 0 is skipped -- presumably an
# id/name column; verify against tree1.csv)
classifier.fit(x.iloc[:,1:5],y)

# Predict the class for one encoded sample (2-D input expected)
x_in=np.array([1,1,0,0])
y_pred=classifier.predict([x_in])

print(y_pred)

# BUG FIX: sklearn.externals.six was removed in scikit-learn 0.23;
# StringIO lives in the standard library's io module.
from io import StringIO
#from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus

# Export the fitted tree in Graphviz DOT format into the string buffer
dot_data=StringIO()
export_graphviz(classifier,out_file=dot_data,filled=True,rounded=True,special_characters=True)

# Draw graph from the DOT text
graph=pydotplus.graph_from_dot_data(dot_data.getvalue())

# Render the graph and create the png file
graph.write_png("tree.png")

# see the tree in .spyder3 folder

# see the tree in .spyder3 folder














Published by amol nalge

Hello, this is Amol Nalge. You can find the solutions to your questions in this post.

Leave a Reply

Fill in your details below or click an icon to log in:

WordPress.com Logo

You are commenting using your WordPress.com account. Log Out /  Change )

Twitter picture

You are commenting using your Twitter account. Log Out /  Change )

Facebook photo

You are commenting using your Facebook account. Log Out /  Change )

Connecting to %s

%d bloggers like this: