Multiple Linear Regression
print "---multiple regression---" from linear_algebra import dot from gradient_descent import minimize_stochastic from simple_linear_regression import total_sum_of_squares def predict(x_i, beta): return dot(x_i, beta) def error(x_i, y_i, beta): return y_i - predict(x_i, beta) def squared_error(x_i, y_i, beta): return error(x_i, y_i, beta) ** 2 def squared_error_gradient(x_i, y_i, beta): """the gradient corresponding to the ith squared error term""" return [-2 * x_ij * error(x_i, y_i, beta) for x_ij in x_i] def estimate_beta(x, y): beta_initial = [random.random() for x_i in x[0]] return minimize_stochastic(squared_error, squared_error_gradient, x, y, beta_initial, 0.001) def multiple_r_squared(x, y, beta): sum_of_squared_errors = sum(error(x_i, y_i, beta) ** 2 for x_i, y_i in zip(x, y)) return 1.0 - sum_of_squared_errors / total_sum_of_squares(y) <span class="Web-H" style="font-family: 'courier new', courier, monospace;"># data set</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">x = [[1,49,4,0],[1,41,9,0],[1,40,8,0],[1,25,6,0],[1,21,1,0],[1,21,0,0],[1,19,3,0],[1,19,0,0],[1,18,9,0],[1,18,8,0],[1,16,4,0],[1,15,3,0],[1,15,0,0],[1,15,2,0],[1,15,7,0],[1,14,0,0],[1,14,1,0],[1,13,1,0],[1,13,7,0],[1,13,4,0],[1,13,2,0],[1,12,5,0],[1,12,0,0],[1,11,9,0],[1,10,9,0],[1,10,1,0],[1,10,1,0],[1,10,7,0],[1,10,9,0],[1,10,1,0],[1,10,6,0],[1,10,6,0],[1,10,8,0],[1,10,10,0],[1,10,6,0],[1,10,0,0],[1,10,5,0],[1,10,3,0],[1,10,4,0],[1,9,9,0],[1,9,9,0],[1,9,0,0],[1,9,0,0],[1,9,6,0],[1,9,10,0],[1,9,8,0],[1,9,5,0],[1,9,2,0],[1,9,9,0],[1,9,10,0],[1,9,7,0],[1,9,2,0],[1,9,0,0],[1,9,4,0],[1,9,6,0],[1,9,4,0],[1,9,7,0],[1,8,3,0],[1,8,2,0],[1,8,4,0],[1,8,9,0],[1,8,2,0],[1,8,3,0],[1,8,5,0],[1,8,8,0],[1,8,0,0],[1,8,9,0],[1,8,10,0],[1,8,5,0],[1,8,5,0],[1,7,5,0],[1,7,5,0],[1,7,0,0],[1,7,2,0],[1,7,8,0],[1,7,10,0],[1,7,5,0],[1,7,3,0],[1,7,3,0],[1,7,6,0],[1,7,7,0],[1,7,7,0],[1,7,9,0],[1,7,3,0],[1,7,8,0],[1,6,4,0],[1,6,6,0],[1,6,4,0],[1,6,9,0],[1,6,0,0],[1,6,1,0],[1,6,4,0],[1,6,1,0],[1,6,0,0],[1,6,7,0],[1,6,0,0],[1,6,8,0],[1,6,4,0],[1,6,2,1],[1,6,1,1],[1,6,3,1],[1,6,6,1],[1,6,4,1],[1,6,4,1],[1,6,1,1],[1,6,3,1],[1,6,4,1],[1,5,1,1],[1,5,9,1],[1,5,4,1],[1,5,6,1],[1,5,4,1],[1,5,4,1],[1,5,10,1],[1,5,5,1],[1,5,2,1],[1,5,4,1],[1,5,4,1],[1,5,9,1],[1,5,3,1],[1,5,10,1],[1,5,2,1],[1,5,2,1],[1,5,9,1],[1,4,8,1],[1,4,6,1],[1,4,0,1],[1,4,10,1],[1,4,5,1],[1,4,10,1],[1,4,9,1],[1,4,1,1],[1,4,4,1],[1,4,4,1],[1,4,0,1],[1,4,3,1],[1,4,1,1],[1,4,3,1],[1,4,2,1],[1,4,4,1],[1,4,4,1],[1,4,8,1],[1,4,2,1],[1,4,4,1],[1,3,2,1],[1,3,6,1],[1,3,4,1],[1,3,7,1],[1,3,4,1],[1,3,1,1],[1,3,10,1],[1,3,3,1],[1,3,4,1],[1,3,7,1],[1,3,5,1],[1,3,6,1],[1,3,1,1],[1,3,6,1],[1,3,10,1],[1,3,2,1],[1,3,4,1],[1,3,2,1],[1,3,1,1],[1,3,5,1],[1,2,4,1],[1,2,2,1],[1,2,8,1],[1,2,3,1],[1,2,1,1],[1,2,9,1],[1,2,10,1],[1,2,9,1],[1,2,4,1],[1,2,5,1],[1,2,0,1],[1,2,9,1],[1,2,9,1],[1,2,0,1],[1,2,1,1],[1,2,1,1],[1,2,4,1],[1,1,0,1],[1,1,2,1],[1,1,2,1],[1,1,5,1],[1,1,3,1],[1,1,10,1],[1,1,6,1],[1,1,0,1],[1,1,8,1],[1,1,6,1],[1,1,4,1],[1,1,9,1],[1,1,9,1],[1,1,4,1],[1,1,2,1],[1,1,9,1],[1,1,0,1],[1,1,8,1],[1,1,6,1],[1,1,1,1],[1,1,1,1],[1,1,5,1]]</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">daily_minutes_good = 
[68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">random.seed(0)</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">beta = estimate_beta(x, daily_minutes_good) # [30.63, 0.972, -1.868, 0.911]</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;"> # beta是多元線性的參數</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print "beta", beta</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print "r-squared", multiple_r_squared(x, daily_minutes_good, beta)</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;"># 自己給的資料集</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">x1=[1,30,0,1]</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print predict(x1,beta)</span> |
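estimate_beta relies on stochastic gradient descent, so the coefficients it returns are only approximate. As a sanity check, the closed-form least-squares solution can be compared against the SGD estimate. A minimal sketch, assuming NumPy is installed (it is not used anywhere else in this section), with beta_exact a name introduced here:

import numpy as np

X = np.array(x)                        # the design matrix from the list above
Y = np.array(daily_minutes_good)
beta_exact = np.linalg.lstsq(X, Y)[0]  # minimizes ||X.b - Y||^2 exactly
print beta_exact                       # should be close to [30.63, 0.972, -1.868, 0.911]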
Logistic Regression
print "---Logistic_regression---" from working_with_data import rescale from linear_algebra import dot, vector_add from machine_learning import train_test_split from functools import partial from gradient_descent import maximize_stochastic,maximize_batch def logistic(x): return 1.0 / (1 + math.exp(-x)) def logistic_log_likelihood_i(x_i, y_i, beta): if y_i == 1: return math.log(logistic(dot(x_i, beta))) else: return math.log(1 - logistic(dot(x_i, beta))) def logistic_log_likelihood(x, y, beta): return sum(logistic_log_likelihood_i(x_i, y_i, beta) for x_i, y_i in zip(x, y)) def logistic_log_partial_ij(x_i, y_i, beta, j): """here i is the index of the data point, j the index of the derivative""" return (y_i - logistic(dot(x_i, beta))) * x_i[j] def logistic_log_gradient_i(x_i, y_i, beta): """the gradient of the log likelihood corresponding to the i-th data point""" return [logistic_log_partial_ij(x_i, y_i, beta, j) for j, _ in enumerate(beta)] def logistic_log_gradient(x, y, beta): return reduce(vector_add, [logistic_log_gradient_i(x_i, y_i, beta) for x_i, y_i in zip(x, y)]) # 資料集 data = [(0.7,48000,1),(1.9,48000,0),(2.5,60000,1),(4.2,63000,0),(6,76000,0),(6.5,69000,0),(7.5,76000,0),(8.1,88000,0),(8.7,83000,1),(10,83000,1),(0.8,43000,0),(1.8,60000,0),(10,79000,1),(6.1,76000,0),(1.4,50000,0),(9.1,92000,0),(5.8,75000,0),(5.2,69000,0),(1,56000,0),(6,67000,0),(4.9,74000,0),(6.4,63000,1),(6.2,82000,0),(3.3,58000,0),(9.3,90000,1),(5.5,57000,1),(9.1,102000,0),(2.4,54000,0),(8.2,65000,1),(5.3,82000,0),(9.8,107000,0),(1.8,64000,0),(0.6,46000,1),(0.8,48000,0),(8.6,84000,1),(0.6,45000,0),(0.5,30000,1),(7.3,89000,0),(2.5,48000,1),(5.6,76000,0),(7.4,77000,0),(2.7,56000,0),(0.7,48000,0),(1.2,42000,0),(0.2,32000,1),(4.7,56000,1),(2.8,44000,1),(7.6,78000,0),(1.1,63000,0),(8,79000,1),(2.7,56000,0),(6,52000,1),(4.6,56000,0),(2.5,51000,0),(5.7,71000,0),(2.9,65000,0),(1.1,33000,1),(3,62000,0),(4,71000,0),(2.4,61000,0),(7.5,75000,0),(9.7,81000,1),(3.2,62000,0),(7.9,88000,0),(4.7,44000,1),(2.5,55000,0),(1.6,41000,0),(6.7,64000,1),(6.9,66000,1),(7.9,78000,1),(8.1,102000,0),(5.3,48000,1),(8.5,66000,1),(0.2,56000,0),(6,69000,0),(7.5,77000,0),(8,86000,0),(4.4,68000,0),(4.9,75000,0),(1.5,60000,0),(2.2,50000,0),(3.4,49000,1),(4.2,70000,0),(7.7,98000,0),(8.2,85000,0),(5.4,88000,0),(0.1,46000,0),(1.5,37000,0),(6.3,86000,0),(3.7,57000,0),(8.4,85000,0),(2,42000,0),(5.8,69000,1),(2.7,64000,0),(3.1,63000,0),(1.9,48000,0),(10,72000,1),(0.2,45000,0),(8.6,95000,0),(1.5,64000,0),(9.8,95000,0),(5.3,65000,0),(7.5,80000,0),(9.9,91000,0),(9.7,50000,1),(2.8,68000,0),(3.6,58000,0),(3.9,74000,0),(4.4,76000,0),(2.5,49000,0),(7.2,81000,0),(5.2,60000,1),(2.4,62000,0),(8.9,94000,0),(2.4,63000,0),(6.8,69000,1),(6.5,77000,0),(7,86000,0),(9.4,94000,0),(7.8,72000,1),(0.2,53000,0),(10,97000,0),(5.5,65000,0),(7.7,71000,1),(8.1,66000,1),(9.8,91000,0),(8,84000,0),(2.7,55000,0),(2.8,62000,0),(9.4,79000,0),(2.5,57000,0),(7.4,70000,1),(2.1,47000,0),(5.3,62000,1),(6.3,79000,0),(6.8,58000,1),(5.7,80000,0),(2.2,61000,0),(4.8,62000,0),(3.7,64000,0),(4.1,85000,0),(2.3,51000,0),(3.5,58000,0),(0.9,43000,0),(0.9,54000,0),(4.5,74000,0),(6.5,55000,1),(4.1,41000,1),(7.1,73000,0),(1.1,66000,0),(9.1,81000,1),(8,69000,1),(7.3,72000,1),(3.3,50000,0),(3.9,58000,0),(2.6,49000,0),(1.6,78000,0),(0.7,56000,0),(2.1,36000,1),(7.5,90000,0),(4.8,59000,1),(8.9,95000,0),(6.2,72000,0),(6.3,63000,0),(9.1,100000,0),(7.3,61000,1),(5.6,74000,0),(0.5,66000,0),(1.1,59000,0),(5.1,61000,0),(6.2,70000,0),(6.6,56000,1),(6.3,76000,0),(6.5,78000,0),(5.1,59000,0),(9.5,74000,1),(4.5,64000,0),(
2,54000,0),(1,52000,0),(4,69000,0),(6.5,76000,0),(3,60000,0),(4.5,63000,0),(7.8,70000,0),(3.9,60000,1),(0.8,51000,0),(4.2,78000,0),(1.1,54000,0),(6.2,60000,0),(2.9,59000,0),(2.1,52000,0),(8.2,87000,0),(4.8,73000,0),(2.2,42000,1),(9.1,98000,0),(6.5,84000,0),(6.9,73000,0),(5.1,72000,0),(9.1,69000,1),(9.8,79000,1),] data = map(list, data) # change tuples to lists x = [[1] + row[:2] for row in data] # each element is [1, experience, salary] y = [row[2] for row in data] # each element is paid_account print "linear regression:" rescaled_x = rescale(x) beta = estimate_beta(rescaled_x, y) print beta print "logistic regression:" random.seed(0) x_train, x_test, y_train, y_test = train_test_split(rescaled_x, y, 0.33) # want to maximize log likelihood on the training data fn = partial(logistic_log_likelihood, x_train, y_train) gradient_fn = partial(logistic_log_gradient, x_train, y_train) # pick a random starting point beta_0 = [1, 1, 1] # and maximize using gradient descent beta_hat = maximize_batch(fn, gradient_fn, beta_0) print "beta_batch", beta_hat beta_0 = [1, 1, 1] beta_hat = maximize_stochastic(logistic_log_likelihood_i, logistic_log_gradient_i, x_train, y_train, beta_0) print "beta stochastic", beta_hat true_positives = false_positives = true_negatives = false_negatives = 0 for x_i, y_i in zip(x_test, y_test): predict = logistic(dot(beta_hat, x_i)) if y_i == 1 and predict >= 0.5: # TP: paid and we predict paid true_positives += 1 elif y_i == 1: # FN: paid and we predict unpaid false_negatives += 1 elif predict >= 0.5: # FP: unpaid and we predict paid false_positives += 1 else: # TN: unpaid and we predict unpaid true_negatives += 1 precision = true_positives / (true_positives + false_positives) recall = true_positives / (true_positives + false_negatives) print "precision", precision print "recall", recall |
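The per-point log likelihood being maximized is y_i*log(sigma(x_i.beta)) + (1-y_i)*log(1-sigma(x_i.beta)), and its j-th partial derivative is (y_i - sigma(x_i.beta))*x_ij, which is exactly what logistic_log_partial_ij returns. A quick numerical check of that analytic gradient against a central finite difference (a sketch only; x_check, y_check, and beta_check are made-up values, and the functions above are assumed to be in scope):

def finite_difference(f, beta, j, h=1e-6):
    """central difference of f in the j-th coordinate of beta"""
    beta_plus  = [b + (h if k == j else 0) for k, b in enumerate(beta)]
    beta_minus = [b - (h if k == j else 0) for k, b in enumerate(beta)]
    return (f(beta_plus) - f(beta_minus)) / (2 * h)

x_check, y_check, beta_check = [1, 0.5, -0.2], 1, [0.1, 0.2, 0.3]
for j in range(3):
    analytic = logistic_log_partial_ij(x_check, y_check, beta_check, j)
    numeric = finite_difference(lambda b: logistic_log_likelihood_i(x_check, y_check, b),
                                beta_check, j)
    print j, analytic, numeric   # the two columns should agree to several decimals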
SVM (Support Vector Machine)
<span class="Web-H" style="font-family: 'courier new', courier, monospace;">print "---SYM iris為例---"</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">from sklearn import datasets</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">from sklearn import svm</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">clf = svm.SVC(gamma=0.001, C=140.) # 前140筆資料做建模</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print clf # model</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">iris = datasets.load_iris()</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">clf.fit(iris.data[:-10], iris.target[:-10])</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">result=clf.predict(iris.data[-10:]) # 後10筆資料做預測</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print "predict:"</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print result</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print "actual:"</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print iris.data[-10:]# 後10筆</span> <span class="Web-H" style="font-family: 'courier new', courier, monospace;">print iris.target[-10]</span> |
SVM with the digits dataset
digits = datasets.load_digits()   # 8x8 handwritten-digit images, flattened to 64 features each
print digits.data[0]              # the first image as a 64-value feature vector
print digits.target[0:5]          # labels of the first five images
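A quick look at the dataset's shapes connects the two views of the same pixels (a small sketch; digits is assumed loaded as above):

print digits.data.shape     # (1797, 64): each sample is an 8x8 image flattened to 64 features
print digits.images.shape   # (1797, 8, 8): the same pixels kept as 2-D images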
SVM: plotting digits with matplotlib
import matplotlib.pyplot as plot

plot.figure(1, figsize=(3, 3))
plot.imshow(digits.images[2], cmap=plot.cm.gray_r, interpolation='nearest')
plot.show()

# for i in range(0, 5):
#     plot.imshow(digits.images[i], cmap=plot.cm.gray_r, interpolation='nearest')
#     plot.show()
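As a variation on the commented-out loop above, the first five digits can go side by side in a single figure instead of five separate windows (a sketch using the same pyplot calls):

for i in range(5):
    plot.subplot(1, 5, i + 1)   # 1 row, 5 columns, i-th panel
    plot.imshow(digits.images[i], cmap=plot.cm.gray_r, interpolation='nearest')
    plot.axis('off')
plot.show()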
SVM: classifying a digit
clf.fit(digits.data[:-1], digits.target[:-1])   # fit on all but the last sample
result = clf.predict(digits.data[2])            # newer scikit-learn requires digits.data[2].reshape(1, -1)
print result            # predicted digit: 2
print digits.target[2]  # actual digit: 2
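A single test sample (one that was in the training set, at that) says little about accuracy. A hedged sketch that instead refits on the first half of the data and scores the unseen second half:

n = len(digits.data) // 2
clf.fit(digits.data[:n], digits.target[:n])
print clf.score(digits.data[n:], digits.target[n:])   # mean accuracy on the second half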
PS: the red text is only a deprecation warning; it does not affect the result.
SVM: recognizing a custom digit
Draw a custom digit in EXCEL and convert that grid into an array; the built-in SVM digit recognizer can then classify it.
import numpy as np

clf.fit(digits.data, digits.target)   # this time fit on the full digits set

# the custom digit 9 as a 1x64 array, same layout as a digits.data row
nine = np.array([[0., 0., 0., 7., 9., 5., 17., 0., 0., 4., 6., 0., 0., 6., 9., 0., 0., 10., 0., 0., 0., 5., 11., 0., 0., 1., 9., 1., 5., 0., 15., 0., 0., 0., 3., 8., 6., 0., 14., 0., 0., 0., 0., 0., 0., 0., 13., 0., 0., 0., 0., 0., 0., 0., 8., 0., 0., 0., 0., 0., 0., 0., 9., 0.]])
print nine
print clf.predict(nine)   # predicted digit: 9

# the same digit as an 8x8 array, for plotting
nine2 = np.array([[0., 0., 0., 7., 9., 5., 17., 0.],
                  [0., 4., 6., 0., 0., 6., 9., 0.],
                  [0., 10., 0., 0., 0., 5., 11., 0.],
                  [0., 1., 9., 1., 5., 0., 15., 0.],
                  [0., 0., 3., 8., 6., 0., 14., 0.],
                  [0., 0., 0., 0., 0., 0., 13., 0.],
                  [0., 0., 0., 0., 0., 0., 8., 0.],
                  [0., 0., 0., 0., 0., 0., 9., 0.]])
plot.figure(1, figsize=(3, 3))
plot.imshow(nine2, cmap=plot.cm.gray_r, interpolation='nearest')
plot.show()
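Typing the 64 values twice invites transcription errors; the flattened row can instead be derived from the 8x8 plotting array (a small sketch; nine2 and clf as defined above, flat_nine a name introduced here):

flat_nine = nine2.reshape(1, 64)   # same layout as a digits.data row
print clf.predict(flat_nine)       # expected to match the earlier prediction of 9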
Drawing a custom digit in EXCEL:
(screenshot: the digit drawn in an EXCEL grid)
(screenshot: the grid converted into a matrix)
Running the classifier on it shows the predicted digit is 9.
Note: class notes from 計算方法分析與設計 (Analysis and Design of Computational Methods), 2017/05/18.