[DL] Coursera: DL Specialization C1W3A1
Planar data classification with one hidden layer
import numpy as np
import copy
import matplotlib.pyplot as plt
from testCases_v2 import *
from public_tests import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Load the dataset
X, Y = load_planar_dataset()
# 'X[0, :]' : the first feature of every example
# 'X[1, :]' : the second feature of every example
# 'c=Y' : color each point according to its label in Y
# 's=40' : marker size of 40
# 'cmap=plt.cm.Spectral' : color map
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
shape_X = X.shape
shape_Y = Y.shape
m = X.shape[1]
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
The shape of X is: (2, 400)
The shape of Y is: (1, 400)
I have m = 400 training examples!
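Throughout the assignment, each column of X is one training example (a 2-D point) and the matching column of Y holds its 0/1 label. A quick way to inspect a single example (my own check, not part of the assignment):
# Inspect one training example: a 2-D point and its binary label
print("first example:", X[:, 0], "label:", Y[0, 0])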
Simple Logistic Regression
# Fit a logistic regression classifier with sklearn's LogisticRegressionCV
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
# Plot the decision boundary learned by the model
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print the accuracy
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
Accuracy of logistic regression: 47 % (percentage of correctly labelled datapoints)
Neural Network model
# Compute the sizes of the input, hidden, and output layers of the network
def layer_sizes(X, Y):
    n_x = X.shape[0]  # size of the input layer
    n_h = 4           # size of the hidden layer (hard-coded to 4)
    n_y = Y.shape[0]  # size of the output layer
    return (n_x, n_h, n_y)
t_X, t_Y = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(t_X, t_Y)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
layer_sizes_test(layer_sizes)
The size of the input layer is: n_x = 5
The size of the hidden layer is: n_h = 4
The size of the output layer is: n_y = 2
# Initialize the network's weights (small random values) and biases (zeros)
def initialize_parameters(n_x, n_h, n_y):
    W1 = np.random.randn(n_h, n_x) * 0.01
    W2 = np.random.randn(n_y, n_h) * 0.01
    b1 = np.zeros((n_h, 1))
    b2 = np.zeros((n_y, 1))
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    return parameters
np.random.seed(2)
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
initialize_parameters_test(initialize_parameters)
W1 = [[-0.00416758 -0.00056267]
[-0.02136196 0.01640271]
[-0.01793436 -0.00841747]
[ 0.00502881 -0.01245288]]
b1 = [[0.]
[0.]
[0.]
[0.]]
W2 = [[-0.01057952 -0.00909008 0.00551454 0.02292208]]
b2 = [[0.]]
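The weights are initialized to small random values (scaled by 0.01) rather than zeros to break symmetry: hidden units that start with identical weights compute identical activations and receive identical gradients, so they can never learn different features. The biases can safely start at zero. A small illustration of the symmetry problem (my own sketch, not part of the assignment):
# Sketch: four hidden units with identical weight rows produce identical
# activations for any input, so gradient descent cannot tell them apart --
# this is the symmetry that the random initialization breaks
W1_sym = np.ones((4, 2)) * 0.01   # four identical rows
X_demo = np.random.randn(2, 5)    # five arbitrary 2-D inputs
print(np.tanh(np.dot(W1_sym, X_demo)))   # all four rows are the same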
# Forward propagation: tanh hidden layer followed by a sigmoid output unit
def forward_propagation(X, parameters):
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = 1 / (1 + np.exp(-Z2))  # sigmoid
    assert(A2.shape == (1, X.shape[1]))
    cache = {"Z1": Z1,
             "A1": A1,
             "Z2": Z2,
             "A2": A2}
    return A2, cache
t_X, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(t_X, parameters)
print("A2 = " + str(A2))
forward_propagation_test(forward_propagation)
A2 = [[0.21292656 0.21274673 0.21295976]]
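In the notation of the course, the cell above computes, for all m examples at once,
$Z^{[1]} = W^{[1]} X + b^{[1]}$, $A^{[1]} = \tanh(Z^{[1]})$,
$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$, $A^{[2]} = \sigma(Z^{[2]})$,
so $A^{[2]}$ has shape $(1, m)$: one predicted probability per example, which is exactly what the assert checks.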
# Compute the cross-entropy cost from the predictions A2 and the true labels Y
def compute_cost(A2, Y):
    m = Y.shape[1]  # number of examples
    logprobs = np.multiply(Y, np.log(A2)) + np.multiply((1 - Y), np.log(1 - A2))
    cost = - np.sum(logprobs) / m
    cost = float(np.squeeze(cost))  # make sure cost is a plain float
    return cost
A2, t_Y = compute_cost_test_case()
cost = compute_cost(A2, t_Y)
print("cost = " + str(compute_cost(A2, t_Y)))
compute_cost_test(compute_cost)
cost = 0.6930587610394646
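compute_cost implements the cross-entropy cost
$J = -\frac{1}{m}\sum_{i=1}^{m}\big[y^{(i)}\log a^{[2](i)} + (1-y^{(i)})\log(1-a^{[2](i)})\big]$.
A quick worked check (my own, not part of the assignment): a model that outputs 0.5 for every example has cost $-\log(0.5) = \log 2 \approx 0.693$ regardless of the labels, which is why the first cost printed during training below starts near 0.693.
# Worked check: constant 0.5 predictions give cost log(2), whatever the labels
A2_half = np.full((1, 3), 0.5)
Y_any = np.array([[1, 0, 1]])
print(compute_cost(A2_half, Y_any))   # ~0.6931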
# Backward propagation: compute the gradients of the cost w.r.t. the weights and biases
def backward_propagation(parameters, cache, X, Y):
    m = X.shape[1]
    W1 = parameters['W1']
    W2 = parameters['W2']
    A1 = cache['A1']
    A2 = cache['A2']
    dZ2 = A2 - Y
    dW2 = (1 / m) * np.dot(dZ2, A1.T)
    db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))  # tanh'(z) = 1 - tanh(z)^2
    dW1 = (1 / m) * np.dot(dZ1, X.T)
    db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
    grads = {"dW1": dW1,
             "db1": db1,
             "dW2": dW2,
             "db2": db2}
    return grads
parameters, cache, t_X, t_Y = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, t_X, t_Y)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
backward_propagation_test(backward_propagation)
dW1 = [[ 0.00301023 -0.00747267]
[ 0.00257968 -0.00641288]
[-0.00156892 0.003893 ]
[-0.00652037 0.01618243]]
db1 = [[ 0.00176201]
[ 0.00150995]
[-0.00091736]
[-0.00381422]]
dW2 = [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]]
db2 = [[-0.16655712]]
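Since $dZ^{[2]} = A^{[2]} - Y$ and the derivative of $\tanh(z)$ is $1 - \tanh(z)^2$ (hence the (1 - np.power(A1, 2)) factor in dZ1), these formulas can be verified numerically. A minimal sketch (my own check, not part of the assignment) comparing one entry of dW2 against a centered finite difference of the cost on the planar dataset:
# Gradient check sketch: perturb W2[0, 0] by +/- eps, recompute the cost, and
# compare the finite-difference slope with the analytic gradient
check_params = initialize_parameters(2, 4, 1)
A2_chk, cache_chk = forward_propagation(X, check_params)
grads_chk = backward_propagation(check_params, cache_chk, X, Y)
eps = 1e-7
p_plus, p_minus = copy.deepcopy(check_params), copy.deepcopy(check_params)
p_plus["W2"][0, 0] += eps
p_minus["W2"][0, 0] -= eps
cost_plus = compute_cost(forward_propagation(X, p_plus)[0], Y)
cost_minus = compute_cost(forward_propagation(X, p_minus)[0], Y)
print((cost_plus - cost_minus) / (2 * eps), grads_chk["dW2"][0, 0])   # should agree closely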
# Update the weights and biases with one step of gradient descent
def update_parameters(parameters, grads, learning_rate = 1.2):
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    dW1 = grads['dW1']
    db1 = grads['db1']
    dW2 = grads['dW2']
    db2 = grads['db2']
    W1 = W1 - learning_rate * dW1
    b1 = b1 - learning_rate * db1
    W2 = W2 - learning_rate * dW2
    b2 = b2 - learning_rate * db2
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
update_parameters_test(update_parameters)
W1 = [[-0.00643025 0.01936718]
[-0.02410458 0.03978052]
[-0.01653973 -0.02096177]
[ 0.01046864 -0.05990141]]
b1 = [[-1.02420756e-06]
[ 1.27373948e-05]
[ 8.32996807e-07]
[-3.20136836e-06]]
W2 = [[-0.01041081 -0.04463285 0.01758031 0.04747113]]
b2 = [[0.00010457]]
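Each call to update_parameters applies one step of batch gradient descent to every parameter,
$\theta \leftarrow \theta - \alpha \, \frac{\partial J}{\partial \theta}$,
with learning rate $\alpha = 1.2$ by default; too large an $\alpha$ can make the cost oscillate or diverge, while too small a value slows training down.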
# Build and train the full neural network model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]
    parameters = initialize_parameters(n_x, n_h, n_y)
    for i in range(0, num_iterations):
        # Forward propagation
        A2, cache = forward_propagation(X, parameters)
        # Compute the cost
        cost = compute_cost(A2, Y)
        # Backward propagation
        grads = backward_propagation(parameters, cache, X, Y)
        # Gradient descent parameter update
        parameters = update_parameters(parameters, grads)
        # Print the cost every 1000 iterations
        if print_cost and i % 1000 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
    return parameters
nn_model_test(nn_model)
Cost after iteration 0: 0.693086
Cost after iteration 1000: 0.000220
Cost after iteration 2000: 0.000108
Cost after iteration 3000: 0.000072
Cost after iteration 4000: 0.000054
Cost after iteration 5000: 0.000043
Cost after iteration 6000: 0.000036
Cost after iteration 7000: 0.000030
Cost after iteration 8000: 0.000027
Cost after iteration 9000: 0.000024
W1 = [[ 0.71392202 1.31281102]
[-0.76411243 -1.41967065]
[-0.75040545 -1.38857337]
[ 0.56495575 1.04857776]]
b1 = [[-0.0073536 ]
[ 0.01534663]
[ 0.01262938]
[ 0.00218135]]
W2 = [[ 2.82545815 -3.3063945 -3.16116615 1.8549574 ]]
b2 = [[0.00393452]]
# Use the trained parameters to predict 0/1 labels by thresholding A2 at 0.5
def predict(parameters, X):
    A2, cache = forward_propagation(X, parameters)
    predictions = (A2 > 0.5)
    return predictions
parameters, t_X = predict_test_case()
predictions = predict(parameters, t_X)
print("Predictions: " + str(predictions))
predict_test(predict)
Predictions: [[ True False True]]
# Build and train the neural network on the planar dataset
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
Cost after iteration 0: 0.693162
Cost after iteration 1000: 0.258625
Cost after iteration 2000: 0.239334
Cost after iteration 3000: 0.230802
Cost after iteration 4000: 0.225528
Cost after iteration 5000: 0.221845
Cost after iteration 6000: 0.219094
Cost after iteration 7000: 0.220661
Cost after iteration 8000: 0.219409
Cost after iteration 9000: 0.218485
# Compute and print the training accuracy
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
Accuracy: 90%
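With 4 hidden units the network reaches about 90% training accuracy, far above the 47% of logistic regression, because the tanh hidden layer can bend the decision boundary around the flower-shaped classes. A minimal sketch (my own extension, not part of the original post) for comparing a few hidden layer sizes:
# Sketch: train with several hidden layer sizes and compare training accuracy;
# larger n_h fits the petal shapes more tightly (and may eventually overfit)
for n_h_try in [1, 2, 4, 10]:
    params_try = nn_model(X, Y, n_h=n_h_try, num_iterations=5000)
    acc = float(np.mean(predict(params_try, X) == Y) * 100)
    print("n_h = %d: training accuracy = %.1f%%" % (n_h_try, acc))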