back_prop.py
import numpy as np
from activation_function_forward_backward import *


def linear_backward(dZ, cache):
    """
    Implement the linear portion of backward propagation for a single layer.

    Arguments:
    dZ -- gradient of the cost with respect to the linear output Z of the current layer
    cache -- tuple of values (A_prev, W, b) from forward propagation in the current layer

    Returns:
    dA_prev -- gradient of the cost with respect to the activation of the previous layer, same shape as A_prev
    dW -- gradient of the cost with respect to W, same shape as W
    db -- gradient of the cost with respect to b, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]                              # number of examples in the batch
    dW = 1 / m * np.dot(dZ, A_prev.T)                # gradient of the weights
    db = 1 / m * np.sum(dZ, axis=1, keepdims=True)   # gradient of the biases
    dA_prev = np.dot(W.T, dZ)                        # gradient passed back to the previous layer
    return dA_prev, dW, db


def linear_activation_backward(dA, cache, activation):
    """
    Implement backward propagation for a LINEAR -> ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer
    cache -- tuple (linear_cache, activation_cache) stored during forward propagation
    activation -- activation used in this layer: "relu" or "sigmoid"

    Returns:
    dA_prev, dW, db -- gradients as returned by linear_backward
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    # Gradients w.r.t. the previous layer's activation and this layer's parameters
    dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
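

# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): the shapes below are
# assumed for illustration only -- a layer with 3 units, 4 input features, and
# a batch of m = 5 examples. The cache is built by hand here instead of being
# taken from a forward pass.
if __name__ == "__main__":
    np.random.seed(1)
    A_prev = np.random.randn(4, 5)   # activations from the previous layer, shape (n_prev, m)
    W = np.random.randn(3, 4)        # weights of the current layer, shape (n, n_prev)
    b = np.random.randn(3, 1)        # biases of the current layer, shape (n, 1)
    dZ = np.random.randn(3, 5)       # upstream gradient w.r.t. Z, shape (n, m)
    dA_prev, dW, db = linear_backward(dZ, (A_prev, W, b))
    print("dA_prev:", dA_prev.shape)  # expected (4, 5)
    print("dW:", dW.shape)            # expected (3, 4)
    print("db:", db.shape)            # expected (3, 1)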