dx works but that makes no sense?
@@ -58,12 +58,15 @@ def blocksparse_matmul_grad(op, dy):
 def run_shift():
 B, C, H, W = 1, 16, 4, 4
 R, S, F = 3, 3, 16
+np.random.seed(2)
 a = tf.placeholder(tf.float32, shape=[C, H, W, B])
 b = tf.placeholder(tf.float32, shape=[C, F])
-#hshift_h = np.random.randint(-R//2, R//2 + 1, size=C, dtype=np.int32)
-#hshift_w = np.random.randint(-S//2, R//2 + 1, size=C, dtype=np.int32)
-hshift_h = np.ones(C, dtype=np.int32)
-hshift_w = np.ones(C, dtype=np.int32)
+hshift_h = np.random.randint(- (R//2), R//2 + 1, size=C, dtype=np.int32)
+hshift_w = np.random.randint(- (S//2), R//2 + 1, size=C, dtype=np.int32)
+print(hshift_h)
+print(hshift_w)
+#hshift_h = np.ones(C, dtype=np.int32)
+#hshift_w = np.ones(C, dtype=np.int32)
 c = module.shift_conv(a, b, shift_h=tf.make_tensor_proto(hshift_h), shift_w=tf.make_tensor_proto(hshift_w))
 # Reference
 ha = np.random.rand(C, H, W, B)
@@ -75,13 +78,16 @@ def run_shift():
 extra_feed_dict={a: ha, b: hb})
 dx_t, dx_n = grads[0]
 dw_t, dw_n = grads[1]
-print(dx_t)
-print(dx_n)
-#print(np.max(dw_t - dw_n))
+print(dw_t)
+print(dw_n)
+print(np.max(dw_t - dw_n))
+#np.savetxt('diff.dat', dw_t - dw_n, fmt='%2.4f')
+#np.savetxt('theoretical.dat', dw_t, fmt='%2.4f')
+#np.savetxt('numerical.dat', dw_n, fmt='%2.4f')
 print(np.max(dx_t - dx_n))
-np.savetxt('diff.dat', dx_t - dx_n, fmt='%2.4f')
-np.savetxt('theoretical.dat', dx_t, fmt='%2.4f')
-np.savetxt('numerical.dat', dx_n, fmt='%2.4f')
+#np.savetxt('diff.dat', dx_t - dx_n, fmt='%2.4f')
+#np.savetxt('theoretical.dat', dx_t, fmt='%2.4f')
+#np.savetxt('numerical.dat', dx_n, fmt='%2.4f')
 # Run
 sess.run(tf.global_variables_initializer())
 result = sess.run([c], feed_dict = {a: ha,
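
The (dx_t, dx_n) and (dw_t, dw_n) pairs above are (theoretical, numerical) Jacobians returned by the TF 1.x gradient checker, one pair per input tensor. The sketch below shows that pattern in isolation; since module.shift_conv is a custom op not included in this diff, a plain tf.matmul and illustrative shapes stand in for it, and only the structure of the tf.test.compute_gradient call mirrors the test.

import numpy as np
import tensorflow as tf

# Minimal, self-contained sketch of the gradient-check pattern in run_shift (TF 1.x).
# ASSUMPTION: tf.matmul and the [4, 8] x [8, 3] shapes stand in for module.shift_conv.
np.random.seed(2)
a = tf.placeholder(tf.float32, shape=[4, 8])
b = tf.placeholder(tf.float32, shape=[8, 3])
c = tf.matmul(a, b)
ha = np.random.rand(4, 8)
hb = np.random.rand(8, 3)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One (theoretical, numerical) Jacobian pair is returned per input tensor.
    grads = tf.test.compute_gradient([a, b], [[4, 8], [8, 3]], c, [4, 3],
                                     extra_feed_dict={a: ha, b: hb})
    dx_t, dx_n = grads[0]   # d(c)/d(a): theoretical vs. numerical
    dw_t, dw_n = grads[1]   # d(c)/d(b): theoretical vs. numerical
    print(np.max(dx_t - dx_n))
    print(np.max(dw_t - dw_n))

The test prints the same max differences to judge whether each gradient matches its finite-difference estimate.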

@@ -287,15 +287,15 @@ else{
 if(ty_ == BPROP){
 os << R"(
 int32 rcwhc[TM] = rxc / ABS;
-int32 rcw[TM] = (rcwhc % AW);
+int32 rcw[TM] = rcwhc % AW;
 int32 rchc[TM] = rcwhc / AW;
-int32 rch[TM] = (rchc % AH);
+int32 rch[TM] = rchc % AH;
 int1 maskh[TM] = (rch >= pad_h) && (rch < (AH - pad_h));
 int1 maskw[TM] = (rcw >= pad_w) && (rcw < (AW - pad_w));
 int1 interior[TM, TN] = maskh[:, newaxis] && maskw[:, newaxis];
 __constant__ int32* pd[TN] = delta + ryc;
 fp32* shift_pc[TM, TN] = pc + (*pd)[newaxis, :];
-pc = interior ? shift_pc : pc;
+pc = interior ? pc : shift_pc;
 @checkc __atomic_add(pc, C);
 )";
 }
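
For readability, here is a rough NumPy rendering of the index and mask logic in the BPROP snippet above. It treats the fp32* tiles as plain integer offsets and assumes the kernel's ?: selects element-wise between the two pointer tiles, so it is only a reading aid under those assumptions, not the actual kernel.

import numpy as np

def bprop_pointer_select(rxc, ryc, pc, delta, ABS, AW, AH, pad_h, pad_w):
    """Rough NumPy sketch of the BPROP tile logic above.
    rxc: [TM] linear indices, ryc: [TN] indices into delta, pc: [TM, TN] offsets."""
    rcwhc = rxc // ABS                    # int32 rcwhc[TM] = rxc / ABS;
    rcw = rcwhc % AW                      # int32 rcw[TM] = rcwhc % AW;
    rchc = rcwhc // AW                    # int32 rchc[TM] = rcwhc / AW;
    rch = rchc % AH                       # int32 rch[TM] = rchc % AH;
    maskh = (rch >= pad_h) & (rch < AH - pad_h)
    maskw = (rcw >= pad_w) & (rcw < AW - pad_w)
    interior = (maskh & maskw)[:, None]   # both masks broadcast along the TN axis
    shift_pc = pc + delta[ryc][None, :]   # pc + (*pd)[newaxis, :] with pd = delta + ryc
    # pc = interior ? pc : shift_pc;  -- this commit flips the select, so interior
    # lanes keep the unshifted pointer and border lanes take the shifted one
    return np.where(interior, pc, shift_pc)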