Merge pull request #1202 from STEllAR-GROUP/fixing_1200_again
Renaming the gradient variable to prevent the error reported in #1200
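The exact cause reported in #1200 is not shown on this page; presumably the user-defined name gradient clashed with a same-named built-in Phylanx primitive, since PhySL primitives are visible under bare names. A minimal Python sketch of that kind of name shadowing (hypothetical names and data, not from the repository):

# Hypothetical illustration: rebinding a name that already refers to a
# library function hides that function for the rest of the scope.
from numpy import dot, gradient   # 'gradient' initially refers to numpy.gradient

transx = [[1.0, 3.0], [2.0, 4.0]]
error = [0.5, -0.5]

gradient = dot(transx, error)     # the result array now shadows numpy.gradient
# gradient([1.0, 2.0, 4.0])       # would now fail: the array is not callable

grad = dot(transx, error)         # renaming, as this commit does, avoids the clash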
hkaiser authored Jun 26, 2020
2 parents b8caf1b + af2479c · commit 835c007
Showing 4 changed files with 12 additions and 12 deletions.
6 changes: 3 additions & 3 deletions examples/algorithms/lra/lra.cpp
@@ -25,7 +25,7 @@ char const* const lra_code = R"(block(
    define(transx, transpose(x)),                // transx: [2, 30]
    define(pred, constant(0.0, shape(x, 0))),
    define(error, constant(0.0, shape(x, 0))),
-   define(gradient, constant(0.0, shape(x, 1))),
+   define(grad, constant(0.0, shape(x, 1))),
    define(step, 0),
    while(
        step < iterations,
@@ -34,9 +34,9 @@ char const* const lra_code = R"(block(
        // exp(-dot(x, weights)): [30], pred: [30]
        store(pred, sigmoid(dot(x, weights))),
        store(error, pred - y),                  // error: [30]
-       store(gradient, dot(transx, error)),     // gradient: [2]
+       store(grad, dot(transx, error)),         // grad: [2]
        parallel_block(
-           store(weights, weights - (alpha * gradient)),
+           store(weights, weights - (alpha * grad)),
            store(step, step + 1)
        )
    )
6 changes: 3 additions & 3 deletions examples/algorithms/lra/lra_csv.cpp
@@ -64,7 +64,7 @@ char const* const lra_code = R"(block(
    define(transx, transpose(x)),                // transx: [M, N]
    define(pred, constant(0.0, shape(x, 0))),
    define(error, constant(0.0, shape(x, 0))),
-   define(gradient, constant(0.0, shape(x, 1))),
+   define(grad, constant(0.0, shape(x, 1))),
    define(step, 0),
    while(
        step < iterations,
@@ -73,9 +73,9 @@ char const* const lra_code = R"(block(
        // exp(-dot(x, weights)): [N], pred: [N]
        store(pred, sigmoid(dot(x, weights))),
        store(error, pred - y),                  // error: [N]
-       store(gradient, dot(transx, error)),     // gradient: [M]
+       store(grad, dot(transx, error)),         // grad: [M]
        parallel_block(
-           store(weights, weights - (alpha * gradient)),
+           store(weights, weights - (alpha * grad)),
            store(step, step + 1)
        )
    )
6 changes: 3 additions & 3 deletions examples/algorithms/lra/phylanx_lra_csv.py
@@ -18,15 +18,15 @@ def lra(file_name, xlo1, xhi1, ylo1, yhi1, xlo2, xhi2, ylo2, yhi2, alpha,
    transx = transpose(x)
    pred = constant(0.0, shape(x, 0))
    error = constant(0.0, shape(x, 0))
-   gradient = constant(0.0, shape(x, 1))
+   grad = constant(0.0, shape(x, 1))
    step = 0
    while step < iterations:
        if enable_output:
            print("step: ", step, ", ", weights)
        pred = 1.0 / (1.0 + exp(-dot(x, weights)))
        error = pred - y
-       gradient = dot(transx, error)
-       weights = weights - (alpha * gradient)
+       grad = dot(transx, error)
+       weights = weights - (alpha * grad)
        step += 1
    return weights

6 changes: 3 additions & 3 deletions examples/algorithms/lra/phylanx_lra_csv_np.py
@@ -14,15 +14,15 @@ def lra(x, y, alpha, iterations, enable_output):
    transx = np.transpose(x)
    pred = constant(0.0, shape(x, 0))
    error = constant(0.0, shape(x, 0))
-   gradient = constant(0.0, shape(x, 1))
+   grad = constant(0.0, shape(x, 1))
    step = 0
    while step < iterations:
        if (enable_output):
            print("step: ", step, ", ", weights)
        pred = 1.0 / (1.0 + np.exp(-np.dot(x, weights)))
        error = pred - y
-       gradient = np.dot(transx, error)
-       weights = weights - (alpha * gradient)
+       grad = np.dot(transx, error)
+       weights = weights - (alpha * grad)
        step += 1
    return weights
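For context, the loop touched by this rename is plain batch gradient descent for logistic regression. Below is a self-contained NumPy sketch of the same update using the renamed grad variable; the zero-initialized weights, the synthetic data, and the trimmed lra signature are illustrative assumptions, not part of the repository.

# Sketch of the logistic-regression loop shown in the diff above (illustrative
# data and initialization, not from the Phylanx examples).
import numpy as np

def lra(x, y, alpha, iterations):
    weights = np.zeros(x.shape[1])
    transx = np.transpose(x)
    step = 0
    while step < iterations:
        pred = 1.0 / (1.0 + np.exp(-np.dot(x, weights)))   # sigmoid(x . w)
        error = pred - y
        grad = np.dot(transx, error)          # gradient of the log-loss
        weights = weights - (alpha * grad)    # descent step
        step += 1
    return weights

# Tiny synthetic example: two Gaussian clusters, two features.
rng = np.random.default_rng(0)
x = np.vstack([rng.normal(0.0, 1.0, (15, 2)), rng.normal(3.0, 1.0, (15, 2))])
y = np.concatenate([np.zeros(15), np.ones(15)])
print(lra(x, y, alpha=1e-2, iterations=750))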
