adding taylor series propagation & tests
TimSiebert1 committed Jan 10, 2024
1 parent d9c411f commit ec8a9ed
Showing 9 changed files with 751 additions and 1 deletion.
141 changes: 141 additions & 0 deletions src/ADOLC_wrap.jl
@@ -279,7 +279,148 @@ function gradient(func,
end
end


function check_input_taylor_coeff(num_independent,
                                  derivative_order::Int64;
                                  num_direction=nothing,
                                  init_series=nothing)
    if num_direction == 1
        if derivative_order == 1
            @assert(init_series !== nothing,
                    "For derivative_order=$(derivative_order) and
                    num_direction=$(num_direction) you have to provide an
                    init_series (as a vector) to initialize the Taylor series
                    propagation!")

            @assert(length(size(init_series)) == 1,
                    "Please provide init_series as a vector, i.e. with
                    length(size(init_series)) == 1 and not
                    $(length(size(init_series))).")

            @assert(size(init_series)[1] == num_independent,
                    "Please provide an init_series of length
                    $(num_independent) to initialize the Taylor series
                    propagation. Each entry corresponds to one independent
                    variable.")

        else
            if init_series !== nothing
                @assert(length(size(init_series)) == 2,
                        "Please provide the init_series with
                        length(size(init_series)) == 2 and not
                        $(length(size(init_series))).")
                @assert(size(init_series) == (num_independent, derivative_order),
                        "The init_series has the wrong shape: $(size(init_series)),
                        but it must be ($(num_independent), $(derivative_order)).
                        Please provide the Taylor coefficients of init_series up to
                        order derivative_order-1. In detail, init_series must have
                        the shape (num_independent, derivative_order), where the
                        i-th column corresponds to the (i-1)-th Taylor coefficient
                        of init_series.")
            end
        end
    end
    @assert(init_series !== nothing,
            "For derivative_order=$(derivative_order) you have to provide
            an init_series (as a vector) to initialize the Taylor series
            propagation!")
    @assert(length(size(init_series)) == derivative_order,
            "The input for init_series has the wrong shape! Please provide
            a vector of length $(num_independent) to initialize the Taylor
            series propagation. Each entry corresponds to one independent
            variable.")
end


function taylor_coeff(func,
init_point,
num_dependent,
num_independent,
derivative_order;
num_direction=nothing,
init_series=nothing)


a = [AdoubleCxx() for _ in eachindex(init_point)]
y0 = Vector{Float64}(undef, num_dependent)
tape_num = 1
keep = 0
trace_on(tape_num, keep)
a << init_point
b = func(a)
b >> y0
trace_off(0)

"""
check_input_taylor_coeff(num_independent,
derivative_order,
num_direction=num_direction,
init_series=init_series)
"""

if num_direction === nothing
    num_direction = num_independent
end

if num_direction == 1
if derivative_order == 1
y1 = Vector{Float64}(undef, num_dependent)
fos_forward(tape_num, num_dependent, num_independent, keep, init_point, init_series, y0, y1)
return y0, y1
else
Y = myalloc2(num_dependent, derivative_order)
hos_forward(tape_num,
num_dependent,
num_independent,
derivative_order,
0,
init_point,
init_series,
y0,
Y)
return y0, Y
end
else
if derivative_order == 1
if init_series === nothing
init_series = myalloc2(num_independent, num_direction)
for i in 1:num_independent
for j in 1:num_direction
init_series[i, j] = 0.0
if i == j
init_series[i, i] = 1.0
end
end
end
end
Y = myalloc2(num_dependent, num_direction)
fov_forward(tape_num, num_dependent, num_independent, num_direction, init_point, init_series, y0, Y)
return y0, Y
else
if init_series === nothing
init_series = myalloc3(num_independent, num_direction, derivative_order)
for i in 1:num_independent
for j in 1:num_direction
for k in 1:derivative_order
init_series[i, j, k] = 0.0
end
end
end
for k in 1:num_direction
init_series[k, k, 1] = 1.0
end
end
Y = myalloc3(num_dependent, num_direction, derivative_order)
hov_forward(tape_num,
num_dependent,
num_independent,
derivative_order,
num_direction,
init_point,
init_series,
y0,
Y)
return y0, Y
end
end
end

export abs_normal!, AbsNormalProblem, gradient, _gradient_tape_based, _gradient_tape_less
export _higher_order, tensor_address2, build_tensor, create_cxx_identity
export taylor_coeff, check_input_taylor_coeff

end # module ADOLC_wrap
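
For orientation, a minimal usage sketch of the new taylor_coeff driver (illustrative only, not part of this commit): the function f, the evaluation point x0, and the seed vector are made-up values, and the include path is assumed to mirror the test files further down.

include("../src/ADOLC_wrap.jl")   # assumed path, as in tests/test_gradient.jl
using .ADOLC_wrap

f(x) = [x[1] * x[2]^3, x[1] + x[3] / x[2]]   # illustrative two-output, three-input function
x0 = [-1.0, 2.0, -1.0]

# One direction, first order: the seed vector is propagated via fos_forward.
y0, y1 = taylor_coeff(f, x0, 2, 3, 1, num_direction=1, init_series=[1.0, 0.0, 0.0])

# Default directions (one per independent), first order: fov_forward with the
# identity seed that taylor_coeff builds when init_series is omitted.
y0, Y = taylor_coeff(f, x0, 2, 3, 1)

The second call assumes that num_direction falls back to num_independent, as handled at the top of taylor_coeff.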
20 changes: 19 additions & 1 deletion src/TODO
@@ -5,4 +5,22 @@ in tensor_address:
- move "+1" to c++

in gradient:
- rename

in all new drivers:
- write wrappers for c++ type

in AdoubleModule:
- write wrapper type for adouble-c++-type

in AdolcWrap:
- write abstract class for Tladouble & Tbadouble

in check_input_taylor_coeff:
- work on CXXMatrix
- add more checks

in taylor_coeff:
- test other modes
- test mixed directions

49 changes: 49 additions & 0 deletions tests/test_abs_normal.jl
@@ -0,0 +1,49 @@
include("../../external/ADOLC_wrap/src/ADOLC_wrap.jl")
using .ADOLC_wrap.AdoubleModule
using .ADOLC_wrap

enableMinMaxUsingAbs()

function func_eval(x)
return (max(-x[1]-x[2], -x[1]-x[2]+x[1]^2+x[2]^2-1) + max(-x[2]-x[3], -x[2]-x[3]+x[2]^2+x[3]^2-1))
end

x = [-0.500000, -0.500000, -0.500000]
n = length(x)
y = Vector{Float64}(undef, 1)
m = length(y)
a = [AdoubleCxx() for _ in 1:length(x)]
b = [AdoubleCxx() for _ in 1:length(y)]

tape_num = 0
trace_on(tape_num, 1)
a << x
b[1] = func_eval(a)
b >> y
trace_off(0)

abs_normal_problem =
AbsNormalProblem{Float64}(tape_num, m, n, x, y)


abs_normal!(abs_normal_problem, tape_num)

using Test
@test abs_normal_problem.Y[1, 1] == -1.5
@test abs_normal_problem.Y[1, 2] == -3.0
@test abs_normal_problem.Y[1, 3] == -1.5

@test abs_normal_problem.J[1, 1] == 0.5
@test abs_normal_problem.J[1, 2] == 0.5

@test abs_normal_problem.Z[1, 1] == -1.0
@test abs_normal_problem.Z[1, 2] == -1.0
@test abs_normal_problem.Z[1, 3] == 0.0
@test abs_normal_problem.Z[2, 1] == 0.0
@test abs_normal_problem.Z[2, 2] == -1.0
@test abs_normal_problem.Z[2, 3] == -1.0

@test abs_normal_problem.L[1, 1] == 0.0
@test abs_normal_problem.L[1, 2] == 0.0
@test abs_normal_problem.L[2, 1] == 0.0
@test abs_normal_problem.L[2, 2] == 0.0
76 changes: 76 additions & 0 deletions tests/test_gradient.jl
@@ -0,0 +1,76 @@
include("../src/ADOLC_wrap.jl")
using .ADOLC_wrap
using Test
# Chained LQ

function chained_lq(x)
return (
max(-x[1] - x[2], -x[1] - x[2] + x[1]^2 + x[2]^2 - 1) +
max(-x[2] - x[3], -x[2] - x[3] + x[2]^2 + x[3]^2 - 1)
)
end

x = [0.0, 2.0, -1.0]
g = gradient(chained_lq, x, 1)

@test g[1]==-1.0
@test g[2]==6.0
@test g[3]==-3.0



################################# reverse mode test ###################################


function func(x)
return [
x[1] * x[2]^3,
x[1] + x[3] / x[2]
]
end

init_point = [-1.0, 2.0, -1.0]
num_dependent = 2
Z = gradient(func, init_point, num_dependent, mode=:tape_based)

@test Z[1, 1] == 8.0
@test Z[2, 1] == 1.0
@test Z[1, 2] == -12.0
@test Z[2, 2] == 0.25
@test Z[1, 3] == 0.0
@test Z[2, 3] == 0.5




Z = gradient(func, init_point, num_dependent, mode=:tape_based, derivative_order=2)


@test Z[1][1, 1, 1] == 8.0
@test Z[1][1, 2, 1] == -12.0
@test Z[1][1, 3, 1] == 0.0
@test Z[1][1, 1, 2] == 0.0
@test Z[1][1, 2, 2] == 12.0
@test Z[1][1, 3, 2] == 0.0

@test Z[1][2, 1, 1] == 1.0
@test Z[1][2, 2, 1] == 0.25
@test Z[1][2, 3, 1] == 0.5
@test Z[1][2, 1, 2] == 0.0
@test Z[1][2, 2, 2] == 0.0
@test Z[1][2, 3, 2] == 0.0


@test Z[2][1, 1, 1] == 8.0
@test Z[2][1, 2, 1] == -12.0
@test Z[2][1, 3, 1] == 0.0
@test Z[2][1, 1, 2] == 12.0
@test Z[2][1, 2, 2] == -12.0
@test Z[2][1, 3, 2] == 0.0

@test Z[2][2, 1, 1] == 1.0
@test Z[2][2, 2, 1] == 0.25
@test Z[2][2, 3, 1] == 0.5
@test Z[2][2, 1, 2] == 0.0
@test Z[2][2, 2, 2] ==
@test Z[2][2, 3, 2] == 0.0
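
As a sanity check (illustrative only, not part of this commit), the first-order expectations above agree with the analytic derivatives at the test points:

# chained_lq at x = [0.0, 2.0, -1.0]: both max() calls take the quadratic branch
# (-2 < 1 and -1 < 3), so the gradient is
#   (-1 + 2*x[1], -1 + 2*x[2], 0) + (0, -1 + 2*x[2], -1 + 2*x[3]) = (-1.0, 6.0, -3.0),
# matching g above.

# Jacobian of func at init_point = [-1.0, 2.0, -1.0]:
#   row 1: d(x[1]*x[2]^3)      = (x[2]^3, 3*x[1]*x[2]^2, 0) = (8.0, -12.0, 0.0)
#   row 2: d(x[1] + x[3]/x[2]) = (1, -x[3]/x[2]^2, 1/x[2])  = (1.0, 0.25, 0.5)
J_analytic = [8.0 -12.0 0.0; 1.0 0.25 0.5]   # matches the first-order Z entries tested above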
29 changes: 29 additions & 0 deletions tests/test_gradient_split.jl
@@ -0,0 +1,29 @@
include("../src/ADOLC_wrap.jl")
using .ADOLC_wrap
using .ADOLC_wrap.TladoubleModule
using .ADOLC_wrap.Adouble
using Test

# Chained LQ

function chained_lq(x)
return (
max(-x[1] - x[2], -x[1] - x[2] + x[1]^2 + x[2]^2 - 1) +
max(-x[2] - x[3], -x[2] - x[3] + x[2]^2 + x[3]^2 - 1)
)
end

x = [0.0, 2.0, -1.0]
g = Main.gradient(chained_lq, x, 1, switch_point=100)
g1 = Main.gradient(chained_lq, x, 1, switch_point=100, mode=:tape_based)


@test g[1]==-1.0
@test g[2]==6.0
@test g[3]==-3.0

@test g[1]==g1[1]
@test g[2]==g1[2]
@test g[3]==g1[3]


Empty file added tests/test_hess_vec.jl
Empty file.
