Removed @time; julia-first.jl makes plots without variance at the moment since predict can't return it.
ericagol committed Sep 19, 2018
1 parent dd9798c commit bcf350a
Showing 2 changed files with 82 additions and 7 deletions.
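Editor's summary (not part of the commit): the example's prediction step changes from mean-plus-variance to mean-only, so the shaded 1-sigma band is dropped. Both call forms below are taken verbatim from the diff:

# Before this commit:
mu, variance = celerite.predict_full(gp, y, true_t, return_var=true)
sigma = sqrt(variance)

# After this commit (variance unavailable for now):
mu = celerite.predict_full(gp, y, true_t, return_cov=false, return_var=false)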
64 changes: 58 additions & 6 deletions examples/julia-first.jl
@@ -1,5 +1,5 @@
-import Optim
-import PyPlot
+using Optim
+using PyPlot
#import celerite
include("../src/celerite.jl")

@@ -13,6 +13,7 @@ y = 0.2*(t-5.0) + sin.(3.0*t + 0.1*(t-5.0).^2) + yerr .* randn(length(t))
true_t = linspace(0, 10, 5000)
true_y = 0.2*(true_t-5) + sin.(3*true_t + 0.1*(true_t-5).^2)

+PyPlot.clf()
PyPlot.plot(true_t, true_y, "k", lw=1.5, alpha=0.3)
PyPlot.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
PyPlot.xlabel("x")
@@ -37,14 +38,65 @@ gp = celerite.Celerite(kernel)
celerite.compute!(gp, t, yerr)
celerite.log_likelihood(gp, y)

-mu, variance = celerite.predict_full(gp, y, true_t, return_var=true)
-sigma = sqrt(variance)
+#mu, variance = celerite.predict_full(gp, y, true_t, return_var=true)
+mu = celerite.predict_full(gp, y, true_t, return_cov=false, return_var=false)
+#sigma = sqrt(variance)

PyPlot.plot(true_t, true_y, "k", lw=1.5, alpha=0.3,label="Simulated data")
PyPlot.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
PyPlot.plot(true_t, mu, "g",label="Initial kernel")
#PyPlot.fill_between(true_t, mu+sigma, mu-sigma, color="g", alpha=0.3)
PyPlot.xlabel("x")
PyPlot.ylabel("y")
PyPlot.xlim(0, 10)
PyPlot.ylim(-2.5, 2.5);

vector = celerite.get_parameter_vector(gp.kernel)
mask = ones(Bool, length(vector))
mask[2] = false # Don't fit for the first Q
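# Negative log-likelihood as a function of the free (unmasked) parameters: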
function nll(params)
vector[mask] = params
celerite.set_parameter_vector!(gp.kernel, vector)
celerite.compute!(gp, t, yerr)
return -celerite.log_likelihood(gp, y)
end;

result = Optim.optimize(nll, vector[mask], Optim.LBFGS())
result

vector[mask] = Optim.minimizer(result)
vector

celerite.set_parameter_vector!(gp.kernel, vector)

#mu, variance = celerite.predict(gp, y, true_t, return_var=true)
mu = celerite.predict(gp, y, true_t, return_cov = false, return_var=false)
#sigma = sqrt(variance)

PyPlot.plot(true_t, true_y, "k", lw=1.5, alpha=0.3)
PyPlot.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
-PyPlot.plot(true_t, mu, "g")
-PyPlot.fill_between(true_t, mu+sigma, mu-sigma, color="g", alpha=0.3)
+PyPlot.plot(true_t, mu, "r",label="Optimized kernel")
+#PyPlot.fill_between(true_t, mu+sigma, mu-sigma, color="g", alpha=0.3)
PyPlot.xlabel("x")
PyPlot.ylabel("y")
PyPlot.xlim(0, 10)
PyPlot.ylim(-2.5, 2.5);
PyPlot.legend(loc="upper left")

read(STDIN,Char) # pause until a character is entered before clearing the figure
PyPlot.clf()

omega = exp.(linspace(log(0.1), log(20), 5000))
psd = celerite.get_psd(gp.kernel, omega)

for term in gp.kernel.terms
PyPlot.plot(omega, celerite.get_psd(term, omega), "--", color="orange")
end
PyPlot.plot(omega, psd, color="orange")

PyPlot.yscale("log")
PyPlot.xscale("log")
PyPlot.xlim(omega[1], omega[end])
PyPlot.xlabel("omega")
PyPlot.ylabel("S(omega)");
PyPlot.plot(2*pi/2.0*[1.,1.],[1e-8,1e2])
25 changes: 24 additions & 1 deletion src/gp.jl
@@ -316,7 +316,8 @@ function compute!(gp::Celerite, x, yerr = 0.0)
coeffs = get_all_coefficients(gp.kernel)
var = yerr.^2 + zeros(Float64, length(x))
gp.n = length(x)
-@time gp.D,gp.W,gp.up,gp.phi = cholesky!(coeffs..., x, var, gp.W, gp.phi, gp.up, gp.D)
+# @time gp.D,gp.W,gp.up,gp.phi = cholesky!(coeffs..., x, var, gp.W, gp.phi, gp.up, gp.D)
+gp.D,gp.W,gp.up,gp.phi = cholesky!(coeffs..., x, var, gp.W, gp.phi, gp.up, gp.D)
gp.J = size(gp.W)[1]
# Compute the log determinant (square the determinant of the Cholesky factor):
gp.logdet = 2 * sum(log.(gp.D))
@@ -795,6 +796,28 @@ function predict_full_ldlt(gp::Celerite, y, t; return_cov=true, return_var=false
return mu, cov
end

function predict(gp::Celerite, y, t; return_cov=true, return_var=false)
# Prediction with covariance using full covariance matrix
# WARNING: do not use this with large datasets!
alpha = apply_inverse(gp, y)
Kxs = get_matrix(gp, t, gp.x)
mu = Kxs * alpha
if !return_cov && !return_var
return mu
end

KxsT = transpose(Kxs)
if return_var
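# Per-point predictive variance: k(0) - diag(Kxs * K^-1 * Kxs^T), column by column: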
v = -sum(KxsT .* apply_inverse(gp, KxsT), 1)
v = v + get_value(gp.kernel, [0.0])[1]
return mu, v[1, :]
end

cov = get_matrix(gp, t)
cov = cov - Kxs * apply_inverse(gp, KxsT)
return mu, cov
end
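
Editor's note: a minimal usage sketch of the new predict (not part of the commit), using gp, y, and true_t from examples/julia-first.jl above. The commit message indicates the variance return is not yet usable, so the example script sticks to the mean-only path:

# Mean only -- the path exercised by julia-first.jl:
mu = celerite.predict(gp, y, true_t, return_cov=false, return_var=false)

# Mean plus per-point variance (the return_var branch above; per the
# commit message, not yet relied on by the example):
# mu, variance = celerite.predict(gp, y, true_t, return_cov=false, return_var=true)

# Mean plus full predictive covariance -- dense, avoid for large datasets:
# mu, cov = celerite.predict(gp, y, true_t)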

function predict_full(gp::Celerite, y, t; return_cov=true, return_var=false)
# Prediction with covariance using full covariance matrix
# WARNING: do not use this with large datasets!
