
Initial upload

master · Jurjen Rooze, 5 months ago · parent commit 8b7ba5a67f
  381  ContinuousG_OrgMin.R
  419  PDE_and_Particles_sim.R
  191  compiled_calls.R
  273  main_app3_agingmodel.R
   11  readme.txt

ContinuousG_OrgMin.R
@@ -0,0 +1,381 @@
#=============================================================================
# Remineralization with reactivity distribution
#
# Script corresponds to the second application in the manuscript.
# It produces figure 2 in the manuscript and some additional plots.
#
# Coded by J. Rooze
#=============================================================================
library(marelac)
library(ReacTran)
library(rootSolve)
#=============================================================================
# Units used in the program:
#=============================================================================
#normalization constants to make the model dimensionless
time_scale <- 1 / 1e-3 #time scale = 1/k [yr]
length_scale <- 15 * 1e-2 #[m]
#=============================================================================
# Model domain and grid definition
#=============================================================================
L <- 50 * 1e-2 / length_scale # depth of sediment domain [-]
N <- 50 # number of grid layers
grid <- setup.grid.1D(x.up = 0, L = L, N = N)
# A handy function for plotting profiles
plot_profile <- function(...) {
plot(y = grid$x.mid * length_scale, ylim = c(L,0) * length_scale, type = "l",
ylab = "depth (m)", ...)
}
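#usage sketch (illustrative; the actual calls appear in the plotting sections below):
#plot_profile(x = TOC * 100, xlab = "TOC (%)")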
#=============================================================================
# Model parameters:
#=============================================================================
PerSecToPerYr <- 365*24*60^2
dens_sed <- 2.6 * 1e6 #g/m3
SAR <- 0.001 * (time_scale / length_scale) #[m/yr] [yr/m] = [-]
parms <- c(Db_max = 1e-10 * PerSecToPerYr * time_scale / length_scale^2, #[m2/yr] [yr/m^2] = [-]
Db_efold = 0.02 / length_scale, #[m]/ [m] = [-]
TOC.swi = 3/100) #[g C / g sed]
Db <- setup.prop.1D(func = p.exp, grid = grid, y.0 = parms["Db_max"],
y.inf = 0, x.att = parms["Db_efold"])
#=============================================================================
# Distribution functions
#=============================================================================
initial_uniform_dist <- function(k, k_min, k_max, val = 1) {
val * (k >= k_min) * (k <= k_max)
}
#The distribution function
#alternative expressions also work well, for instance: exp(b0 + b1 * k + b2*k^2)
polynom_dist <- function(k, b0, b1, b2) {
C0 * exp(b0 * sqrt(k) + b1 * k) + b2 * k * exp(k)
}
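#a minimal sketch of the alternative expression mentioned above (illustrative only,
#hypothetical name, not used below; it would take the place of polynom_dist):
#polynom_dist_alt <- function(k, b0, b1, b2) {
#  exp(b0 + b1 * k + b2 * k^2)
#}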
#mom = integer for moment
polynom_highermom <- function(k, k_mean, mom, b0, b1, b2) {
if (mom == 1) {
return(k * polynom_dist(k, b0, b1, b2))
} else {
return((k-k_mean)^mom * polynom_dist(k, b0, b1, b2))
}
}
multiroot_func <- function(x, k_min, k_max, conc, k_mean, k_var) {
b0 <- x[1]
b1 <- x[2]
b2 <- x[3]
F1 <- conc - integrate(polynom_dist, b0 = b0, b1 = b1, b2 = b2,
lower = k_min, upper = k_max)$value
F2 <- k_mean - integrate(polynom_highermom, b0 = b0, b1 = b1, b2 = b2,
mom = 1, k_mean = k_mean,
lower = k_min, upper = k_max)$value / conc
F3 <- k_var - integrate(polynom_highermom, b0 = b0, b1 = b1, b2 = b2,
mom = 2, k_mean = k_mean,
lower = k_min, upper = k_max)$value / conc
return(c(F1, F2, F3))
}
polynom_dist_coefs <- function(conc, k_mean, k_var, k_min, k_max, guess, ...) {
multiroot(multiroot_func, k_min = k_min, k_max = k_max,
conc = conc, k_mean = k_mean, k_var = k_var,
start = guess, ...)
}
#function to numerically integrate distribution function
int_kpowC <- function(k_min = 0, k_max = Inf, b0, b1, b2, pow_ord) {
integrate(f = function(k, ...) {
k^pow_ord * polynom_dist(k, ...)
} , lower = k_min, upper = k_max, b0 = b0, b1 = b1, b2 = b2)$value
}
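#illustrative use: pow_ord = 0 recovers the total concentration of the fitted
#distribution, pow_ord = 1 the (non-normalized) first moment, e.g.
#int_kpowC(k_min, k_max, b0 = coefs_init[1], b1 = coefs_init[2],
#          b2 = coefs_init[3], pow_ord = 0)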
#=============================================================================
# Choose distribution (with values for working examples)
#=============================================================================
k_min <- 0 * time_scale
k_max <- 5e-2 * time_scale
conc.swi <- integrate(initial_uniform_dist,
k_min = k_min, k_max = k_max,
lower = k_min, upper = k_max)$value
#correct for concentration in parms
uniform_conc <- parms[["TOC.swi"]]/conc.swi
conc.swi <- integrate(initial_uniform_dist,
k_min = k_min, k_max = k_max,
lower = k_min, upper = k_max,
val = uniform_conc)$value
#check: k_min + (k_max - k_min) * 0.5
kmean.swi <- integrate(function(k, k_min, k_max)
k * initial_uniform_dist(k, k_min, k_max, val = uniform_conc),
k_min = k_min, k_max = k_max,
lower = k_min, upper = k_max)$value / conc.swi
#check: sd(seq(k_min, k_max, length = 1e6))^2
kvar.swi <- integrate(function(k, k_min, k_max, k_mean) {
(k-k_mean)^2 * initial_uniform_dist(k, k_min, k_max, val = uniform_conc)
}, k_min = k_min, k_max = k_max, k_mean = kmean.swi,
lower = k_min, upper = k_max)$value / conc.swi
guess.swi <- c(0,0,0) #guess for a uniform distribution
C0 <- uniform_conc #needed for polynom_dist function
#=============================================================================
# Check initial distribution
#=============================================================================
#calculate coefficients of polynomial distribution
coefs_init <- polynom_dist_coefs(conc = conc.swi, k_mean = kmean.swi, k_var = kvar.swi,
k_min = k_min, k_max = k_max, guess = guess.swi,
rtol = 1e-8 )$root
kaxs <- seq(k_min, k_max, length = 100)
recon_dist <- polynom_dist(k = kaxs, b0 = coefs_init[1], b1 = coefs_init[2], b2 = coefs_init[3])
#plot initial distribution
plot(y = recon_dist, x = kaxs/time_scale,
xlab = "k [1/y]", ylab = "dens", type = "l", ylim = c(0, max(recon_dist)))
abline(v = kmean.swi/time_scale, col = 3, lty = 2) #mean
#=============================================================================
# Model formulation
#=============================================================================
model <- function (t, state, parms) {
with(as.list(parms), {
# Initialization of state variables
TOC <- state[1:N] #g C / g sed
Ck <- state[(N+1):(2*N)]
Cvar <- state[(2*N+1):(3*N)]
Cage <- state[(3*N+1):(4*N)]
k_mean <- Ck / TOC
k_var <- Cvar / TOC
# Transport and reaction terms
tranTOC <- tran.1D(C = TOC, C.up = TOC.swi, D = Db$int,
v = SAR, VF = 1, dx = grid)
tranCk <- tran.1D(C = Ck, C.up = TOC.swi * kmean.swi, D = Db$int,
v = SAR, VF = 1, dx = grid)
tranCvar <- tran.1D(C = Cvar, C.up = TOC.swi * kvar.swi, D = Db$int,
v = SAR, VF = 1, dx = grid)
tranCage <- tran.1D(C = Cage, C.up = 0, D = Db$int,
v = SAR, VF = 1, dx = grid)
#extra transport term for the variance
kmean_bnd <- c(kmean.swi,
0.5 * (k_mean[1:(N-1)] + k_mean[2:N]),
k_mean[N]) #N+1 vector length
dkmean_dx <- (kmean_bnd[2:(N+1)] - kmean_bnd[1:N]) / grid$dx #N vector length
Cvar_mutran <- 2 * Db$mid * TOC * dkmean_dx^2
# reactions [mol/m3/yr]
R <- - k_mean * TOC
if(any(range(k_mean) < 0 ))
browser()
#The following loop goes over all cells and integrates k^2 C(k) and k^3 C(k) for the fitted distribution
guess <- guess.swi
int_kpow2C <- int_kpow3C <- rep(0, length.out = N)
for (i in 1:N) {
polynom_coefs <- polynom_dist_coefs(conc = TOC[i], k_mean = k_mean[i], k_var = k_var[i],
k_min = k_min, k_max = k_max, guess = guess)$root
b0 <- polynom_coefs[1]
b1 <- polynom_coefs[2]
b2 <- polynom_coefs[3]
int_kpow2C[i] <- int_kpowC(k_min, k_max, b0, b1, b2, 2)
int_kpow3C[i] <- int_kpowC(k_min, k_max, b0, b1, b2, 3)
#improve estimate coefficient for next cell
guess <- c(b0, b1, b2)
}
R.TOC <- R
R.Ck <- - int_kpow2C
R.Cvar <- - int_kpow3C + 2 * k_mean * int_kpow2C - k_mean^3 * TOC
R.Cage <- TOC + Cage / TOC * R #aging + reaction
dTOCdt <- tranTOC$dC + R.TOC
dCkdt <- tranCk$dC + R.Ck
dCvardt <- tranCvar$dC + R.Cvar + Cvar_mutran
dCagedt <- tranCage$dC + R.Cage
# Assemble the total rate of change, and return fluxes and reaction rates
return(list(c(dTOCdt = dTOCdt, dCkdt = dCkdt, dCvardt = dCvardt, dCagedt = dCagedt)))
})
}
discrete_validation_model <- function(t, state, parms) {
with(as.list(parms),
with(as.list(transport_parms), {
dCdt <- rep(0, length.out = length(state))
for (i_bin in 1:n_bins) {
conc <- state[((i_bin-1)*N+1):(i_bin*N)]
trans <- tran.1D(C = conc, C.up = Cup_bins[i_bin], D = Db$int,
v = SAR, VF = 1, dx = grid)
R <- - k_bins[i_bin] * conc
dCdt[((i_bin-1)*N+1):(i_bin*N)] <- trans$dC + R
}
return(list(dCdt))
})
)
}
#=============================================================================
# Model solution
#=============================================================================
# Initialize state variables for solver
TOC.in <- rep( unname(parms["TOC.swi"]), length.out = N) #g C / g sed
Ck.in <- kmean.swi * TOC.in
Cvar.in <- kvar.swi * TOC.in
Cage.in <- 0 * TOC.in
state <- c(TOC.in, Ck.in, Cvar.in, Cage.in)
#solve the model
#std <- steady.1D(y = state, func = model, parms = parms,
# names = c("TOC", "Ck", "Cvar"), nspec = 3)
#times <- seq(0, 0.32, length.out = 10)
times <- seq(0, 1000, by = 1) / time_scale #[yr] [1/yr] = [-]
print(system.time(
trans <- ode.1D(y = state, times = times, func = model, parms = parms,
names = c("TOC", "Ck", "Cvar", "Cage"), nspec = 4, method = "vode",
verbose = TRUE)
))
if (attributes(trans)$istate[1] != 2)
stop("numerical solution did not converge")
state_trans <- trans[length(times), 2:(length(state)+1)]
TOC <- state_trans[1:N]
Ck <- state_trans[(N+1):(2*N)]
Cvar <- state_trans[(2*N+1):(3*N)]
Cage <- state_trans[(3*N+1):(4*N)]
#=============================================================================
# Validation model with discrete bins of OM with different reactivities
#=============================================================================
n_bins <- 30 #number of bins
bin_width <- (k_max-k_min)/n_bins #delta k for each bin
k_bins <- bin_width * ((1:n_bins) - 0.5) #center of bins
Cup_bins <- vapply(1:n_bins, function(i_bin) { #upper boundary concentration for each bin
integrate(f = polynom_dist, b0 = coefs_init[1], b1 = coefs_init[2], b2 = coefs_init[3],
lower = k_min + (i_bin-1) * bin_width,
upper = k_min + i_bin * bin_width)$value
}, FUN.VALUE = 1 )
validation_parms <- list(n_bins = n_bins, transport_parms = parms,
k_bins = k_bins, Cup_bins = Cup_bins)
C_init_bins <- as.vector(sapply(1:n_bins, function(i) rep(Cup_bins[i], N) ))
val_state <- C_init_bins
valtrans <- ode(y = val_state, times = times, func = discrete_validation_model,
parms = validation_parms, method = "vode")
state_val <- valtrans[length(times), 2:(length(val_state)+1)]
stateval_mat <- matrix(ncol = n_bins, state_val)
TOC_val <- rowSums(sapply(1:n_bins, function(i)
return(state_val[ ((i-1)*N+1):(i*N)])))
Ck_val <- rowSums(sapply(1:n_bins, function(i)
return(state_val[ ((i-1)*N+1):(i*N)] * k_bins[i] )))
Cvar_val <- vapply(1:N, function(i) {
conc_row <- sum(stateval_mat[i,])
k_mean_row <- sum(k_bins * stateval_mat[i,]) / conc_row
sum( stateval_mat[i,] * (k_bins - k_mean_row)^2 )
}, FUN.VALUE = 1)
#=============================================================================
# Plot results (Figure 2 in manuscript)
#=============================================================================
par(mfrow = c(1,4))
plot_profile(x = TOC * 100, xlim = c(0,parms["TOC.swi"])*1e2,
xlab = paste("TOC", "(%)"))
lines(TOC_val * 100, y = grid$x.mid * length_scale, lwd=2, lty = 3, col = 2)
legend("topleft", c("continuous", "discrete"), lty = c(1,3),
lwd = c(1,2), col = c(1,2), bty = "n" )
plot_profile(x = Ck/TOC / time_scale, xlim = c(k_min, k_max)/time_scale,
xlab = paste("k mean", "(y-1)"))
lines(Ck_val/TOC_val/time_scale, y = grid$x.mid * length_scale, col = 2, lty = 3, lwd = 2 )
plot_profile(x = sqrt(Cvar/TOC / time_scale^2), #xlim = c(0, max(sqrt(Cvar/TOC))) / time_scale,
xlab = paste("sd k (y-1)"))
lines(sqrt(Cvar_val/TOC_val/time_scale^2), y = grid$x.mid * length_scale, col = 2, lty = 3, lwd = 2 )
coef_top <- polynom_dist_coefs(conc = TOC[1], k_mean = Ck[1]/TOC[1],
k_var = Cvar[1]/TOC[1],
k_min = k_min, k_max = k_max, guess = guess.swi,
rtol = 1e-8 )$root
top_dist <- polynom_dist(kaxs, coef_top[1],coef_top[2], coef_top[3])
coef_bot <- polynom_dist_coefs(conc = TOC[N], k_mean = Ck[N]/TOC[N],
k_var = Cvar[N]/TOC[N],
k_min = k_min, k_max = k_max, guess = guess.swi,
rtol = 1e-8 )$root
bot_dist <- polynom_dist(kaxs, coef_bot[1], coef_bot[2], coef_bot[3])
plot(y = top_dist, x = kaxs/time_scale,
xlab = "k (y-1)", ylab = "distribution", type = "l", col = "green",
ylim = c(0, max(top_dist)),
xlim = c(k_min, k_max) / time_scale )
lines(y = bot_dist, x = kaxs/time_scale, col = "brown" )
legend("right", legend = c("top", "bottom"), lty = c(1,1),
col = c("green", "brown"), bty = "n")
#=============================================================================
# Additional plots
#=============================================================================
par(mfrow = c(1,4))
#Plot the age of organic matter over depth
plot_profile(x = Cage/TOC * time_scale,
xlab = paste("age", "(y)"))
#Plot the Peclet number
if (parms["Db_max"] > 0 ) {
plot_profile(x = SAR * parms["Db_efold"] / Db$mid, xlab = "Pe" )
abline(v = 1, lty = 2)
}
#Plot mean reactivity vs age
plot(x = Ck/TOC / time_scale, y = Cage/TOC * time_scale, type = "l",
ylab = "age (y)", xlab = "mean k (y-1)",
ylim = rev(range( Cage/TOC * time_scale)))
#Plot variance reactivity vs age
plot(x = Cvar/TOC / time_scale^2, y = Cage/TOC * time_scale, type = "l",
ylab = "age (y)", xlab = "var age (y^2)",
ylim = rev(range( Cage/TOC * time_scale)))

PDE_and_Particles_sim.R
@@ -0,0 +1,419 @@
#=============================================================================
# Compare PDEs to a discrete Lagrangian particle tracking simulation
#
# Script corresponds to the first application in the manuscript.
# It can produce figure 1 from the manuscript.
#
# Coded by J. Rooze
#=============================================================================
library(deSolve)
library(english)
#create a grid in which space between nodes equals the jumping distance of molecules
n_cells <- 50 #number of cells including boundaries
l_domain <- 1 #length domain
jump_distance <- l_domain / (n_cells-1) #jumping distance of particles
jump_freq <- 1 #[1/time] #the frequency of successful jumps
#Reactions
R1 <- 0#10 # Production [conc / (time * location)]
k <- 0# 1e-3 # Consumption rate constant [1/time]
#the function below differs slightly from the standard functions in the moments package
#because sums are divided by n rather than (n-1), e.g. var = sum((Xi - mean(X))^2) / n
#function to calculate sample moments (mean, variance, standardized higher moments)
moment_func <- function(X, m) {
xi <- mean(X)
var_X <- sum((X - xi)^2) / length(X)
#treat special cases
if (m == 1 ) {
return(xi)
} else if ((m < 2) || (round(m) != m)) {
stop("Wrong input to moment calculation function")
} else if (m == 2) {
return(var_X)
} else {
return(sum((X - xi)^m) / (length(X) * sqrt(var_X)^m ))
}
}
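#quick illustrative check of the n vs (n-1) scaling (not run):
#moment_func(c(1, 2, 3), m = 2)   # 2/3
#var(c(1, 2, 3)) * (3 - 1) / 3    # also 2/3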
#Create populations of particles
#first make the two populations for the left and right boundaries
particles_left_bnd <- c(rnorm(n = 1000, mean = 0, sd = 2 ),
rnorm( 500, -0.5, 1 ), rnorm( 500, 0.5, 1 ))
particles_right_bnd <- rnorm(n=4000, mean=1, sd=0.2 ) #Gaussian (skewness = 0, kurtosis = 3)
#fill the grid interior with the left boundary population, and the rightmost cell with the right population
age_particles_init_grid <- lapply(1:n_cells, function(i_cell) { #as a list
if (i_cell == n_cells) {
particles_right_bnd
} else
particles_left_bnd
})
age_particles_init <- c(rep(particles_left_bnd, n_cells -1), particles_right_bnd) #as vector
################################################################################
## Setup and run particle tracking simulation
################################################################################
#particle tracking
n_init <- vapply(age_particles_init_grid, length, FUN.VALUE = n_cells)
total_particles <- sum(n_init)
xloc_particles <- rep(1:n_cells, n_init)
age_particles <- age_particles_init
#time-stepping
dt <- 1 / jump_freq
sim_time <- 5000
time <- 0
n_timesteps <- ceiling(sim_time/dt)
output_times <- round( exp(seq(0, log(sim_time), length.out = 6 )))
#list to store results
results <- vector(mode="list", length = length(output_times)+1)
results[[1]] <- matrix(ncol = 2, c(xloc_particles, age_particles))
#simulate with particle tracking
for(i_timestep in 1:n_timesteps) {
#jump direction: -1 = left, 1 = right
jumps <- sample(c(-1,1), replace=TRUE, size=total_particles)
#accounting for jumps (all particles shift left or right)
xloc_particles <- xloc_particles + jumps
#enforce boundary conditions
particles_at_left_bnd <- (xloc_particles == 1)
particles_at_right_bnd <- (xloc_particles == n_cells)
#remove particles at boundaries
xloc_particles <- xloc_particles[!(particles_at_left_bnd | particles_at_right_bnd)]
age_particles <- age_particles[!(particles_at_left_bnd | particles_at_right_bnd)]
#place new particles at boundaries
xloc_particles <- c(rep(1, length(particles_left_bnd)), #new particles left
rep(n_cells, length(particles_right_bnd)), #new particles right
xloc_particles) #particles within domain
age_particles <- c(particles_left_bnd, particles_right_bnd,
age_particles)
#consumption reaction (concentration-dependent)
if (!is.na(k) && k > 0) {
for (i_cell in 2:(n_cells-1)) {
#filter particles in specific cell
particles_in_cell <- (xloc_particles == i_cell)
#determine rate
R2_cell <- round(k * sum(particles_in_cell))
if (R2_cell > 1) {
#randomize sequence of particles to be removed
toberemoved <- sample((1:length(xloc_particles))[particles_in_cell])[1:R2_cell]
#remove concentration
xloc_particles <- xloc_particles[-toberemoved]
age_particles <- age_particles[-toberemoved]
}
}
}
#production reaction (not concentration-dependent)
new_xloc_particles <- rep(2:(n_cells-1), each = R1)
new_age_particles <- rep(0, length(new_xloc_particles))
#add the new particles to vectors
xloc_particles <- c(xloc_particles, new_xloc_particles)
age_particles <- c(age_particles, new_age_particles)
#update number of total particles
total_particles <- length(xloc_particles)
#save result
if (i_timestep %in% output_times) {
results[[match(i_timestep, output_times) ]] <- matrix(ncol = 2, c(xloc_particles, age_particles))
print(i_timestep)
}
}
#binning based on x location
bin_particles <- function(result) {
xloc <- result[,1]
age <- result[,2]
summary_grid <- matrix(nrow = 1+num_of_moments, ncol = n_cells)
for (i_cell in 1:n_cells) {
indices <- (xloc == i_cell)
#check to avoid NA values when no particles are present
if (sum(indices) == 0) {
ages_icell <- c(0,0) #yields mu = 0 and sd = 0
} else
ages_icell <- age[indices]
summary_grid[1, i_cell] <- sum(indices) #number of particles
for (i_moment in (1:num_of_moments) ) { #statistics
summary_grid[1 + i_moment, i_cell] <- moment_func(ages_icell, m = i_moment)
}
}
return(summary_grid)
}
################################################################################
## Setup and run simulation for partial differential equations
################################################################################
#translate particle distribution to settings for pde
#for pde grid
nx <- n_cells
particles_per_conc <- 1e3
num_of_moments <- 8
#boundary conditions (actually not used in pde, because boundaries included in grid)
bc_mat <- matrix(ncol = 2, nrow = num_of_moments+1, 0)
colnames(bc_mat) <- c("bc_left", "bc_right")
bc_mat[1, ] <- c(length(particles_left_bnd), length(particles_right_bnd)) / particles_per_conc
for (i_moment in 1:num_of_moments) {
bc_mat[1+i_moment, ] <- vapply(list(particles_left_bnd, particles_right_bnd),
moment_func, m = i_moment, FUN.VALUE = 2)
}
#initial conditions
init_mat <- matrix(nrow = nx, ncol = num_of_moments+1, 0)
init_mat[,1] <- approx(x = seq(0, 1, length=n_cells),
y = vapply(age_particles_init_grid, length, FUN.VALUE = n_cells ),
xout = seq(0, 1, length = nx ))$y / particles_per_conc
for(i_moment in (1:num_of_moments) ) {
init_mat[, 1+i_moment] <- approx(x = seq(0, 1, length=n_cells),
y = vapply(age_particles_init_grid, moment_func,
m = i_moment, FUN.VALUE = n_cells ),
xout = seq(0, 1, length = nx ))$y *
init_mat[,1] #multiply by concentration
#account for the difference between the standardized moments from moment_func and the central moments phi = sum((x - mu)^m) / n
if (i_moment > 2) {
init_sd <- sqrt(init_mat[, 3] / init_mat[,1])
init_mat[, 1+i_moment] <- init_mat[, 1+i_moment] * init_sd^i_moment
}
}
#simulate the derived PDEs with finite differences
heat_pde <- function(t, y, parms) {
with(as.list(parms), {
#for a uniform grid the arithmetic mean works for the interface values
if (length(D) == nx) {
D_bnd <- 0.5 * (D[1:(nx-1)] + D[2:nx]) #length nx-1
} else if (length(D) == 1) {
D_bnd <- rep(D, nx-1)
} else
stop("D vector in parms has the wrong length")
#F = -D dC/dx
flux <- - D_bnd * (y[2:nx] - y[1:(nx-1)]) / dx #length nx-1
#dC/dt = - dF/dx
dCdt <- - (flux[2:(nx-1)] - flux[1:(nx-2)]) / dx #length nx-2
#first and last cell are fixed as boundary conditions, so there dC/dt=0
list(c(0,dCdt,0))
})
}
coupled_pde <- function(t, y, parms) {
with(as.list(parms), {
#convert vector with state variables to matrix
#order of columns: concentration, 1st moment, 2nd moment, etc.
sv_mat <- matrix(nrow = nx, y)
conc <- sv_mat[,1]
mu <- sv_mat[,2] / conc
var <- sv_mat[,3] / conc
R1 <- c(0, rep(R1[1], nx-2), 0)
R2 <- c(0, k * conc[-c(1, nx)], 0) #exclude boundaries
aging <- 0
#matrix to store differentials
dsvdt_mat <- matrix(nrow = nx, ncol = num_of_moments+1, 0)
#change in concentration over time
dsvdt_mat[,1] <- unlist( heat_pde(t, sv_mat[,1], parms) ) + R1 - R2
#d(mu * C)/dt
dsvdt_mat[,2] <- unlist( heat_pde(t, sv_mat[,2], parms) ) + aging - mu * R2
#d(var * C)/dt
varheat_term <- unlist( heat_pde(t, sv_mat[,3], parms) )
#calculate dmu/dx for left and right cell boundaries
dmudx <- (mu[2:(nx)] - mu[1:(nx-1)])/dx
#gradient term for interior cells; the factor 2 from the pde is absorbed by summing (rather than averaging) the squared gradients at the two cell interfaces
muterm <- c(0, D * conc[2:(nx-1)] * (dmudx[1:(nx-2)]^2 + dmudx[2:(nx-1)]^2), 0)
#reaction terms
#Production of material with age 0
dCmomentsdt_R1 <- matrix(nrow = nx, ncol = num_of_moments-1, 0)
if (R1[2] != 0) { #test if production is turned on
#for central moments mu0 = 1 and mu1 = 0
centralmoms <- sv_mat / conc
centralmoms[,2] <- 0
#calculate the noncentral moments from central moments
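#(illustrative note: this applies the standard binomial relation
# m'_n = sum_{j=0..n} choose(n, j) * mu_j * mean^(n - j),
# with mu_j the central moments; the result is then multiplied by conc)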
noncentralmoms <- sv_mat
for (i_mom in 1:(num_of_moments+1) ) {
n <- i_mom-1
k <- (1:i_mom)-1
noncentralmoms[,i_mom] <- vapply(1:nx, function(j_cell) {
sum(
choose(n, k) * centralmoms[j_cell, 1:i_mom] * mu[j_cell]^(n-k)
) * conc[j_cell]
}, FUN.VALUE = 1 )
}
#create matrix to store results for var and higher moms
dCmomentsdt <- matrix(nrow = nx, ncol = num_of_moments-1, 0)
dmudt <- -mu * R1 / conc
for (i_cell in 1:nx) {
for (i_mom in 2:num_of_moments) {
sumup <- 0
for (j in 0:i_mom) {
mult <- choose(i_mom, j)
mupowderiv <- j * mu[i_cell]^(j-1) * dmudt[i_cell]
term <- (-1)^j * mult * mupowderiv * noncentralmoms[i_cell, i_mom-j+1]
sumup <- sumup + term
}
dCmomentsdt_R1[i_cell, i_mom -1] <- sumup
}
}
#respect boundary conditions
dCmomentsdt_R1[1,] <- 0
dCmomentsdt_R1[nx,] <- 0
}
#balance for variance
varreacterm <- dCmomentsdt_R1[,1] - var * R2
dsvdt_mat[,3] <- varheat_term + muterm + varreacterm
#calculate the derivatives for higher moments
if (num_of_moments > 2) {
for (i_moment in 3:num_of_moments) {
#d(mean moment value * C)/dt
#note that first column is for conc, 2nd for first moment, etc
heat_term <- unlist( heat_pde(t, sv_mat[, 1+i_moment], parms) )
prevmoment <- sv_mat[,i_moment]/conc
dprevmomentdx <- (prevmoment[2:(nx)] - prevmoment[1:(nx-1)])/dx #at cell boundaries
dprevdmu <- 0.5 * (dprevmomentdx * dmudx)[1:(nx-2)] +
0.5 * (dprevmomentdx * dmudx)[2:(nx-1)] #shifting to mid cells
dmudx_term <- 2 * i_moment * D * conc * c(0, dprevdmu,0)
#reaction terms
currentmoment <- sv_mat[,i_moment+1] / conc
reacterm <- dCmomentsdt_R1[,i_moment-1] -
currentmoment * R2
dsvdt_mat[, 1+i_moment] <- heat_term + dmudx_term + reacterm
}
}
list(c(dsvdt_mat))
})
}
#set pde parameters and run the coupled simulation
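#(note: for a 1D random walk the effective diffusion coefficient is
# D = 0.5 * jump_freq * jump_distance^2, which is how D is set below)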
parms <- c(
D = jump_freq * jump_distance^2 * 0.5,
nx = nx,
dx = (jump_distance * (n_cells-1)) / (nx - 1),
R1 = R1/particles_per_conc,
k = k
)
pde_numerical_coupled <- ode(y = c(init_mat), func = coupled_pde,
parms = parms, times = output_times,
method = "vode")
#plotting
{
plot_rows <- floor(sqrt(num_of_moments+1))
plot_cols <- ceiling((num_of_moments+1)/plot_rows)
par(mfrow = c(plot_rows, plot_cols))
#determine ylim for plotting
y_lim_mat <- matrix(ncol = 2, nrow = num_of_moments+1, 0)
for (i_output in 1:length(output_times)) {
binned_result <- bin_particles( results[[i_output]] )
y_lim_mat[1,] <- c(min(binned_result[1, ]), max(binned_result[1, ]))
for (j_prop in 2:(num_of_moments+1)) {
y_lim_mat[j_prop,] <- c(min(binned_result[j_prop, ], y_lim_mat[j_prop,1]),
max(binned_result[j_prop, ], y_lim_mat[j_prop,2]))
}
}
#first plot: Number of Particles
for (j_line in 1:length(output_times)) {
plot_func <- plot
if (j_line > 1)
plot_func <- points
binned_result <- bin_particles( results[[j_line]] )
plot_func(x = seq(0, l_domain, length.out = n_cells), y = binned_result[1,],
ylim = y_lim_mat[1,]*c(0.9, 1.1), pch = 1,
xlab = "x", ylab = "num. of particles",
col = j_line)
pde_conc <- pde_numerical_coupled[j_line, 2:(1+nx)]
lines(x = seq(0, l_domain, length.out = nx),
y = pde_conc * particles_per_conc,
col = j_line, lwd = 1.5)
}
#plot the moments
for (i_moment in 1:num_of_moments) {
for (j_line in 1:length(output_times)) {
plot_func <- plot
if (j_line > 1)
plot_func <- points
binned_result <- bin_particles( results[[j_line]] )
plot_func(x = seq(0, l_domain, length.out = n_cells), y = binned_result[1+i_moment,],
xlab = "x", ylab = paste(ordinal(i_moment), "moment"),
col = j_line, ylim = y_lim_mat[1+i_moment,], pch = 1, lwd = 1.5)
pde_conc <- unname(pde_numerical_coupled[j_line, 2:(1+nx)])
pde_moment <- unname(pde_numerical_coupled[j_line, (2+i_moment*nx):(1+nx*(i_moment+1))]/pde_conc)
if (i_moment > 2) {
pde_sd <- sqrt(unname(pde_numerical_coupled[j_line, (2+2*nx):(1+nx*(2+1))]) / pde_conc)
pde_moment <- pde_moment / pde_sd^i_moment
}
lines(x = seq(0, l_domain, length.out = nx),
y = pde_moment,
col = j_line, lwd = 1.5)
}
}
legend("topleft", legend = output_times, col = 1:j_line, pch = 1, title ='time',
lty=1, lwd = 1.5)
legend("topright", legend = c("particle track.", "pde soln"),
pch = c(1,NA), lty = c(NA,1), lwd = c(NA, 1.5))
}

compiled_calls.R
@@ -0,0 +1,191 @@
#interface for assymheavi.c
#compilation: R CMD SHLIB assymheavi.c -lgsl -lm
dyn.load(paste0(getwd(),"/C/assymheavi.so"))
#moments should always be the central ones, also when another center is used
fitting_cost <- function(k1, k2, d, xmin, xmax,
center = NULL, moments) {
n_mom <- length(moments)
if (length(k1) != 1 || length(k2) != 1 || length(d) != 1 ||
length(xmax) != 1 || length(xmin) != 1)
stop("Wrong length input parameters")
if (n_mom == 3) {
include_conc <- FALSE
moments <- c(0, moments) #length has to be 4
} else if (n_mom == 4) {
include_conc <- TRUE
} else
stop("Wrong number of moments specified.")
if (is.null(center)) {
n_center = 1
use_noncentral_moments = FALSE
} else {
n_center = length(center)
use_noncentral_moments = TRUE
}
output <- .C("cost_function",
k1 = as.double(k1),
k2 = as.double(k2),
d = as.double(d),
xmin = as.double(xmin),
xmax = as.double(xmax),
use_noncentral_moms = as.integer(use_noncentral_moments),
new_center = as.double(center),
ncenter = as.integer(n_center),
include_conc = as.integer(include_conc),
moms = as.double(moments),
total_error = double(n_center))
return(output$total_error)
}
optimize_boxed <- function(start, xmin, xmax,
lower, upper,
center = NULL, moments,
max_iter = 8) {
n_mom <- length(moments)
if (length(start) != n_mom || length(lower) != n_mom || length(upper) != n_mom) {
stop("Vector length of start, lower, or upper is wrong.")
}
if (n_mom == 3) {
include_conc <- FALSE
moments <- c(0, moments) #length has to be 4
start <- c(start, 0) #mult is last parm
lower <- c(lower, 0)
upper <- c(upper, 0)
} else if (n_mom == 4) {
include_conc <- TRUE
} else
stop("Wrong number of moments specified.")
if ( all(start == rep(0,4)) )
stop("Start only contains zeros, integral cannot be evaluated.")
if (max_iter < 1)
stop("max_iter has to be higher than 0")
if (is.null(center)) {
n_center = 1
use_noncentral_moments = FALSE
} else {
n_center = length(center)
use_noncentral_moments = TRUE
}
output <- .C("multiopt_boxed",
start = as.double(start),
xmin = as.double(xmin),
xmax = as.double(xmax),
lower = as.double(lower),
upper = as.double(upper),
moms = as.double(moments),
use_noncentral_moms = as.integer(use_noncentral_moments),
center = as.double(center),
ncenter = as.integer(n_center),
niter = as.integer(max_iter),
include_conc = as.integer(include_conc),
result = double(n_mom))
return(output$result)
}
determine_moments <- function(p, xmin, xmax, center = NULL) {
if (length(p) == 3) {
k1 <- p[1]
k2 <- p[2]
d <- p[3]
} else {
mult <- p[1]
k1 <- p[2]
k2 <- p[3]
d <- p[4]
}
if (length(xmax) != 1 || length(xmin) != 1)
stop("Wrong length input parameters")
if (is.null(center)) {
use_noncentral_moments = FALSE
} else {
if (length(center) != 1)
stop("Only one center can be specified.")
use_noncentral_moments = TRUE
}
output <- .C("determine_moments",
k1 = as.double(k1),
k2 = as.double(k2),
d = as.double(d),
xmin = as.double(xmin),
xmax = as.double(xmax),
use_noncentral_moment = as.integer(use_noncentral_moments),
center = as.double(center),
moments = double(4) )
if (length(p) == 3) {
result <- output$moments[-1]
} else {
result <- output$moments
result[1] <- result[1] * mult
}
return(result)
}
#
# #testing
# moments <- c(4.555001, 14.251334, 141.726752)
# p_plnm <- c(3.3194099, 0.5221001, 1.6298263) #mult = 1.2665346
# p_mom <- c(8.0000000, 3.9996094, 1.4304688) #mult = 0.9760191
#
# fitting_cost(k1 = p_plnm[1], k2 = p_plnm[2], d = p_plnm[3],
# xmin = 0, xmax = 100, center = NULL, moments)
#
# fitting_cost(k1 = p_mom[1], k2 = p_mom[2], d = p_mom[3],
# xmin = 0, xmax = 100, center = NULL, moments)
#
# center <- seq(-20, 50, by = 0.1)
#
# fC_opt <- fitting_cost(k1 = p_plnm[1], k2 = p_plnm[2], d = p_plnm[3],
# xmin = 0, xmax = 100, center = center, moments)
#
# fC_mom <- fitting_cost(k1 = p_mom[1], k2 = p_mom[2], d = p_mom[3],
# xmin = 0, xmax = 100, center = center, moments)
# par(mfrow = c(1,2))
# plot(fC_opt ~ center)
# abline(v= center[fC_opt==max(fC_opt)])
#
# plot(fC_mom ~ center)
#
# abline(v= center[fC_mom==max(fC_mom)])
#
#
# sum(fC_opt)
# sum(fC_mom)
#
# #optimization
# moments <- c(4.555001, 14.251334, 141.726752)
# p_new <- optimize_boxed(start = p_mom, xmin = 0, xmax = 100,
# lower = c(1e-3, 1e-3, 1), upper = c(8, 6, 20),
# center = seq(1, 20, by = 1), moments,
# max_iter = 8)
# dyn.unload("/home/jurjen/Dropbox/MovingSigma/scripts/C/assymheavi.so")

main_app3_agingmodel.R
@@ -0,0 +1,273 @@
#vode
library(deSolve)
library(ReacTran)
library(english)
setwd("~/Dropbox/MovingSigma/scripts/AgingOMApp/")
source("implement_pdes.R")
source("distribut_funcs.R")
source("multiG_comparison.R")
#scripts for distribution
source("assymheaviside_funcs.R")
#make sure that assymheavi.c in the C folder has been compiled
source("compiled_calls.R")
#redefine function interface with multiplier as new first parameter
pdf <- function(x, p) {
p[1] * assymheavi_pdf(x, p[-1])
}
cdf <- function(a, b, p) {
p[1] * assymheavi_cdf(a, b, p[-1])
}
Cmu <- function(a, b, p) {
p[1] * assymheavi_Cmu(a,b, p[-1])
}
#integration bounds for reactivity/age distribution
integral_xmin <- 0 #lower bound age
integral_xmax <- 60 #upper bound age
#===============================================================================
# Model domain and grid definition
#===============================================================================
L <- 0.05 # depth of sediment domain [m]
N <- 25 # number of grid layers
grid <- setup.grid.1D(x.up = 0, L = L, N = N)
#===============================================================================
# Model parameters
#===============================================================================
#Transport parameters
parms <- c(Db_max = 1e-11 * (365*24*60^2), #bioturbation at top [m2/y]
Db_efold = 0.02, #bioturbation attenuation factor [m]
SAR = 0.001) #sediment accumulation rate [m/y]
Db <- setup.prop.1D(func = p.exp, grid = grid, y.0 = parms["Db_max"],
y.inf = 0, x.att = parms["Db_efold"])
#===============================================================================
# Functions needed for including reactions
#===============================================================================
find_dist_coefs <- function(moments, guess, center = NULL) {
coefs <- optimize_boxed(start = guess[-1], xmin = integral_xmin,
xmax = integral_xmax,
lower = c(1e-1, 1e-1, p_init[4]),
upper = c(p_init[2], p_init[3], 10 ),
center = NULL, moments[-1],
max_iter = 8)
mult <- moments[1] / cdf(a = integral_xmin, b = integral_xmax, c(1, coefs))
return(c(mult, coefs))
}
#function for age based reaction rate coefficients
reaction_basedonage <- function(x, b0 = 0.1, b1 = -1, b2 = 1e-3) (-b0 * (x+b2)^b1)
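#illustrative note: with the default parameters this reduces to k(age) = -0.1 / (age + 1e-3),
#i.e. the magnitude of the rate coefficient declines with age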
#===============================================================================
# Boundary and initial conditions
#===============================================================================
# initial condition and upper boundary condition are the same
#parameters for the distribution (defined in assymheaviside_funcs.R)
k1_init <- 6
k2_init <- 4
d_init <- 1
mult_init <- 1/cdf(integral_xmin, integral_xmax, c(1, k1_init, k2_init, d_init) )
p_init <- c(mult_init, k1_init, k2_init, d_init)
#check initial distribution
par(mfrow = c(1,1))
xseq <- seq(0, 10, length = 1000)
plot(pdf(xseq, p_init) ~ xseq, type = "l", xlab = "age (y)", ylab = "df" )
#distribution at upper boundary
conc_top <- cdf(a = integral_xmin, b = integral_xmax, p = p_init)
moments_top <- determine_moments(p_init,
xmin = integral_xmin, xmax = integral_xmax)
conc_top <- moments_top[1]
mean_top <- moments_top[2]
var_top <- moments_top[3]
skew_top <- moments_top[4]
num_moments <- length(moments_top)
#upper boundary conditions (statevars are concentration * moments)
boundcond <- unname(c(conc_top, conc_top * moments_top[-1] ))
#for lower boundary conditions no-gradient conditions are automatically applied
#initial conditions (concentration cannot be zero!)
init_statevars <- matrix(nrow = grid$N, #row for each cell
ncol = num_moments, #column for each moment
byrow = TRUE, boundcond * 0.05 ) #scaling acts on C * moment, so only the concentration changes, not the moments
init_statevars[1,] <- boundcond
#===============================================================================
# Simulation settings
#===============================================================================
include_reactions <- TRUE
include_aging <- TRUE
#select a distribution, which should have a number of coefficients matching
# with the number of moments (including concentration)
distribution_func <- function(x, coefs) {
coefs[1] * assymheavi_pdf(x, coefs[-1])
}
#select the function that will be used:
reaction_func <- reaction_basedonage
#simulation time settings
times <- seq(0, 20, length = 21)
#run additional simulations for validation purposes
run_multiG_comparison <- TRUE
discrete_classes <- 50 #number of state variables in multi-G model
#===============================================================================
# Run the simulation
#===============================================================================
#simulate partial differential equations
#function defined in implement_pdes.R
pde_sim <- solve_pde(init_sv = init_statevars,
parms = parms,
times = times,
boundcond = boundcond,
include_reactions = include_reactions,
include_aging = include_aging)
#run discrete model for validation
if (run_multiG_comparison) {
dummy <- run_discrete_model(init_sv = init_statevars,
parms = parms,
times = times,
boundcond = boundcond,
include_reactions = include_reactions,
include_aging = include_aging,
nbins = discrete_classes)
disc_sim <- dummy[[1]]
disc_vals <- dummy[[2]]
} else
disc_sim <- NULL
#===============================================================================
# Plot results
#===============================================================================
#age_index specifies the output time for plotting (by default the last time)
age_index <- length(times)
final_state <- matrix(pde_sim[age_index, 2:(length(init_statevars)+1)],
ncol = num_moments)
if (run_multiG_comparison) {
final_state_disc <- matrix(disc_sim[age_index, 2:(discrete_classes*grid$N+1)],
ncol = discrete_classes)
}
par(mfrow = c(2,3), cex = 1)
#plot concentration
if (run_multiG_comparison) {
#concentration from discrete simulation
disc_conc <- rowSums(sapply(1:discrete_classes, function(i)
return(final_state_disc[ ((i-1)*N+1):(i*N)])))
plot(y = grid$x.mid, ylim = c(L,0), type = "l", ylab = "depth (m)",
x = final_state[,1], xlim = c(0, max(final_state[,1], disc_conc)),
xlab = expression(paste("concentration (mol m"^-3, ")")))
lines(y = grid$x.mid, x= disc_conc, col = 2, lty=2)
legend("topleft", legend = c("continuous", "discrete"), lty = 1:2, col=1:2,
bty = "n")
} else {
plot(y = grid$x.mid, ylim = c(L,0), type = "l", ylab = "depth (m)",
x = final_state[,1], xlim = c(0, max(final_state[,1])),
xlab = expression(paste("concentration (mol m"^-3, ")")))
}
#plot mean
if (num_moments > 1) {
state <- final_state[, 2] / final_state[, 1]
if (run_multiG_comparison) {
disc_mean <- rowSums(sapply(1:discrete_classes, function(i)
return(final_state_disc[ ((i-1)*N+1):(i*N)] * disc_vals[i] / rowSums(final_state_disc) )))
plot(y = grid$x.mid, ylim = c(L,0), type = "l", ylab = "depth (m)",
x = state,
xlim = c(0, max(state, disc_mean)),
xlab = "mean (y)")
lines(y = grid$x.mid, x= disc_mean, col = 2, lty=2 )
} else {
plot(y = grid$x.mid, ylim = c(L,0), type = "l", ylab = "depth (m)",
x = state,
xlim = c(0, max(state)),
xlab = "mean (y)")
}
}
#plot variance and skewness
for (i_plot in 3:num_moments) {
state <- final_state[, i_plot] / final_state[, 1]
xlab <- substitute( paste(a, " moment (y"^b, ")"),
list(a = ordinal(i_plot-1),
b = (i_plot-1)) )
if (!run_multiG_comparison)
plot(y = grid$x.mid, ylim = c(L,0), type = "l", ylab = "depth (m)",
x = state,
xlab = xlab )
if (run_multiG_comparison) {
disc_mom <- rep(0, grid$N)
for (i_cell in 1:grid$N) {
#mu = sum_i (C_i age_i) / sum(C_i)
mu <- sum(final_state_disc[i_cell,] * disc_vals) / sum(final_state_disc[i_cell,])
#phi = sum_i (C_i * (age_i - mu)^x ) / sum(C_i)
disc_mom[i_cell] <- sum(final_state_disc[i_cell,] * (disc_vals - mu)^(i_plot-1) ) /
sum(final_state_disc[i_cell,])
}
plot(y = grid$x.mid, ylim = c(L,0), type = "l", ylab = "depth (m)",
x = state, xlim = c(0, max(state, disc_mom) ),
xlab = xlab )
lines(y = grid$x.mid, x= disc_mom, col = 2, lty=2)
}
}
#plot distributions from three domain depths
moments <- final_state[1,] * c(1, 1/rep(final_state[1,1], num_moments-1) )
x <- seq(integral_xmin, 50, length = 100)
y <- distribution_func(x, find_dist_coefs(moments, p_init) )
plot(y=y, x=x, ylab = "df", xlab = "age (y)", type = "l")
moments <- final_state[5,] * c(1, 1/rep(final_state[5,1], num_moments-1) )
lines(y=distribution_func(x, find_dist_coefs(moments, p_init)), x=x, col =2)
moments <- final_state[18,] * c(1, 1/rep(final_state[18,1], num_moments-1) )
lines(y=distribution_func(x, find_dist_coefs(moments, p_init)), x=x, col =3)
legend("topright", legend = grid$x.mid[c(1,5,18)] * 100, col = 1:3, lty = 1, pch = NA,
title = "depth (cm)", bty = 'n')

readme.txt
@@ -0,0 +1,11 @@
Scripts corresponding to the applications in the paper are contained in this folder.
Application 1: PDE_and_Particles_sim.R
Application 2: ContinuousG_OrgMin.R
Application 3: AgingOMApp/main_app3_agingmodel.R
The first two scripts can be run after installing the packages listed in their first lines.
For the third application, the C code ./AgingOMApp/C/assymheavi.c needs to be compiled. Assuming that the GNU Scientific Library is installed on a Unix-type system (e.g., Linux or macOS), this can be done by running the comp.sh script (or with the terminal command: R CMD SHLIB assymheavi.c -lm -lgsl). Then open main_app3_agingmodel.R, change the working directory [line 6: setwd("...")] to the correct path, and the script can be run in R.
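For example, on a Linux system with GSL installed (a sketch; exact paths may differ):
  cd AgingOMApp/C
  R CMD SHLIB assymheavi.c -lm -lgsl
This should produce assymheavi.so, which compiled_calls.R loads with dyn.load().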
All scripts were tested in R version 4.2.2 Patched (2022-11-10 r83330).