library(uqsa)
library(parallel)
library(SBtabVFGEN)
library(rgsl)

We load the model from a collection of TSV files (inst/extdata/AKAR4 on GitHub); the function below locates the TSV files after package installation:

modelFiles <- uqsa_example("AKAR4",full.names=TRUE)

Then we import the contents as a list of data.frames:

SBtab <- sbtab_from_tsv(modelFiles) # SBtabVFGEN
#> [tsv] file[1] «/tmp/RtmpIQ7Xkh/temp_libpath13f03910aa14/uqsa/extdata/AKAR4/AKAR4_100nM.tsv» belongs to Document «AKAR4»
#>  I'll take this as the Model Name.
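
The result is a list keyed by SBtab table name; the exact set of tables depends on the model files (the Output table, for instance, is used again at the end of this document):

names(SBtab)        # one data.frame per SBtab table
head(SBtab$Output)  # output definitions, referenced later for the simulated outputs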

We load the R functions of the model:

  • AKAR4_vf, the vector field,
  • AKAR4_jac, the jacobian,
  • AKAR4_default, the default parameters

The R file we source additionally defines a variable called model, which provides generic names for all model constituents and thus makes it easier to write generic scripts:

  • model$vf() corresponds to AKAR4_vf()
    • but does not use AKAR4 in the name
  • model$jac() corresponds to AKAR4_jac()
  • model$name is a string, "AKAR4" in this case

You can investigate the sourced file to find out more.

Generally, model is a list of functions, one of which is called model$par() and returns the default parameters of the model in linear space:

source(uqsa_example("AKAR4",pat="^AKAR4[.]R$")) # loads "model"
#> Loading required package: deSolve
model$name   # AKAR4
#> [1] "AKAR4"
model$par()  # default values for parameters
#> kf_C_AKAR4 kb_C_AKAR4 kcat_AKARp 
#>      0.018      0.106     10.200
model$init() # default initial values for ODE state variables
#>   AKAR4 AKAR4_C  AKAR4p       C 
#>     0.2     0.0     0.0     0.0
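
Since sourcing the file loads deSolve, the R functions can presumably be used with deSolve directly. Below is a minimal sketch, under the assumption that model$vf() follows the deSolve convention func(t, y, p) and returns a list of derivatives (check the sourced AKAR4.R file to confirm the exact signature):

library(deSolve)
## assumption: model$vf(t, y, p) returns list(dy/dt) in deSolve style
times <- seq(0, 100, length.out = 101) # arbitrary time grid, for illustration only
sol <- ode(y = model$init(), times = times, func = model$vf, parms = model$par())
head(sol)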

The model exists in R and C form:

  • AKAR4.R
  • AKAR4_gvf.c

The file ending in _gvf.c contains the functions used for simulations with rgsl::r_gsl_odeiv2_outer() (the _gvf.c file needs to be compiled). The functions in the .R file can be used more freely within R and are functionally identical: they describe the same model, but solving it in R is slower.

The next function call checks that the model's [...]_gvf.c file exists and builds a shared library (.so) from this source. It attaches a comment to its return value that indicates the name of the shared library for this model:

modelName <- checkModel("AKAR4",uqsa_example("AKAR4",pat="_gvf[.]c$")) # SBtabVFGEN
#> building a shared library from c source, and using GSL odeiv2 as backend (pkg-config is used here).
#> cc -shared -fPIC `pkg-config --cflags gsl` -o './AKAR4.so' '/tmp/RtmpIQ7Xkh/temp_libpath13f03910aa14/uqsa/extdata/AKAR4/AKAR4_gvf.c' `pkg-config --libs gsl`
comment(modelName) # should be AKAR4.so
#> [1] "./AKAR4.so"

The comment will be used to find the .so file (if you move that file, adjust the comment on modelName).
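
If you do move the file, base R's comment<- can point modelName at the new location (the path below is just a placeholder):

comment(modelName) <- "/new/path/to/AKAR4.so" # placeholder path, only needed after moving the file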

We want to sample in logarithmic space, so we set up a mapping function parMap. The sampler will call this function on the sampling variable parABC before it simulates the model.

parMap <- function (parABC=0) {
    return(10^parABC)
}
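
A quick sanity check (using the model$par() defaults shown above): applying parMap to the log10 of the defaults recovers the defaults.

all.equal(parMap(log10(model$par())), model$par()) # should be TRUE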

The ODE model receives a parameter vector p for each experiment. It consists of the biological model parameters (which are the same for all experiments) and input parameters (which, together with the initial conditions, distinguish the experimental setups). This model doesn't have input parameters, so the biological and ODE parameters are the same. Otherwise, the two kinds of parameters are concatenated into one numeric vector inside the solver. The model function AKAR4_default() returns the default values for p.
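
Purely as an illustration of that concatenation (AKAR4 has no inputs, so the input part is empty and p reduces to the mapped biological parameters):

## illustrative only: AKAR4 has no input parameters
parABC <- log10(AKAR4_default())   # sampling variable, in log10 space
input  <- numeric(0)               # per-experiment inputs (empty for this model)
p      <- c(parMap(parABC), input) # the vector the solver receives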

Next, we load the list of experiments from the same list of data.frames (SBtab content):

experiments <- sbtab.data(SBtab)
parVal <- AKAR4_default()
print(parVal)
#> kf_C_AKAR4 kb_C_AKAR4 kcat_AKARp 
#>      0.018      0.106     10.200

Define lower and upper limits for the log-uniform prior distribution of the parameters:

ll <- log10(parVal) - 3
ul <- log10(parVal) + 3
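
The bounds span three orders of magnitude on either side of each default value (in log10 units); to inspect them:

rbind(lower = ll, upper = ul) # prior box in log10 space, one column per parameter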

Define the number of samples for the pre-calibration (npc) and for each ABC-MCMC chain (ns). During ABC-MCMC, we save every 100th point, so work(nChains*ns*100) ≈ work(npc):

ns <- 800    # ABC-MCMC sample size
npc <- 50000 # pre-calibration size
delta <- 0.02
set.seed(2022)
nCores <- parallel::detectCores()
options(mc.cores=nCores)
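
A rough check of that workload balance (the getMCMCPar() call below uses num=1, i.e. a single chain):

ns * 100 # ~80000 ABC-MCMC iterations per chain (cf. the "n = ..." progress lines below)
npc      # 50000 pre-calibration simulations per chunk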

We define a function that measures the distance between experiment and simulation:

distanceMeasure <- function(funcSim, dataExpr, dataErr = 1.0){
  # normalized mean squared deviation between simulation and the measured AKAR4p output
  distance <- mean(((funcSim-dataExpr$AKAR4pOUT)/max(dataExpr$AKAR4pOUT))^2,na.rm=TRUE)
  return(distance)
}
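
As a quick illustration with made-up values (not taken from the dataset): an exact match has distance 0, and deviations are measured relative to the peak value of the data.

toy <- data.frame(AKAR4pOUT = c(0.00, 0.05, 0.10, 0.15)) # fabricated toy data
distanceMeasure(toy$AKAR4pOUT, toy)         # exact match: 0
distanceMeasure(toy$AKAR4pOUT + 0.015, toy) # off by 10% of the peak everywhere: 0.01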

We divide the workload into chunks and loop over them: first experiments 1 and 2, then experiment 3. After each chunk, a copula is fitted to the accepted sample and serves as the prior for the next chunk:

chunks <- list(c(1,2),3)
priorPDF <- dUniformPrior(ll, ul)
rprior <- rUniformPrior(ll, ul)

start_time = Sys.time()
for (i in seq(length(chunks))){
    expInd <- chunks[[i]]
    simulate <- simulator.c(experiments[expInd],modelName,parMap,noise=TRUE)
    Obj <- makeObjective(experiments[expInd],modelName,distanceMeasure,parMap,simulate)
    time_pC <- Sys.time()

    pC <- preCalibration(Obj, npc, rprior, rep=3)
    M <- getMCMCPar(pC$prePar, pC$preDelta, delta, num=1)
    time_pC <- Sys.time() - time_pC
    cat(sprintf("\t - time spent on precalibration: %g s\n",time_pC))

    time_ABC <- Sys.time()
    mcmc <- ABCMCMC(Obj, as.numeric(M$startPar), ns, M$Sigma, delta, priorPDF)
    time_ABC <- Sys.time() - time_ABC
    cat(sprintf("\t - time spent on ABC-MCMC: %g s\n",time_ABC))

    if (i>1){
        simulate <- simulator.c(experiments[chunks[[1]]],modelName,parMap)
        Obj <- makeObjective(experiments[chunks[[1]]],modelName,distanceMeasure,parMap,simulate)
        mcmc$draws <- checkFitWithPreviousExperiments(mcmc$draws, Obj, delta)
    }

    C <- fitCopula(mcmc$draws)
    priorPDF <- dCopulaPrior(C)
    rprior <- rCopulaPrior(C)
}
#>   - time spent on precalibration: 33.9671 s
#> Started chain.
#> 
#> n = 10000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848632 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378079 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 20000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848638 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378106 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 30000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848641 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378138 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 40000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848644 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378162 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 50000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848648 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378193 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 60000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848650 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378217 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 70000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848654 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378249 25.8    8388608  64.0  8323579  63.6
#> 
#> n = 80000          used (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1848657 98.8    2856611 152.6  2856611 152.6
#> Vcells 3378277 25.8    8388608  64.0  8323579  63.6
#>   - time spent on ABC-MCMC: 13.8504 s
#>   - time spent on precalibration: 24.6518 s
#> Started chain.
#> 
#> n = 10000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905640 101.8    2856612 152.6  2856612 152.6
#> Vcells 3512313  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 20000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905643 101.8    2856615 152.6  2856615 152.6
#> Vcells 3512341  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 30000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905646 101.8    2856615 152.6  2856615 152.6
#> Vcells 3512369  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 40000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905649 101.8    2856616 152.6  2856616 152.6
#> Vcells 3512397  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 50000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905652 101.8    2856616 152.6  2856616 152.6
#> Vcells 3512425  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 60000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905656 101.8    2856617 152.6  2856617 152.6
#> Vcells 3512457  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 70000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905659 101.8    2856617 152.6  2856617 152.6
#> Vcells 3512485  26.8    8388608  64.0  8388604  64.0
#> 
#> n = 80000          used  (Mb) gc trigger  (Mb) max used  (Mb)
#> Ncells 1905661 101.8    2856618 152.6  2856618 152.6
#> Vcells 3512509  26.8    8388608  64.0  8388604  64.0
#>   - time spent on ABC-MCMC: 7.32202 s
#> 
#> -Checking fit with previous data
#> --  154  samples  did not fit previous datasets
end_time = Sys.time()
time_ = end_time - start_time
print(time_)
#> Time difference of 22.23162 mins

We plot the sample as a two-dimensional histogram plot matrix using the hexbin package:

colnames(mcmc$draws)<-names(parVal)
hexbin::hexplom(mcmc$draws)
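
If hexbin is not installed, an ordinary base-R scatter-plot matrix gives a similar, if coarser, picture (an alternative, not part of the workflow above):

pairs(mcmc$draws, pch = ".", col = grDevices::adjustcolor("black", alpha.f = 0.2))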

A sensitivity plot using the results of the above loop:

y<-simulate(t(mcmc$draws))
dim(y[[1]]$func)
#> [1]   1 225 646

where y[[1]] refers to the simulation corresponding to the first experiment, experiments[[1]], and $func refers to the output function values (rather than state variables). The output functions are defined in the table SBtab$Output.
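
To keep the index bookkeeping straight, here is the layout as a short sketch (the variable names are descriptive and not returned by the package):

## dim(y[[1]]$func) is (output, time point, sample member)
nOut  <- dim(y[[1]]$func)[1] # 1:   one output function
nTime <- dim(y[[1]]$func)[2] # 225: output time points
nDraw <- dim(y[[1]]$func)[3] # 646: accepted draws, equals nrow(mcmc$draws)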

f<-aperm(y[[1]]$func[1,,]) # aperm makes the sample-index (3rd) the first index of f, default permutation
S<-globalSensitivity(mcmc$draws,f)
S[1,]<-0 # the first index of S is time, and initially sensitivity is 0
cuS<-t(apply(S,1,cumsum))
plot.new()
tm<-experiments[[3]]$outputTimes
plot(tm,cuS[,3],type="l") # outline: total (cumulative) sensitivity over time
## stacked sensitivity plot: one filled band per parameter, drawn back to front
for (si in dim(S)[2]:1){
    polygon(c(tm,rev(tm)),c(cuS[,si],numeric(length(tm))),col=si+1)
}