Hi,
I just ran into this issue. At first I thought it was a problem with the NFS storage, so I tried again with a local copy of the file. Below is part of the output thrown by MinionQC (i.e. R). Interestingly, within a single GridION run, some libraries were processed successfully and others were not. Another thing worth mentioning: I have been able to process the "weird" files successfully on my Mac.
I installed MinionQC via conda (the bioconda channel) and have version 1.4.1.
Could you help with this?
Best.
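In case it helps with the comparison between the Mac and the server, here is a minimal sketch (my own diagnostic, not MinionQC output) of what I can run in the conda environment's R to report the versions of the libraries that appear in the traceback further down:

```r
# Minimal sketch (my own diagnostic, not part of MinionQC): report the versions
# of R and of the packages that show up in the traceback below, so they can be
# compared between the conda environment on the server and the Mac that works.
print(R.version.string)
print(packageVersion("ggplot2"))
print(packageVersion("mgcv"))
```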
```
(base) [Ztest]$ cp /nfs/SEQ_Nanopore/R-19-0167-6/L-19-1406-5/20191219_1649_GA20000_FAL80778_4d440b92/GXB02046_20191219_174922_FAL80778_gridion_sequencing_run_L-19-1406-5_sequencing_summary.txt sequencing_summary.txt && MinIONQC.R -i sequencing_summary.txt -o .
During startup - Warning messages:
1: Setting LC_CTYPE failed, using "C"
2: Setting LC_COLLATE failed, using "C"
3: Setting LC_TIME failed, using "C"
4: Setting LC_MESSAGES failed, using "C"
5: Setting LC_MONETARY failed, using "C"
6: Setting LC_PAPER failed, using "C"
7: Setting LC_MEASUREMENT failed, using "C"
INFO [2020-10-26 12:15:39] Loading input file: sequencing_summary.txt
INFO [2020-10-26 12:15:41] MinION flowcell detected
INFO [2020-10-26 12:15:51] .: creating output directory:./.
INFO [2020-10-26 12:15:51] .: summarising input file for flowcell
INFO [2020-10-26 12:15:53] .: plotting length histogram
INFO [2020-10-26 12:15:56] .: plotting mean Q score histogram
INFO [2020-10-26 12:15:58] .: plotting flowcell overview
INFO [2020-10-26 12:16:54] .: plotting flowcell yield over time
INFO [2020-10-26 12:17:10] .: plotting flowcell yield by read length
INFO [2020-10-26 12:17:25] .: plotting sequence length over time
*** caught segfault ***
address 0x7f21f504d040, cause 'memory not mapped'
*** caught segfault ***
address 0x7f21fa04f040, cause 'invalid permissions'
*** caught segfault ***
address 0x7f21f784e040, cause 'invalid permissions'
Traceback:
1: gam.fit3.post.proc(G$X, G$L, G$lsp0, G$S, G$off, object)
Traceback:
1: gam.fit3.post.proc(G$X, G$L, G$lsp0, G$S, G$off, object)
2: gam.outer(lsp, fscale = null.stuff$null.scale, family = G$family, control = control, criterion = criterion, method = method, optimizer = optimizer, scale = scale, gamma = gamma, G = G, start = start, ...)
3: estimate.gam(G, method, optimizer, control, in.out, scale, gamma, ...)
4: (function (formula, family = gaussian(), data = list(), weights = NULL, subset = NULL, na.action, offset = NULL, method = "GCV.Cp", optimizer = c("outer", "newton"), control = list(), scale = , select = FALSE, knots = NULL, sp = NULL, min.sp = NULL, H = NULL, gamma = 1, fit = TRUE, paraPen = NULL, G = NULL, in.out = NULL, drop.unused.levels = TRUE, drop.intercept = NULL, discrete = FALSE, ...) { control <- do.call("gam.control", control) if (is.null(G) && discrete) { cl <- match.call() cl[[1]] <- quote(bam) cl$fit = FALSE G <- eval(cl, parent.frame()) } if (is.null(G)) { gp <- interpret.gam(formula) cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- gp$fake.formula mf$family <- mf$control <- mf$scale <- mf$knots <- mf$sp <- mf$min.sp <- mf$H <- mf$select <- mf$drop.intercept <- mf$gamma <- mf$method <- mf$fit <- mf$paraPen <- mf$G <- mf$optimizer <- mf$in.out <- mf$discrete <- mf$... <- NULL mf$drop.unused.levels <- drop.unused.levels mf[[1]] <- quote(stats::model.frame) pmf <- mf mf <- eval(mf, parent.frame()) if (nrow(mf) < 2) stop("Not enough (non-NA) data to do anything meaningful") terms <- attr(mf, "terms") vars <- all.vars1(gp$fake.formula[-2]) inp <- parse(text = paste("list(", paste(vars, collapse = ","), ")")) if (!is.list(data) && !is.data.frame(data)) data <- as.data.frame(data) dl <- eval(inp, data, parent.frame()) names(dl) <- vars var.summary <- variable.summary(gp$pf, dl, nrow(mf)) rm(dl) if (is.list(formula)) { environment(formula) <- environment(formula[[1]]) pterms <- list() tlab <- rep("", 0) for (i in 1:length(formula)) { pmf$formula <- gp[[i]]$pf pterms[[i]] <- attr(eval(pmf, parent.frame()), "terms") tlabi <- attr(pterms[[i]], "term.labels") if (i > 1 && length(tlabi) > 0) tlabi <- paste(tlabi, i - 1, sep = ".") tlab <- c(tlab, tlabi) } attr(pterms, "term.labels") <- tlab } else { pmf$formula <- gp$pf pmf <- eval(pmf, parent.frame()) pterms <- attr(pmf, "terms") } if (is.character(family)) family <- eval(parse(text = family)) if (is.function(family)) family <- family() if (is.null(family$family)) stop("family not recognized") if (family$family[1] == "gaussian" && family$link == "identity") am <- TRUE else am <- FALSE if (!control$keepData) rm(data) if (is.null(family$drop.intercept)) { lengthf <- if (is.list(formula)) length(formula) else 1 if (is.null(drop.intercept)) drop.intercept <- rep(FALSE, lengthf) else { drop.intercept <- rep(drop.intercept, length = lengthf) if (sum(drop.intercept)) family$drop.intercept <- drop.intercept } } else drop.intercept <- as.logical(family$drop.intercept) if (inherits(family, "general.family") && !is.null(family$presetup)) eval(family$presetup) gsname <- if (is.list(formula)) "gam.setup.list" else "gam.setup" G <- do.call(gsname, list(formula = gp, pterms = pterms, data = mf, knots = knots, sp = sp, min.sp = min.sp, H = H, absorb.cons = TRUE, sparse.cons = 0, select = select, idLinksBases = control$idLinksBases, scale.penalty = control$scalePenalty, paraPen = paraPen, drop.intercept = drop.intercept)) G$var.summary <- var.summary G$family <- family if ((is.list(formula) && (is.null(family$nlp) || family$nlp != gp$nlp)) || (!is.list(formula) && !is.null(family$npl) && (family$npl > 1))) stop("incorrect number of linear predictors for family") G$terms <- terms G$mf <- mf G$cl <- cl G$am <- am if (is.null(G$offset)) G$offset <- rep(0, G$n) G$min.edf <- G$nsdf if (G$m) for (i in 1:G$m) G$min.edf <- G$min.edf + G$smooth[[i]]$null.space.dim G$formula <- formula G$pred.formula <- gp$pred.formula 
environment(G$formula) <- environment(formula) } else { if (!is.null(sp) && any(sp >= 0)) { if (is.null(G$L)) G$L <- diag(length(G$sp)) if (length(sp) != ncol(G$L)) stop("length of sp must be number of free smoothing parameters in original model") ind <- sp >= 0 spind <- log(sp[ind]) spind[!is.finite(spind)] <- -30 G$lsp0 <- G$lsp0 + drop(G$L[, ind, drop = FALSE] %% spind) G$L <- G$L[, !ind, drop = FALSE] G$sp <- rep(-1, ncol(G$L)) } } if (!fit) { class(G) <- "gam.prefit" return(G) } if (ncol(G$X) > nrow(G$X)) stop("Model has more coefficients than data") G$conv.tol <- control$mgcv.tol G$max.half <- control$mgcv.half object <- estimate.gam(G, method, optimizer, control, in.out, scale, gamma, ...) if (!is.null(G$L)) { object$full.sp <- as.numeric(exp(G$L %% log(object$sp) + G$lsp0)) names(object$full.sp) <- names(G$lsp0) } names(object$sp) <- names(G$sp) object$paraPen <- G$pP object$formula <- G$formula if (is.list(object$formula)) attr(object$formula, "lpi") <- attr(G$X, "lpi") object$var.summary <- G$var.summary object$cmX <- G$cmX object$model <- G$mf object$na.action <- attr(G$mf, "na.action") object$control <- control object$terms <- G$terms object$pred.formula <- G$pred.formula attr(object$pred.formula, "full") <- reformulate(all.vars(object$terms)) object$pterms <- G$pterms object$assign <- G$assign object$contrasts <- G$contrasts object$xlevels <- G$xlevels object$offset <- G$offset if (!is.null(G$Xcentre)) object$Xcentre <- G$Xcentre if (control$keepData) object$data <- data object$df.residual <- nrow(G$X) - sum(object$edf) object$min.edf <- G$min.edf if (G$am && !(method %in% c("REML", "ML", "P-ML", "P-REML"))) object$optimizer <- "magic" else object$optimizer <- optimizer object$call <- G$cl class(object) <- c("gam", "glm", "lm") if (is.null(object$deviance)) object$deviance <- sum(residuals(object, "deviance")^2) names(object$gcv.ubre) <- method environment(object$formula) <- environment(object$pred.formula) <- environment(object$terms) <- environment(object$pterms) <- .GlobalEnv if (!is.null(object$model)) environment(attr(object$model, "terms")) <- .GlobalEnv if (!is.null(attr(object$pred.formula, "full"))) environment(attr(object$pred.formula, "full")) <- .GlobalEnv object})(formula, data = data, weights = weight, method = "REML")
5: do.call(method, c(base.args, method.args))
6: f(...)
7: self$compute_group(data = group, scales = scales, ...)
8: FUN(X[[i]], ...)
9: lapply(groups, function(group) { self$compute_group(data = group, scales = scales, ...)})
10: f(..., self = self)
11: (function (...) f(..., self = self))(data = data, scales = scales, na.rm = FALSE, se = TRUE, flipped_aes = FALSE, method = function (formula, family = gaussian(), data = list(), weights = NULL, subset = NULL, na.action, offset = NULL, method = "GCV.Cp", optimizer = c("outer", "newton"), control = list(), scale = 0, select = FALSE, knots = NULL, sp = NULL, min.sp = NULL, H = NULL, gamma = 1, fit = TRUE, paraPen = NULL, G = NULL, in.out = NULL, drop.unused.levels = TRUE, drop.intercept = NULL, discrete = FALSE, ...) { control <- do.call("gam.control", control) if (is.null(G) && discrete) { cl <- match.call() cl[[1]] <- quote(bam) cl$fit = FALSE G <- eval(cl, parent.frame()) } if (is.null(G)) { gp <- interpret.gam(formula) cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- gp$fake.formula mf$family <- mf$control <- mf$scale <- mf$knots <- mf$sp <- mf$min.sp <- mf$H <- mf$select <- mf$drop.intercept <- mf$gamma <- mf$method <- mf$fit <- mf$paraPen <- mf$G <- mf$optimizer <- mf$in.out <- mf$discrete <- mf$... <- NULL mf$drop.unused.levels <- drop.unused.levels mf[[1]] <- quote(stats::model.frame) pmf <- mf mf <- eval(mf, parent.frame()) if (nrow(mf) < 2) stop("Not enough (non-NA) data to do anything meaningful") terms <- attr(mf, "terms") vars <- all.vars1(gp$fake.formula[-2]) inp <- parse(text = paste("list(", paste(vars, collapse = ","), ")")) if (!is.list(data) && !is.data.frame(data)) data <- as.data.frame(data) dl <- eval(inp, data, parent.frame()) names(dl) <- vars var.summary <- variable.summary(gp$pf, dl, nrow(mf)) rm(dl) if (is.list(formula)) { environment(formula) <- environment(formula[[1]]) pterms <- list() tlab <- rep("", 0) for (i in 1:length(formula)) { pmf$formula <- gp[[i]]$pf pterms[[i]] <- attr(eval(pmf, parent.frame()), "terms") tlabi <- attr(pterms[[i]], "term.labels") if (i > 1 && length(tlabi) > 0) tlabi <- paste(tlabi, i - 1, sep = ".") tlab <- c(tlab, tlabi) } attr(pterms, "term.labels") <- tlab } else { pmf$formula <- gp$pf pmf <- eval(pmf, parent.frame()) pterms <- attr(pmf, "terms") } if (is.character(family)) family <- eval(parse(text = family)) if (is.function(family)) family <- family() if (is.null(family$family)) stop("family not recognized") if (family$family[1] == "gaussian" && family$link == "identity") am <- TRUE else am <- FALSE if (!control$keepData) rm(data) if (is.null(family$drop.intercept)) { lengthf <- if (is.list(formula)) length(formula) else 1 if (is.null(drop.intercept)) drop.intercept <- rep(FALSE, lengthf) else { drop.intercept <- rep(drop.intercept, length = lengthf) if (sum(drop.intercept)) family$drop.intercept <- drop.intercept } } else drop.intercept <- as.logical(family$drop.intercept) if (inherits(family, "general.family") && !is.null(family$presetup)) eval(family$presetup) gsname <- if (is.list(formula)) "gam.setup.list" else "gam.setup" G <- do.call(gsname, list(formula = gp, pterms = pterms, data = mf, knots = knots, sp = sp, min.sp = min.sp, H = H, absorb.cons = TRUE, sparse.cons = 0, select = select, idLinksBases = control$idLinksBases, scale.penalty = control$scalePenalty, paraPen = paraPen, drop.intercept = drop.intercept)) G$var.summary <- var.summary G$family <- family if ((is.list(formula) && (is.null(family$nlp) || family$nlp != gp$nlp)) || (!is.list(formula) && !is.null(family$npl) && (family$npl > 1))) stop("incorrect number of linear predictors for family") G$terms <- terms G$mf <- mf G$cl <- cl G$am <- am if (is.null(G$offset)) G$offset <- rep(0, G$n) G$min.edf <- G$nsdf if (G$m) for (i in 1:G$m) 
G$min.edf <- G$min.edf + G$smooth[[i]]$null.space.dim G$formula <- formula G$pred.formula <- gp$pred.formula environment(G$formula) <- environment(formula) } else { if (!is.null(sp) && any(sp >= 0)) { if (is.null(G$L)) G$L <- diag(length(G$sp)) if (length(sp) != ncol(G$L)) stop("length of sp must be number of free smoothing parameters in original model") ind <- sp >= 0 spind <- log(sp[ind]) spind[!is.finite(spind)] <- -30 G$lsp0 <- G$lsp0 + drop(G$L[, ind, drop = FALSE] %% spind) G$L <- G$L[, !ind, drop = FALSE] G$sp <- rep(-1, ncol(G$L)) } } if (!fit) { class(G) <- "gam.prefit" return(G) } if (ncol(G$X) > nrow(G$X)) stop("Model has more coefficients than data") G$conv.tol <- control$mgcv.tol G$max.half <- control$mgcv.half object <- estimate.gam(G, method, optimizer, control, in.out, scale, gamma, ...) if (!is.null(G$L)) { object$full.sp <- as.numeric(exp(G$L %% log(object$sp) + G$lsp0)) names(object$full.sp) <- names(G$lsp0) } names(object$sp) <- names(G$sp) object$paraPen <- G$pP object$formula <- G$formula if (is.list(object$formula)) attr(object$formula, "lpi") <- attr(G$X, "lpi") object$var.summary <- G$var.summary object$cmX <- G$cmX object$model <- G$mf object$na.action <- attr(G$mf, "na.action") object$control <- control object$terms <- G$terms object$pred.formula <- G$pred.formula attr(object$pred.formula, "full") <- reformulate(all.vars(object$terms)) object$pterms <- G$pterms object$assign <- G$assign object$contrasts <- G$contrasts object$xlevels <- G$xlevels object$offset <- G$offset if (!is.null(G$Xcentre)) object$Xcentre <- G$Xcentre if (control$keepData) object$data <- data object$df.residual <- nrow(G$X) - sum(object$edf) object$min.edf <- G$min.edf if (G$am && !(method %in% c("REML", "ML", "P-ML", "P-REML"))) object$optimizer <- "magic" else object$optimizer <- optimizer object$call <- G$cl class(object) <- c("gam", "glm", "lm") if (is.null(object$deviance)) object$deviance <- sum(residuals(object, "deviance")^2) names(object$gcv.ubre) <- method environment(object$formula) <- environment(object$pred.formula) <- environment(object$terms) <- environment(object$pterms) <- .GlobalEnv if (!is.null(object$model)) environment(attr(object$model, "terms")) <- .GlobalEnv if (!is.null(attr(object$pred.formula, "full"))) environment(attr(object$pred.formula, "full")) <- .GlobalEnv object }, formula = y ~ s(x, bs = "cs"))
12: do.call(self$compute_panel, args)
13: doTryCatch(return(expr), name, parentenv, handler)
14: tryCatchOne(expr, names, parentenv, handlers[[1L]])
15: tryCatchList(expr, classes, parentenv, handlers)
16: tryCatch(do.call(self$compute_panel, args), error = function(e) { warn(glue("Computation failed in
{snake_class(self)}()
:\n{e$message}")) new_data_frame()})17: fun(x, ...)
18: apply_fun(df)
19: dapply(data, "PANEL", function(data) { scales <- layout$get_scales(data$PANEL[1]) tryCatch(do.call(self$compute_panel, args), error = function(e) { warn(glue("Computation failed in
{snake_class(self)}()
:\n{e$message}")) new_data_frame() })})20: f(..., self = self)
21: self$stat$compute_layer(data, params, layout)
22: f(..., self = self)
23: l$compute_statistic(d, layout)
24: f(l = layers[[i]], d = data[[i]])
25: by_layer(function(l, d) l$compute_statistic(d, layout))
26: ggplot_build.ggplot(x)
27: ggplot_build(x)
28: print.ggplot(x)
29: print(x)
30: grid.draw.ggplot(plot)
31: grid.draw(plot)
32: ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = p1m * 960/75, height = p1m * 480/75, plot = p7)
33: withCallingHandlers(expr, message = function(c) invokeRestart("muffleMessage"))
34: suppressMessages(ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = p1m * 960/75, height = p1m * 480/75, plot = p7))
35: single.flowcell(input.file, output.dir, q)
An irrecoverable exception occurred. R is aborting now ...
```
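From the traceback, the crash seems to happen while ggplot2 fits an mgcv::gam smoother (formula `y ~ s(x, bs = "cs")` with `method = "REML"`) during the `ggsave` call for `length_by_hour.png`, i.e. inside `single.flowcell()`. Below is a minimal sketch (my own reproduction attempt on synthetic data, not MinionQC code) that I can use to check whether that same call path segfaults on this machine independently of my sequencing summary:

```r
# Minimal sketch (my own reproduction attempt, not MinionQC code): exercise the
# same ggplot2 -> mgcv::gam call path seen in the traceback, on synthetic data.
library(ggplot2)
library(mgcv)

set.seed(1)
# Synthetic stand-in for the per-read time (hours) vs. read-length data.
d <- data.frame(x = runif(50000, 0, 48),
                y = rlnorm(50000, meanlog = 8, sdlog = 1))

p <- ggplot(d, aes(x, y)) +
  geom_point(alpha = 0.05) +
  # Same smoother as in the traceback: mgcv::gam, y ~ s(x, bs = "cs"), REML.
  geom_smooth(method = "gam",
              formula = y ~ s(x, bs = "cs"),
              method.args = list(method = "REML"))

ggsave("gam_smooth_test.png", plot = p, width = 8, height = 4)
```

If this also segfaults, that would point at the mgcv/ggplot2 build in the conda environment rather than at MinionQC or the input file.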