
fix broken examples

Occurred due to the capitalisation of some column names in the sample metadata of MTBLS79 in pmp (a quick check of the current names is sketched below, after the file list).

Gavin Rhys Lloyd authored on 26/04/2021 15:00:13
Showing 37 changed files
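The examples below switch to the capitalised column names now used by the sample metadata of pmp::MTBLS79. A minimal sketch to confirm the current names (assuming the pmp and SummarizedExperiment packages are installed; the names listed in the comments are those implied by this commit, not an exhaustive set):

# pmp::MTBLS79 is a SummarizedExperiment, so colData() holds the sample metadata
library(SummarizedExperiment)
colnames(colData(pmp::MTBLS79))
# expected to include the capitalised names used in the updated examples,
# e.g. 'Class', 'Batch', 'run_order', 'Sample_Rep'
# (previously 'class', 'batch', 'sample_order', 'sample_rep')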

... ...
@@ -34,9 +34,9 @@ MTBLS79_DatasetExperiment=function(filtered=FALSE) {
 
 # internal function to generate corrected data from pmp
 prep_from_pmp = function() {
-    library(pmp)
+    # library(pmp)
     # the pmp SE object
-    SE = MTBLS79
+    SE = pmp::MTBLS79
     
     # convert to DE
     DE = as.DatasetExperiment(SE)
... ...
@@ -117,5 +117,5 @@ prep_from_pmp = function() {
     to_filter=colnames(MTBLS79_corrected)[!to_filter] # names of features to remove
     
     # write the data
-    usethis::use_data(MTBLS79_corrected,to_filter,internal=TRUE,overwrite=TRUE)
+    #usethis::use_data(MTBLS79_corrected,to_filter,internal=TRUE,overwrite=TRUE)
 }
\ No newline at end of file
... ...
@@ -5,9 +5,9 @@
 #' M = filter_by_name(mode='include',dimension='variable',
 #'         names=colnames(D$data)[1:10]) + # first 10 features
 #'     filter_smeta(mode='exclude',levels='QC',
-#'         factor_name='class') + # reduce to two group comparison
-#'     confounders_clsq(factor_name = 'class',
-#'         confounding_factors=c('sample_order','batch'))
+#'         factor_name='Class') + # reduce to two group comparison
+#'     confounders_clsq(factor_name = 'Class',
+#'         confounding_factors=c('run_order','Batch'))
 #' M = model_apply(M,D)
 #' @export confounders_clsq
 confounders_clsq = function(alpha=0.05,mtc='fdr',factor_name,
... ...
@@ -169,9 +169,9 @@ setMethod(f="model_apply",
 #' M = filter_by_name(mode='include',dimension='variable',
 #'         names=colnames(D$data)[1:10]) + # first 10 features
 #'     filter_smeta(mode='exclude',levels='QC',
-#'         factor_name='class') + # reduce to two group comparison
-#'     confounders_clsq(factor_name = 'class',
-#'         confounding_factors=c('sample_order','batch'))
+#'         factor_name='Class') + # reduce to two group comparison
+#'     confounders_clsq(factor_name = 'Class',
+#'         confounding_factors=c('run_order','Batch'))
 #' M = model_apply(M,D)
 #' C = C=confounders_lsq_barchart(feature_to_plot=1,threshold=15)
 #' chart_plot(C,M[3])
... ...
@@ -251,9 +251,9 @@ setMethod(f="chart_plot",
 #' M = filter_by_name(mode='include',dimension='variable',
 #'         names=colnames(D$data)[1:10]) + # first 10 features
 #'     filter_smeta(mode='exclude',levels='QC',
-#'         factor_name='class') + # reduce to two group comparison
-#'     confounders_clsq(factor_name = 'class',
-#'         confounding_factors=c('sample_order','batch'))
+#'         factor_name='Class') + # reduce to two group comparison
+#'     confounders_clsq(factor_name = 'Class',
+#'         confounding_factors=c('run_order','Batch'))
 #' M = model_apply(M,D)
 #' C = C=confounders_lsq_boxplot(threshold=15)
 #' chart_plot(C,M[3])
... ...
@@ -6,8 +6,8 @@
 #' D = D[,1:10]
 #'
 #' # convert to numeric for this example
-#' D$sample_meta$sample_order=as.numeric(D$sample_meta$sample_order)
-#' D$sample_meta$sample_rep=as.numeric(D$sample_meta$sample_rep)
+#' D$sample_meta$sample_order=as.numeric(D$sample_meta$run_order)
+#' D$sample_meta$sample_rep=as.numeric(D$sample_meta$Sample_Rep)
 #'
 #' M = corr_coef(factor_names=c('sample_order','sample_rep'))
 #' M = model_apply(M,D)
... ...
@@ -1,7 +1,7 @@
 #' @eval get_description('dratio_filter')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' M = dratio_filter(threshold=20,qc_label='QC',factor_name='class')
+#' M = dratio_filter(threshold=20,qc_label='QC',factor_name='Class')
 #' M = model_apply(M,D)
 #' @export dratio_filter
 dratio_filter = function(threshold=20, qc_label='QC', factor_name, ...) {
... ...
@@ -206,7 +206,7 @@ setMethod(f="chart_plot",
 #' @eval get_description('mv_boxplot')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' C = mv_boxplot(factor_name='class')
+#' C = mv_boxplot(factor_name='Class')
 #' chart_plot(C,D)
 #'
 #' @import struct
... ...
@@ -342,7 +342,7 @@ setMethod(f="chart_plot",
 #' @eval get_description('DatasetExperiment_dist')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' C = DatasetExperiment_dist(factor_name='class')
+#' C = DatasetExperiment_dist(factor_name='Class')
 #' chart_plot(C,D)
 #' @import struct
 #' @export DatasetExperiment_dist
... ...
@@ -420,7 +420,7 @@ setMethod(f="chart_plot",
 #' @eval get_description('DatasetExperiment_boxplot')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' C = DatasetExperiment_boxplot(factor_name='class',number=10,per_class=FALSE)
+#' C = DatasetExperiment_boxplot(factor_name='Class',number=10,per_class=FALSE)
 #' chart_plot(C,D)
 #' @return struct object
 #' @export DatasetExperiment_boxplot
... ...
@@ -533,7 +533,7 @@ setMethod(f="chart_plot",
 #' @examples
 #' D1=MTBLS79_DatasetExperiment(filtered=FALSE)
 #' D2=MTBLS79_DatasetExperiment(filtered=TRUE)
-#' C = compare_dist(factor_name='class')
+#' C = compare_dist(factor_name='Class')
 #' chart_plot(C,D1,D2)
 #' @import struct
 #' @export compare_dist
... ...
@@ -2,10 +2,10 @@
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
 #' C = feature_profile_array(
-#'     run_order='sample_order',
+#'     run_order='run_order',
 #'     qc_label='QC',
-#'     qc_column='class',
-#'     colour_by='class',
+#'     qc_column='Class',
+#'     colour_by='Class',
 #'     feature_to_plot=1:3,
 #'     nrow=1,
 #'     log=TRUE)
... ...
@@ -1,10 +1,10 @@
 #' @eval get_description('feature_profile')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' C = feature_profile(run_order='sample_order',
+#' C = feature_profile(run_order='run_order',
 #'     qc_label='QC',
-#'     qc_column='class',
-#'     colour_by='class',
+#'     qc_column='Class',
+#'     colour_by='Class',
 #'     feature_to_plot=1)
 #' chart_plot(C,D)
 #' @export feature_profile
... ...
@@ -1,7 +1,7 @@
 #' @eval get_description('filter_na_count')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' M = filter_na_count(threshold=3,factor_name='class')
+#' M = filter_na_count(threshold=3,factor_name='Class')
 #' M = model_apply(M,D)
 #' @export filter_na_count
 filter_na_count = function(threshold,factor_name,...) {
... ...
@@ -9,7 +9,7 @@
 #' pred=as.data.frame(pred)
 #'
 #' # apply method
-#' M = fisher_exact(alpha=0.05,mtc='fdr',factor_name='class',factor_pred=pred)
+#' M = fisher_exact(alpha=0.05,mtc='fdr',factor_name='Class',factor_pred=pred)
 #' M=model_apply(M,D)
 #' @import struct
 #' @import stats
... ...
@@ -1,7 +1,7 @@
 #' @eval get_description('fold_change')
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
-#' M = fold_change(factor_name='class')
+#' M = fold_change(factor_name='Class')
 #' M = model_apply(M,D)
 #' @import stats
 #' @export fold_change
... ...
@@ -2,8 +2,8 @@
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
 #' D=D[,1:10,drop=FALSE]
-#' M = filter_smeta(mode='exclude',levels='QC',factor_name='class') +
-#'     fold_change_int(factor_name=c('class','batch'))
+#' M = filter_smeta(mode='exclude',levels='QC',factor_name='Class') +
+#'     fold_change_int(factor_name=c('Class','Batch'))
 #' M = model_apply(M,D)
 #' @export fold_change_int
 fold_change_int = function(
... ...
@@ -4,20 +4,20 @@
 #' D = MTBLS79_DatasetExperiment(filtered=TRUE)
 #'
 #' # normalise, impute and scale then remove QCs
-#' P = pqn_norm(qc_label='QC',factor_name='class') +
+#' P = pqn_norm(qc_label='QC',factor_name='Class') +
 #'     knn_impute(neighbours=5) +
-#'     glog_transform(qc_label='QC',factor_name='class') +
-#'     filter_smeta(mode='exclude',levels='QC',factor_name='class')
+#'     glog_transform(qc_label='QC',factor_name='Class') +
+#'     filter_smeta(mode='exclude',levels='QC',factor_name='Class')
 #' P = model_apply(P,D)
 #' D = predicted(P)
 #'
 #' # forward selection using a PLSDA model
-#' M = forward_selection_by_rank(factor_name='class',
+#' M = forward_selection_by_rank(factor_name='Class',
 #'                              min_no_vars=2,
 #'                              max_no_vars=11,
 #'                              variable_rank=1:2063) *
 #'     (mean_centre() + PLSDA(number_components=1,
-#'                            factor_name='class'))
+#'                            factor_name='Class'))
 #' M = run(M,D,balanced_accuracy())
 #'
 #' @export forward_selection_by_rank
... ...
@@ -260,20 +260,20 @@ eval_loess=function(x,X,Y,k=10,p=0.66)
 #' D = MTBLS79_DatasetExperiment(filtered=TRUE)
 #'
 #' # normalise, impute and scale then remove QCs
-#' P = pqn_norm(qc_label='QC',factor_name='class') +
+#' P = pqn_norm(qc_label='QC',factor_name='Class') +
 #'     knn_impute(neighbours=5) +
-#'     glog_transform(qc_label='QC',factor_name='class') +
-#'     filter_smeta(mode='exclude',levels='QC',factor_name='class')
+#'     glog_transform(qc_label='QC',factor_name='Class') +
+#'     filter_smeta(mode='exclude',levels='QC',factor_name='Class')
 #' P = model_apply(P,D)
 #' D = predicted(P)
 #'
 #' # forward selection using a PLSDA model
-#' M = forward_selection_by_rank(factor_name='class',
+#' M = forward_selection_by_rank(factor_name='Class',
 #'                              min_no_vars=2,
 #'                              max_no_vars=11,
 #'                              variable_rank=1:2063) *
 #'     (mean_centre() + PLSDA(number_components=1,
-#'                            factor_name='class'))
+#'                            factor_name='Class'))
 #' M = run(M,D,balanced_accuracy())
 #'
 #' # chart
... ...
@@ -3,10 +3,10 @@
 #' @examples
 #' D = MTBLS79_DatasetExperiment()
 #' # some preprocessing
-#' M = pqn_norm(qc_label='QC',factor_name='class') +
+#' M = pqn_norm(qc_label='QC',factor_name='Class') +
 #'     knn_impute() +
-#'     glog_transform(qc_label='QC',factor_name='class') +
-#'     filter_smeta(factor_name='class',levels='QC',mode='exclude')
+#'     glog_transform(qc_label='QC',factor_name='Class') +
+#'     filter_smeta(factor_name='Class',levels='QC',mode='exclude')
 #' M=model_apply(M,D)
 #' D=predicted(M)
 #'
... ...
@@ -15,8 +15,8 @@
 #'
 #' # optmise number of components for PLS model
 #' I = grid_search_1d(param_to_optimise='number_components',search_values=1:5,
-#'         model_index=2,factor_name='class') *
-#'         (mean_centre()+PLSDA(factor_name='class'))
+#'         model_index=2,factor_name='Class') *
+#'         (mean_centre()+PLSDA(factor_name='Class'))
 #' I = run(I,D,balanced_accuracy())
 #'
 grid_search_1d = function(param_to_optimise,search_values,model_index,factor_name,max_min='min',...) {
... ...
@@ -1,7 +1,7 @@
 #' @eval get_description('rsd_filter')
 #' @export rsd_filter
 #' @examples
-#' M = rsd_filter(factor_name='class')
+#' M = rsd_filter(factor_name='Class')
 #'
 rsd_filter = function(rsd_threshold=20,qc_label='QC',factor_name,...) {
     out=struct::new_struct('rsd_filter',
... ...
@@ -1,7 +1,7 @@
 #' @eval get_description('ttest')
 #' @export ttest
 #' @examples
-#' M = ttest(factor_name='class')
+#' M = ttest(factor_name='Class')
 #'
 ttest = function(
     alpha=0.05,
... ...
@@ -2,7 +2,7 @@
 #' @return struct object
 #' @export wilcox_test
 #' @examples
-#' M = wilcox_test(factor_name='class')
+#' M = wilcox_test(factor_name='Class')
 #'
 wilcox_test = function(alpha=0.05,mtc='fdr',factor_names,paired=FALSE,paired_factor=character(0),...) {
     out=struct::new_struct('wilcox_test',
... ...
@@ -33,6 +33,6 @@ A boxplot to visualise the distribution of values within a subset of features.
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-C = DatasetExperiment_boxplot(factor_name='class',number=10,per_class=FALSE)
+C = DatasetExperiment_boxplot(factor_name='Class',number=10,per_class=FALSE)
 chart_plot(C,D)
 }
... ...
@@ -21,6 +21,6 @@ A histogram to visualise the distribution of values within features.
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-C = DatasetExperiment_dist(factor_name='class')
+C = DatasetExperiment_dist(factor_name='Class')
 chart_plot(C,D)
 }
... ...
@@ -20,6 +20,6 @@ Histograms and boxplots computed across samples and features are used to visuall
 \examples{
 D1=MTBLS79_DatasetExperiment(filtered=FALSE)
 D2=MTBLS79_DatasetExperiment(filtered=TRUE)
-C = compare_dist(factor_name='class')
+C = compare_dist(factor_name='Class')
 chart_plot(C,D1,D2)
 }
... ...
@@ -37,8 +37,8 @@ D = MTBLS79_DatasetExperiment()
 M = filter_by_name(mode='include',dimension='variable',
         names=colnames(D$data)[1:10]) + # first 10 features
     filter_smeta(mode='exclude',levels='QC',
-        factor_name='class') + # reduce to two group comparison
-    confounders_clsq(factor_name = 'class',
-        confounding_factors=c('sample_order','batch'))
+        factor_name='Class') + # reduce to two group comparison
+    confounders_clsq(factor_name = 'Class',
+        confounding_factors=c('run_order','Batch'))
 M = model_apply(M,D)
 }
... ...
@@ -24,9 +24,9 @@ D = MTBLS79_DatasetExperiment()
 M = filter_by_name(mode='include',dimension='variable',
         names=colnames(D$data)[1:10]) + # first 10 features
     filter_smeta(mode='exclude',levels='QC',
-        factor_name='class') + # reduce to two group comparison
-    confounders_clsq(factor_name = 'class',
-        confounding_factors=c('sample_order','batch'))
+        factor_name='Class') + # reduce to two group comparison
+    confounders_clsq(factor_name = 'Class',
+        confounding_factors=c('run_order','Batch'))
 M = model_apply(M,D)
 C = C=confounders_lsq_barchart(feature_to_plot=1,threshold=15)
 chart_plot(C,M[3])
... ...
@@ -22,9 +22,9 @@ D = MTBLS79_DatasetExperiment()
 M = filter_by_name(mode='include',dimension='variable',
         names=colnames(D$data)[1:10]) + # first 10 features
     filter_smeta(mode='exclude',levels='QC',
-        factor_name='class') + # reduce to two group comparison
-    confounders_clsq(factor_name = 'class',
-        confounding_factors=c('sample_order','batch'))
+        factor_name='Class') + # reduce to two group comparison
+    confounders_clsq(factor_name = 'Class',
+        confounding_factors=c('run_order','Batch'))
 M = model_apply(M,D)
 C = C=confounders_lsq_boxplot(threshold=15)
 chart_plot(C,M[3])
... ...
@@ -33,8 +33,8 @@ D = MTBLS79_DatasetExperiment(filtered=TRUE)
 D = D[,1:10]
 
 # convert to numeric for this example
-D$sample_meta$sample_order=as.numeric(D$sample_meta$sample_order)
-D$sample_meta$sample_rep=as.numeric(D$sample_meta$sample_rep)
+D$sample_meta$sample_order=as.numeric(D$sample_meta$run_order)
+D$sample_meta$sample_rep=as.numeric(D$sample_meta$Sample_Rep)
 
 M = corr_coef(factor_names=c('sample_order','sample_rep'))
 M = model_apply(M,D)
... ...
@@ -23,7 +23,7 @@ The dispersion ratio (d-ratio) compares the standard deviation (or non-parametri
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-M = dratio_filter(threshold=20,qc_label='QC',factor_name='class')
+M = dratio_filter(threshold=20,qc_label='QC',factor_name='Class')
 M = model_apply(M,D)
 }
 \references{
... ...
@@ -37,10 +37,10 @@ A plot visualising the change in intensity of a feature with a continuous variab
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-C = feature_profile(run_order='sample_order',
+C = feature_profile(run_order='run_order',
     qc_label='QC',
-    qc_column='class',
-    colour_by='class',
+    qc_column='Class',
+    colour_by='Class',
     feature_to_plot=1)
 chart_plot(C,D)
 }
... ...
@@ -41,10 +41,10 @@ A plot visualising the change in intensity of a feature with a continuous variab
 \examples{
 D = MTBLS79_DatasetExperiment()
 C = feature_profile_array(
-    run_order='sample_order',
+    run_order='run_order',
     qc_label='QC',
-    qc_column='class',
-    colour_by='class',
+    qc_column='Class',
+    colour_by='Class',
     feature_to_plot=1:3,
     nrow=1,
     log=TRUE)
... ...
@@ -21,6 +21,6 @@ The number of measured values is counted for each feature, and any feature with
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-M = filter_na_count(threshold=3,factor_name='class')
+M = filter_na_count(threshold=3,factor_name='Class')
 M = model_apply(M,D)
 }
... ...
@@ -33,6 +33,6 @@ pred=lapply(pred,factor,levels=c(TRUE,FALSE))
 pred=as.data.frame(pred)
 
 # apply method
-M = fisher_exact(alpha=0.05,mtc='fdr',factor_name='class',factor_pred=pred)
+M = fisher_exact(alpha=0.05,mtc='fdr',factor_name='Class',factor_pred=pred)
 M=model_apply(M,D)
 }
... ...
@@ -40,7 +40,7 @@ Fold change is the relative change in mean (or non-parametric equivalent) intens
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-M = fold_change(factor_name='class')
+M = fold_change(factor_name='Class')
 M = model_apply(M,D)
 }
 \references{
... ...
@@ -35,8 +35,8 @@ For more than one factor the fold change calculation is extended to include all
 \examples{
 D = MTBLS79_DatasetExperiment()
 D=D[,1:10,drop=FALSE]
-M = filter_smeta(mode='exclude',levels='QC',factor_name='class') +
-    fold_change_int(factor_name=c('class','batch'))
+M = filter_smeta(mode='exclude',levels='QC',factor_name='Class') +
+    fold_change_int(factor_name=c('Class','Batch'))
 M = model_apply(M,D)
 }
 \references{
... ...
@@ -37,20 +37,20 @@ A model is trained and performance metric computed by including increasing numbe
 D = MTBLS79_DatasetExperiment(filtered=TRUE)
 
 # normalise, impute and scale then remove QCs
-P = pqn_norm(qc_label='QC',factor_name='class') +
+P = pqn_norm(qc_label='QC',factor_name='Class') +
     knn_impute(neighbours=5) +
-    glog_transform(qc_label='QC',factor_name='class') +
-    filter_smeta(mode='exclude',levels='QC',factor_name='class')
+    glog_transform(qc_label='QC',factor_name='Class') +
+    filter_smeta(mode='exclude',levels='QC',factor_name='Class')
 P = model_apply(P,D)
 D = predicted(P)
 
 # forward selection using a PLSDA model
-M = forward_selection_by_rank(factor_name='class',
+M = forward_selection_by_rank(factor_name='Class',
                              min_no_vars=2,
                              max_no_vars=11,
                              variable_rank=1:2063) *
     (mean_centre() + PLSDA(number_components=1,
-                           factor_name='class'))
+                           factor_name='Class'))
 M = run(M,D,balanced_accuracy())
 
 }
... ...
@@ -20,20 +20,20 @@ A line plot for forward selection. The computed model performance metric is plot
 D = MTBLS79_DatasetExperiment(filtered=TRUE)
 
 # normalise, impute and scale then remove QCs
-P = pqn_norm(qc_label='QC',factor_name='class') +
+P = pqn_norm(qc_label='QC',factor_name='Class') +
     knn_impute(neighbours=5) +
-    glog_transform(qc_label='QC',factor_name='class') +
-    filter_smeta(mode='exclude',levels='QC',factor_name='class')
+    glog_transform(qc_label='QC',factor_name='Class') +
+    filter_smeta(mode='exclude',levels='QC',factor_name='Class')
 P = model_apply(P,D)
 D = predicted(P)
 
 # forward selection using a PLSDA model
-M = forward_selection_by_rank(factor_name='class',
+M = forward_selection_by_rank(factor_name='Class',
                              min_no_vars=2,
                              max_no_vars=11,
                              variable_rank=1:2063) *
     (mean_centre() + PLSDA(number_components=1,
-                           factor_name='class'))
+                           factor_name='Class'))
 M = run(M,D,balanced_accuracy())
 
 # chart
... ...
@@ -35,10 +35,10 @@ A one dimensional grid search calculates a performance metric for a model at eve
 \examples{
 D = MTBLS79_DatasetExperiment()
 # some preprocessing
-M = pqn_norm(qc_label='QC',factor_name='class') +
+M = pqn_norm(qc_label='QC',factor_name='Class') +
     knn_impute() +
-    glog_transform(qc_label='QC',factor_name='class') +
-    filter_smeta(factor_name='class',levels='QC',mode='exclude')
+    glog_transform(qc_label='QC',factor_name='Class') +
+    filter_smeta(factor_name='Class',levels='QC',mode='exclude')
 M=model_apply(M,D)
 D=predicted(M)
 
... ...
@@ -47,8 +47,8 @@ D=D[,1:10]
 
 # optmise number of components for PLS model
 I = grid_search_1d(param_to_optimise='number_components',search_values=1:5,
-        model_index=2,factor_name='class') *
-        (mean_centre()+PLSDA(factor_name='class'))
+        model_index=2,factor_name='Class') *
+        (mean_centre()+PLSDA(factor_name='Class'))
 I = run(I,D,balanced_accuracy())
 
 }
... ...
@@ -31,7 +31,7 @@ Boxplots of the number of missing values per sample/feature.
 }
 \examples{
 D = MTBLS79_DatasetExperiment()
-C = mv_boxplot(factor_name='class')
+C = mv_boxplot(factor_name='Class')
 chart_plot(C,D)
 
 }
... ...
@@ -25,7 +25,7 @@ An RSD filter calculates the relative standard deviation (the ratio of the stand
 This object makes use of functionality from the following packages:\itemize{\item{\code{pmp}}}
 }
 \examples{
-M = rsd_filter(factor_name='class')
+M = rsd_filter(factor_name='Class')
 
 }
 \references{
... ...
@@ -36,6 +36,6 @@ A  \code{ttest} object.
 A t-test compares the means of two factor levels. Multiple-test corrected p-values are used to indicate the significance of the computed difference for all features.
 }
 \examples{
-M = ttest(factor_name='class')
+M = ttest(factor_name='Class')
 
 }
... ...
@@ -35,6 +35,6 @@ struct object
 A Mann-Whitney-Wilcoxon signed rank test compares ,the ranks of values in two groups. It is the non-parametric equivalent of a t-test. Multiple test corrected p-values are computed as indicators of significance for each variable/feature.
 }
 \examples{
-M = wilcox_test(factor_name='class')
+M = wilcox_test(factor_name='Class')
 
 }
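As a follow-up check, one of the corrected examples can be run end-to-end against the renamed columns. This is only a sketch, assuming structToolbox and its dependencies are installed; filter_na_count, model_apply and MTBLS79_DatasetExperiment are taken from the diff above, and the stopifnot() guard encodes the column names this commit assumes:

library(structToolbox)
D = MTBLS79_DatasetExperiment()
# guard: these capitalised columns are assumed by the updated examples
stopifnot(all(c('Class','Batch','run_order') %in% colnames(D$sample_meta)))
M = filter_na_count(threshold=3, factor_name='Class')
M = model_apply(M, D)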