diff --git a/cscar.Rmd b/cscar.Rmd
index 60a784c..5c93f9b 100644
--- a/cscar.Rmd
+++ b/cscar.Rmd
@@ -8,7 +8,7 @@ output:
     transition: slide
     incremental: true
     center: true
-    css: slides_light.css
+    css: slides.css
 ---
 
 ```{r setup, include=FALSE}
@@ -35,7 +35,7 @@ $$y \sim \mathscr{N}(X\beta, \sigma^2)$$

-## Modeling Specialties {data-background='img/ml3.png'}
+## Modeling Specialties {data-background='img/ml2.png' data-state='opac25'}

Mixed models
@@ -105,7 +105,7 @@ stats-consulting@umich.edu   ds-consulting@umich.edu     hpc-co

-Hands on programming
+Hands-on programming
 
 Data Science Skill Series



diff --git a/cscar.html b/cscar.html
index b4a593c..5fb105a 100644
--- a/cscar.html
+++ b/cscar.html
@@ -472,7 +472,7 @@
 Statistical Modeling
 \[y \sim \mathscr{N}(X\beta, \sigma^2)\]
-[slide markup stripped in this view]
+[slide markup stripped in this view]
 Modeling Specialties
 Mixed models
 Machine learning
 High dimensional data
 Code optimization
 Text analysis
 Analysis of complex sample surveys
 GIS, spatial, remote-sensing
 and more!
@@ -509,7 +509,7 @@
 Workshops
-Hands on programming
 Data Science Skill Series
+Hands-on programming
 Data Science Skill Series



diff --git a/cscar_light.Rmd b/cscar_light.Rmd
new file mode 100644
index 0000000..6112f61
--- /dev/null
+++ b/cscar_light.Rmd
@@ -0,0 +1,322 @@
+---
+title: ""
+
+author: "cscar.research.umich.edu"
+output:
+  revealjs::revealjs_presentation:
+    theme: white
+    transition: slide
+    incremental: true
+    center: true
+    css: slides.css
+---
+
+```{r setup, include=FALSE}
+knitr::opts_chunk$set(echo = TRUE)
+```
+
+## Data Science Collaboration
+
+## Statistical Modeling {data-background='img/priorLikePosterior.png' data-state='opac50'}
+
+$$y \sim \mathscr{N}(X\beta, \sigma^2)$$
+
+## Modeling Specialties {data-background='img/ml2.png' data-state='opac25'}
+
+Mixed models
+Machine learning
+High dimensional data
+Code optimization
+Text analysis
+Analysis of complex sample surveys
+GIS, spatial, remote-sensing
+and more!
+
+## Programming Expertise {data-background='img/Rlogo.svg' data-state='opac75'}
+
+R
+Python
+Matlab
+Stan
+Stata
+Go
+and more!
+
+## Statistical
+and
+Computational Programming
+
+## Other Assistance {data-background='img/plotly_scatter.png' data-state='opac75'}
+
+Visualization
+Web scraping
+Data I/O
+Concept formation
+Reproducible research
+Best practices
+
+## {data-background='img/q_black.png' data-state='opac25'}
+
+For common data and analysis problems, just stop by!
+
+Send a question to the help list!
+
+stats-consulting@umich.edu   ds-consulting@umich.edu     hpc-consulting@umich.edu
+
+## Workshops {data-background='img/code_white2.png' data-state='opac50'}
+
+Hands-on programming
+Data Science Skill Series
+
+[cscar.research.umich.edu/events](cscar.research.umich.edu/events)
+
+ +## {data-transition="zoom" data-autoslide="3000"} + + + + + +## {data-transition="zoom" data-autoslide="3000" } + + + +## {data-transition="zoom" data-autoslide="2000"} + + + + +## {data-transition="zoom" data-autoslide="2000" } + + + +## {data-transition="zoom" data-autoslide="1000"} + + + + +## {data-transition="zoom" data-autoslide="1000" } + + + +## {data-transition="zoom" data-autoslide="500"} + + + + +## {data-transition="zoom" data-autoslide="500" } + + + + +## {data-transition="zoom" data-autoslide="250"} + + + + +## {data-transition="zoom" data-autoslide="250" } + + + + +## {data-transition="zoom" data-autoslide="125"} + + + + +## {data-transition="zoom" data-autoslide="125" } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## {data-transition="zoom" data-autoslide="125"} + + + +## {data-transition="zoom" data-autoslide="125"} + + + + +## {data-transition="zoom" data-autoslide="125" } + + + +## {data-transition="zoom" data-autoslide="125"} + + + + +## {data-transition="zoom" data-autoslide="125"} + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + +## {data-transition="zoom" data-autoslide="125" } + + + +## {data-transition="zoom" data-autoslide="125"} + + + +## {data-transition="zoom" data-autoslide="125"} + + + + +## {data-transition="zoom" data-autoslide="125"} + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + + + +## {data-transition="zoom" data-autoslide="125" } + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + +## {data-transition="zoom" data-autoslide="125"} + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + +## {data-transition="zoom" data-autoslide="125" } + + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + +## {data-transition="zoom" data-autoslide="125"} + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + +## {data-transition="zoom" data-autoslide="125" } + + + +## {data-transition="zoom" data-autoslide="125"} + + + + + +## {data-transition="zoom" data-background="img/sunshine.jpg" data-autoslide="3000"} + + + +
+Data Science Serenity
+
+## Join Us Now! {data-transition="zoom" data-autoslide="3000" }
+
+Can you do any less?!
+
+
+## {data-background="img/cscar84.png" data-state='opac75'}
\ No newline at end of file
diff --git a/cscar_light.html b/cscar_light.html
new file mode 100644
index 0000000..35ea849
--- /dev/null
+++ b/cscar_light.html
@@ -0,0 +1,721 @@
+[721 lines of generated reveal.js HTML for cscar_light.Rmd; the markup was stripped in this view and the surviving text simply duplicates the Rmd slide content above]
+ + + + + + + + + + + + + diff --git a/img/code_white.png b/img/code_white.png new file mode 100644 index 0000000..87cfff4 Binary files /dev/null and b/img/code_white.png differ diff --git a/img/code_white2.png b/img/code_white2.png new file mode 100644 index 0000000..f16ba19 Binary files /dev/null and b/img/code_white2.png differ diff --git a/misc_code_for_display.R b/misc_code_for_display.R new file mode 100644 index 0000000..136047a --- /dev/null +++ b/misc_code_for_display.R @@ -0,0 +1,266 @@ +# Functions --------------------------------------------------------------- + + +# ints and Times +dataGen <- function(nclusters, + nwithin, + corr = 0, + balanced = T) +{ + # setup + nclus = nclusters # number of groups + clus = factor(rep(1:nclus, each=nwithin)) # cluster variable + n = length(clus) # total n + + # parameters + sigma = 1 # residual sd + psi = matrix(c(1,corr,corr,1), 2, 2) # re covar + gamma_ = MASS::mvrnorm(nclus, mu=c(0,0), Sigma=psi, empirical=TRUE) # random effects + e = rnorm(n, mean=0, sd=sigma) # residual error + intercept = 3 # fixed effects + b1 = .75 + + # data + x = rep(1:nwithin-1, times=nclus) # covariate + y = intercept+gamma_[clus,1] + (b1+gamma_[clus,2])*x + e # target + d = data.frame(time=x, y, clus=clus) +} + +# nlme control msMaxIter, msMaxEval, abs.tol set to lavaan defaults + + +runNLME <- function(data) +{ + nlmemod = lme(y ~ time, data=data, + random = ~time|clus, + weights=varIdent(form = ~1|time), + method='ML', + control = list( + maxIter = 1000, + msMaxIter = 10000, + msMaxEval = 20000, + returnObject = T, + abs.tol = 2.220446e-15 + )) + varRes = coef(nlmemod$modelStruct$varStruct, unconstrained =FALSE,allCoef=T)*nlmemod$sigma + varRE = as.numeric(VarCorr(nlmemod)[1:2,1]) + corRE = as.numeric(VarCorr(nlmemod)['time','Corr']) + fixed = fixef(nlmemod) + list(varRes=varRes, varRE=varRE, corRE=corRE, fixed=fixed) +} + +runNLME_REML <- function(data) { + nlmemod = lme(y ~ time, data=data, + random = ~time|clus, + weights=varIdent(form = ~1|time), + method='REML', + control = list( + maxIter = 1000, + msMaxIter = 10000, + msMaxEval = 20000, + returnObject = T, + abs.tol = 2.220446e-15 + )) + varRes = coef(nlmemod$modelStruct$varStruct, unconstrained =FALSE,allCoef=T)*nlmemod$sigma + varRE = as.numeric(VarCorr(nlmemod)[1:2,1]) + corRE = as.numeric(VarCorr(nlmemod)['time','Corr']) + fixed = fixef(nlmemod) + list(varRes=varRes, varRE=varRE, corRE=corRE, fixed=fixed) +} + +runGAM <- function(data) { + gammod = gam(y ~ time + s(clus, bs='re') + s(time, clus, bs='re'), data=data, + method='REML') + varRes = gammod$sig2 + varRE = gam.vcomp(gammod)[1:2,1]^2 + # corRE = as.numeric(VarCorr(nlmemod)['time','Corr']) + fixed = coef(gammod)[1:2] + list(varRes=varRes, varRE=varRE, corRE=NULL, fixed=fixed) +} + +runBayes_brm = function(data, ...){ + # variance heterogeneity coming in next release + bayesmod = brm(y ~ time + (1 + time|clus), data = data, + iter = 6000, warmup=1000, thin=20, + prior = set_prior("normal(0,10)", class = "b"), + ...) + vc = VarCorr(bayesmod) + varRes = VarCorr(bayesmod)[[2]][['cov']][['mean']] + varRE = diag(VarCorr(bayesmod)[[1]][['cov']][['mean']]) + corRE = VarCorr(bayesmod)[[1]][['cor']][['mean']][2] + fixed = fixef(bayesmod)[,'mean'] + list(varRes=varRes, varRE=varRE, corRE=corRE, fixed=fixed) +} + +runBayes_rstan = function(compcode, data, ...){ + standat = brms::make_standata(y ~ time + (1 + time|clus), data=data) + bayesmod = sampling(sm, data = standat, + iter = 6000, warmup=1000, thin=20,...) 
+ varRes = get_posterior_mean(bayesmod, 'sigma')[,'mean-all chains'] + varRE = get_posterior_mean(bayesmod, 'sd_1')[,'mean-all chains'] + corRE = get_posterior_mean(bayesmod, 'cor_1')[,'mean-all chains'] + fixed = get_posterior_mean(bayesmod, pars=c('b_Intercept', 'b'))[,'mean-all chains'] + list(varRes=varRes, varRE=varRE, corRE=corRE, fixed=fixed) +} + + +runGC <- function(data) { + ntime = unique(data$time) + data$time = factor(data$time) + dataWide = tidyr::spread(data, time, y) + colnames(dataWide)[-1] = paste0('y', colnames(dataWide)[-1]) + + IModel = paste0('I =~ 1*y0 ', paste0('+ 1*', colnames(dataWide)[-c(1:2)], collapse=''), '\n') + SModel = paste0('S =~ 0*y0 ', paste0('+ ', 1:dplyr::last(ntime), '*', colnames(dataWide)[-c(1:2)], collapse=''), '\n') + CenterY = paste0('y0', paste0(' + ', colnames(dataWide)[-c(1:2)], collapse=''), ' ~ 0*1') + LVmodel = paste0(IModel, SModel, CenterY) + + suppressWarnings({semres = growth(LVmodel, data=dataWide)}) + varRes = sqrt(coef(semres)[ntime+1]) + varRE = coef(semres)[c('I~~I','S~~S')] + covRE = coef(semres)[c('I~~S')] + corRE = covRE/prod(sqrt(varRE)) + fixed = coef(semres)[c('I~1', 'S~1')] + list(varRes=varRes, varRE=varRE, corRE=corRE, fixed=fixed) +} + + +summarize_growth_mixed <- function(results, growth=T, whichpar='fixed', clean=F, settings=grid) { + + if (!clean) fail_idx = replicate(45, rep(FALSE, 500), simplify = F) + if (clean) { + if (growth){ + fail_idx = sapply(results, function(x) + sapply(x, function(res) any(is.nan(unlist(res)))), simplify=F) # | abs(res$corRE)>1 + } + else { + fail_idx = sapply(results, function(x) + sapply(x, function(res) any(res$corRE==1 | res$corRE==-1)), simplify=F) + } + } + + if(whichpar=='fixed'){ + # fixed effects + resultFE0 = sapply(1:length(results), function(x) + sapply(results[[x]][!fail_idx[[x]]], function(res) res$fixed), simplify=F) + resultFE = lapply(resultFE0, function(res) c(rowMeans(res), c(apply(res, 1, quantile, p=c(.025,.975))))) + resultFE = do.call('rbind', resultFE); colnames(resultFE) = c('Int','Time', 'LL_Int', 'UL_Int', 'LL_Time', 'UL_Time') + resultFE = data.frame(settings, resultFE) + return(resultFE) + } + + if(whichpar=='varRes'){ + stop('Not wanting to parse residual variance for 5 to 25 scores at present. 
:P') + # residual variance + # resultvarRes0 = sapply(1:length(results), function(x) + # sapply(results[[x]][!fail_idx[[x]]], function(res) res$varRes), simplify=F) + # resultvarRes = lapply(resultvarRes0, function(res) c(rowMeans(res), c(apply(res, 1, quantile, p=c(.025,.975))))) + # resultvarRes = do.call('rbind', resultvarRes); colnames(resultFE) = c('Int','Time', 'LL_Int', 'UL_Int', 'LL_Time', 'UL_Time') + # resultvarRes = data.frame(settings, round(resultvarRes, 2)) + # return(resultvarRes) + } + + if (whichpar=='varRE'){ + # random effects variance + resultvarRE0 = sapply(1:length(results), function(x) + sapply(results[[x]][!fail_idx[[x]]], function(res) res$varRE), simplify=F) + resultvarRE = lapply(resultvarRE0, function(res) c(rowMeans(res), c(apply(res, 1, quantile, p=c(.025,.975))))) + resultvarRE = do.call('rbind', resultvarRE); colnames(resultvarRE) = c('Int','Time', 'LL_Int', 'UL_Int', 'LL_Time', 'UL_Time') + resultvarRE = data.frame(settings, resultvarRE) + return(resultvarRE) + } + + if (whichpar=='corRE'){ + # cor random effects + resultcorRE0 = sapply(1:length(results), function(x) + sapply(results[[x]][!fail_idx[[x]]], function(res) res$corRE), simplify=F) + resultcorRE = lapply(resultcorRE0, function(res) c(mean(res, na.rm=T), quantile(res, p=c(.025,.975), na.rm=T))) + resultcorRE = do.call('rbind', resultcorRE); colnames(resultcorRE) = c('corRE_est', 'LL_corRE', 'UL_corRE') + resultcorRE = data.frame(settings, resultcorRE) + return(resultcorRE) + } +} + +summary_df <- function(g, m, whichpar='fixed') { + require(dplyr) + + if(whichpar=='fixed'){ + # fixed effects + res = left_join(g, m, by=c('nClusters', 'nWithinCluster', 'corRE')) %>% + arrange(nClusters, nWithinCluster, corRE) %>% + mutate(widthGrowth_Int = UL_Int.x-LL_Int.x, + widthMixed_Int = UL_Int.y-LL_Int.y, + mixedWidthMinusgrowthWidth_Int = widthMixed_Int-widthGrowth_Int, + widthGrowth_Time = UL_Time.x-LL_Time.x, + widthMixed_Time = UL_Time.y-LL_Time.y, + mixedWidthMinusgrowthWidth_Time = widthMixed_Time-widthGrowth_Time) + return(res) + } + + if(whichpar=='varRes'){ + stop('Not wanting to parse residual variance for 5 to 25 scores at present. 
:P') + # residual variance + # growthvarRes0 = sapply(1:length(results), function(x) + # sapply(results[[x]][!fail_idx[[x]]], function(res) res$varRes), simplify=F) + # growthvarRes = lapply(growthvarRes0, function(res) c(rowMeans(res), c(apply(res, 1, quantile, p=c(.025,.975))))) + # growthvarRes = do.call('rbind', growthvarRes); colnames(growthFE) = c('Int','Time', 'LL_Int', 'UL_Int', 'LL_Time', 'UL_Time') + # growthvarRes = data.frame(grid, round(growthvarRes, 2)) + # return(growthvarRes) + } + + if (whichpar=='varRE'){ + # random effects variance + res = left_join(g, m, by=c('nClusters', 'nWithinCluster', 'corRE')) %>% + arrange(nClusters, nWithinCluster, corRE) %>% + mutate(widthGrowth_Int = UL_Int.x-LL_Int.x, + widthMixed_Int = UL_Int.y-LL_Int.y, + mixedWidthMinusgrowthWidth_Int = widthMixed_Int-widthGrowth_Int, + widthGrowth_Time = UL_Time.x-LL_Time.x, + widthMixed_Time = UL_Time.y-LL_Time.y, + mixedWidthMinusgrowthWidth_Time = widthMixed_Time-widthGrowth_Time) + return(res) + } + + if (whichpar=='corRE'){ + # cor random effects + res = left_join(g, m, by=c('nClusters', 'nWithinCluster', 'corRE')) %>% + arrange(nClusters, nWithinCluster, corRE) %>% + mutate(widthGrowth = UL_corRE.x-LL_corRE.x, + widthMixed = UL_corRE.y-LL_corRE.y, + mixedWidthMinusgrowthWidth = widthMixed-widthGrowth) + return(res) + } +} + + +bias <- function(res, whichpar='fixed') { + require(dplyr) + if (whichpar=='fixed') { + out = res %>% + arrange(nClusters, nWithinCluster, corRE) %>% + mutate(biasLGC_Int = Int.x-3, + biasLGC_Time = Time.x-.75, + biasMM_Int = Int.y-3, + biasMM_Time = Time.y-.75) + return(out) + } + + else if (whichpar=='varRE') { + out = res %>% + arrange(nClusters, nWithinCluster, corRE) %>% + mutate(biasLGC_Int = Int.x-1, + biasLGC_Time = Time.x-1, + biasMM_Int = Int.y-1, + biasMM_Time = Time.y-1) + } + + else if (whichpar=='corRE'){ + out = corEst %>% + arrange(nClusters, nWithinCluster, corRE) %>% + mutate(biasLGC_corRE = corRE_est.x-corRE, + biasMM_corRE = corRE_est.y-corRE) + } + + return(out) + +} \ No newline at end of file diff --git a/misc_code_for_visuals.R b/misc_code_for_visuals.R new file mode 100644 index 0000000..6755e45 --- /dev/null +++ b/misc_code_for_visuals.R @@ -0,0 +1,74 @@ +library(tidyverse); library(modelr); library(mgcv); library(plotly); library(lazerhawk) + +mod = gam(accel ~ s(times, bs='gp'), data=MASS::mcycle) + +ax = list( + title = "", + zeroline = FALSE, + showline = FALSE, + showticks = FALSE, + showticklabels = FALSE, + showgrid = FALSE +) + +MASS::mcycle %>% + add_predictions(mod) %>% + plot_ly(x=~times, y=~accel) %>% + add_markers(color=~accel, colors=viridis::plasma(133), showlegend=F) %>% + add_lines(y=~pred, color=I('#ff5500'), opacity=.5, showlegend=F) %>% + hide_colorbar() %>% + theme_plotly() %>% + layout(xaxis = ax, yaxis = ax) + + +input = data_frame( + n = 10, + dataMean = 7, + dataVar = 3, + priorMean = 2, + priorVar = 1 +) + +simN = 500 +theta = seq(0, 10, length.out = simN) + +obs = rnorm(input$n, input$dataMean, sqrt(input$dataVar)) +prior = data.frame(Distribution='Prior', theta=theta, + density = dnorm(theta, input$priorMean, sqrt(input$priorVar))) %>% + mutate(density=density/sum(density)) + +like = data.frame(Distribution='Likelihood', theta=theta, + density = sapply(theta, function(parm) exp(sum(dnorm(obs, mean=parm, sd=sqrt(input$dataVar), log = T))))) %>% + mutate(density=density/sum(density)) + +denom = sum(like$density*prior$density) +post = data.frame(Distribution='Posterior', theta=theta, + density = 
like$density*prior$density/denom) %>% + mutate(density=density/sum(density)) + +thetamean = sum(post$density*theta) +plotdata = rbind(prior, like, post) + + + + + +g = ggplot(aes(x=theta, y=density, group=Distribution, color=Distribution, fill=Distribution), data=plotdata) + + geom_ribbon(aes(ymin=0, ymax=density), alpha=.5 ) + + geom_point(aes(x=value, y=0), data=data.frame(Distribution=c('Prior', 'Likelihood', 'Posterior'), + value=c(input$priorMean, mean(obs), thetamean)), + color=alpha('#ff5503', .25)) + + xlab('') + + lims(x=c(0, 10)) + + lazerhawk::theme_trueMinimal() + + theme(axis.title.x=element_text(color=alpha('black',.6), vjust=.1, hjust=.5), + axis.text.y=element_blank(), + axis.title.y=element_blank(), + axis.ticks.y=element_blank(), + strip.text=element_text(color=alpha('black',.5), vjust=.01), + legend.position='none', + plot.background=element_rect(fill = "transparent",colour = NA)) + +ggplotly(g, tooltip='none') %>% + theme_plotly() %>% + layout(xaxis=ax) diff --git a/slides.css b/slides.css new file mode 100644 index 0000000..2728e8c --- /dev/null +++ b/slides.css @@ -0,0 +1,162 @@ +@import url('https://fonts.googleapis.com/css?family=Roboto'); + +.emph { + color: #ff5503; + font-weight: 500; +} + +/* pack func and objclass colors come from hcl(seq(90,360, length.out=4), c=80, l=80)*/ +.pack { + color: #AC9CFF; /*#e41a1c*/ + font-weight: 500; +} + +.func { /*#984ea3;can just use `` instead*/ + color: #00CBB6; + font-weight: 500; +} + +.objclass { + color: #AAB400; /*#4daf4a; #FFC5D0*/ + font-weight: 500; +} + + + + +a { + color: #1e90ff; /*dodgerblue*/ +} + +.resize{ + width: 400px; + height: auto; +} + + +.reveal section p { + font-family: 'Roboto', sans-serif; + text-align: left; +} + +.reveal ol, +.reveal dl, +.reveal ul { + display: block; + text-align: left; + font-size: 75%; + margin: 0 0 0 1em; + +} + +.reveal section img { + background: transparent; /*theme background color: https://github.com/rstudio/revealjs/tree/master/inst/reveal.js-3.2.0/css/theme*/ + border: 0; + box-shadow: 0 0 0 rgba(0, 0, 0, 0.15); + /*max-width: 500px;*/ +} + + +.reveal .footer { + position: absolute; + bottom: 1em; + left: 1em; + font-size: 0.5em; +} + +.reveal .slides section .fragment { + font-size: 75%; + font-weight: 600; +} + + +.reveal .progress span { + display: none; +} + +.reveal .playback { + display: none; +} + +.reveal .controls { + display: none !important; +} + +.reveal pre code { + box-sizing: border-box; + left: 0; /* This changes where the code chunk box actually starts */ + /* padding: 10px 0 10px 60px; /* Change the last value here to move the text left or right */ + position: relative; + width: 100%; /* This changes where the code chunk box ends on the right side */ + font-size: 100%; /*changes output size; and comments*/ + box-shadow: .5 rgba(0, 0, 0, 0.15); +} + +.col2 { + columns: 2 200px; /* number of columns and width in pixels*/ + /*font-size: 50%;*/ /* ignored, but can do in the div style within a slide*/ + /*font-size: 20pt;*/ + -webkit-columns: 2 200px; /* chrome, safari */ + -moz-columns: 2 200px; /* firefox */ +} + +.col2 .reveal pre { + font-size:25%; +} + +.col3 { + columns: 3 100px; + /*font-size: 16pt;*/ + -webkit-columns: 3 100px; + -moz-columns: 3 100px; +} + + +/* none of the following seemed to do much*/ +.title { + text-align: center; + font-variant: small-caps; + margin: 0 auto; + +/* background-image: url('img/CSCAR.png'); + background-position: center center; + background-attachment: fixed; + background-repeat: no-repeat; + background-size: 
100% 100%; */ +} + + +.sectionSlide { + color: #1e90ff; + text-align: center; + font-size: 200%; + font-variant: small-caps; + font-weight: 500; + background: linear-gradient(to bottom, #002b36, white); +} + +.reveal .sectionSlideTitle { + data-background-color: white; +} + +#linearBg2 { /* fallback */ + background-color: #002b36; + background-repeat: repeat-x; /* Safari 4-5, Chrome 1-9 */ + background: -webkit-gradient(linear, 0% 0%, 0% 100%, from(#002b36), to(#2F2727)); /* Safari 5.1, Chrome 10+ */ + background: -webkit-linear-gradient(top, #2F2727, #002b36); /* Firefox 3.6+ */ + background: -moz-linear-gradient(top, #2F2727, #002b36); /* IE 10 */ + background: -ms-linear-gradient(top, #2F2727, #002b36); /* Opera 11.10+ */ + background: -o-linear-gradient(top, #2F2727, #002b36); +} + +html.opac75 .slide-background.present { + opacity: 0.75; +} + +html.opac50 .slide-background.present { + opacity: 0.50; +} + +html.opac25 .slide-background.present { + opacity: 0.25; +} \ No newline at end of file
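
misc_code_for_display.R defines the data-generation and model-fitting helpers but ships no driver code. A minimal usage sketch follows (not part of the diff): it assumes nlme is attached, since runNLME() calls lme() and varIdent() unqualified, and the cluster count, series length, and random-effect correlation below are arbitrary illustration values.

```r
library(nlme)  # runNLME() relies on lme()/varIdent(); MASS is used via MASS::mvrnorm() inside dataGen()

d   <- dataGen(nclusters = 50, nwithin = 5, corr = 0.3)  # one simulated longitudinal data set
fit <- runNLME(d)                                        # ML fit with per-time residual variances
fit$fixed                                                # fixed effects; dataGen() sets the true values to 3 and 0.75
```

For misc_code_for_visuals.R, the prior/likelihood/posterior plot is a grid approximation: the `post` data frame computes

$$p(\theta_i \mid y) \approx \frac{p(y \mid \theta_i)\, p(\theta_i)}{\sum_j p(y \mid \theta_j)\, p(\theta_j)}$$

over 500 equally spaced values of $\theta$ on $[0, 10]$, matching `theta = seq(0, 10, length.out = simN)` and `denom = sum(like$density * prior$density)` in that script.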