-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathrefs.bib
411 lines (372 loc) · 32.4 KB
/
refs.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
@book{kruschke_doing_2010,
title = {Doing {Bayesian} Data Analysis: A Tutorial Introduction with {R}},
isbn = {9780123814869},
shorttitle = {Doing {Bayesian} Data Analysis},
abstract = {There is an explosion of interest in Bayesian statistics, primarily because recently created computational methods have finally made Bayesian analysis tractable and accessible to a wide audience. Doing Bayesian Data Analysis, A Tutorial Introduction with R and {BUGS}, is for first year graduate students or advanced undergraduates and provides an accessible approach, as all mathematics is explained intuitively and with concrete examples. It assumes only algebra and ‘rusty’ calculus. Unlike other textbooks, this book begins with the basics, including essential concepts of probability and random sampling. The book gradually climbs all the way to advanced hierarchical modeling methods for realistic data. The text provides complete examples with the R programming language and {BUGS} software (both freeware), and begins with basic programming examples, working up gradually to complete programs for complex analyses and presentation graphics. These templates can be easily adapted for a large variety of students and their own research {needs.The} textbook bridges the students from their undergraduate training into modern Bayesian methods.-Accessible, including the basics of essential concepts of probability and random sampling -Examples with R programming language and {BUGS} software -Comprehensive coverage of all scenarios addressed by non-bayesian textbooks- t-tests, analysis of variance ({ANOVA)} and comparisons in {ANOVA}, multiple regression, and chi-square (contingency table analysis). -Coverage of experiment planning -R and {BUGS} computer programming code on website -Exercises have explicit purposes and guidelines for accomplishment},
language = {en},
publisher = {Academic Press},
author = {Kruschke, John},
month = nov,
year = {2010},
keywords = {Mathematics / Applied, Mathematics / General}
}
@book{gelman_bda,
edition = {Third},
title = {{Bayesian} Data Analysis},
isbn = {9781439840955},
abstract = {Now in its third edition, this classic book is widely considered the leading text on Bayesian methods, lauded for its accessible, practical approach to analyzing data and solving research problems. Bayesian Data Analysis, Third Edition continues to take an applied approach to analysis using up-to-date Bayesian methods. The authors—all leaders in the statistics community—introduce basic concepts from a data-analytic perspective before presenting advanced methods. Throughout the text, numerous worked examples drawn from real applications and research emphasize the use of Bayesian inference in practice. New to the Third Edition Four new chapters on nonparametric modeling Coverage of weakly informative priors and boundary-avoiding priors Updated discussion of cross-validation and predictive information criteria Improved convergence monitoring and effective sample size calculations for iterative simulation Presentations of Hamiltonian Monte Carlo, variational Bayes, and expectation propagation New and revised software code The book can be used in three different ways. For undergraduate students, it introduces Bayesian inference starting from first principles. For graduate students, the text presents effective current approaches to Bayesian modeling and computation in statistics and related fields. For researchers, it provides an assortment of Bayesian methods in applied statistics. Additional materials, including data sets used in the examples, solutions to selected exercises, and software instructions, are available on the book’s web page.},
language = {en},
publisher = {{CRC} Press},
author = {Gelman, Andrew and Carlin, John B. and Stern, Hal S. and Dunson, David B. and Vehtari, Aki and Rubin, Donald B.},
month = nov,
year = {2013},
keywords = {Computers / Mathematical \& Statistical Software, Mathematics / Probability \& Statistics / General, Psychology / Research \& Methodology}
}
@book{gelman_arm,
title = {Data Analysis Using Regression and {Multilevel/Hierarchical} Models},
author = {Gelman, Andrew and Hill, Jennifer},
publisher = {Cambridge University Press},
month = dec,
year = {2006},
isbn = {9781139460934},
language = {en},
abstract = {Data Analysis Using Regression and {Multilevel/Hierarchical} Models, first published in 2007, is a comprehensive manual for the applied researcher who wants to perform data analysis using linear and nonlinear regression and multilevel models. The book introduces a wide variety of models, whilst at the same time instructing the reader in how to fit these models using available software packages. The book illustrates the concepts by working through scores of real data examples that have arisen from the authors' own applied research, with programming codes provided for each one. Topics covered include causal inference, including regression, poststratification, matching, regression discontinuity, and instrumental variables, as well as multilevel logistic regression and missing-data imputation. Practical tips regarding building, fitting, and understanding are provided throughout.},
keywords = {Mathematics / Probability \& Statistics / General, Political Science / General, Psychology / Assessment, Testing \& Measurement, Social Science / Research}
}
@book{bugsbook,
address = {Boca Raton, {FL}},
title = {The {BUGS} Book: A Practical Introduction to {Bayesian} Analysis},
isbn = {9781584888499},
shorttitle = {The {BUGS} Book},
abstract = {Bayesian statistical methods have become widely used for data analysis and modelling in recent years, and the {BUGS} software has become the most popular software for Bayesian analysis worldwide. Authored by the team that originally developed this software, The {BUGS} Book provides a practical introduction to this program and its use. The text presents complete coverage of all the functionalities of {BUGS}, including prediction, missing data, model criticism, and prior sensitivity. It also features a large number of worked examples and a wide range of applications from various disciplines. The book introduces regression models, techniques for criticism and comparison, and a wide range of modelling issues before going into the vital area of hierarchical models, one of the most common applications of Bayesian methods. It deals with essentials of modelling without getting bogged down in complexity. The book emphasises model criticism, model comparison, sensitivity analysis to alternative priors, and thoughtful choice of prior distributions—all those aspects of the "art" of modelling that are easily overlooked in more theoretical expositions. More pragmatic than ideological, the authors systematically work through the large range of "tricks" that reveal the real power of the {BUGS} software, for example, dealing with missing data, censoring, grouped data, prediction, ranking, parameter constraints, and so on. Many of the examples are biostatistical, but they do not require domain knowledge and are generalisable to a wide range of other application areas. Full code and data for examples, exercises, and some solutions can be found on the book’s website.},
language = {English},
publisher = {Chapman and {Hall/CRC}},
author = {Lunn, David and Jackson, Chris and Best, Nicky and Thomas, Andrew and Spiegelhalter, David},
month = oct,
year = {2012}
}
@book{jeff_gill_bayesian_2008,
address = {Boca Raton},
edition = {Second},
title = {{Bayesian} Methods: A Social and Behavioral Sciences Approach},
isbn = {9781584885627},
lccn = {QA 279.5},
shorttitle = {{Bayesian} Methods},
language = {eng},
publisher = {Chapman \& {Hall/CRC}},
author = {Gill, Jeff},
year = {2008},
keywords = {Bayes' solution, Bayesian analysis, Bayesian statistical decision theory., Decisión estadística., Methode van Bayes., Social sciences Statistical methods., Teorías bayesian.}
}
@book{simon_jackman_bayesian_2009,
address = {Chichester, {UK}},
title = {{Bayesian} Analysis for the Social Sciences},
isbn = {9780470011546},
lccn = {HA 29},
language = {eng},
publisher = {Wiley},
author = {Jackman, Simon},
year = {2009},
keywords = {Bayes' solution, Bayesian analysis, Bayesian statistical decision theory., Bayes-Verfahren., Social sciences Statistical methods.}
}
@book{scott_lynch_2007,
address = {New York},
title = {Introduction to Applied {Bayesian} Statistics and Estimation for Social Scientists},
isbn = {9780387712642},
abstract = {Lynch covers the complete process of Bayesian statistical analysis in great detail from the development of a model through the process of making statistical inference. The key feature of the book is that it covers models that are most commonly used on social science research.},
language = {eng},
publisher = {Springer},
author = {Lynch, Scott M.},
collaborator = {{ebrary, Inc}},
year = {2007},
keywords = {Bayes' solution, Bayesian analysis, Bayesian statistical decision theory., Social sciences Statistical methods.}
}
@book{mcgrayne_theory_2012,
address = {New Haven Conn.},
edition = {Reprint},
title = {The Theory That Would Not Die: How {Bayes'} Rule Cracked the {Enigma} Code, Hunted Down {Russian} Submarines, and Emerged Triumphant from Two Centuries of Controversy},
isbn = {9780300188226},
shorttitle = {The Theory That Would Not Die},
abstract = {Drawing on primary source material and interviews with statisticians and other scientists, {"The} Theory That Would Not Die" is the riveting account of how a seemingly simple theorem ignited one of the greatest scientific controversies of all time. Bayes' rule appears to be a straightforward, one-line theorem: by updating our initial beliefs with objective new information, we get a new and improved belief. To its adherents, it is an elegant statement about learning from experience. To its opponents, it is subjectivity run amok. In the first-ever account of Bayes' rule for general readers, Sharon Bertsch {McGrayne} explores this controversial theorem and the human obsessions surrounding it. She traces its discovery by an amateur mathematician in the 1740s through its development into roughly its modern form by French scientist Pierre Simon Laplace. She reveals why respected statisticians rendered it professionally taboo for 150 years - at the same time that practitioners relied on it to solve crises involving great uncertainty and scanty information, even breaking Germany's Enigma code during World War {II}, and explains how the advent of off-the-shelf computer technology in the 1980s proved to be a game-changer. Today, Bayes' rule is used everywhere from {DNA} decoding to Homeland Security. {"The} Theory That Would Not Die" is a vivid account of the generations-long dispute over one of the greatest breakthroughs in the history of applied mathematics and statistics.},
language = {English},
publisher = {Yale University Press},
author = {{McGrayne}, Sharon Bertsch},
month = sep,
year = {2012}
}
@book{albert_bayesian_2009,
address = {New York},
edition = {Second},
title = {{Bayesian} Computation with {R}},
isbn = {9780387922973},
abstract = {There has been a dramatic growth in the development and application of Bayesian inferential methods. Some of this growth is due to the availability of powerful simulation-based algorithms to summarize posterior distributions. There has been also a growing interest in the use of the system R for statistical analyses. R's open source nature, free availability, and large number of contributor packages have made R the software of choice for many statisticians in education and industry. Bayesian Computation with R introduces Bayesian modeling by the use of computation using the R language. The early chapters present the basic tenets of Bayesian thinking by use of familiar one and two-parameter inferential problems. Bayesian computational methods such as Laplace's method, rejection sampling, and the {SIR} algorithm are illustrated in the context of a random effects model. The construction and implementation of Markov Chain Monte Carlo ({MCMC)} methods is introduced. These simulation-based algorithms are implemented for a variety of Bayesian applications such as normal and binary response regression, hierarchical modeling, order-restricted inference, and robust modeling. Algorithms written in R are used to develop Bayesian tests and assess Bayesian models by use of the posterior predictive distribution. The use of R to interface with {WinBUGS}, a popular {MCMC} computing language, is described with several illustrative examples. This book is a suitable companion book for an introductory course on Bayesian methods and is valuable to the statistical practitioner who wishes to learn more about the R language and Bayesian methodology. The {LearnBayes} package, written by the author and available from the {CRAN} website, contains all of the R functions described in the book. The second edition contains several new topics such as the use of mixtures of conjugate priors and the use of Zellner’s g priors to choose between models in linear regression. 
There are more illustrations of the construction of informative prior distributions, such as the use of conditional means priors and multivariate normal priors in binary regressions. The new edition contains changes in the R code illustrations according to the latest edition of the {LearnBayes} package.},
language = {English},
publisher = {Springer},
author = {Albert, Jim},
month = jun,
year = {2009}
}
@article{gelmanPardoe2006,
author = {Gelman, Andrew and Pardoe, Iain},
title = {Bayesian measures of explained variance and pooling in multilevel (hierarchical) models},
journal = {Technometrics},
volume = {48},
number = {2},
pages = {241--251},
year = {2006},
publisher = {Taylor \& Francis}
}
@article{gelmanHwangVehtari,
author = {Gelman, Andrew and Hwang, Jessica and Vehtari, Aki},
title = {Understanding predictive information criteria for Bayesian models},
journal = {Statistics and Computing},
volume = {24},
number = {6},
pages = {997--1016},
year = {2014},
publisher = {Springer}
}
@unpublished{gelmanVehtariWAIC,
title = {{WAIC} and cross-validation in {Stan}},
author = {Vehtari, Aki and Gelman, Andrew},
note = {Unpublished manuscript},
year = {2014}
}
@book{mcelreath2016,
title = {Statistical Rethinking: A {Bayesian} Course with Examples in {R} and {Stan}},
author = {McElreath, Richard},
volume = {122},
year = {2016},
publisher = {{CRC} Press}
}
@article{friedman_projection_1981,
title = {Projection Pursuit Regression},
volume = {76},
issn = {0162-1459},
url = {http://www.jstor.org/stable/2287576},
doi = {10.2307/2287576},
abstract = {A new method for nonparametric multiple regression is presented. The procedure models the regression surface as a sum of general smooth functions of linear combinations of the predictor variables in an iterative manner. It is more general than standard stepwise and stagewise regression procedures, does not require the definition of a metric in the predictor space, and lends itself to graphical interpretation.},
number = {376},
journal = {Journal of the American Statistical Association},
author = {Friedman, Jerome H. and Stuetzle, Werner},
month = dec,
year = {1981},
pages = {817--823}
}
@article{friedman2000additive,
title = {Additive logistic regression: a statistical view of boosting (with discussion and a rejoinder by the authors)},
author = {Friedman, Jerome and Hastie, Trevor and Tibshirani, Robert and others},
journal = {The Annals of Statistics},
volume = {28},
number = {2},
pages = {337--407},
year = {2000},
publisher = {Institute of Mathematical Statistics}
}
@book{fahrmeir2013regression,
title = {Regression: Models, Methods and Applications},
author = {Fahrmeir, Ludwig and Kneib, Thomas and Lang, Stefan and Marx, Brian},
year = {2013},
publisher = {Springer Science \& Business Media}
}
@book{rasmussen_gaussian_2006,
address = {Cambridge, Mass.},
title = {{Gaussian} Processes for Machine Learning},
isbn = {9780262182539},
abstract = {{"Gaussian} processes ({GPs)} provide a principled, practical, probabilistic approach to learning in kernel machines. {GPs} have received increased attention in the machine-learning community over the past decade, and this book provides a long-needed systematic and unified treatment of theoretical and practical aspects of {GPs} in machine learning. The treatment is comprehensive and self-contained, targeted at researchers and students in machine learning and applied statistics."--Jacket.},
language = {English},
publisher = {{MIT} Press},
author = {Rasmussen, Carl Edward and Williams, Christopher K. I.},
year = {2006}
}
@book{hardin_generalized_2012,
title = {Generalized Linear Models and Extensions},
edition = {Third},
isbn = {1597181056},
publisher = {Stata Press},
author = {Hardin, James W. and Hilbe, Joseph M.},
month = jun,
year = {2012}
}
@book{wood_generalized_2006,
title = {Generalized Additive Models: An Introduction with {R}},
volume = {66},
shorttitle = {Generalized Additive Models},
publisher = {{CRC} Press},
author = {Wood, Simon N.},
year = {2006}
}
@article{rigby_generalized_2005,
title = {Generalized additive models for location, scale and shape},
volume = {54},
issn = {1467-9876},
url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1467-9876.2005.00510.x/abstract},
doi = {10.1111/j.1467-9876.2005.00510.x},
abstract = {Summary. A general class of statistical models for a univariate response variable is presented which we call the generalized additive model for location, scale and shape {(GAMLSS).} The model assumes independent observations of the response variable y given the parameters, the explanatory variables and the values of the random effects. The distribution for the response variable in the {GAMLSS} can be selected from a very general family of distributions including highly skew or kurtotic continuous and discrete distributions. The systematic part of the model is expanded to allow modelling not only of the mean (or location) but also of the other parameters of the distribution of y, as parametric and/or additive nonparametric (smooth) functions of explanatory variables and/or random-effects terms. Maximum (penalized) likelihood estimation is used to fit the (non)parametric models. A {Newton–Raphson} or Fisher scoring algorithm is used to maximize the (penalized) likelihood. The additive terms in the model are fitted by using a backfitting algorithm. Censored data are easily incorporated into the framework. Five data sets from different fields of application are analysed to emphasize the generality of the {GAMLSS} class of models.},
number = {3},
journal = {Journal of the Royal Statistical Society: Series C {(Applied} Statistics)},
author = {Rigby, R. A. and Stasinopoulos, D. M.},
year = {2005},
keywords = {Beta–binomial distribution, {Box–Cox} transformation, Centile estimation, Cubic smoothing splines, Generalized linear mixed model, {LMS} method, Negative binomial distribution, Non-normality, Nonparametric models, Overdispersion, Penalized likelihood, Random effects, Skewness and kurtosis}
}
@book{hastie_generalized_1990,
title = {Generalized Additive Models},
isbn = {9780412343902},
publisher = {{CRC} Press},
author = {Hastie, Trevor J. and Tibshirani, Robert J.},
month = jun,
year = {1990},
keywords = {Mathematics / Probability \& Statistics / General}
}
@book{fox_multiple_2000,
title = {Multiple and Generalized Nonparametric Regression},
isbn = {9780761921899},
abstract = {This book builds on John Fox's previous volume in the {QASS} Series, Non Parametric Simple Regression. In this book, the reader learns how to estimate and plot smooth functions when there are multiple independent variables.},
publisher = {{SAGE}},
author = {Fox, John},
month = may,
year = {2000},
keywords = {Mathematics / Probability \& Statistics / General, Mathematics / Probability \& Statistics / Regression Analysis, Nonparametric statistics, Regression analysis, Social Science / General, Social Science / Methodology, Social Science / Research, Social Science / Statistics, Social sciences, Social sciences - Statistical methods, Social sciences/ Statistical methods}
}
@book{fox_nonparametric_2000,
title = {Nonparametric Simple Regression: Smoothing Scatterplots},
isbn = {9780761915850},
shorttitle = {Nonparametric Simple Regression},
abstract = {John Fox introduces readers to the techniques of kernel estimation, additive nonparametric regression, and the ways nonparametric regression can be employed to select transformations of the data preceding a linear least-squares fit.},
publisher = {{SAGE}},
author = {Fox, John},
month = jan,
year = {2000},
keywords = {Mathematics / Probability \& Statistics / General, Medical / General, Nonparametric statistics, Regression analysis, Social Science / General, Social Science / Research, Social Science / Statistics, Social sciences}
}
@article{beyerlein_alternative_2008,
title = {Alternative regression models to assess increase in childhood {BMI}},
volume = {8},
issn = {1471-2288},
url = {http://www.biomedcentral.com/1471-2288/8/59},
doi = {10.1186/1471-2288-8-59},
number = {1},
journal = {{BMC} Medical Research Methodology},
author = {Beyerlein, Andreas and Fahrmeir, Ludwig and Mansmann, Ulrich and Toschke, André M.},
year = {2008},
pages = {59},
annote = {A straightforward application of gamlss}
}
@book{hardin_generalized_2007,
title = {Generalized Linear Models and Extensions},
publisher = {Stata Corp},
author = {Hardin, James W. and Hilbe, Joseph M.},
year = {2007},
internal-note = {Earlier edition of hardin_generalized_2012 -- consider citing only one}
}
@book{venables_modern_2002,
title = {Modern Applied Statistics with {S}},
isbn = {9780387954578},
abstract = {S-{PLUS} is a powerful environment for the statistical and graphical analysis of data. It provides the tools to implement many statistical ideas which have been made possible by the widespread availability of workstations having good graphics and computational capabilities. This book is a guide to using S-{PLUS} to perform statistical analyses and provides both an introduction to the use of S-{PLUS} and a course in modern statistical methods. S-{PLUS} is available for both Windows and {UNIX} workstations, and both versions are covered in {depth.The} aim of the book is to show how to use S-{PLUS} as a powerful and graphical data analysis system. Readers are assumed to have a basic grounding in statistics, and so the book in intended for would-be users of S-{PLUS} and both students and researchers using statistics. Throughout, the emphasis is on presenting practical problems and full analyses of real data sets. Many of the methods discussed are state-of-the-art approaches to topics such as linear, nonlinear, and smooth regression models, tree-based methods, multivariate analysis and pattern recognition, survival analysis, time series and spatial statistics. Throughout, modern techniques such as robust methods, non-parametric smoothing, and bootstrapping are used where {appropriate.This} third edition is intended for users of S-{PLUS} 4.5, 5.0, 2000 or later, although S-{PLUS} 3.3/4 are also considered. The major change from the second edition is coverage of the current versions of S-{PLUS.} The material has been extensively rewritten using new examples and the latest computationally intensive methods. 
The companion volume on S Programming will provide an in-depth guide for those writing software in the S {language.The} authors have written several software libraries that enhance S-{PLUS;} these and all the datasets used are available on the Internet in versions for Windows and {UNIX.} There are extensive on-line complements covering advanced material, user-contributed extensions, further exercises, and new features of S-{PLUS} as they are {introduced.Dr.} Venables is now Statistician with {CSRIO} in Queensland, having been at the Department of Statistics, University of Adelaide, for many years previously. He has given many short courses on S-{PLUS} in Australia, Europe, and the {USA.} Professor Ripley holds the Chair of Applied Statistics at the University of Oxford, and is the author of four other books on spatial statistics, simulation, pattern recognition, and neural networks.},
publisher = {Birkh{\"a}user},
author = {Venables, William N. and Ripley, Brian D.},
month = aug,
year = {2002},
keywords = {Business \& Economics / Statistics, Computers / Mathematical \& Statistical Software, Mathematical statistics, Mathematical statistics - Data processing, Mathematical statistics/ Data processing, Mathematics / Probability \& Statistics / General, S, S {(Computer} program language), S {(Computer} system), S-{PLUS} {(Computer} program language), statistics, Statistics - Data processing, Statistics/ Data processing}
}
@article{breiman_statistical_2001,
title = {Statistical Modeling: The Two Cultures (with comments and a rejoinder by the author)},
volume = {16},
issn = {0883-4237},
shorttitle = {Statistical Modeling},
url = {http://projecteuclid.org/euclid.ss/1009213726},
doi = {10.1214/ss/1009213726},
abstract = {There are two cultures in the use of statistical modeling to reach
conclusions from data. One assumes that the data are generated by a given
stochastic data model. The other uses algorithmic models and treats the data
mechanism as unknown. The statistical community has been committed to the
almost exclusive use of data models. This commitment has led to irrelevant
theory, questionable conclusions, and has kept statisticians from working on a
large range of interesting current problems. Algorithmic modeling, both in
theory and practice, has developed rapidly in fields outside statistics. It can
be used both on large complex data sets and as a more accurate and informative
alternative to data modeling on smaller data sets. If our goal as a field is to
use data to solve problems, then we need to move away from exclusive dependence
on data models and adopt a more diverse set of tools.},
number = {3},
journal = {Statistical Science},
author = {Breiman, Leo},
month = aug,
year = {2001},
pages = {199--231}
}
@book{bybee_pisa_2009,
title = {{PISA} Science 2006: Implications for Science Teachers and Teaching},
isbn = {9781933531311},
shorttitle = {{PISA} Science 2006},
publisher = {{NSTA} Press},
author = {Bybee, Rodger W. and {McCrae}, Barry},
month = may,
year = {2009},
keywords = {Education / General, Education / Student Life \& Student Affairs, Education / Teaching Methods \& Materials / Science \& Technology, Education / Testing \& Measurement, Educational tests and measurements, High school students, High school students - Rating of, High school students/ Rating of, Programme for International Student Assessment, Science, Science - Study and teaching - United States, Science - Study and teaching {(Secondary)}, Science / Study \& Teaching, Science/ Study and teaching {(Secondary)}}
}
@book{hastie_elements_2009,
edition = {Second},
title = {The Elements of Statistical Learning: Data Mining, Inference, and Prediction},
isbn = {0387848576},
shorttitle = {The Elements of Statistical Learning},
publisher = {Springer},
author = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
month = feb,
year = {2009}
}
@book{ruppert_semiparametric_2003,
title = {Semiparametric Regression},
isbn = {9780521785167},
abstract = {Semiparametric regression is concerned with the flexible incorporation of non-linear functional relationships in regression analyses. Any application area that benefits from regression analysis can also benefit from semiparametric regression. Assuming only a basic familiarity with ordinary parametric regression, this user-friendly book explains the techniques and benefits of semiparametric regression in a concise and modular fashion. The authors make liberal use of graphics and examples plus case studies taken from environmental, financial, and other applications. They include practical advice on implementation and pointers to relevant software. The book is suitable as a textbook for students with little background in regression as well as a reference book for statistically oriented scientists such as biostatisticians, econometricians, quantitative social scientists, epidemiologists, with a good working knowledge of regression and the desire to begin using more flexible semiparametric models. Even experts on semiparametric regression should find something new here.},
publisher = {Cambridge University Press},
author = {Ruppert, David and Wand, Matt P. and Carroll, Raymond J.},
month = jul,
year = {2003},
keywords = {Mathematics / General, Mathematics / Probability \& Statistics / General, Mathematics / Probability \& Statistics / Regression Analysis, Medical / Epidemiology, Nonparametric statistics, Regression analysis}
}
@book{wasserman_all_2006,
title = {All of Nonparametric Statistics},
isbn = {9780387251455},
abstract = {The goal of this text is to provide the reader with a single book where they can find a brief account of many, modern topics in nonparametric inference. The book is aimed at Master's level or {Ph.D.} level students in statistics, computer science, and engineering. It is also suitable for researchers who want to get up to speed quickly on modern nonparametric methods. This text covers a wide range of topics including: the bootstrap, the nonparametric delta method, nonparametric regression, density estimation, orthogonal function methods, minimax estimation, nonparametric confidence sets, and wavelets. The book has a mixture of methods and theory. From the reviews: {"...The} book is excellent." {(Short} Book Reviews of the {ISI}, June 2006) {"Now} we have All of Nonparametric Statistics {\ldots} the writing is excellent and the author is to be congratulated on the clarity achieved. {\ldots} the book is excellent." {(N.R.} Draper, Short Book Reviews, Vol. 26 (1), 2006) {"Overall}, I enjoyed reading this book very much. I like Wasserman's intuitive explanations and careful insights into why one path or approach is taken over another. Most of all, I am impressed with the wealth of information on the subject of asymptotic nonparametric inferences." {(Stergios} B. Fotopoulos for Technometrics, Vol. 49, No. 1., February 2007)},
publisher = {Springer},
author = {Wasserman, Larry},
year = {2006},
keywords = {Artificial intelligence, Computers / Intelligence {(AI)} \& Semantics, Mathematical statistics, Mathematics / General, Mathematics / Probability \& Statistics / General, Nonparametric statistics, statistics}
}
@article{wood2008,
title = {Fast stable direct fitting and smoothness selection for generalized additive models},
author = {Wood, Simon N.},
journal = {Journal of the Royal Statistical Society: Series B (Statistical Methodology)},
volume = {70},
number = {3},
pages = {495--518},
year = {2008},
publisher = {Wiley Online Library}
}
@book{kline2015principles,
title = {Principles and Practice of Structural Equation Modeling},
author = {Kline, Rex B.},
year = {2015},
publisher = {Guilford Publications}
}
@book{pearl2009causality,
title = {Causality},
author = {Pearl, Judea},
year = {2009},
publisher = {Cambridge University Press}
}
@book{shalizi2017advanced,
title = {Advanced Data Analysis from an Elementary Point of View},
author = {Shalizi, Cosma},
year = {2017},
publisher = {Citeseer},
internal-note = {publisher "Citeseer" looks like a Google Scholar export artifact; likely an unpublished online manuscript -- verify}
}
@book{laudan2012science,
title = {Science and Relativism: Some Key Controversies in the Philosophy of Science},
author = {Laudan, Larry},
year = {2012},
publisher = {University of Chicago Press}
}