[med-svn] [r-cran-tidyr] 01/06: New upstream version 0.7.2
Andreas Tille
tille@debian.org
Tue Dec 12 22:39:57 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-tidyr.
commit 286b771a46aef63b276397d51b0ef81267309d60
Author: Andreas Tille <tille@debian.org>
Date: Tue Dec 12 23:26:39 2017 +0100
New upstream version 0.7.2
---
DESCRIPTION                       |   6 +-
MD5                               |  18 +-
NEWS.md                           |   8 +-
R/gather.R                        |   4 +-
R/nest.R                          |   4 +-
R/spread.R                        |   4 +-
inst/doc/tidy-data.html           | 573 +++++++++++++++++++-------------------
src/melt.cpp                      |  29 +-
tests/testthat/test-gather.R      |   1 -
tests/testthat/test-underscored.R |  24 +-
10 files changed, 359 insertions(+), 312 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index ab6cffe..9e0a04f 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: tidyr
Title: Easily Tidy Data with 'spread()' and 'gather()' Functions
-Version: 0.7.1
+Version: 0.7.2
Authors@R: c(
person("Hadley", "Wickham", , "hadley@rstudio.com", c("aut", "cre")),
person("Lionel", "Henry", , "lionel@rstudio.com", "aut"),
@@ -21,10 +21,10 @@ VignetteBuilder: knitr
LinkingTo: Rcpp
RoxygenNote: 6.0.1
NeedsCompilation: yes
-Packaged: 2017-08-24 14:15:41 UTC; lionel
+Packaged: 2017-10-16 13:20:18 UTC; hadley
Author: Hadley Wickham [aut, cre],
Lionel Henry [aut],
RStudio [cph]
Maintainer: Hadley Wickham <hadley@rstudio.com>
Repository: CRAN
-Date/Publication: 2017-09-01 15:15:47 UTC
+Date/Publication: 2017-10-16 23:09:23 UTC
diff --git a/MD5 b/MD5
index 4e03373..4546363 100644
--- a/MD5
+++ b/MD5
@@ -1,7 +1,7 @@
-cde6f0d6d9862ea9c02d7a8f74c2a4ef *DESCRIPTION
+e4929bd95996152f944a8a4f1916d161 *DESCRIPTION
1734bf7b2a958fa874a85d6417f4a0e0 *LICENSE
3c2147163007eb93226799a3c1b504cb *NAMESPACE
-7207da2c9eccf02933f1c2943bc529c1 *NEWS.md
+3dd392bbc97f36aa21e53df835aa21f6 *NEWS.md
d5af2bc872fd256dd82f2607ea2aff67 *R/RcppExports.R
79ea586e36b0123e161a26e98bd99b64 *R/compat-lazyeval.R
61e0ad373cfce05de2c489ef9b60ae19 *R/complete.R
@@ -10,14 +10,14 @@ e772ef2ee60ba55dc6e3b13630c67597 *R/data.R
879a422a977f98b329cd4fcd27fcdb60 *R/expand.R
6f091ae6e4aeefc42913a0cfe9a8fa55 *R/extract.R
a703420c10f3bc22ac51104adb5c6b97 *R/fill.R
-0cfc2fcac7717cd157a19c433daad751 *R/gather.R
+d0e02ae557774152a660d250b3248317 *R/gather.R
1782474ccca6bd3a688775757d31175b *R/id.R
-0c515b16500dad45447331dd855be8f1 *R/nest.R
+216515455a16b9cd26ce9bf72605d96b *R/nest.R
5763e52a7b14ad4c4ca09a6bd8d9437e *R/replace_na.R
7c4594c6c21d88cbb1a5540ccc2d58b0 *R/separate-rows.R
3fe96a5046df1ca5a1ca1e23eedd2a4e *R/separate.R
d6746fae28232c2c13cb5cb61da547e8 *R/seq.R
-e03af1810aa7ec04c02bd3e2bfb846a9 *R/spread.R
+47f83e666e17604c61163194dc1d744d *R/spread.R
f6f27545149d75e3e0b18aefc7f0bcca *R/tidyr.R
48603c047f87f084d9d780232b4e941e *R/unite.R
0c47542be37eef2d0a7dc359bb5d8cc5 *R/unnest.R
@@ -41,7 +41,7 @@ f3284df0b78edfb5a2c9f5e44cd3bc65 *demo/dadmom.R
4c61156afe9636ec80849e49fa98a0a3 *demo/so-9684671.R
c4383a3f9fca197d86b0ae4a22abc79a *inst/doc/tidy-data.R
296da1cc768b3709970402d615c512b2 *inst/doc/tidy-data.Rmd
-6d6aaeefef2147b8af25fa9604f74aba *inst/doc/tidy-data.html
+4c1701cdf9da02d9b4732e487026c5a0 *inst/doc/tidy-data.html
89e7aa3629e3af61431582cb0b9882d1 *man/complete.Rd
6a1ba38c59e9935977006c90db7f47c8 *man/deprecated-se.Rd
6971dd7d01266c075b63eea069d09f1f *man/drop_na.Rd
@@ -66,7 +66,7 @@ c8ba478dc1fb90bc2c84ea2e3871bb66 *man/unnest.Rd
098fdd0edc34de56a2da62f5dd22373a *man/who.Rd
aa56ef8384b525ea2846f3cdb59b92e5 *src/RcppExports.cpp
81db5dd38227b4cab4713128f04f46c1 *src/fill.cpp
-e9fa31140b3e8191fc77a1c114d2ad5a *src/melt.cpp
+f511f296525c27047f66e2d642306cff *src/melt.cpp
32534931093398158fef10463826e304 *src/simplifyPieces.cpp
14fd04cc33329083bbe4c25bdd2f0531 *tests/testthat.R
0596c84dbd8e83646f1ee3e2a798d4f2 *tests/testthat/test-complete.R
@@ -75,13 +75,13 @@ e9fa31140b3e8191fc77a1c114d2ad5a *src/melt.cpp
3d4f4ce4fa98d50fade3a2352bb63c33 *tests/testthat/test-extract.R
52bdaf7932812e1bf7b5b34ae12fc7aa *tests/testthat/test-fill.R
b0a7fb6ecf9db133274a91a5e329d6f1 *tests/testthat/test-full_seq.R
-f1a2c9fe2acd33a44e7ce1522f2125a9 *tests/testthat/test-gather.R
+744c9d07a61c87bf3e5e99d45177e5a4 *tests/testthat/test-gather.R
f3eab4757a75d067572f56a8cd2fa4df *tests/testthat/test-id.R
27c5bb9b05002b9ed64efffcc076c788 *tests/testthat/test-nest.R
93135c802368f5391e817cd05add0c1f *tests/testthat/test-replace_na.R
0c42de930422f560478c509972ace9e9 *tests/testthat/test-separate.R
a91b5b14318349c8490fb4c719b1d8cf *tests/testthat/test-spread.R
-733a68e17806af6e775a72ac31a7947c *tests/testthat/test-underscored.R
+8d632517c77d3d672483aedd1aa3cb8d *tests/testthat/test-underscored.R
e5481a1d49d145db4c477d47bf6b3392 *tests/testthat/test-unite.R
70aa1570a7b1907fb1b33c0c7a25bb84 *tests/testthat/test-unnest.R
54858865b5d09e66c0541c370836818a *vignettes/billboard.csv
diff --git a/NEWS.md b/NEWS.md
index 64e31a1..71857b6 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,10 @@
+# tidyr 0.7.2
+
+* The SE variants `gather_()`, `spread_()` and `nest_()` now
+ treat non-syntactic names in the same way as pre tidy eval versions
+ of tidyr (#361).
+
+* Fix tidyr bug revealed by R-devel.
# tidyr 0.7.1
@@ -18,7 +25,6 @@ writing functions and refer to contextual objects, it is still a good
idea to avoid data expressions by following the advice of the 0.7.0
release notes.
-
# tidyr 0.7.0
This release includes important changes to tidyr internals. Tidyr now
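As a sketch of the behaviour the first NEWS entry above restores (the data frame
and its non-syntactic column names below are purely illustrative, not taken from
the patch, and this reflects the underscored 0.7.x interface):

    # With 0.7.2, the SE variants again accept plain strings, including
    # non-syntactic names, as they did before the tidy eval rewrite (#361).
    library(tidyr)
    df <- data.frame(id = 1:2, `a b` = c(3, 4), `c d` = c(5, 6),
                     check.names = FALSE)
    # gather_() takes column names as strings; "a b" and "c d" need no backticks.
    gather_(df, key_col = "key", value_col = "value",
            gather_cols = c("a b", "c d"))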
diff --git a/R/gather.R b/R/gather.R
index eac643f..d9d769c 100644
--- a/R/gather.R
+++ b/R/gather.R
@@ -216,8 +216,8 @@ gather_ <- function(data, key_col, value_col, gather_cols, na.rm = FALSE,
gather_.data.frame <- function(data, key_col, value_col, gather_cols,
na.rm = FALSE, convert = FALSE,
factor_key = FALSE) {
- key_col <- compat_lazy(key_col, caller_env())
- value_col <- compat_lazy(value_col, caller_env())
+ key_col <- sym(key_col)
+ value_col <- sym(value_col)
gather_cols <- syms(gather_cols)
gather(data,
diff --git a/R/nest.R b/R/nest.R
index 2335a5d..7d894c0 100644
--- a/R/nest.R
+++ b/R/nest.R
@@ -81,7 +81,7 @@ nest_ <- function(data, key_col, nest_cols = character()) {
}
#' @export
nest_.data.frame <- function(data, key_col, nest_cols = character()) {
- key_col <- compat_lazy(key_col, caller_env())
- nest_cols <- compat_lazy_dots(nest_cols, caller_env())
+ key_col <- sym(key_col)
+ nest_cols <- syms(nest_cols)
nest(data, .key = !! key_col, !!! nest_cols)
}
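The gather.R and nest.R hunks above (and the spread.R hunk that follows) all
replace compat_lazy() with the same pattern: the string(s) passed to an SE
variant are converted to symbols with rlang's sym()/syms() and spliced into the
tidy eval front end with !!/!!!. A minimal, self-contained sketch of that
pattern (the data frame and column names are illustrative; rlang is attached
explicitly here, whereas tidyr imports it internally):

    library(tidyr)
    library(rlang)
    df <- data.frame(g = c("a", "a", "b"), x = 1:3, y = 4:6)
    key_col   <- sym("nested")      # one string -> one symbol
    nest_cols <- syms(c("x", "y"))  # character vector -> list of symbols
    # The NSE front end is then called with the symbols unquoted,
    # mirroring nest_.data.frame() above:
    nest(df, .key = !! key_col, !!! nest_cols)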
diff --git a/R/spread.R b/R/spread.R
index d129d90..e0939ff 100644
--- a/R/spread.R
+++ b/R/spread.R
@@ -173,8 +173,8 @@ spread_ <- function(data, key_col, value_col, fill = NA, convert = FALSE,
#' @export
spread_.data.frame <- function(data, key_col, value_col, fill = NA,
convert = FALSE, drop = TRUE, sep = NULL) {
- key_col <- compat_lazy(key_col, caller_env())
- value_col <- compat_lazy(value_col, caller_env())
+ key_col <- sym(key_col)
+ value_col <- sym(value_col)
spread(data,
key = !! key_col,
diff --git a/inst/doc/tidy-data.html b/inst/doc/tidy-data.html
index 3b1a75a..5ca537e 100644
--- a/inst/doc/tidy-data.html
+++ b/inst/doc/tidy-data.html
@@ -18,28 +18,46 @@
<style type="text/css">code{white-space: pre;}</style>
<style type="text/css">
+div.sourceCode { overflow-x: auto; }
table.sourceCode, tr.sourceCode, td.lineNumbers, td.sourceCode {
margin: 0; padding: 0; vertical-align: baseline; border: none; }
table.sourceCode { width: 100%; line-height: 100%; }
td.lineNumbers { text-align: right; padding-right: 4px; padding-left: 4px; color: #aaaaaa; border-right: 1px solid #aaaaaa; }
td.sourceCode { padding-left: 5px; }
-code > span.kw { color: #007020; font-weight: bold; }
-code > span.dt { color: #902000; }
-code > span.dv { color: #40a070; }
-code > span.bn { color: #40a070; }
-code > span.fl { color: #40a070; }
-code > span.ch { color: #4070a0; }
-code > span.st { color: #4070a0; }
-code > span.co { color: #60a0b0; font-style: italic; }
-code > span.ot { color: #007020; }
-code > span.al { color: #ff0000; font-weight: bold; }
-code > span.fu { color: #06287e; }
-code > span.er { color: #ff0000; font-weight: bold; }
+code > span.kw { color: #007020; font-weight: bold; } /* Keyword */
+code > span.dt { color: #902000; } /* DataType */
+code > span.dv { color: #40a070; } /* DecVal */
+code > span.bn { color: #40a070; } /* BaseN */
+code > span.fl { color: #40a070; } /* Float */
+code > span.ch { color: #4070a0; } /* Char */
+code > span.st { color: #4070a0; } /* String */
+code > span.co { color: #60a0b0; font-style: italic; } /* Comment */
+code > span.ot { color: #007020; } /* Other */
+code > span.al { color: #ff0000; font-weight: bold; } /* Alert */
+code > span.fu { color: #06287e; } /* Function */
+code > span.er { color: #ff0000; font-weight: bold; } /* Error */
+code > span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */
+code > span.cn { color: #880000; } /* Constant */
+code > span.sc { color: #4070a0; } /* SpecialChar */
+code > span.vs { color: #4070a0; } /* VerbatimString */
+code > span.ss { color: #bb6688; } /* SpecialString */
+code > span.im { } /* Import */
+code > span.va { color: #19177c; } /* Variable */
+code > span.cf { color: #007020; font-weight: bold; } /* ControlFlow */
+code > span.op { color: #666666; } /* Operator */
+code > span.bu { } /* BuiltIn */
+code > span.ex { } /* Extension */
+code > span.pp { color: #bc7a00; } /* Preprocessor */
+code > span.at { color: #7d9029; } /* Attribute */
+code > span.do { color: #ba2121; font-style: italic; } /* Documentation */
+code > span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */
+code > span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */
+code > span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */
</style>
-<link href="data:text/css,body%20%7B%0A%20%20background%2Dcolor%3A%20%23fff%3B%0A%20%20margin%3A%201em%20auto%3B%0A%20%20max%2Dwidth%3A%20700px%3B%0A%20%20overflow%3A%20visible%3B%0A%20%20padding%2Dleft%3A%202em%3B%0A%20%20padding%2Dright%3A%202em%3B%0A%20%20font%2Dfamily%3A%20%22Open%20Sans%22%2C%20%22Helvetica%20Neue%22%2C%20Helvetica%2C%20Arial%2C%20sans%2Dserif%3B%0A%20%20font%2Dsize%3A%2014px%3B%0A%20%20line%2Dheight%3A%201%2E35%3B%0A%7D%0A%0A%23header%20%7B%0A%20%20text%2Dalign%3A% [...]
+<link href="data:text/css;charset=utf-8,body%20%7B%0Abackground%2Dcolor%3A%20%23fff%3B%0Amargin%3A%201em%20auto%3B%0Amax%2Dwidth%3A%20700px%3B%0Aoverflow%3A%20visible%3B%0Apadding%2Dleft%3A%202em%3B%0Apadding%2Dright%3A%202em%3B%0Afont%2Dfamily%3A%20%22Open%20Sans%22%2C%20%22Helvetica%20Neue%22%2C%20Helvetica%2C%20Arial%2C%20sans%2Dserif%3B%0Afont%2Dsize%3A%2014px%3B%0Aline%2Dheight%3A%201%2E35%3B%0A%7D%0A%23header%20%7B%0Atext%2Dalign%3A%20center%3B%0A%7D%0A%23TOC%20%7B%0Aclear%3A%20bot [...]
</head>
@@ -67,28 +85,29 @@ code > span.er { color: #ff0000; font-weight: bold; }
<div id="data-structure" class="section level2">
<h2>Data structure</h2>
<p>Most statistical datasets are data frames made up of <strong>rows</strong> and <strong>columns</strong>. The columns are almost always labeled and the rows are sometimes labeled. The following code provides some data about an imaginary experiment in a format commonly seen in the wild. The table has two columns and three rows, and both rows and columns are labeled.</p>
-<pre class="sourceCode r"><code class="sourceCode r">preg <-<span class="st"> </span><span class="kw">read.csv</span>(<span class="st">"preg.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">preg <-<span class="st"> </span><span class="kw">read.csv</span>(<span class="st">"preg.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>)
preg
<span class="co">#> name treatmenta treatmentb</span>
<span class="co">#> 1 John Smith NA 18</span>
<span class="co">#> 2 Jane Doe 4 1</span>
-<span class="co">#> 3 Mary Johnson 6 7</span></code></pre>
+<span class="co">#> 3 Mary Johnson 6 7</span></code></pre></div>
<p>There are many ways to structure the same underlying data. The following table shows the same data as above, but the rows and columns have been transposed.</p>
-<pre class="sourceCode r"><code class="sourceCode r"><span class="kw">read.csv</span>(<span class="st">"preg2.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">read.csv</span>(<span class="st">"preg2.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>)
<span class="co">#> treatment John.Smith Jane.Doe Mary.Johnson</span>
<span class="co">#> 1 a NA 4 6</span>
-<span class="co">#> 2 b 18 1 7</span></code></pre>
+<span class="co">#> 2 b 18 1 7</span></code></pre></div>
<p>The data is the same, but the layout is different. Our vocabulary of rows and columns is simply not rich enough to describe why the two tables represent the same data. In addition to appearance, we need a way to describe the underlying semantics, or meaning, of the values displayed in the table.</p>
</div>
<div id="data-semantics" class="section level2">
<h2>Data semantics</h2>
<p>A dataset is a collection of <strong>values</strong>, usually either numbers (if quantitative) or strings (if qualitative). Values are organised in two ways. Every value belongs to a <strong>variable</strong> and an <strong>observation</strong>. A variable contains all values that measure the same underlying attribute (like height, temperature, duration) across units. An observation contains all values measured on the same unit (like a person, or a day, or a race) across attributes.</p>
<p>A tidy version of the pregnancy data looks like this: (you’ll learn how the functions work a little later)</p>
-<pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(tidyr)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(tidyr)
<span class="kw">library</span>(dplyr)
-preg2 <-<span class="st"> </span>preg %>%<span class="st"> </span>
-<span class="st"> </span><span class="kw">gather</span>(treatment, n, treatmenta:treatmentb) %>%
-<span class="st"> </span><span class="kw">mutate</span>(<span class="dt">treatment =</span> <span class="kw">gsub</span>(<span class="st">"treatment"</span>, <span class="st">""</span>, treatment)) %>%
+<span class="co">#> Warning: package 'dplyr' was built under R version 3.4.2</span>
+preg2 <-<span class="st"> </span>preg <span class="op">%>%</span><span class="st"> </span>
+<span class="st"> </span><span class="kw">gather</span>(treatment, n, treatmenta<span class="op">:</span>treatmentb) <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">mutate</span>(<span class="dt">treatment =</span> <span class="kw">gsub</span>(<span class="st">"treatment"</span>, <span class="st">""</span>, treatment)) <span class="op">%>%</span>
<span class="st"> </span><span class="kw">arrange</span>(name, treatment)
preg2
<span class="co">#> name treatment n</span>
@@ -97,7 +116,7 @@ preg2
<span class="co">#> 3 John Smith a NA</span>
<span class="co">#> 4 John Smith b 18</span>
<span class="co">#> 5 Mary Johnson a 6</span>
-<span class="co">#> 6 Mary Johnson b 7</span></code></pre>
+<span class="co">#> 6 Mary Johnson b 7</span></code></pre></div>
<p>This makes the values, variables and observations more clear. The dataset contains 18 values representing three variables and six observations. The variables are:</p>
<ol style="list-style-type: decimal">
<li><p><code>name</code>, with three possible values (John, Mary, and Jane).</p></li>
@@ -137,288 +156,284 @@ preg2
<h2>Column headers are values, not variable names</h2>
<p>A common type of messy dataset is tabular data designed for presentation, where variables form both the rows and columns, and column headers are values, not variable names. While I would call this arrangement messy, in some cases it can be extremely useful. It provides efficient storage for completely crossed designs, and it can lead to extremely efficient computation if desired operations can be expressed as matrix operations.</p>
<p>The following code shows a subset of a typical dataset of this form. This dataset explores the relationship between income and religion in the US. It comes from a report<a href="#fn1" class="footnoteRef" id="fnref1"><sup>1</sup></a> produced by the Pew Research Center, an American think-tank that collects data on attitudes to topics ranging from religion to the internet, and produces many reports that contain datasets in this format.</p>
-<pre class="sourceCode r"><code class="sourceCode r">pew <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"pew.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>, <span class="dt">check.names =</span> <span class="ot">FALSE</span>))
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">pew <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"pew.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>, <span class="dt">check.names =</span> <span class="ot">FALSE</span>))
pew
<span class="co">#> # A tibble: 18 x 11</span>
-<span class="co">#> religion `<$10k` `$10-20k` `$20-30k` `$30-40k` `$40-50k`</span>
-<span class="co">#> <chr> <int> <int> <int> <int> <int></span>
-<span class="co">#> 1 Agnostic 27 34 60 81 76</span>
-<span class="co">#> 2 Atheist 12 27 37 52 35</span>
-<span class="co">#> 3 Buddhist 27 21 30 34 33</span>
-<span class="co">#> 4 Catholic 418 617 732 670 638</span>
-<span class="co">#> 5 Don’t know/refused 15 14 15 11 10</span>
-<span class="co">#> 6 Evangelical Prot 575 869 1064 982 881</span>
-<span class="co">#> 7 Hindu 1 9 7 9 11</span>
-<span class="co">#> 8 Historically Black Prot 228 244 236 238 197</span>
-<span class="co">#> 9 Jehovah's Witness 20 27 24 24 21</span>
-<span class="co">#> 10 Jewish 19 19 25 25 30</span>
-<span class="co">#> # ... with 8 more rows, and 5 more variables: `$50-75k` <int>,</span>
-<span class="co">#> # `$75-100k` <int>, `$100-150k` <int>, `>150k` <int>, `Don't</span>
-<span class="co">#> # know/refused` <int></span></code></pre>
+<span class="co">#> religi… `<$10… `$10-… `$20-… `$30… `$40… `$50… `$75… `$10… `>15… `Don'…</span>
+<span class="co">#> <chr> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int></span>
+<span class="co">#> 1 Agnost… 27 34 60 81 76 137 122 109 84 96</span>
+<span class="co">#> 2 Atheist 12 27 37 52 35 70 73 59 74 76</span>
+<span class="co">#> 3 Buddhi… 27 21 30 34 33 58 62 39 53 54</span>
+<span class="co">#> 4 Cathol… 418 617 732 670 638 1116 949 792 633 1489</span>
+<span class="co">#> 5 Don’t … 15 14 15 11 10 35 21 17 18 116</span>
+<span class="co">#> 6 Evange… 575 869 1064 982 881 1486 949 723 414 1529</span>
+<span class="co">#> 7 Hindu 1 9 7 9 11 34 47 48 54 37</span>
+<span class="co">#> 8 Histor… 228 244 236 238 197 223 131 81 78 339</span>
+<span class="co">#> 9 Jehova… 20 27 24 24 21 30 15 11 6 37</span>
+<span class="co">#> 10 Jewish 19 19 25 25 30 95 69 87 151 162</span>
+<span class="co">#> # ... with 8 more rows</span></code></pre></div>
<p>This dataset has three variables, <code>religion</code>, <code>income</code> and <code>frequency</code>. To tidy it, we need to <strong>gather</strong> the non-variable columns into a two-column key-value pair. This action is often described as making a wide dataset long (or tall), but I’ll avoid those terms because they’re imprecise.</p>
<p>When gathering variables, we need to provide the name of the new key-value columns to create. The first argument, is the name of the key column, which is the name of the variable defined by the values of the column headings. In this case, it’s <code>income</code>. The second argument is the name of the value column, <code>frequency</code>. The third argument defines the columns to gather, here, every column except religion.</p>
-<pre class="sourceCode r"><code class="sourceCode r">pew %>%
-<span class="st"> </span><span class="kw">gather</span>(income, frequency, -religion)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">pew <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">gather</span>(income, frequency, <span class="op">-</span>religion)
<span class="co">#> # A tibble: 180 x 3</span>
-<span class="co">#> religion income frequency</span>
-<span class="co">#> <chr> <chr> <int></span>
-<span class="co">#> 1 Agnostic <$10k 27</span>
-<span class="co">#> 2 Atheist <$10k 12</span>
-<span class="co">#> 3 Buddhist <$10k 27</span>
-<span class="co">#> 4 Catholic <$10k 418</span>
-<span class="co">#> 5 Don’t know/refused <$10k 15</span>
-<span class="co">#> 6 Evangelical Prot <$10k 575</span>
-<span class="co">#> 7 Hindu <$10k 1</span>
-<span class="co">#> 8 Historically Black Prot <$10k 228</span>
-<span class="co">#> 9 Jehovah's Witness <$10k 20</span>
-<span class="co">#> 10 Jewish <$10k 19</span>
-<span class="co">#> # ... with 170 more rows</span></code></pre>
+<span class="co">#> religion income frequency</span>
+<span class="co">#> <chr> <chr> <int></span>
+<span class="co">#> 1 Agnostic <$10k 27</span>
+<span class="co">#> 2 Atheist <$10k 12</span>
+<span class="co">#> 3 Buddhist <$10k 27</span>
+<span class="co">#> 4 Catholic <$10k 418</span>
+<span class="co">#> 5 Don’t know/refused <$10k 15</span>
+<span class="co">#> 6 Evangelical Prot <$10k 575</span>
+<span class="co">#> 7 Hindu <$10k 1</span>
+<span class="co">#> 8 Historically Black Prot <$10k 228</span>
+<span class="co">#> 9 Jehovah's Witness <$10k 20</span>
+<span class="co">#> 10 Jewish <$10k 19</span>
+<span class="co">#> # ... with 170 more rows</span></code></pre></div>
<p>This form is tidy because each column represents a variable and each row represents an observation, in this case a demographic unit corresponding to a combination of <code>religion</code> and <code>income</code>.</p>
<p>This format is also used to record regularly spaced observations over time. For example, the Billboard dataset shown below records the date a song first entered the billboard top 100. It has variables for <code>artist</code>, <code>track</code>, <code>date.entered</code>, <code>rank</code> and <code>week</code>. The rank in each week after it enters the top 100 is recorded in 75 columns, <code>wk1</code> to <code>wk75</code>. This form of storage is not tidy, but it is useful for data [...]
-<pre class="sourceCode r"><code class="sourceCode r">billboard <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"billboard.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>))
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">billboard <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"billboard.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>))
billboard
<span class="co">#> # A tibble: 317 x 81</span>
-<span class="co">#> year artist track time date.entered wk1</span>
-<span class="co">#> <int> <chr> <chr> <chr> <chr> <int></span>
-<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep... 4:22 2000-02-26 87</span>
-<span class="co">#> 2 2000 2Ge+her The Hardest Part Of ... 3:15 2000-09-02 91</span>
-<span class="co">#> 3 2000 3 Doors Down Kryptonite 3:53 2000-04-08 81</span>
-<span class="co">#> 4 2000 3 Doors Down Loser 4:24 2000-10-21 76</span>
-<span class="co">#> 5 2000 504 Boyz Wobble Wobble 3:35 2000-04-15 57</span>
-<span class="co">#> 6 2000 98^0 Give Me Just One Nig... 3:24 2000-08-19 51</span>
-<span class="co">#> 7 2000 A*Teens Dancing Queen 3:44 2000-07-08 97</span>
-<span class="co">#> 8 2000 Aaliyah I Don't Wanna 4:15 2000-01-29 84</span>
-<span class="co">#> 9 2000 Aaliyah Try Again 4:03 2000-03-18 59</span>
-<span class="co">#> 10 2000 Adams, Yolanda Open My Heart 5:30 2000-08-26 76</span>
-<span class="co">#> # ... with 307 more rows, and 75 more variables: wk2 <int>, wk3 <int>,</span>
-<span class="co">#> # wk4 <int>, wk5 <int>, wk6 <int>, wk7 <int>, wk8 <int>, wk9 <int>,</span>
-<span class="co">#> # wk10 <int>, wk11 <int>, wk12 <int>, wk13 <int>, wk14 <int>,</span>
-<span class="co">#> # wk15 <int>, wk16 <int>, wk17 <int>, wk18 <int>, wk19 <int>,</span>
-<span class="co">#> # wk20 <int>, wk21 <int>, wk22 <int>, wk23 <int>, wk24 <int>,</span>
-<span class="co">#> # wk25 <int>, wk26 <int>, wk27 <int>, wk28 <int>, wk29 <int>,</span>
-<span class="co">#> # wk30 <int>, wk31 <int>, wk32 <int>, wk33 <int>, wk34 <int>,</span>
-<span class="co">#> # wk35 <int>, wk36 <int>, wk37 <int>, wk38 <int>, wk39 <int>,</span>
-<span class="co">#> # wk40 <int>, wk41 <int>, wk42 <int>, wk43 <int>, wk44 <int>,</span>
-<span class="co">#> # wk45 <int>, wk46 <int>, wk47 <int>, wk48 <int>, wk49 <int>,</span>
-<span class="co">#> # wk50 <int>, wk51 <int>, wk52 <int>, wk53 <int>, wk54 <int>,</span>
-<span class="co">#> # wk55 <int>, wk56 <int>, wk57 <int>, wk58 <int>, wk59 <int>,</span>
-<span class="co">#> # wk60 <int>, wk61 <int>, wk62 <int>, wk63 <int>, wk64 <int>,</span>
-<span class="co">#> # wk65 <int>, wk66 <lgl>, wk67 <lgl>, wk68 <lgl>, wk69 <lgl>,</span>
-<span class="co">#> # wk70 <lgl>, wk71 <lgl>, wk72 <lgl>, wk73 <lgl>, wk74 <lgl>,</span>
-<span class="co">#> # wk75 <lgl>, wk76 <lgl></span></code></pre>
+<span class="co">#> year arti… track time date… wk1 wk2 wk3 wk4 wk5 wk6 wk7</span>
+<span class="co">#> <int> <chr> <chr> <chr> <chr> <int> <int> <int> <int> <int> <int> <int></span>
+<span class="co">#> 1 2000 2 Pac Baby… 4:22 2000… 87 82 72 77 87 94 99</span>
+<span class="co">#> 2 2000 2Ge+… The … 3:15 2000… 91 87 92 NA NA NA NA</span>
+<span class="co">#> 3 2000 3 Do… Kryp… 3:53 2000… 81 70 68 67 66 57 54</span>
+<span class="co">#> 4 2000 3 Do… Loser 4:24 2000… 76 76 72 69 67 65 55</span>
+<span class="co">#> 5 2000 504 … Wobb… 3:35 2000… 57 34 25 17 17 31 36</span>
+<span class="co">#> 6 2000 98^0 Give… 3:24 2000… 51 39 34 26 26 19 2</span>
+<span class="co">#> 7 2000 A*Te… Danc… 3:44 2000… 97 97 96 95 100 NA NA</span>
+<span class="co">#> 8 2000 Aali… I Do… 4:15 2000… 84 62 51 41 38 35 35</span>
+<span class="co">#> 9 2000 Aali… Try … 4:03 2000… 59 53 38 28 21 18 16</span>
+<span class="co">#> 10 2000 Adam… Open… 5:30 2000… 76 76 74 69 68 67 61</span>
+<span class="co">#> # ... with 307 more rows, and 69 more variables: wk8 <int>, wk9 <int>,</span>
+<span class="co">#> # wk10 <int>, wk11 <int>, wk12 <int>, wk13 <int>, wk14 <int>, wk15</span>
+<span class="co">#> # <int>, wk16 <int>, wk17 <int>, wk18 <int>, wk19 <int>, wk20 <int>,</span>
+<span class="co">#> # wk21 <int>, wk22 <int>, wk23 <int>, wk24 <int>, wk25 <int>, wk26</span>
+<span class="co">#> # <int>, wk27 <int>, wk28 <int>, wk29 <int>, wk30 <int>, wk31 <int>,</span>
+<span class="co">#> # wk32 <int>, wk33 <int>, wk34 <int>, wk35 <int>, wk36 <int>, wk37</span>
+<span class="co">#> # <int>, wk38 <int>, wk39 <int>, wk40 <int>, wk41 <int>, wk42 <int>,</span>
+<span class="co">#> # wk43 <int>, wk44 <int>, wk45 <int>, wk46 <int>, wk47 <int>, wk48</span>
+<span class="co">#> # <int>, wk49 <int>, wk50 <int>, wk51 <int>, wk52 <int>, wk53 <int>,</span>
+<span class="co">#> # wk54 <int>, wk55 <int>, wk56 <int>, wk57 <int>, wk58 <int>, wk59</span>
+<span class="co">#> # <int>, wk60 <int>, wk61 <int>, wk62 <int>, wk63 <int>, wk64 <int>,</span>
+<span class="co">#> # wk65 <int>, wk66 <lgl>, wk67 <lgl>, wk68 <lgl>, wk69 <lgl>, wk70</span>
+<span class="co">#> # <lgl>, wk71 <lgl>, wk72 <lgl>, wk73 <lgl>, wk74 <lgl>, wk75 <lgl>,</span>
+<span class="co">#> # wk76 <lgl></span></code></pre></div>
<p>To tidy this dataset, we first gather together all the <code>wk</code> columns. The column names give the <code>week</code> and the values are the <code>rank</code>s:</p>
-<pre class="sourceCode r"><code class="sourceCode r">billboard2 <-<span class="st"> </span>billboard %>%<span class="st"> </span>
-<span class="st"> </span><span class="kw">gather</span>(week, rank, wk1:wk76, <span class="dt">na.rm =</span> <span class="ot">TRUE</span>)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">billboard2 <-<span class="st"> </span>billboard <span class="op">%>%</span><span class="st"> </span>
+<span class="st"> </span><span class="kw">gather</span>(week, rank, wk1<span class="op">:</span>wk76, <span class="dt">na.rm =</span> <span class="ot">TRUE</span>)
billboard2
<span class="co">#> # A tibble: 5,307 x 7</span>
-<span class="co">#> year artist track time date.entered week</span>
-<span class="co">#> * <int> <chr> <chr> <chr> <chr> <chr></span>
-<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep... 4:22 2000-02-26 wk1</span>
-<span class="co">#> 2 2000 2Ge+her The Hardest Part Of ... 3:15 2000-09-02 wk1</span>
-<span class="co">#> 3 2000 3 Doors Down Kryptonite 3:53 2000-04-08 wk1</span>
-<span class="co">#> 4 2000 3 Doors Down Loser 4:24 2000-10-21 wk1</span>
-<span class="co">#> 5 2000 504 Boyz Wobble Wobble 3:35 2000-04-15 wk1</span>
-<span class="co">#> 6 2000 98^0 Give Me Just One Nig... 3:24 2000-08-19 wk1</span>
-<span class="co">#> 7 2000 A*Teens Dancing Queen 3:44 2000-07-08 wk1</span>
-<span class="co">#> 8 2000 Aaliyah I Don't Wanna 4:15 2000-01-29 wk1</span>
-<span class="co">#> 9 2000 Aaliyah Try Again 4:03 2000-03-18 wk1</span>
-<span class="co">#> 10 2000 Adams, Yolanda Open My Heart 5:30 2000-08-26 wk1</span>
-<span class="co">#> # ... with 5,297 more rows, and 1 more variables: rank <int></span></code></pre>
+<span class="co">#> year artist track time date.en… week rank</span>
+<span class="co">#> * <int> <chr> <chr> <chr> <chr> <chr> <int></span>
+<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep... 4:22 2000-02… wk1 87</span>
+<span class="co">#> 2 2000 2Ge+her The Hardest Part Of ... 3:15 2000-09… wk1 91</span>
+<span class="co">#> 3 2000 3 Doors Down Kryptonite 3:53 2000-04… wk1 81</span>
+<span class="co">#> 4 2000 3 Doors Down Loser 4:24 2000-10… wk1 76</span>
+<span class="co">#> 5 2000 504 Boyz Wobble Wobble 3:35 2000-04… wk1 57</span>
+<span class="co">#> 6 2000 98^0 Give Me Just One Nig... 3:24 2000-08… wk1 51</span>
+<span class="co">#> 7 2000 A*Teens Dancing Queen 3:44 2000-07… wk1 97</span>
+<span class="co">#> 8 2000 Aaliyah I Don't Wanna 4:15 2000-01… wk1 84</span>
+<span class="co">#> 9 2000 Aaliyah Try Again 4:03 2000-03… wk1 59</span>
+<span class="co">#> 10 2000 Adams, Yolanda Open My Heart 5:30 2000-08… wk1 76</span>
+<span class="co">#> # ... with 5,297 more rows</span></code></pre></div>
<p>Here we use <code>na.rm</code> to drop any missing values from the gather columns. In this data, missing values represent weeks that the song wasn’t in the charts, so can be safely dropped.</p>
<p>In this case it’s also nice to do a little cleaning, converting the week variable to a number, and figuring out the date corresponding to each week on the charts:</p>
-<pre class="sourceCode r"><code class="sourceCode r">billboard3 <-<span class="st"> </span>billboard2 %>%
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">billboard3 <-<span class="st"> </span>billboard2 <span class="op">%>%</span>
<span class="st"> </span><span class="kw">mutate</span>(
<span class="dt">week =</span> <span class="kw">extract_numeric</span>(week),
- <span class="dt">date =</span> <span class="kw">as.Date</span>(date.entered) +<span class="st"> </span><span class="dv">7</span> *<span class="st"> </span>(week -<span class="st"> </span><span class="dv">1</span>)) %>%
-<span class="st"> </span><span class="kw">select</span>(-date.entered)
+ <span class="dt">date =</span> <span class="kw">as.Date</span>(date.entered) <span class="op">+</span><span class="st"> </span><span class="dv">7</span> <span class="op">*</span><span class="st"> </span>(week <span class="op">-</span><span class="st"> </span><span class="dv">1</span>)) <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">select</span>(<span class="op">-</span>date.entered)
<span class="co">#> extract_numeric() is deprecated: please use readr::parse_number() instead</span>
billboard3
<span class="co">#> # A tibble: 5,307 x 7</span>
-<span class="co">#> year artist track time week rank</span>
-<span class="co">#> <int> <chr> <chr> <chr> <dbl> <int></span>
-<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep... 4:22 1 87</span>
-<span class="co">#> 2 2000 2Ge+her The Hardest Part Of ... 3:15 1 91</span>
-<span class="co">#> 3 2000 3 Doors Down Kryptonite 3:53 1 81</span>
-<span class="co">#> 4 2000 3 Doors Down Loser 4:24 1 76</span>
-<span class="co">#> 5 2000 504 Boyz Wobble Wobble 3:35 1 57</span>
-<span class="co">#> 6 2000 98^0 Give Me Just One Nig... 3:24 1 51</span>
-<span class="co">#> 7 2000 A*Teens Dancing Queen 3:44 1 97</span>
-<span class="co">#> 8 2000 Aaliyah I Don't Wanna 4:15 1 84</span>
-<span class="co">#> 9 2000 Aaliyah Try Again 4:03 1 59</span>
-<span class="co">#> 10 2000 Adams, Yolanda Open My Heart 5:30 1 76</span>
-<span class="co">#> # ... with 5,297 more rows, and 1 more variables: date <date></span></code></pre>
+<span class="co">#> year artist track time week rank date </span>
+<span class="co">#> <int> <chr> <chr> <chr> <dbl> <int> <date> </span>
+<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep… 4:22 1.00 87 2000-02-26</span>
+<span class="co">#> 2 2000 2Ge+her The Hardest Part Of … 3:15 1.00 91 2000-09-02</span>
+<span class="co">#> 3 2000 3 Doors Down Kryptonite 3:53 1.00 81 2000-04-08</span>
+<span class="co">#> 4 2000 3 Doors Down Loser 4:24 1.00 76 2000-10-21</span>
+<span class="co">#> 5 2000 504 Boyz Wobble Wobble 3:35 1.00 57 2000-04-15</span>
+<span class="co">#> 6 2000 98^0 Give Me Just One Nig… 3:24 1.00 51 2000-08-19</span>
+<span class="co">#> 7 2000 A*Teens Dancing Queen 3:44 1.00 97 2000-07-08</span>
+<span class="co">#> 8 2000 Aaliyah I Don't Wanna 4:15 1.00 84 2000-01-29</span>
+<span class="co">#> 9 2000 Aaliyah Try Again 4:03 1.00 59 2000-03-18</span>
+<span class="co">#> 10 2000 Adams, Yolanda Open My Heart 5:30 1.00 76 2000-08-26</span>
+<span class="co">#> # ... with 5,297 more rows</span></code></pre></div>
<p>Finally, it’s always a good idea to sort the data. We could do it by artist, track and week:</p>
-<pre class="sourceCode r"><code class="sourceCode r">billboard3 %>%<span class="st"> </span><span class="kw">arrange</span>(artist, track, week)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">billboard3 <span class="op">%>%</span><span class="st"> </span><span class="kw">arrange</span>(artist, track, week)
<span class="co">#> # A tibble: 5,307 x 7</span>
-<span class="co">#> year artist track time week rank date</span>
-<span class="co">#> <int> <chr> <chr> <chr> <dbl> <int> <date></span>
-<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep... 4:22 1 87 2000-02-26</span>
-<span class="co">#> 2 2000 2 Pac Baby Don't Cry (Keep... 4:22 2 82 2000-03-04</span>
-<span class="co">#> 3 2000 2 Pac Baby Don't Cry (Keep... 4:22 3 72 2000-03-11</span>
-<span class="co">#> 4 2000 2 Pac Baby Don't Cry (Keep... 4:22 4 77 2000-03-18</span>
-<span class="co">#> 5 2000 2 Pac Baby Don't Cry (Keep... 4:22 5 87 2000-03-25</span>
-<span class="co">#> 6 2000 2 Pac Baby Don't Cry (Keep... 4:22 6 94 2000-04-01</span>
-<span class="co">#> 7 2000 2 Pac Baby Don't Cry (Keep... 4:22 7 99 2000-04-08</span>
-<span class="co">#> 8 2000 2Ge+her The Hardest Part Of ... 3:15 1 91 2000-09-02</span>
-<span class="co">#> 9 2000 2Ge+her The Hardest Part Of ... 3:15 2 87 2000-09-09</span>
-<span class="co">#> 10 2000 2Ge+her The Hardest Part Of ... 3:15 3 92 2000-09-16</span>
-<span class="co">#> # ... with 5,297 more rows</span></code></pre>
+<span class="co">#> year artist track time week rank date </span>
+<span class="co">#> <int> <chr> <chr> <chr> <dbl> <int> <date> </span>
+<span class="co">#> 1 2000 2 Pac Baby Don't Cry (Keep... 4:22 1.00 87 2000-02-26</span>
+<span class="co">#> 2 2000 2 Pac Baby Don't Cry (Keep... 4:22 2.00 82 2000-03-04</span>
+<span class="co">#> 3 2000 2 Pac Baby Don't Cry (Keep... 4:22 3.00 72 2000-03-11</span>
+<span class="co">#> 4 2000 2 Pac Baby Don't Cry (Keep... 4:22 4.00 77 2000-03-18</span>
+<span class="co">#> 5 2000 2 Pac Baby Don't Cry (Keep... 4:22 5.00 87 2000-03-25</span>
+<span class="co">#> 6 2000 2 Pac Baby Don't Cry (Keep... 4:22 6.00 94 2000-04-01</span>
+<span class="co">#> 7 2000 2 Pac Baby Don't Cry (Keep... 4:22 7.00 99 2000-04-08</span>
+<span class="co">#> 8 2000 2Ge+her The Hardest Part Of ... 3:15 1.00 91 2000-09-02</span>
+<span class="co">#> 9 2000 2Ge+her The Hardest Part Of ... 3:15 2.00 87 2000-09-09</span>
+<span class="co">#> 10 2000 2Ge+her The Hardest Part Of ... 3:15 3.00 92 2000-09-16</span>
+<span class="co">#> # ... with 5,297 more rows</span></code></pre></div>
<p>Or by date and rank:</p>
-<pre class="sourceCode r"><code class="sourceCode r">billboard3 %>%<span class="st"> </span><span class="kw">arrange</span>(date, rank)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">billboard3 <span class="op">%>%</span><span class="st"> </span><span class="kw">arrange</span>(date, rank)
<span class="co">#> # A tibble: 5,307 x 7</span>
-<span class="co">#> year artist track time week rank date</span>
-<span class="co">#> <int> <chr> <chr> <chr> <dbl> <int> <date></span>
-<span class="co">#> 1 2000 Lonestar Amazed 4:25 1 81 1999-06-05</span>
-<span class="co">#> 2 2000 Lonestar Amazed 4:25 2 54 1999-06-12</span>
-<span class="co">#> 3 2000 Lonestar Amazed 4:25 3 44 1999-06-19</span>
-<span class="co">#> 4 2000 Lonestar Amazed 4:25 4 39 1999-06-26</span>
-<span class="co">#> 5 2000 Lonestar Amazed 4:25 5 38 1999-07-03</span>
-<span class="co">#> 6 2000 Lonestar Amazed 4:25 6 33 1999-07-10</span>
-<span class="co">#> 7 2000 Lonestar Amazed 4:25 7 29 1999-07-17</span>
-<span class="co">#> 8 2000 Amber Sexual 4:38 1 99 1999-07-17</span>
-<span class="co">#> 9 2000 Lonestar Amazed 4:25 8 29 1999-07-24</span>
-<span class="co">#> 10 2000 Amber Sexual 4:38 2 99 1999-07-24</span>
-<span class="co">#> # ... with 5,297 more rows</span></code></pre>
+<span class="co">#> year artist track time week rank date </span>
+<span class="co">#> <int> <chr> <chr> <chr> <dbl> <int> <date> </span>
+<span class="co">#> 1 2000 Lonestar Amazed 4:25 1.00 81 1999-06-05</span>
+<span class="co">#> 2 2000 Lonestar Amazed 4:25 2.00 54 1999-06-12</span>
+<span class="co">#> 3 2000 Lonestar Amazed 4:25 3.00 44 1999-06-19</span>
+<span class="co">#> 4 2000 Lonestar Amazed 4:25 4.00 39 1999-06-26</span>
+<span class="co">#> 5 2000 Lonestar Amazed 4:25 5.00 38 1999-07-03</span>
+<span class="co">#> 6 2000 Lonestar Amazed 4:25 6.00 33 1999-07-10</span>
+<span class="co">#> 7 2000 Lonestar Amazed 4:25 7.00 29 1999-07-17</span>
+<span class="co">#> 8 2000 Amber Sexual 4:38 1.00 99 1999-07-17</span>
+<span class="co">#> 9 2000 Lonestar Amazed 4:25 8.00 29 1999-07-24</span>
+<span class="co">#> 10 2000 Amber Sexual 4:38 2.00 99 1999-07-24</span>
+<span class="co">#> # ... with 5,297 more rows</span></code></pre></div>
</div>
<div id="multiple-variables-stored-in-one-column" class="section level2">
<h2>Multiple variables stored in one column</h2>
<p>After gathering columns, the key column is sometimes a combination of multiple underlying variable names. This happens in the <code>tb</code> (tuberculosis) dataset, shown below. This dataset comes from the World Health Organisation, and records the counts of confirmed tuberculosis cases by <code>country</code>, <code>year</code>, and demographic group. The demographic groups are broken down by <code>sex</code> (m, f) and <code>age</code> (0-14, 15-25, 25-34, 35-44, 45-54, 55-64, unkn [...]
-<pre class="sourceCode r"><code class="sourceCode r">tb <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"tb.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>))
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">tb <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"tb.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>))
tb
<span class="co">#> # A tibble: 5,769 x 22</span>
-<span class="co">#> iso2 year m04 m514 m014 m1524 m2534 m3544 m4554 m5564 m65 mu</span>
+<span class="co">#> iso2 year m04 m514 m014 m1524 m2534 m3544 m4554 m5564 m65 mu</span>
<span class="co">#> <chr> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int></span>
-<span class="co">#> 1 AD 1989 NA NA NA NA NA NA NA NA NA NA</span>
-<span class="co">#> 2 AD 1990 NA NA NA NA NA NA NA NA NA NA</span>
-<span class="co">#> 3 AD 1991 NA NA NA NA NA NA NA NA NA NA</span>
-<span class="co">#> 4 AD 1992 NA NA NA NA NA NA NA NA NA NA</span>
-<span class="co">#> 5 AD 1993 NA NA NA NA NA NA NA NA NA NA</span>
-<span class="co">#> 6 AD 1994 NA NA NA NA NA NA NA NA NA NA</span>
-<span class="co">#> 7 AD 1996 NA NA 0 0 0 4 1 0 0 NA</span>
-<span class="co">#> 8 AD 1997 NA NA 0 0 1 2 2 1 6 NA</span>
-<span class="co">#> 9 AD 1998 NA NA 0 0 0 1 0 0 0 NA</span>
-<span class="co">#> 10 AD 1999 NA NA 0 0 0 1 1 0 0 NA</span>
+<span class="co">#> 1 AD 1989 NA NA NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 2 AD 1990 NA NA NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 3 AD 1991 NA NA NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 4 AD 1992 NA NA NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 5 AD 1993 NA NA NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 6 AD 1994 NA NA NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 7 AD 1996 NA NA 0 0 0 4 1 0 0 NA</span>
+<span class="co">#> 8 AD 1997 NA NA 0 0 1 2 2 1 6 NA</span>
+<span class="co">#> 9 AD 1998 NA NA 0 0 0 1 0 0 0 NA</span>
+<span class="co">#> 10 AD 1999 NA NA 0 0 0 1 1 0 0 NA</span>
<span class="co">#> # ... with 5,759 more rows, and 10 more variables: f04 <int>, f514 <int>,</span>
-<span class="co">#> # f014 <int>, f1524 <int>, f2534 <int>, f3544 <int>, f4554 <int>,</span>
-<span class="co">#> # f5564 <int>, f65 <int>, fu <int></span></code></pre>
+<span class="co">#> # f014 <int>, f1524 <int>, f2534 <int>, f3544 <int>, f4554 <int>, f5564</span>
+<span class="co">#> # <int>, f65 <int>, fu <int></span></code></pre></div>
<p>First we gather up the non-variable columns:</p>
-<pre class="sourceCode r"><code class="sourceCode r">tb2 <-<span class="st"> </span>tb %>%<span class="st"> </span>
-<span class="st"> </span><span class="kw">gather</span>(demo, n, -iso2, -year, <span class="dt">na.rm =</span> <span class="ot">TRUE</span>)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">tb2 <-<span class="st"> </span>tb <span class="op">%>%</span><span class="st"> </span>
+<span class="st"> </span><span class="kw">gather</span>(demo, n, <span class="op">-</span>iso2, <span class="op">-</span>year, <span class="dt">na.rm =</span> <span class="ot">TRUE</span>)
tb2
<span class="co">#> # A tibble: 35,750 x 4</span>
-<span class="co">#> iso2 year demo n</span>
+<span class="co">#> iso2 year demo n</span>
<span class="co">#> * <chr> <int> <chr> <int></span>
-<span class="co">#> 1 AD 2005 m04 0</span>
-<span class="co">#> 2 AD 2006 m04 0</span>
-<span class="co">#> 3 AD 2008 m04 0</span>
-<span class="co">#> 4 AE 2006 m04 0</span>
-<span class="co">#> 5 AE 2007 m04 0</span>
-<span class="co">#> 6 AE 2008 m04 0</span>
-<span class="co">#> 7 AG 2007 m04 0</span>
-<span class="co">#> 8 AL 2005 m04 0</span>
-<span class="co">#> 9 AL 2006 m04 1</span>
-<span class="co">#> 10 AL 2007 m04 0</span>
-<span class="co">#> # ... with 35,740 more rows</span></code></pre>
+<span class="co">#> 1 AD 2005 m04 0</span>
+<span class="co">#> 2 AD 2006 m04 0</span>
+<span class="co">#> 3 AD 2008 m04 0</span>
+<span class="co">#> 4 AE 2006 m04 0</span>
+<span class="co">#> 5 AE 2007 m04 0</span>
+<span class="co">#> 6 AE 2008 m04 0</span>
+<span class="co">#> 7 AG 2007 m04 0</span>
+<span class="co">#> 8 AL 2005 m04 0</span>
+<span class="co">#> 9 AL 2006 m04 1</span>
+<span class="co">#> 10 AL 2007 m04 0</span>
+<span class="co">#> # ... with 35,740 more rows</span></code></pre></div>
<p>Column headers in this format are often separated by a non-alphanumeric character (e.g. <code>.</code>, <code>-</code>, <code>_</code>, <code>:</code>), or have a fixed width format, like in this dataset. <code>separate()</code> makes it easy to split a compound variables into individual variables. You can either pass it a regular expression to split on (the default is to split on non-alphanumeric columns), or a vector of character positions. In this case we want to split after the fi [...]
-<pre class="sourceCode r"><code class="sourceCode r">tb3 <-<span class="st"> </span>tb2 %>%<span class="st"> </span>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">tb3 <-<span class="st"> </span>tb2 <span class="op">%>%</span><span class="st"> </span>
<span class="st"> </span><span class="kw">separate</span>(demo, <span class="kw">c</span>(<span class="st">"sex"</span>, <span class="st">"age"</span>), <span class="dv">1</span>)
tb3
<span class="co">#> # A tibble: 35,750 x 5</span>
-<span class="co">#> iso2 year sex age n</span>
+<span class="co">#> iso2 year sex age n</span>
<span class="co">#> * <chr> <int> <chr> <chr> <int></span>
-<span class="co">#> 1 AD 2005 m 04 0</span>
-<span class="co">#> 2 AD 2006 m 04 0</span>
-<span class="co">#> 3 AD 2008 m 04 0</span>
-<span class="co">#> 4 AE 2006 m 04 0</span>
-<span class="co">#> 5 AE 2007 m 04 0</span>
-<span class="co">#> 6 AE 2008 m 04 0</span>
-<span class="co">#> 7 AG 2007 m 04 0</span>
-<span class="co">#> 8 AL 2005 m 04 0</span>
-<span class="co">#> 9 AL 2006 m 04 1</span>
-<span class="co">#> 10 AL 2007 m 04 0</span>
-<span class="co">#> # ... with 35,740 more rows</span></code></pre>
+<span class="co">#> 1 AD 2005 m 04 0</span>
+<span class="co">#> 2 AD 2006 m 04 0</span>
+<span class="co">#> 3 AD 2008 m 04 0</span>
+<span class="co">#> 4 AE 2006 m 04 0</span>
+<span class="co">#> 5 AE 2007 m 04 0</span>
+<span class="co">#> 6 AE 2008 m 04 0</span>
+<span class="co">#> 7 AG 2007 m 04 0</span>
+<span class="co">#> 8 AL 2005 m 04 0</span>
+<span class="co">#> 9 AL 2006 m 04 1</span>
+<span class="co">#> 10 AL 2007 m 04 0</span>
+<span class="co">#> # ... with 35,740 more rows</span></code></pre></div>
<p>Storing the values in this form resolves a problem in the original data. We want to compare rates, not counts, which means we need to know the population. In the original format, there is no easy way to add a population variable. It has to be stored in a separate table, which makes it hard to correctly match populations to counts. In tidy form, adding variables for population and rate is easy because they’re just additional columns.</p>
</div>
<div id="variables-are-stored-in-both-rows-and-columns" class="section level2">
<h2>Variables are stored in both rows and columns</h2>
<p>The most complicated form of messy data occurs when variables are stored in both rows and columns. The code below loads daily weather data from the Global Historical Climatology Network for one weather station (MX17004) in Mexico for five months in 2010.</p>
-<pre class="sourceCode r"><code class="sourceCode r">weather <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"weather.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>))
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">weather <-<span class="st"> </span><span class="kw">tbl_df</span>(<span class="kw">read.csv</span>(<span class="st">"weather.csv"</span>, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>))
weather
<span class="co">#> # A tibble: 22 x 35</span>
-<span class="co">#> id year month element d1 d2 d3 d4 d5 d6 d7</span>
-<span class="co">#> <chr> <int> <int> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl></span>
-<span class="co">#> 1 MX17004 2010 1 tmax NA NA NA NA NA NA NA</span>
-<span class="co">#> 2 MX17004 2010 1 tmin NA NA NA NA NA NA NA</span>
-<span class="co">#> 3 MX17004 2010 2 tmax NA 27.3 24.1 NA NA NA NA</span>
-<span class="co">#> 4 MX17004 2010 2 tmin NA 14.4 14.4 NA NA NA NA</span>
-<span class="co">#> 5 MX17004 2010 3 tmax NA NA NA NA 32.1 NA NA</span>
-<span class="co">#> 6 MX17004 2010 3 tmin NA NA NA NA 14.2 NA NA</span>
-<span class="co">#> 7 MX17004 2010 4 tmax NA NA NA NA NA NA NA</span>
-<span class="co">#> 8 MX17004 2010 4 tmin NA NA NA NA NA NA NA</span>
-<span class="co">#> 9 MX17004 2010 5 tmax NA NA NA NA NA NA NA</span>
-<span class="co">#> 10 MX17004 2010 5 tmin NA NA NA NA NA NA NA</span>
-<span class="co">#> # ... with 12 more rows, and 24 more variables: d8 <dbl>, d9 <lgl>,</span>
-<span class="co">#> # d10 <dbl>, d11 <dbl>, d12 <lgl>, d13 <dbl>, d14 <dbl>, d15 <dbl>,</span>
-<span class="co">#> # d16 <dbl>, d17 <dbl>, d18 <lgl>, d19 <lgl>, d20 <lgl>, d21 <lgl>,</span>
-<span class="co">#> # d22 <lgl>, d23 <dbl>, d24 <lgl>, d25 <dbl>, d26 <dbl>, d27 <dbl>,</span>
-<span class="co">#> # d28 <dbl>, d29 <dbl>, d30 <dbl>, d31 <dbl></span></code></pre>
+<span class="co">#> id year month elem… d1 d2 d3 d4 d5 d6 d7 d8</span>
+<span class="co">#> <chr> <int> <int> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl></span>
+<span class="co">#> 1 MX17… 2010 1 tmax NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 2 MX17… 2010 1 tmin NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 3 MX17… 2010 2 tmax NA 27.3 24.1 NA NA NA NA NA</span>
+<span class="co">#> 4 MX17… 2010 2 tmin NA 14.4 14.4 NA NA NA NA NA</span>
+<span class="co">#> 5 MX17… 2010 3 tmax NA NA NA NA 32.1 NA NA NA</span>
+<span class="co">#> 6 MX17… 2010 3 tmin NA NA NA NA 14.2 NA NA NA</span>
+<span class="co">#> 7 MX17… 2010 4 tmax NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 8 MX17… 2010 4 tmin NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 9 MX17… 2010 5 tmax NA NA NA NA NA NA NA NA</span>
+<span class="co">#> 10 MX17… 2010 5 tmin NA NA NA NA NA NA NA NA</span>
+<span class="co">#> # ... with 12 more rows, and 23 more variables: d9 <lgl>, d10 <dbl>, d11</span>
+<span class="co">#> # <dbl>, d12 <lgl>, d13 <dbl>, d14 <dbl>, d15 <dbl>, d16 <dbl>, d17</span>
+<span class="co">#> # <dbl>, d18 <lgl>, d19 <lgl>, d20 <lgl>, d21 <lgl>, d22 <lgl>, d23</span>
+<span class="co">#> # <dbl>, d24 <lgl>, d25 <dbl>, d26 <dbl>, d27 <dbl>, d28 <dbl>, d29</span>
+<span class="co">#> # <dbl>, d30 <dbl>, d31 <dbl></span></code></pre></div>
<p>It has variables in individual columns (<code>id</code>, <code>year</code>, <code>month</code>), spread across columns (<code>day</code>, d1-d31) and across rows (<code>tmin</code>, <code>tmax</code>) (minimum and maximum temperature). Months with fewer than 31 days have structural missing values for the last day(s) of the month.</p>
<p>To tidy this dataset we first gather the day columns:</p>
-<pre class="sourceCode r"><code class="sourceCode r">weather2 <-<span class="st"> </span>weather %>%
-<span class="st"> </span><span class="kw">gather</span>(day, value, d1:d31, <span class="dt">na.rm =</span> <span class="ot">TRUE</span>)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">weather2 <-<span class="st"> </span>weather <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">gather</span>(day, value, d1<span class="op">:</span>d31, <span class="dt">na.rm =</span> <span class="ot">TRUE</span>)
weather2
<span class="co">#> # A tibble: 66 x 6</span>
-<span class="co">#> id year month element day value</span>
-<span class="co">#> * <chr> <int> <int> <chr> <chr> <dbl></span>
-<span class="co">#> 1 MX17004 2010 12 tmax d1 29.9</span>
-<span class="co">#> 2 MX17004 2010 12 tmin d1 13.8</span>
-<span class="co">#> 3 MX17004 2010 2 tmax d2 27.3</span>
-<span class="co">#> 4 MX17004 2010 2 tmin d2 14.4</span>
-<span class="co">#> 5 MX17004 2010 11 tmax d2 31.3</span>
-<span class="co">#> 6 MX17004 2010 11 tmin d2 16.3</span>
-<span class="co">#> 7 MX17004 2010 2 tmax d3 24.1</span>
-<span class="co">#> 8 MX17004 2010 2 tmin d3 14.4</span>
-<span class="co">#> 9 MX17004 2010 7 tmax d3 28.6</span>
-<span class="co">#> 10 MX17004 2010 7 tmin d3 17.5</span>
-<span class="co">#> # ... with 56 more rows</span></code></pre>
+<span class="co">#> id year month element day value</span>
+<span class="co">#> * <chr> <int> <int> <chr> <chr> <dbl></span>
+<span class="co">#> 1 MX17004 2010 12 tmax d1 29.9</span>
+<span class="co">#> 2 MX17004 2010 12 tmin d1 13.8</span>
+<span class="co">#> 3 MX17004 2010 2 tmax d2 27.3</span>
+<span class="co">#> 4 MX17004 2010 2 tmin d2 14.4</span>
+<span class="co">#> 5 MX17004 2010 11 tmax d2 31.3</span>
+<span class="co">#> 6 MX17004 2010 11 tmin d2 16.3</span>
+<span class="co">#> 7 MX17004 2010 2 tmax d3 24.1</span>
+<span class="co">#> 8 MX17004 2010 2 tmin d3 14.4</span>
+<span class="co">#> 9 MX17004 2010 7 tmax d3 28.6</span>
+<span class="co">#> 10 MX17004 2010 7 tmin d3 17.5</span>
+<span class="co">#> # ... with 56 more rows</span></code></pre></div>
<p>For presentation, I’ve dropped the missing values, making them implicit rather than explicit. This is ok because we know how many days are in each month and can easily reconstruct the explicit missing values.</p>
<p>We’ll also do a little cleaning:</p>
-<pre class="sourceCode r"><code class="sourceCode r">weather3 <-<span class="st"> </span>weather2 %>%<span class="st"> </span>
-<span class="st"> </span><span class="kw">mutate</span>(<span class="dt">day =</span> <span class="kw">extract_numeric</span>(day)) %>%
-<span class="st"> </span><span class="kw">select</span>(id, year, month, day, element, value) %>%
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">weather3 <-<span class="st"> </span>weather2 <span class="op">%>%</span><span class="st"> </span>
+<span class="st"> </span><span class="kw">mutate</span>(<span class="dt">day =</span> <span class="kw">extract_numeric</span>(day)) <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">select</span>(id, year, month, day, element, value) <span class="op">%>%</span>
<span class="st"> </span><span class="kw">arrange</span>(id, year, month, day)
<span class="co">#> extract_numeric() is deprecated: please use readr::parse_number() instead</span>
weather3
<span class="co">#> # A tibble: 66 x 6</span>
-<span class="co">#> id year month day element value</span>
-<span class="co">#> <chr> <int> <int> <dbl> <chr> <dbl></span>
-<span class="co">#> 1 MX17004 2010 1 30 tmax 27.8</span>
-<span class="co">#> 2 MX17004 2010 1 30 tmin 14.5</span>
-<span class="co">#> 3 MX17004 2010 2 2 tmax 27.3</span>
-<span class="co">#> 4 MX17004 2010 2 2 tmin 14.4</span>
-<span class="co">#> 5 MX17004 2010 2 3 tmax 24.1</span>
-<span class="co">#> 6 MX17004 2010 2 3 tmin 14.4</span>
-<span class="co">#> 7 MX17004 2010 2 11 tmax 29.7</span>
-<span class="co">#> 8 MX17004 2010 2 11 tmin 13.4</span>
-<span class="co">#> 9 MX17004 2010 2 23 tmax 29.9</span>
-<span class="co">#> 10 MX17004 2010 2 23 tmin 10.7</span>
-<span class="co">#> # ... with 56 more rows</span></code></pre>
+<span class="co">#> id year month day element value</span>
+<span class="co">#> <chr> <int> <int> <dbl> <chr> <dbl></span>
+<span class="co">#> 1 MX17004 2010 1 30.0 tmax 27.8</span>
+<span class="co">#> 2 MX17004 2010 1 30.0 tmin 14.5</span>
+<span class="co">#> 3 MX17004 2010 2 2.00 tmax 27.3</span>
+<span class="co">#> 4 MX17004 2010 2 2.00 tmin 14.4</span>
+<span class="co">#> 5 MX17004 2010 2 3.00 tmax 24.1</span>
+<span class="co">#> 6 MX17004 2010 2 3.00 tmin 14.4</span>
+<span class="co">#> 7 MX17004 2010 2 11.0 tmax 29.7</span>
+<span class="co">#> 8 MX17004 2010 2 11.0 tmin 13.4</span>
+<span class="co">#> 9 MX17004 2010 2 23.0 tmax 29.9</span>
+<span class="co">#> 10 MX17004 2010 2 23.0 tmin 10.7</span>
+<span class="co">#> # ... with 56 more rows</span></code></pre></div>
<p>This dataset is mostly tidy, but the <code>element</code> column is not a variable; it stores the names of variables. (Not shown in this example are the other meteorological variables <code>prcp</code> (precipitation) and <code>snow</code> (snowfall)). Fixing this requires the spread operation. This performs the inverse of gathering by spreading the <code>element</code> and <code>value</code> columns back out into the columns:</p>
-<pre class="sourceCode r"><code class="sourceCode r">weather3 %>%<span class="st"> </span><span class="kw">spread</span>(element, value)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">weather3 <span class="op">%>%</span><span class="st"> </span><span class="kw">spread</span>(element, value)
<span class="co">#> # A tibble: 33 x 6</span>
-<span class="co">#> id year month day tmax tmin</span>
-<span class="co">#> * <chr> <int> <int> <dbl> <dbl> <dbl></span>
-<span class="co">#> 1 MX17004 2010 1 30 27.8 14.5</span>
-<span class="co">#> 2 MX17004 2010 2 2 27.3 14.4</span>
-<span class="co">#> 3 MX17004 2010 2 3 24.1 14.4</span>
-<span class="co">#> 4 MX17004 2010 2 11 29.7 13.4</span>
-<span class="co">#> 5 MX17004 2010 2 23 29.9 10.7</span>
-<span class="co">#> 6 MX17004 2010 3 5 32.1 14.2</span>
-<span class="co">#> 7 MX17004 2010 3 10 34.5 16.8</span>
-<span class="co">#> 8 MX17004 2010 3 16 31.1 17.6</span>
-<span class="co">#> 9 MX17004 2010 4 27 36.3 16.7</span>
-<span class="co">#> 10 MX17004 2010 5 27 33.2 18.2</span>
-<span class="co">#> # ... with 23 more rows</span></code></pre>
+<span class="co">#> id year month day tmax tmin</span>
+<span class="co">#> * <chr> <int> <int> <dbl> <dbl> <dbl></span>
+<span class="co">#> 1 MX17004 2010 1 30.0 27.8 14.5</span>
+<span class="co">#> 2 MX17004 2010 2 2.00 27.3 14.4</span>
+<span class="co">#> 3 MX17004 2010 2 3.00 24.1 14.4</span>
+<span class="co">#> 4 MX17004 2010 2 11.0 29.7 13.4</span>
+<span class="co">#> 5 MX17004 2010 2 23.0 29.9 10.7</span>
+<span class="co">#> 6 MX17004 2010 3 5.00 32.1 14.2</span>
+<span class="co">#> 7 MX17004 2010 3 10.0 34.5 16.8</span>
+<span class="co">#> 8 MX17004 2010 3 16.0 31.1 17.6</span>
+<span class="co">#> 9 MX17004 2010 4 27.0 36.3 16.7</span>
+<span class="co">#> 10 MX17004 2010 5 27.0 33.2 18.2</span>
+<span class="co">#> # ... with 23 more rows</span></code></pre></div>
<p>This form is tidy: there’s one variable in each column, and each row represents one day.</p>
</div>
<div id="multiple-types" class="section level2">
@@ -426,45 +441,45 @@ weather3
<p>Datasets often involve values collected at multiple levels, on different types of observational units. During tidying, each type of observational unit should be stored in its own table. This is closely related to the idea of database normalisation, where each fact is expressed in only one place. It’s important because otherwise inconsistencies can arise.</p>
<p>The billboard dataset actually contains observations on two types of observational units: the song and its rank in each week. This manifests itself through the duplication of facts about the song: <code>artist</code>, <code>year</code> and <code>time</code> are repeated many times.</p>
<p>This dataset needs to be broken down into two pieces: a song dataset which stores <code>artist</code>, <code>song name</code> and <code>time</code>, and a ranking dataset which gives the <code>rank</code> of the <code>song</code> in each <code>week</code>. We first extract a <code>song</code> dataset:</p>
-<pre class="sourceCode r"><code class="sourceCode r">song <-<span class="st"> </span>billboard3 %>%<span class="st"> </span>
-<span class="st"> </span><span class="kw">select</span>(artist, track, year, time) %>%
-<span class="st"> </span><span class="kw">unique</span>() %>%
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">song <-<span class="st"> </span>billboard3 <span class="op">%>%</span><span class="st"> </span>
+<span class="st"> </span><span class="kw">select</span>(artist, track, year, time) <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">unique</span>() <span class="op">%>%</span>
<span class="st"> </span><span class="kw">mutate</span>(<span class="dt">song_id =</span> <span class="kw">row_number</span>())
song
<span class="co">#> # A tibble: 317 x 5</span>
-<span class="co">#> artist track year time song_id</span>
-<span class="co">#> <chr> <chr> <int> <chr> <int></span>
-<span class="co">#> 1 2 Pac Baby Don't Cry (Keep... 2000 4:22 1</span>
-<span class="co">#> 2 2Ge+her The Hardest Part Of ... 2000 3:15 2</span>
-<span class="co">#> 3 3 Doors Down Kryptonite 2000 3:53 3</span>
-<span class="co">#> 4 3 Doors Down Loser 2000 4:24 4</span>
-<span class="co">#> 5 504 Boyz Wobble Wobble 2000 3:35 5</span>
-<span class="co">#> 6 98^0 Give Me Just One Nig... 2000 3:24 6</span>
-<span class="co">#> 7 A*Teens Dancing Queen 2000 3:44 7</span>
-<span class="co">#> 8 Aaliyah I Don't Wanna 2000 4:15 8</span>
-<span class="co">#> 9 Aaliyah Try Again 2000 4:03 9</span>
-<span class="co">#> 10 Adams, Yolanda Open My Heart 2000 5:30 10</span>
-<span class="co">#> # ... with 307 more rows</span></code></pre>
+<span class="co">#> artist track year time song_id</span>
+<span class="co">#> <chr> <chr> <int> <chr> <int></span>
+<span class="co">#> 1 2 Pac Baby Don't Cry (Keep... 2000 4:22 1</span>
+<span class="co">#> 2 2Ge+her The Hardest Part Of ... 2000 3:15 2</span>
+<span class="co">#> 3 3 Doors Down Kryptonite 2000 3:53 3</span>
+<span class="co">#> 4 3 Doors Down Loser 2000 4:24 4</span>
+<span class="co">#> 5 504 Boyz Wobble Wobble 2000 3:35 5</span>
+<span class="co">#> 6 98^0 Give Me Just One Nig... 2000 3:24 6</span>
+<span class="co">#> 7 A*Teens Dancing Queen 2000 3:44 7</span>
+<span class="co">#> 8 Aaliyah I Don't Wanna 2000 4:15 8</span>
+<span class="co">#> 9 Aaliyah Try Again 2000 4:03 9</span>
+<span class="co">#> 10 Adams, Yolanda Open My Heart 2000 5:30 10</span>
+<span class="co">#> # ... with 307 more rows</span></code></pre></div>
<p>Then use that to make a <code>rank</code> dataset by replacing repeated song facts with a pointer to song details (a unique song id):</p>
-<pre class="sourceCode r"><code class="sourceCode r">rank <-<span class="st"> </span>billboard3 %>%
-<span class="st"> </span><span class="kw">left_join</span>(song, <span class="kw">c</span>(<span class="st">"artist"</span>, <span class="st">"track"</span>, <span class="st">"year"</span>, <span class="st">"time"</span>)) %>%
-<span class="st"> </span><span class="kw">select</span>(song_id, date, week, rank) %>%
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">rank <-<span class="st"> </span>billboard3 <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">left_join</span>(song, <span class="kw">c</span>(<span class="st">"artist"</span>, <span class="st">"track"</span>, <span class="st">"year"</span>, <span class="st">"time"</span>)) <span class="op">%>%</span>
+<span class="st"> </span><span class="kw">select</span>(song_id, date, week, rank) <span class="op">%>%</span>
<span class="st"> </span><span class="kw">arrange</span>(song_id, date)
rank
<span class="co">#> # A tibble: 5,307 x 4</span>
-<span class="co">#> song_id date week rank</span>
-<span class="co">#> <int> <date> <dbl> <int></span>
-<span class="co">#> 1 1 2000-02-26 1 87</span>
-<span class="co">#> 2 1 2000-03-04 2 82</span>
-<span class="co">#> 3 1 2000-03-11 3 72</span>
-<span class="co">#> 4 1 2000-03-18 4 77</span>
-<span class="co">#> 5 1 2000-03-25 5 87</span>
-<span class="co">#> 6 1 2000-04-01 6 94</span>
-<span class="co">#> 7 1 2000-04-08 7 99</span>
-<span class="co">#> 8 2 2000-09-02 1 91</span>
-<span class="co">#> 9 2 2000-09-09 2 87</span>
-<span class="co">#> 10 2 2000-09-16 3 92</span>
-<span class="co">#> # ... with 5,297 more rows</span></code></pre>
+<span class="co">#> song_id date week rank</span>
+<span class="co">#> <int> <date> <dbl> <int></span>
+<span class="co">#> 1 1 2000-02-26 1.00 87</span>
+<span class="co">#> 2 1 2000-03-04 2.00 82</span>
+<span class="co">#> 3 1 2000-03-11 3.00 72</span>
+<span class="co">#> 4 1 2000-03-18 4.00 77</span>
+<span class="co">#> 5 1 2000-03-25 5.00 87</span>
+<span class="co">#> 6 1 2000-04-01 6.00 94</span>
+<span class="co">#> 7 1 2000-04-08 7.00 99</span>
+<span class="co">#> 8 2 2000-09-02 1.00 91</span>
+<span class="co">#> 9 2 2000-09-09 2.00 87</span>
+<span class="co">#> 10 2 2000-09-16 3.00 92</span>
+<span class="co">#> # ... with 5,297 more rows</span></code></pre></div>
<p>You could also imagine a <code>week</code> dataset which would record background information about the week, maybe the total number of songs sold or similar “demographic” information.</p>
<p>Normalisation is useful for tidying and eliminating inconsistencies. However, there are few data analysis tools that work directly with relational data, so analysis usually also requires denormalisation, or merging the datasets back into one table.</p>
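<p>(A minimal sketch of that denormalisation, assuming the <code>song</code> and <code>rank</code> tables created above: a single <code>dplyr::left_join()</code> on <code>song_id</code> merges them back into one table.)</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># Sketch only: merge the normalised tables back into one denormalised table for analysis.
rank %>%
  left_join(song, by = "song_id")</code></pre></div>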
</div>
@@ -477,10 +492,10 @@ rank
<li><p>Combine all tables into a single table.</p></li>
</ol>
<p>Plyr makes this straightforward in R. The following code generates a vector of file names in a directory (<code>data/</code>) that match a regular expression (ends in <code>.csv</code>). Next we name each element of the vector with the name of the file. We do this because the names will be preserved in the following step, ensuring that each row in the final data frame is labeled with its source. Finally, <code>ldply()</code> loops over each path, reading in the csv file and combining the [...]
-<pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(plyr)
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(plyr)
paths <-<span class="st"> </span><span class="kw">dir</span>(<span class="st">"data"</span>, <span class="dt">pattern =</span> <span class="st">"</span><span class="ch">\\</span><span class="st">.csv$"</span>, <span class="dt">full.names =</span> <span class="ot">TRUE</span>)
<span class="kw">names</span>(paths) <-<span class="st"> </span><span class="kw">basename</span>(paths)
-<span class="kw">ldply</span>(paths, read.csv, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>)</code></pre>
+<span class="kw">ldply</span>(paths, read.csv, <span class="dt">stringsAsFactors =</span> <span class="ot">FALSE</span>)</code></pre></div>
<p>Once you have a single table, you can perform additional tidying as needed. An example of this type of cleaning can be found at <a href="https://github.com/hadley/data-baby-names" class="uri">https://github.com/hadley/data-baby-names</a> which takes 129 yearly baby name tables provided by the US Social Security Administration and combines them into a single file.</p>
<p>A more complicated situation occurs when the dataset structure changes over time. For example, the datasets may contain different variables, the same variables with different names, different file formats, or different conventions for missing values. This may require you to tidy each file individually (or, if you’re lucky, in small groups) and then combine them once tidied. An example of this type of tidying is illustrated in <a href="https://github.com/hadley/data-fuel-economy" cl [...]
</div>
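<p>(A minimal sketch of the tidy-then-combine approach just described: <code>tidy_one()</code> is a hypothetical per-file cleaning function, and the combination step reuses plyr's <code>ldply()</code> as above.)</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># Sketch only: tidy_one() stands in for whatever per-file fixes a given vintage of data needs.
library(plyr)
tidy_one <- function(path) {
  raw <- read.csv(path, stringsAsFactors = FALSE)
  # per-file renaming, recoding, or missing-value fixes would go here
  raw
}
paths <- dir("data", pattern = "\\.csv$", full.names = TRUE)
names(paths) <- basename(paths)
ldply(paths, tidy_one)</code></pre></div>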
diff --git a/src/melt.cpp b/src/melt.cpp
index d1d1616..3cce1e8 100644
--- a/src/melt.cpp
+++ b/src/melt.cpp
@@ -33,6 +33,15 @@ SEXP rep_(SEXP x, int n, std::string var_name) {
case REALSXP:
DO_REP(REALSXP, double, REAL);
break;
+ case LGLSXP:
+ DO_REP(LGLSXP, int, LOGICAL);
+ break;
+ case CPLXSXP:
+ DO_REP(CPLXSXP, Rcomplex, COMPLEX);
+ break;
+ case RAWSXP:
+ DO_REP(RAWSXP, Rbyte, RAW);
+ break;
case STRSXP: {
int counter = 0;
for (int i = 0; i < n; ++i) {
@@ -43,18 +52,16 @@ SEXP rep_(SEXP x, int n, std::string var_name) {
}
break;
}
- case LGLSXP:
- DO_REP(LGLSXP, int, LOGICAL);
- break;
- case CPLXSXP:
- DO_REP(CPLXSXP, Rcomplex, COMPLEX);
- break;
- case RAWSXP:
- DO_REP(RAWSXP, Rbyte, RAW);
- break;
- case VECSXP:
- DO_REP(VECSXP, SEXP, STRING_PTR);
+ case VECSXP: {
+ int counter = 0;
+ for (int i = 0; i < n; ++i) {
+ for (int j = 0; j < xn; ++j) {
+ SET_VECTOR_ELT(output, counter, VECTOR_ELT(x, j));
+ ++counter;
+ }
+ }
break;
+ }
default: {
stop("Unhandled RTYPE in '%s'", var_name);
return R_NilValue;
diff --git a/tests/testthat/test-gather.R b/tests/testthat/test-gather.R
index 238ec8c..ed1b82a 100644
--- a/tests/testthat/test-gather.R
+++ b/tests/testthat/test-gather.R
@@ -122,7 +122,6 @@ test_that("gather preserves OBJECT bit on e.g. POSIXct", {
test_that("can handle list-columns", {
df <- tibble(x = 1:2, y = list("a", TRUE))
out <- gather(df, k, v, -y)
-
expect_identical(out$y, df$y)
})
diff --git a/tests/testthat/test-underscored.R b/tests/testthat/test-underscored.R
index b29729e..69cd129 100644
--- a/tests/testthat/test-underscored.R
+++ b/tests/testthat/test-underscored.R
@@ -66,6 +66,10 @@ test_that("gather_() works with non-syntactic names", {
gather(df, key, val, `non-syntactic`),
gather_(df, "key", "val", "non-syntactic")
)
+ expect_identical(
+ gather(df, `key space`, `val space`, `non-syntactic`),
+ gather_(df, "key space", "val space", "non-syntactic")
+ )
})
test_that("nest_()", {
@@ -73,6 +77,14 @@ test_that("nest_()", {
expect_identical(nest_(df, "y", "y"), nest(df, y, .key = y))
})
+test_that("nest_() works with non-syntactic names", {
+ df <- tibble(`x` = c(1, 1, 1), `non-syntactic` = 1:3)
+ expect_identical(
+ nest_(df, "non-syntactic", "non-syntactic"),
+ nest(df, `non-syntactic`, .key = `non-syntactic`)
+ )
+})
+
test_that("separate_()", {
df <- tibble(x = c(NA, "a b"))
out <- separate_(df, "x", c("x", "y"))
@@ -94,11 +106,19 @@ test_that("separate_rows() works with non-syntactic names", {
test_that("spread_()", {
df1 <- data.frame(x = c("a", "b"), y = 1:2)
df2 <- data.frame(x = c("b", "a"), y = 2:1)
- one <- spread_(df1, "x", ~y)
- two <- spread_(df2, "x", ~y) %>% select(a, b) %>% arrange(a, b)
+ one <- spread_(df1, "x", "y")
+ two <- spread_(df2, "x", "y") %>% select(a, b) %>% arrange(a, b)
expect_identical(one, two)
})
+test_that("spread_() works with non-syntactic names", {
+ df <- tibble(`non-syntactic` = c("a", "b"), `non syntactic` = 1:2)
+ expect_identical(
+ spread(df, `non-syntactic`, `non syntactic`),
+ spread_(df, "non-syntactic", "non syntactic")
+ )
+})
+
test_that("unite_()", {
df <- tibble(x = "a", y = "b")
out <- unite_(df, "z", c("x", "y"))
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-cran-tidyr.git