# Coursera Getting and Cleaning Data
# Course Project
#
# One of the most exciting areas in all of data science right now is wearable computing -
# see, for example, this article. Companies like Fitbit, Nike, and Jawbone Up are racing to
# develop the most advanced algorithms to attract new users. The data linked to from the
# course website represent data collected from the accelerometers of the Samsung Galaxy S
# smartphone. A full description is available at the site where the data were obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# You should create one R script called run_analysis.R that does the following:
#
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the
#    average of each variable for each activity and each subject.
#############################################################################################
# setup
# set working directory [adjust this path for your own machine]
path_wd <- "C:\\Users\\Frank\\Documents\\Coursera\\Getting_And_Cleaning_Data\\CourseProject"
setwd(path_wd)
# load required packages
library(dplyr)
library(reshape2)
#############################################################################################
# download and extract datasets
# download raw data
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, "dataset.zip")
dateDownloaded <- date()
list.files("./")
# unzip downloaded data
unzip("dataset.zip")
list.files("./")
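# expected layout of the unzipped archive [a sketch for orientation; the paths built
# below assume exactly this structure]:
#   UCI HAR Dataset/
#     activity_labels.txt    [6 activity ids and names]
#     features.txt           [561 feature names, one per column of X_*.txt]
#     test/   subject_test.txt, X_test.txt, y_test.txt
#     train/  subject_train.txt, X_train.txt, y_train.txt
#     [the Inertial Signals/ subfolders in test/ and train/ are not used here]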
# set paths to unzipped raw data
path_test <- paste(path_wd, "\\UCI HAR Dataset\\test", sep = "")
path_train <- paste(path_wd, "\\UCI HAR Dataset\\train", sep = "")
#############################################################################################
# read in datasets
# set paths to common files
file_activity <- paste(path_wd, "\\UCI HAR Dataset\\activity_labels.txt", sep = "")
file_features <- paste(path_wd, "\\UCI HAR Dataset\\features.txt", sep = "")
# read in common files
activity <- read.table(file_activity)
# activity <- tbl_df(activity)
features <- read.table(file_features)
# features <- tbl_df(features)
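# for orientation, the first rows of these files should look like this [values per the
# UCI HAR dataset documentation]:
#   activity: V1 = 1..6,   V2 = WALKING, WALKING_UPSTAIRS, WALKING_DOWNSTAIRS, SITTING, ...
#   features: V1 = 1..561, V2 = tBodyAcc-mean()-X, tBodyAcc-mean()-Y, tBodyAcc-mean()-Z, ...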
# set paths to test files
file_subject_test <- paste(path_test, "\\subject_test.txt", sep = "")
file_x_test <- paste(path_test, "\\X_test.txt", sep = "")
file_y_test <- paste(path_test, "\\y_test.txt", sep = "")
# set paths to train files
file_subject_train <- paste(path_train, "\\subject_train.txt", sep = "")
file_x_train <- paste(path_train, "\\X_train.txt", sep = "")
file_y_train <- paste(path_train, "\\y_train.txt", sep = "")
# read in test data
data_subject_test <- tbl_df(read.table(file_subject_test))
data_x_test <- tbl_df(read.table(file_x_test))
data_y_test <- tbl_df(read.table(file_y_test))
# read in train data
data_subject_train <- tbl_df(read.table(file_subject_train))
data_x_train <- tbl_df(read.table(file_x_train))
data_y_train <- tbl_df(read.table(file_y_train))
# clean up interim data
rm(path_test, path_train,
   file_activity, file_features,
   file_subject_test, file_x_test, file_y_test,
   file_subject_train, file_x_train, file_y_train)
#############################################################################################
# format datasets
# assign subject variable name and source dataset value
names(data_subject_test) <- "subject"
data_subject_test <- mutate(data_subject_test, dataset = "test")
names(data_subject_train) <- "subject"
data_subject_train <- mutate(data_subject_train, dataset = "train")
# assign x data variable names from features.txt
var_names <- make.names(features$V2, unique = TRUE)
names(data_x_test) <- var_names
names(data_x_train) <- var_names
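# make.names() replaces characters that are illegal in R names with ".", e.g.
# [illustrative examples]:
#   "tBodyAcc-mean()-X"    -> "tBodyAcc.mean...X"
#   "angle(X,gravityMean)" -> "angle.X.gravityMean."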
# document original and modified variable names for codebook.txt
column_names <- cbind(features, var_names)
names(column_names) <- c("features.txt_column_number", "features.txt_variable_name", "tidy.txt_variable_name")
column_names <- tbl_df(column_names)
# join y data to activity labels [explicit key: V1 holds the activity id in both tables]
data_y_test_join_activity <- inner_join(data_y_test, activity, by = "V1")
data_y_train_join_activity <- inner_join(data_y_train, activity, by = "V1")
# assign y data variable names
names(data_y_test_join_activity) <- c("activity_id", "activity")
names(data_y_train_join_activity) <- c("activity_id", "activity")
# combine subject, x and y datasets
data_test_bind <- cbind(data_subject_test, data_y_test_join_activity, data_x_test)
data_train_bind <- cbind(data_subject_train, data_y_train_join_activity, data_x_train)
# merge formatted test and train datasets
data_all <- rbind(data_test_bind, data_train_bind)
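# optional sanity check: with the archive downloaded above, the merged data should have
# 2947 test + 7352 train = 10299 rows and 565 columns [subject, dataset, activity_id,
# activity, plus the 561 feature measurements]
stopifnot(nrow(data_all) == 10299, ncol(data_all) == 565)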
# clean up interim data
rm(activity, features, var_names,
   data_subject_test, data_subject_train,
   data_x_test, data_x_train,
   data_y_test, data_y_train,
   data_y_test_join_activity, data_y_train_join_activity,
   data_test_bind, data_train_bind)
#############################################################################################
# filter merged dataset
# keep only subject, activity, and the mean/std measurements; contains("mean") also
# matches meanFreq() and the angle(...) variables, so those are excluded explicitly
data_all_select <- select(data_all, subject, activity, contains("mean"), contains("std"),
-contains("freq"), -contains("angle"))
data_all_select <- tbl_df(data_all_select)
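# illustration of what the selection keeps and drops [names shown after make.names()]:
#   kept:    tBodyAcc.mean...X, tBodyAcc.std...X, tGravityAcc.mean...Y, ...
#   dropped: fBodyAcc.meanFreq...X [freq], angle.tBodyAccMean.gravity. [angle]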
# match the selected variables back to the original features.txt names for codebook.txt
column_names_select <- data.frame(names(data_all_select))
names(column_names_select) <- "tidy.txt_variable_name"
column_names_selected <- left_join(column_names_select, column_names)
# clean up interim data
rm(column_names_select)
rm(data_all)
#############################################################################################
# summarize and output tidy data
# for each combination of subject and activity: calculate the mean of each variable
data_melt <- melt(data_all_select, id.vars = c("subject", "activity"))
data_summary <- dcast(data_melt, activity + subject ~ variable, mean)
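# how the reshape works: melt() stacks the measurement columns into long rows of
# (subject, activity, variable, value); dcast() then applies mean() to each
# activity x subject x variable cell, so data_summary should have
# 6 activities x 30 subjects = 180 rows and one column per selected measurement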
# rename the measurement variables to flag that each value is now a group average
ds_rename <- paste("mean[", names(data_summary), "]", sep = "")
ds_rename[1:2] <- names(data_summary)[1:2]  # leave the activity and subject columns as-is
names(data_summary) <- ds_rename
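# e.g. "tBodyAcc.mean...X" becomes "mean[tBodyAcc.mean...X]"; the first two columns
# [activity, subject] keep their original names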
# save out summarized data to text file "tidy.txt" and variable names "codebook.txt" [delimiter = " "]
path_file_out <- paste(path_wd, "\\UCI HAR Dataset", sep = "")
write.table(data_summary, paste(path_file_out, "\\tidy.txt", sep = ""), row.names = FALSE)
write.table(column_names_selected, paste(path_file_out, "\\codebook.txt", sep = ""), row.names = FALSE)
# clean up interim data
rm(data_melt)
#############################################################################################
# review output data [check.names = FALSE preserves the bracketed mean[...] names, which
# make.names would otherwise mangle on read-back]
# output_data <- read.table(paste(path_file_out, "\\tidy.txt", sep = ""), header = TRUE, check.names = FALSE)
# output_codebook <- read.table(paste(path_file_out, "\\codebook.txt", sep = ""), header = TRUE)