## set up and load things
setwd("H:/Bibliometrics/Training/Text mining")
source("H:/Bibliometrics/R/Functions/pubmedXML.R")   # custom helpers used below: clean_api_xml() and extract_xml()
mystopwords <- scan("H:/Bibliometrics/R/Functions/stopwords.txt", what = character(), skip = 1)   # custom stopword list, first line skipped
library(reutils)
## get and clean data
mquery <- "zika virus[mh] AND 2016[dp]"
pmids <- esearch(mquery, db = "pubmed", usehistory = TRUE)   # store the result set on the NCBI history server
pmids   # print to check the hit count
rxml <- efetch(pmids, retmode = "xml", outfile = "zikaPubs_raw.xml")
cxml <- clean_api_xml(rxml, outfile = "zikaPubs_clean.xml")
theData <- extract_xml(cxml)
rm(pmids, rxml, cxml)
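## optional sanity check (not in the original demo): confirm how many records were
## extracted and which fields the custom extract_xml() helper returned
nrow(theData)
names(theData)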
## count functions on basic metadata
library(plyr)
journals <- count(theData$journal)            # plyr::count returns a data frame with columns x and freq
journals <- journals[order(journals$freq),]   # sort ascending so tail() shows the most frequent
tail(journals, 10)
mesh <- count(unlist(strsplit(theData$meshHeadings, "|", fixed = TRUE)))   # headings are "|"-delimited per record
mesh <- mesh[order(mesh$freq),]
tail(mesh, 25)
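## optional sketch (base graphics, not in the original demo): a quick bar chart of
## the ten most frequent MeSH headings
topMesh <- tail(mesh, 10)
barplot(topMesh$freq, names.arg = as.character(topMesh$x), horiz = TRUE, las = 1, cex.names = 0.6)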
## set up the dtm: one "document" per record = title + abstract
## (stringsAsFactors = FALSE keeps the text column character for DataframeSource on R < 4.0)
docs <- data.frame(doc_id = theData$pmid, text = paste(theData$articletitle, theData$abstract, sep = ". "), stringsAsFactors = FALSE)
library(tm)
library(slam)
corpDocs <- Corpus(DataframeSource(docs))
corpDocs <- tm_map(corpDocs, removePunctuation)
corpDocs <- tm_map(corpDocs, content_transformer(tolower))
corpDocs <- tm_map(corpDocs, removeWords, stopwords("english"))   # the language name must be lowercase
corpDocs <- tm_map(corpDocs, removeWords, mystopwords)            # then the custom list
corpDocs <- tm_map(corpDocs, stemDocument)                        # Porter stemmer (needs the SnowballC package)
corpDocs <- tm_map(corpDocs, stripWhitespace)
dtm <- DocumentTermMatrix(corpDocs)
rownames(dtm) <- theData$pmid          # reassert PMIDs as document IDs (DataframeSource already set them via doc_id)
dtm <- dtm[row_sums(dtm) > 0,]         # drop documents left with no terms
dtm
dtm <- removeSparseTerms(dtm, 0.995)   # drop terms absent from more than 99.5% of documents
dtm <- dtm[row_sums(dtm) > 0,]         # that can empty some documents, so drop again
dtm
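## optional: tm's inspect() prints a corner of the matrix if you want to eyeball it
inspect(dtm[1:5, 1:8])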
## find terms appearing at least 100 times overall
findFreqTerms(dtm, 100)
## find the most frequent terms in each document
tDocs <- findMostFreqTerms(dtm)
tDocs[[5]]         # index by position...
tDocs[[52]]
tDocs$`28277248`   # ...or by PMID
## top terms by overall frequency
theTerms <- col_sums(dtm)   # total occurrences of each term across the corpus
theTerms <- sort(theTerms)
tail(theTerms, 50)
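## optional sketch (assumes the wordcloud package is installed; not part of the
## original demo): the same frequencies feed straight into a word cloud
library(wordcloud)
wordcloud(names(theTerms), theTerms, max.words = 50)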
## top terms by number of documents (document frequency)
terms2 <- col_sums(dtm > 0)   # counts each term once per document
terms2 <- sort(terms2)
tail(terms2, 50)
## find term associations (note the stemmed forms, e.g. "microcephali", "dengu")
assoc1 <- findAssocs(dtm, "microcephali", 0.25)
assoc1
assoc2 <- findAssocs(dtm, "mosquito", 0.25)
assoc2
assoc3 <- findAssocs(dtm, c("dengu", "flavivirus"), 0.25)
assoc3
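## optional (not in the original demo): findAssocs returns a named list of
## correlation vectors; one entry flattens to a data frame for easier export
data.frame(term = names(assoc1$microcephali), cor = assoc1$microcephali)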
## cluster by topic with LDA
library(topicmodels)
seed <- list(106, 375, 519, 781, 944)   # one seed per start (nstart = 5) for reproducibility
ldaOut <- LDA(dtm, 10, method = "Gibbs", control = list(nstart = 5, seed = seed, best = TRUE, burnin = 4000, iter = 2000, thin = 500))   # 10 topics; keep the best of 5 Gibbs runs
docTopics <- topics(ldaOut)   # single most likely topic per document
docTopics <- data.frame(pmid = names(docTopics), primaryTopic = docTopics)
write.csv(docTopics, file = "docstotopics.csv")
topicTerms <- terms(ldaOut, 15)
write.csv(topicTerms, file = "topicsToTerms.csv")
topicTerms
topicProbs <- as.data.frame(ldaOut@gamma)   # per-document topic probabilities (gamma)
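## optional (not in the original demo): gamma rows follow the row order of dtm,
## so we can label them with their PMIDs before writing
rownames(topicProbs) <- rownames(dtm)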
write.csv(topicProbs, file = "topicProbabilities.csv")
## documents can belong to more than one topic: keep every topic with gamma > 0.15
topicList <- topics(ldaOut, threshold = 0.15)
topicList <- sapply(topicList, paste0, collapse = "|")   # collapse each vector to "t1|t2|..."
topicList <- data.frame(pmid = names(topicList), topicList = topicList)
## merge the topic assignments back onto the bibliographic data
newData <- merge(theData, docTopics, by = "pmid", all.x = TRUE)   # all.x keeps records that were dropped from the dtm
newData <- merge(newData, topicList, by = "pmid", all.x = TRUE)
pubsPerTopic <- count(newData$primaryTopic)
pubsPerTopic
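## optional sketch (not in the original demo): visualize how the publications
## spread across the ten topics
barplot(pubsPerTopic$freq, names.arg = pubsPerTopic$x, xlab = "primary topic", ylab = "publications")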
write.csv(newData, file = "clustered_data.csv")