├── .gitignore ├── docs ├── pkgdown.yml ├── link.svg ├── docsearch.js ├── pkgdown.js ├── 404.html ├── authors.html ├── index.html ├── pkgdown.css ├── reference │ ├── doiref.html │ ├── index.html │ ├── getrss.html │ ├── getpubmedtbl.html │ ├── getgsauthor.html │ └── getpubmed.html └── docsearch.css ├── .Rbuildignore ├── NAMESPACE ├── .travis.yml ├── man ├── doiref.Rd ├── getrss.Rd ├── getpubmedtbl.Rd ├── getpubmed.Rd └── getgsauthor.Rd ├── scifetch.Rproj ├── DESCRIPTION └── R ├── doi.R ├── pubmed.R ├── rss.R └── googlescholar.R /.gitignore: -------------------------------------------------------------------------------- 1 | packrat/lib*/ 2 | .Rproj.user 3 | .Rbuildignore 4 | .Rhistory 5 | .gitignore 6 | -------------------------------------------------------------------------------- /docs/pkgdown.yml: -------------------------------------------------------------------------------- 1 | pandoc: 2.3.1 2 | pkgdown: 1.4.1 3 | pkgdown_sha: ~ 4 | articles: [] 5 | 6 | -------------------------------------------------------------------------------- /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^packrat/ 2 | ^\.Rprofile$ 3 | ^.*\.Rproj$ 4 | ^\.Rproj\.user$ 5 | .travis.yml 6 | ^docs$ 7 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | export(doiref) 4 | export(getgsauthor) 5 | export(getpubmed) 6 | export(getpubmedtbl) 7 | export(getrss) 8 | importFrom(dplyr,"%>%") 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: r 2 | sudo: required 3 | cran: https://cran.rstudio.com 4 | 5 | cache: 6 | packages: true 7 | 8 | env: 9 | global: 10 | - NOT_CRAN: true 11 | - R_CHECK_ARGS="--as-cran --timings" 12 | - 
R_CHECK_TIME="TRUE" 13 | - R_CHECK_TESTS="TRUE" 14 | - _R_CHECK_TIMINGS_="0" 15 | -------------------------------------------------------------------------------- /man/doiref.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/doi.R 3 | \name{doiref} 4 | \alias{doiref} 5 | \title{Fetch Bibliographies via DOI} 6 | \usage{ 7 | doiref(path, style = "apa") 8 | } 9 | \arguments{ 10 | \item{path}{file path} 11 | 12 | \item{style}{citaiton style, default apa} 13 | } 14 | \description{ 15 | Fetch Bibliographies via DOI 16 | } 17 | -------------------------------------------------------------------------------- /scifetch.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 8 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: XeLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | -------------------------------------------------------------------------------- /man/getrss.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rss.R 3 | \name{getrss} 4 | \alias{getrss} 5 | \title{Fetch rss into tibble} 6 | \usage{ 7 | getrss(feed) 8 | } 9 | \arguments{ 10 | \item{feed}{feed address} 11 | } 12 | \value{ 13 | tibble object 14 | } 15 | \description{ 16 | Fetch rss into tibble 17 | } 18 | \examples{ 19 | \dontrun{ 20 | library(scifetch) 21 | feed <- 'yihui.name/index.xml' 22 | z <- getrss(feed) 23 | } 24 | } 25 | \seealso{ 26 | getpubmed 27 | } 28 | 
-------------------------------------------------------------------------------- /man/getpubmedtbl.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pubmed.R 3 | \name{getpubmedtbl} 4 | \alias{getpubmedtbl} 5 | \title{Get the basic data from xml} 6 | \usage{ 7 | getpubmedtbl(xml2) 8 | } 9 | \arguments{ 10 | \item{xml2}{the xml object from getpubmed} 11 | } 12 | \value{ 13 | tibble object 14 | } 15 | \description{ 16 | Get the basic data from xml 17 | } 18 | \examples{ 19 | \dontrun{ 20 | library(scifetch) 21 | query <- 'janusz pawliszyn[AU]' 22 | z <- getpubmed(query) 23 | z2 <- getpubmedtbl(z)} 24 | } 25 | \seealso{ 26 | getpubmed 27 | } 28 | -------------------------------------------------------------------------------- /man/getpubmed.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pubmed.R 3 | \name{getpubmed} 4 | \alias{getpubmed} 5 | \title{Fetch xml file from pubmed} 6 | \usage{ 7 | getpubmed(query, start = 1, end = 100) 8 | } 9 | \arguments{ 10 | \item{query}{the NCBI Entrez for search} 11 | 12 | \item{start}{begin item} 13 | 14 | \item{end}{finish item} 15 | } 16 | \value{ 17 | xml object 18 | } 19 | \description{ 20 | Fetch xml file from pubmed 21 | } 22 | \examples{ 23 | \dontrun{ 24 | library(scifetch) 25 | query <- 'janusz pawliszyn[AU]' 26 | z <- getpubmed(query) 27 | } 28 | } 29 | \seealso{ 30 | getpubmedtbl 31 | } 32 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: scifetch 2 | Type: Package 3 | Title: Fetch literature data from pubmed, google scholar and RSS 4 | Version: 0.1.0 5 | Imports: tibble, dplyr, purrr, xml2, stringr, tidyr, rvest,jsonlite,lubridate, 
httr,rcrossref 6 | Authors@R: person(given = "Miao", family = "YU", 7 | role = c("aut", "cre"), 8 | email = "yufreecas@gmail.com", 9 | comment = c(ORCID = "0000-0002-2804-6014")) 10 | Maintainer: Miao Yu 11 | Description: 12 | Fetch literature data from pubmed and google scholar in a tidyverse way. This package is written under tidyverse frame. 13 | License: GPL-2 14 | RoxygenNote: 7.0.2 15 | -------------------------------------------------------------------------------- /man/getgsauthor.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/googlescholar.R 3 | \name{getgsauthor} 4 | \alias{getgsauthor} 5 | \title{Fetch author's publication from Google Scholar} 6 | \usage{ 7 | getgsauthor(id, start = 1, end = 100) 8 | } 9 | \arguments{ 10 | \item{id}{the google scholar ID to be searched} 11 | 12 | \item{start}{begin item} 13 | 14 | \item{end}{finish item} 15 | } 16 | \value{ 17 | tibble object 18 | } 19 | \description{ 20 | Fetch author's publication from Google Scholar 21 | } 22 | \examples{ 23 | \dontrun{ 24 | library(scifetch) 25 | id <- "SPNX8oUAAAAJ" 26 | z <- getgsauthor(id) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /R/doi.R: -------------------------------------------------------------------------------- 1 | #' Fetch Bibliographies via DOI 2 | #' @param path file path 3 | #' @param style citaiton style, default apa 4 | #' @return NULL 5 | #' @export 6 | doiref <- function(path, style = 'apa'){ 7 | mystring <- readLines(path) 8 | doi <- unlist(stringr::str_extract_all(mystring,"\\[\\@\\b10\\.(\\d+\\.*)+[\\/](([^\\s\\.])+\\.*)+\\b\\]")) 9 | doi <- unique(doi) 10 | n <- as.numeric(factor(doi,levels = doi)) 11 | for (i in 1:length(doi)){ 12 | x <- readLines(path) 13 | x <- gsub(doi[i], paste0('[',n[i],']'),x,fixed = T) 14 | writeLines(x,path) 15 | temp <- try(rcrossref::cr_cn(dois = 
unlist(strsplit(doi[i],split = '\\[@|\\]'))[2], format = "text", style = style), T) 16 | cat(temp,file=path,sep="\n",append=TRUE) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /docs/link.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 8 | 12 | 13 | -------------------------------------------------------------------------------- /docs/docsearch.js: -------------------------------------------------------------------------------- 1 | $(function() { 2 | 3 | // register a handler to move the focus to the search bar 4 | // upon pressing shift + "/" (i.e. "?") 5 | $(document).on('keydown', function(e) { 6 | if (e.shiftKey && e.keyCode == 191) { 7 | e.preventDefault(); 8 | $("#search-input").focus(); 9 | } 10 | }); 11 | 12 | $(document).ready(function() { 13 | // do keyword highlighting 14 | /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */ 15 | var mark = function() { 16 | 17 | var referrer = document.URL ; 18 | var paramKey = "q" ; 19 | 20 | if (referrer.indexOf("?") !== -1) { 21 | var qs = referrer.substr(referrer.indexOf('?') + 1); 22 | var qs_noanchor = qs.split('#')[0]; 23 | var qsa = qs_noanchor.split('&'); 24 | var keyword = ""; 25 | 26 | for (var i = 0; i < qsa.length; i++) { 27 | var currentParam = qsa[i].split('='); 28 | 29 | if (currentParam.length !== 2) { 30 | continue; 31 | } 32 | 33 | if (currentParam[0] == paramKey) { 34 | keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20")); 35 | } 36 | } 37 | 38 | if (keyword !== "") { 39 | $(".contents").unmark({ 40 | done: function() { 41 | $(".contents").mark(keyword); 42 | } 43 | }); 44 | } 45 | } 46 | }; 47 | 48 | mark(); 49 | }); 50 | }); 51 | 52 | /* Search term highlighting ------------------------------*/ 53 | 54 | function matchedWords(hit) { 55 | var words = []; 56 | 57 | var hierarchy = hit._highlightResult.hierarchy; 58 | // loop to fetch from lvl0, lvl1, etc. 
59 | for (var idx in hierarchy) { 60 | words = words.concat(hierarchy[idx].matchedWords); 61 | } 62 | 63 | var content = hit._highlightResult.content; 64 | if (content) { 65 | words = words.concat(content.matchedWords); 66 | } 67 | 68 | // return unique words 69 | var words_uniq = [...new Set(words)]; 70 | return words_uniq; 71 | } 72 | 73 | function updateHitURL(hit) { 74 | 75 | var words = matchedWords(hit); 76 | var url = ""; 77 | 78 | if (hit.anchor) { 79 | url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor; 80 | } else { 81 | url = hit.url + '?q=' + escape(words.join(" ")); 82 | } 83 | 84 | return url; 85 | } 86 | -------------------------------------------------------------------------------- /docs/pkgdown.js: -------------------------------------------------------------------------------- 1 | /* http://gregfranko.com/blog/jquery-best-practices/ */ 2 | (function($) { 3 | $(function() { 4 | 5 | $('.navbar-fixed-top').headroom(); 6 | 7 | $('body').css('padding-top', $('.navbar').height() + 10); 8 | $(window).resize(function(){ 9 | $('body').css('padding-top', $('.navbar').height() + 10); 10 | }); 11 | 12 | $('body').scrollspy({ 13 | target: '#sidebar', 14 | offset: 60 15 | }); 16 | 17 | $('[data-toggle="tooltip"]').tooltip(); 18 | 19 | var cur_path = paths(location.pathname); 20 | var links = $("#navbar ul li a"); 21 | var max_length = -1; 22 | var pos = -1; 23 | for (var i = 0; i < links.length; i++) { 24 | if (links[i].getAttribute("href") === "#") 25 | continue; 26 | // Ignore external links 27 | if (links[i].host !== location.host) 28 | continue; 29 | 30 | var nav_path = paths(links[i].pathname); 31 | 32 | var length = prefix_length(nav_path, cur_path); 33 | if (length > max_length) { 34 | max_length = length; 35 | pos = i; 36 | } 37 | } 38 | 39 | // Add class to parent
  • , and enclosing
  • if in dropdown 40 | if (pos >= 0) { 41 | var menu_anchor = $(links[pos]); 42 | menu_anchor.parent().addClass("active"); 43 | menu_anchor.closest("li.dropdown").addClass("active"); 44 | } 45 | }); 46 | 47 | function paths(pathname) { 48 | var pieces = pathname.split("/"); 49 | pieces.shift(); // always starts with / 50 | 51 | var end = pieces[pieces.length - 1]; 52 | if (end === "index.html" || end === "") 53 | pieces.pop(); 54 | return(pieces); 55 | } 56 | 57 | // Returns -1 if not found 58 | function prefix_length(needle, haystack) { 59 | if (needle.length > haystack.length) 60 | return(-1); 61 | 62 | // Special case for length-0 haystack, since for loop won't run 63 | if (haystack.length === 0) { 64 | return(needle.length === 0 ? 0 : -1); 65 | } 66 | 67 | for (var i = 0; i < haystack.length; i++) { 68 | if (needle[i] != haystack[i]) 69 | return(i); 70 | } 71 | 72 | return(haystack.length); 73 | } 74 | 75 | /* Clipboard --------------------------*/ 76 | 77 | function changeTooltipMessage(element, msg) { 78 | var tooltipOriginalTitle=element.getAttribute('data-original-title'); 79 | element.setAttribute('data-original-title', msg); 80 | $(element).tooltip('show'); 81 | element.setAttribute('data-original-title', tooltipOriginalTitle); 82 | } 83 | 84 | if(ClipboardJS.isSupported()) { 85 | $(document).ready(function() { 86 | var copyButton = ""; 87 | 88 | $(".examples, div.sourceCode").addClass("hasCopyButton"); 89 | 90 | // Insert copy buttons: 91 | $(copyButton).prependTo(".hasCopyButton"); 92 | 93 | // Initialize tooltips: 94 | $('.btn-copy-ex').tooltip({container: 'body'}); 95 | 96 | // Initialize clipboard: 97 | var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', { 98 | text: function(trigger) { 99 | return trigger.parentNode.textContent; 100 | } 101 | }); 102 | 103 | clipboardBtnCopies.on('success', function(e) { 104 | changeTooltipMessage(e.trigger, 'Copied!'); 105 | e.clearSelection(); 106 | }); 107 | 108 | clipboardBtnCopies.on('error', 
function() { 109 | changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); 110 | }); 111 | }); 112 | } 113 | })(window.jQuery || window.$) 114 | -------------------------------------------------------------------------------- /docs/404.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Page not found (404) • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 51 | 52 | 53 | 54 | 55 | 56 | 57 |
    58 |
    59 | 93 | 94 | 95 | 96 |
    97 | 98 |
    99 |
    100 | 103 | 104 | Content not found. Please use links in the navbar. 105 | 106 |
    107 | 108 |
    109 | 110 | 111 | 112 |
    113 | 116 | 117 |
    118 |

    Site built with pkgdown 1.4.1.

    119 |
    120 | 121 |
    122 |
    123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | -------------------------------------------------------------------------------- /R/pubmed.R: -------------------------------------------------------------------------------- 1 | #' Fetch xml file from pubmed 2 | #' @param query the NCBI Entrez for search 3 | #' @param start begin item 4 | #' @param end finish item 5 | #' @return xml object 6 | #' @seealso getpubmedtbl 7 | #' @examples \dontrun{ 8 | #' library(scifetch) 9 | #' query <- 'janusz pawliszyn[AU]' 10 | #' z <- getpubmed(query) 11 | #' } 12 | #' @export 13 | getpubmed <- function(query, start = 1, end = 100){ 14 | query <- as.character(query) 15 | query <- gsub(" ", "+", query, fixed = TRUE) 16 | PID <- paste("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?", 17 | "db=pubmed&term=", 18 | query, 19 | "&usehistory=y", 20 | sep = "") 21 | # use ID to get full records 22 | # tmpConnect <- url(PID, open = "rb") 23 | xml <- xml2::read_xml(PID) 24 | # close.connection(tmpConnect) 25 | list <- xml2::as_list(xml) 26 | n <- list$eSearchResult$Count[[1]] 27 | warning <- paste(n,"records founds") 28 | message(warning) 29 | efetch_url = paste("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?", 30 | "db=pubmed&WebEnv=", 31 | list$eSearchResult$WebEnv[[1]], 32 | "&query_key=", 33 | list$eSearchResult$QueryKey[[1]], 34 | "&retstart=", 35 | start-1, 36 | "&retmax=", 37 | end, 38 | "&retmode=xml", 39 | sep = "") 40 | xml2 <- xml2::read_xml(efetch_url) 41 | return(xml2) 42 | } 43 | #' Get the basic data from xml 44 | #' @param xml2 the xml object from getpubmed 45 | #' @seealso getpubmed 46 | #' @return tibble object 47 | #' @examples \dontrun{ 48 | #' library(scifetch) 49 | #' query <- 'janusz pawliszyn[AU]' 50 | #' z <- getpubmed(query) 51 | #' z2 <- getpubmedtbl(z)} 52 | #' @importFrom dplyr %>% 53 | #' @export 54 | getpubmedtbl <- function(xml2){ 55 | options(warn = -1) 56 | getabs <- function(list){ 57 | if (length(list) > 2){ 58 | abstract 
<- paste(list[1:length(list)-1], collapse = " ", sep = " ") 59 | } else if (length(list) < 1) { 60 | abstract <- NA 61 | } else { 62 | abstract <- list[1] 63 | } 64 | return(abstract) 65 | } 66 | record <- xml2 %>% 67 | xml2::xml_find_all(".//PubmedArticle//MedlineCitation//Article") 68 | journal <- record %>% 69 | xml2::xml_find_all("Journal//ISOAbbreviation") %>% 70 | xml2::xml_contents() %>% 71 | as.character() %>% 72 | unlist() 73 | title <- record %>% 74 | xml2::xml_find_all("ArticleTitle") %>% 75 | xml2::xml_contents() %>% 76 | as.character() %>% 77 | unlist() 78 | year <- record %>% 79 | xml2::xml_find_all("Journal//JournalIssue//PubDate") %>% 80 | xml2::as_list() %>% 81 | purrr::map(unlist) %>% 82 | purrr::map(paste0) %>% 83 | purrr::map(`[`,1) %>% 84 | unlist() 85 | month <- record %>% 86 | xml2::xml_find_all("Journal//JournalIssue//PubDate") %>% 87 | xml2::as_list() %>% 88 | purrr::map(unlist) %>% 89 | purrr::map(paste0) %>% 90 | purrr::map(`[`,2) %>% 91 | unlist() 92 | day <- record %>% 93 | xml2::xml_find_all("Journal//JournalIssue//PubDate") %>% 94 | xml2::as_list() %>% 95 | purrr::map(unlist) %>% 96 | purrr::map(paste0) %>% 97 | purrr::map(`[`,3) %>% 98 | unlist() 99 | abstract <- record %>% 100 | xml2::as_list() %>% 101 | purrr::map(`[[`,'Abstract') %>% 102 | purrr::map(getabs) %>% 103 | purrr::map(unlist) %>% 104 | stringr::str_replace_all('list\\(\"',"") %>% 105 | stringr::str_replace_all('\"\\)',"") %>% 106 | unlist() 107 | 108 | paperdf <- tibble::as.tibble(cbind(journal,title,year,month,day,abstract)) %>% 109 | dplyr::mutate(day = dplyr::case_when(is.na(day) ~ '01', !is.na(day) ~ day)) %>% 110 | dplyr::mutate(month = dplyr::case_when(!(month %in% month.abb) & is.na(month) ~ 'Jan', !(month %in% month.abb) & !is.na(month) ~ month.abb[as.numeric(month)], month %in% month.abb ~ month)) %>% 111 | tidyr::unite(date, year, month, day, sep = '') %>% 112 | dplyr::mutate(date = lubridate::ymd(date)) %>% 113 | dplyr::bind_cols(line = 1:length(title)) 114 | 
options(warn = 0) 115 | return(paperdf) 116 | } 117 | 118 | 119 | -------------------------------------------------------------------------------- /docs/authors.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Authors • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 51 | 52 | 53 | 54 | 55 | 56 | 57 |
    58 |
    59 | 93 | 94 | 95 | 96 |
    97 | 98 |
    99 |
    100 | 103 | 104 |
      105 |
    • 106 |

      Miao YU. Author, maintainer. ORCID 107 |

      108 |
    • 109 |
    110 | 111 |
    112 | 113 |
    114 | 115 | 116 | 117 |
    118 | 121 | 122 |
    123 |

    Site built with pkgdown 1.4.1.

    124 |
    125 | 126 |
    127 |
    128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Fetch literature data from pubmed, google scholar and RSS • scifetch 9 | 10 | 11 | 12 | 13 | 14 | 16 | 17 | 21 | 22 | 23 |
    24 |
    57 | 58 | 59 | 60 | 61 |
    62 |
    63 | 64 | Fetch literature data from pubmed and google scholar in a tidyverse way. This package is written under tidyverse frame. 65 |
    66 | 67 | 82 |
    83 | 84 | 85 |
    88 | 89 |
    90 |

    Site built with pkgdown 1.4.1.

    91 |
    92 | 93 |
    94 |
    95 | 96 | 97 | 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /R/rss.R: -------------------------------------------------------------------------------- 1 | #' Fetch rss into tibble 2 | #' @param feed feed address 3 | #' @return tibble object 4 | #' @seealso getpubmed 5 | #' @examples \dontrun{ 6 | #' library(scifetch) 7 | #' feed <- 'yihui.name/index.xml' 8 | #' z <- getrss(feed) 9 | #' } 10 | getrss <- function(feed) { 11 | # This function is modified from tidyRSS and credits should belong to the author of tidyRSS package 12 | formats <- c( 13 | "a d b Y H:M:S z", "a, d b Y H:M z", 14 | "Y-m-d H:M:S z", "d b Y H:M:S", 15 | "d b Y H:M:S z", "a b d H:M:S z Y", 16 | "a b dH:M:S Y", "Y-m-d" 17 | ) 18 | json_parse <- function(feed) { 19 | res <- jsonlite::fromJSON(feed) 20 | items <- res$items 21 | results <- tibble::tibble( 22 | title = items$title, 23 | date = lubridate::parse_date_time(items$date_published, orders = formats) %>% 24 | as.character() %>% substr(1, 10), 25 | linkTitle = items$url, 26 | source = res$title, 27 | description = items$content_html 28 | ) 29 | return(results) 30 | } 31 | 32 | rss_parse <- function(doc) { 33 | channel <- xml2::xml_find_all(doc, "channel") 34 | if (identical(length(channel), 0L)) { 35 | ns <- xml2::xml_ns_rename(xml2::xml_ns(doc), d1 = "rss") 36 | channel <- xml2::xml_find_all(doc, "rss:channel", ns = ns) 37 | site <- xml2::xml_find_all(doc, "rss:item", ns = ns) 38 | res <- suppressWarnings({ 39 | tibble::tibble( 40 | title = xml2::xml_text(xml2::xml_find_all(site, "rss:title", ns = ns)), 41 | date = xml2::xml_text(xml2::xml_find_first(site, "rss:pubDate", ns = ns)) %>% 42 | lubridate::parse_date_time(orders = formats) %>% 43 | as.character() %>% substr(1, 10), 44 | linkTitle = xml2::xml_text(xml2::xml_find_all(site, "rss:link", ns = ns)), 45 | source = xml2::xml_text(xml2::xml_find_first(channel, "rss:title", ns = ns)), 46 | description = 
xml2::xml_text(xml2::xml_find_first(site, "rss:description", ns = ns)) 47 | ) 48 | }) 49 | } else { 50 | site <- xml2::xml_find_all(channel, "item") 51 | res <- suppressWarnings({ 52 | tibble::tibble( 53 | title = xml2::xml_text(xml2::xml_find_first(site, "title")), 54 | date = xml2::xml_text(xml2::xml_find_first(site, "pubDate")) %>% 55 | lubridate::parse_date_time(orders = formats) %>% 56 | as.character() %>% substr(1, 10), 57 | linkTitle = xml2::xml_text(xml2::xml_find_first(site, "link")), 58 | source = xml2::xml_text(xml2::xml_find_first(channel, "title")), 59 | description = xml2::xml_text(xml2::xml_find_first(site, "description")) 60 | ) 61 | }) 62 | res <- Filter(function(x) !all(is.na(x)), res) 63 | } 64 | return(res) 65 | } 66 | 67 | atom_parse <- function(doc) { 68 | ns <- c(atom = "http://www.w3.org/2005/Atom") 69 | feed_title <- xml2::xml_text(xml2::xml_find_first(doc, ".//atom:feed/atom:title", ns = ns)) 70 | entries <- xml2::xml_find_all(doc, ".//atom:entry", ns = ns) 71 | 72 | parse_entry <- function(entry) { 73 | title <- xml2::xml_text(xml2::xml_find_first(entry, ".//atom:title", ns = ns)) 74 | 75 | pub <- xml2::xml_find_first(entry, ".//atom:published", ns = ns) 76 | upd <- xml2::xml_find_first(entry, ".//atom:updated", ns = ns) 77 | date <- if (length(pub) > 0) xml2::xml_text(pub) else xml2::xml_text(upd) 78 | date_parsed <- lubridate::parse_date_time(date, orders = formats) %>% 79 | as.character() %>% substr(1, 10) 80 | 81 | link <- xml2::xml_find_first(entry, ".//atom:link", ns = ns) 82 | link_href <- if (length(link) > 0) xml2::xml_attr(link, "href") else NA_character_ 83 | 84 | content <- xml2::xml_find_first(entry, ".//atom:content", ns = ns) 85 | summary <- xml2::xml_find_first(entry, ".//atom:summary", ns = ns) 86 | description <- if (length(content) > 0) { 87 | xml2::xml_text(content) 88 | } else if (length(summary) > 0) { 89 | xml2::xml_text(summary) 90 | } else { 91 | NA_character_ 92 | } 93 | 94 | list( 95 | title = title, 96 | date = 
date_parsed, 97 | linkTitle = link_href, 98 | description = description 99 | ) 100 | } 101 | 102 | entries_list <- lapply(entries, parse_entry) 103 | res <- tibble::tibble( 104 | title = sapply(entries_list, `[[`, "title"), 105 | date = sapply(entries_list, `[[`, "date"), 106 | linkTitle = sapply(entries_list, `[[`, "linkTitle"), 107 | source = feed_title, 108 | description = sapply(entries_list, `[[`, "description") 109 | ) 110 | return(res) 111 | } 112 | 113 | invisible({ 114 | suppressWarnings({ 115 | stopifnot(identical(length(feed), 1L)) 116 | 117 | msg <- "Error in feed parse; please check URL." 118 | httr::set_config(httr::config(ssl_verifypeer = 0L, ssl_verifyhost = 0L)) 119 | response <- try(httr::GET(feed), silent = TRUE) 120 | if (inherits(response, "try-error")) stop(msg) 121 | 122 | if (grepl("application/json", response$headers$`content-type`)) { 123 | result <- json_parse(feed) 124 | } else { 125 | doc <- xml2::read_xml(response) 126 | root <- xml2::xml_root(doc) 127 | root_name <- xml2::xml_name(root) 128 | root_xmlns <- xml2::xml_attr(root, "xmlns") 129 | 130 | if (root_name == "feed" && root_xmlns == "http://www.w3.org/2005/Atom") { 131 | result <- atom_parse(doc) 132 | } else { 133 | result <- rss_parse(doc) 134 | } 135 | } 136 | 137 | if (NROW(result) > 0) result else NULL 138 | }) 139 | }) 140 | } 141 | -------------------------------------------------------------------------------- /docs/pkgdown.css: -------------------------------------------------------------------------------- 1 | /* Sticky footer */ 2 | 3 | /** 4 | * Basic idea: https://philipwalton.github.io/solved-by-flexbox/demos/sticky-footer/ 5 | * Details: https://github.com/philipwalton/solved-by-flexbox/blob/master/assets/css/components/site.css 6 | * 7 | * .Site -> body > .container 8 | * .Site-content -> body > .container .row 9 | * .footer -> footer 10 | * 11 | * Key idea seems to be to ensure that .container and __all its parents__ 12 | * have height set to 100% 13 | * 14 | */ 
15 | 16 | html, body { 17 | height: 100%; 18 | } 19 | 20 | body > .container { 21 | display: flex; 22 | height: 100%; 23 | flex-direction: column; 24 | } 25 | 26 | body > .container .row { 27 | flex: 1 0 auto; 28 | } 29 | 30 | footer { 31 | margin-top: 45px; 32 | padding: 35px 0 36px; 33 | border-top: 1px solid #e5e5e5; 34 | color: #666; 35 | display: flex; 36 | flex-shrink: 0; 37 | } 38 | footer p { 39 | margin-bottom: 0; 40 | } 41 | footer div { 42 | flex: 1; 43 | } 44 | footer .pkgdown { 45 | text-align: right; 46 | } 47 | footer p { 48 | margin-bottom: 0; 49 | } 50 | 51 | img.icon { 52 | float: right; 53 | } 54 | 55 | img { 56 | max-width: 100%; 57 | } 58 | 59 | /* Fix bug in bootstrap (only seen in firefox) */ 60 | summary { 61 | display: list-item; 62 | } 63 | 64 | /* Typographic tweaking ---------------------------------*/ 65 | 66 | .contents .page-header { 67 | margin-top: calc(-60px + 1em); 68 | } 69 | 70 | /* Section anchors ---------------------------------*/ 71 | 72 | a.anchor { 73 | margin-left: -30px; 74 | display:inline-block; 75 | width: 30px; 76 | height: 30px; 77 | visibility: hidden; 78 | 79 | background-image: url(./link.svg); 80 | background-repeat: no-repeat; 81 | background-size: 20px 20px; 82 | background-position: center center; 83 | } 84 | 85 | .hasAnchor:hover a.anchor { 86 | visibility: visible; 87 | } 88 | 89 | @media (max-width: 767px) { 90 | .hasAnchor:hover a.anchor { 91 | visibility: hidden; 92 | } 93 | } 94 | 95 | 96 | /* Fixes for fixed navbar --------------------------*/ 97 | 98 | .contents h1, .contents h2, .contents h3, .contents h4 { 99 | padding-top: 60px; 100 | margin-top: -40px; 101 | } 102 | 103 | /* Sidebar --------------------------*/ 104 | 105 | #sidebar { 106 | margin-top: 30px; 107 | position: -webkit-sticky; 108 | position: sticky; 109 | top: 70px; 110 | } 111 | #sidebar h2 { 112 | font-size: 1.5em; 113 | margin-top: 1em; 114 | } 115 | 116 | #sidebar h2:first-child { 117 | margin-top: 0; 118 | } 119 | 120 | #sidebar 
.list-unstyled li { 121 | margin-bottom: 0.5em; 122 | } 123 | 124 | .orcid { 125 | height: 16px; 126 | /* margins are required by official ORCID trademark and display guidelines */ 127 | margin-left:4px; 128 | margin-right:4px; 129 | vertical-align: middle; 130 | } 131 | 132 | /* Reference index & topics ----------------------------------------------- */ 133 | 134 | .ref-index th {font-weight: normal;} 135 | 136 | .ref-index td {vertical-align: top;} 137 | .ref-index .icon {width: 40px;} 138 | .ref-index .alias {width: 40%;} 139 | .ref-index-icons .alias {width: calc(40% - 40px);} 140 | .ref-index .title {width: 60%;} 141 | 142 | .ref-arguments th {text-align: right; padding-right: 10px;} 143 | .ref-arguments th, .ref-arguments td {vertical-align: top;} 144 | .ref-arguments .name {width: 20%;} 145 | .ref-arguments .desc {width: 80%;} 146 | 147 | /* Nice scrolling for wide elements --------------------------------------- */ 148 | 149 | table { 150 | display: block; 151 | overflow: auto; 152 | } 153 | 154 | /* Syntax highlighting ---------------------------------------------------- */ 155 | 156 | pre { 157 | word-wrap: normal; 158 | word-break: normal; 159 | border: 1px solid #eee; 160 | } 161 | 162 | pre, code { 163 | background-color: #f8f8f8; 164 | color: #333; 165 | } 166 | 167 | pre code { 168 | overflow: auto; 169 | word-wrap: normal; 170 | white-space: pre; 171 | } 172 | 173 | pre .img { 174 | margin: 5px 0; 175 | } 176 | 177 | pre .img img { 178 | background-color: #fff; 179 | display: block; 180 | height: auto; 181 | } 182 | 183 | code a, pre a { 184 | color: #375f84; 185 | } 186 | 187 | a.sourceLine:hover { 188 | text-decoration: none; 189 | } 190 | 191 | .fl {color: #1514b5;} 192 | .fu {color: #000000;} /* function */ 193 | .ch,.st {color: #036a07;} /* string */ 194 | .kw {color: #264D66;} /* keyword */ 195 | .co {color: #888888;} /* comment */ 196 | 197 | .message { color: black; font-weight: bolder;} 198 | .error { color: orange; font-weight: bolder;} 
199 | .warning { color: #6A0366; font-weight: bolder;} 200 | 201 | /* Clipboard --------------------------*/ 202 | 203 | .hasCopyButton { 204 | position: relative; 205 | } 206 | 207 | .btn-copy-ex { 208 | position: absolute; 209 | right: 0; 210 | top: 0; 211 | visibility: hidden; 212 | } 213 | 214 | .hasCopyButton:hover button.btn-copy-ex { 215 | visibility: visible; 216 | } 217 | 218 | /* headroom.js ------------------------ */ 219 | 220 | .headroom { 221 | will-change: transform; 222 | transition: transform 200ms linear; 223 | } 224 | .headroom--pinned { 225 | transform: translateY(0%); 226 | } 227 | .headroom--unpinned { 228 | transform: translateY(-100%); 229 | } 230 | 231 | /* mark.js ----------------------------*/ 232 | 233 | mark { 234 | background-color: rgba(255, 255, 51, 0.5); 235 | border-bottom: 2px solid rgba(255, 153, 51, 0.3); 236 | padding: 1px; 237 | } 238 | 239 | /* vertical spacing after htmlwidgets */ 240 | .html-widget { 241 | margin-bottom: 10px; 242 | } 243 | 244 | /* fontawesome ------------------------ */ 245 | 246 | .fab { 247 | font-family: "Font Awesome 5 Brands" !important; 248 | } 249 | 250 | /* don't display links in code chunks when printing */ 251 | /* source: https://stackoverflow.com/a/10781533 */ 252 | @media print { 253 | code a:link:after, code a:visited:after { 254 | content: ""; 255 | } 256 | } 257 | -------------------------------------------------------------------------------- /docs/reference/doiref.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Fetch Bibliographies via DOI — doiref • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 53 | 54 | 55 | 56 | 57 | 58 | 59 |
    60 |
    61 | 95 | 96 | 97 | 98 |
    99 | 100 |
    101 |
    102 | 107 | 108 |
    109 |

    Fetch Bibliographies via DOI

    110 |
    111 | 112 |
    doiref(path, style = "apa")
    113 | 114 |

    Arguments

    115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 |
    path

    file path

    style

    citation style, default apa

    126 | 127 | 128 |
    129 | 136 |
    137 | 138 | 139 |
    140 | 143 | 144 |
    145 |

    Site built with pkgdown 1.4.1.

    146 |
    147 | 148 |
    149 |
    150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | -------------------------------------------------------------------------------- /R/googlescholar.R: -------------------------------------------------------------------------------- 1 | #' Fetch author's publication from Google Scholar 2 | #' @param id the google scholar ID to be searched 3 | #' @param start begin item 4 | #' @param end finish item 5 | #' @return tibble object 6 | #' @examples \dontrun{ 7 | #' library(scifetch) 8 | #' id <- "SPNX8oUAAAAJ" 9 | #' z <- getgsauthor(id) 10 | #' } 11 | #' @export 12 | getgsauthor <- function(id, start = 1, end = 100) { 13 | url_template <- "http://scholar.google.com/citations?hl=en&user=%s&cstart=%d&pagesize=%d" 14 | url <- sprintf(url_template, id, start-1, end) 15 | 16 | cites <- url %>% 17 | xml2::read_html() %>% 18 | rvest::html_nodes(xpath="//tr[@class='gsc_a_tr']") 19 | 20 | title <- cites %>% 21 | rvest::html_nodes(".gsc_a_at") %>% 22 | rvest::html_text() 23 | 24 | pubid <- cites %>% 25 | rvest::html_nodes(".gsc_a_at") %>% 26 | rvest::html_attr("href") %>% 27 | stringr::str_extract(":.*$") %>% 28 | stringr::str_sub(start=2) 29 | 30 | doc_id <- cites %>% 31 | rvest::html_nodes(".gsc_a_ac") %>% 32 | rvest::html_attr("href") %>% 33 | stringr::str_extract("cites=.*$") %>% 34 | stringr::str_sub(start=7) 35 | 36 | cited_by <- suppressWarnings( 37 | cites %>% 38 | rvest::html_nodes(".gsc_a_ac") %>% 39 | rvest::html_text() %>% 40 | as.numeric() %>% 41 | replace(is.na(), 0)) 42 | 43 | year <- cites %>% 44 | rvest::html_nodes(".gsc_a_y") %>% 45 | rvest::html_text() %>% 46 | as.numeric() 47 | 48 | authors <- cites %>% 49 | rvest::html_nodes("td .gs_gray") %>% 50 | rvest::html_text() %>% 51 | subset(c(TRUE,FALSE)) 52 | 53 | details <- cites %>% 54 | rvest::html_nodes("td .gs_gray") %>% 55 | rvest::html_text() %>% 56 | subset(c(FALSE,TRUE)) 57 | 58 | first_digit <- as.numeric(regexpr("[\\[\\(]?\\d", details)) - 1 59 | journal <- 
stringr::str_trim(stringr::str_sub(details, end=first_digit)) %>% 60 | stringr::str_replace(",$", "") 61 | 62 | numbers <- stringr::str_sub(details, start=first_digit) %>% 63 | stringr::str_trim() %>% 64 | stringr::str_sub(end=-5) %>% 65 | stringr::str_trim() %>% 66 | stringr::str_replace(",$", "") 67 | 68 | data <- tibble::as_tibble(cbind( 69 | title = title, 70 | author=authors, 71 | journal=journal, 72 | number=numbers, 73 | cites=cited_by, 74 | year=year, 75 | cid=doc_id, 76 | pubid=pubid)) 77 | return(data) 78 | } 79 | 80 | #' #' Fetch journal's publication from Google Scholar 81 | #' #' @param id the journal ID to be searched 82 | #' #' @param start begin item 83 | #' #' @param end finish item 84 | #' #' @param years start year 85 | #' #' @param yeare end year 86 | #' #' @return tibble object 87 | #' #' @examples \dontrun{ 88 | #' #' library(scifetch) 89 | #' #' id <- "environmental science and technology" 90 | #' #' z <- getgsjournal(id) 91 | #' #' } 92 | #' #' @export 93 | #' getgsjournal <- function(name, start = 1, end = 100, years = 2016, yeare = 2017) { 94 | #' url_template <- "https://scholar.google.ca/scholar?hl=en&scisbd=1&as_publication=%s&cstart=%d&pagesize=%d&as_ylo=%d&as_yhi=%d" 95 | #' url <- sprintf(url_template, name,start, end, years, yeare) 96 | #' 97 | #' cites <- url %>% 98 | #' read_html() %>% 99 | #' html_nodes(xpath='//*[contains(concat( " ", @class, " " ), concat( " ", "gs_ri", " " ))]') 100 | #' 101 | #' title <- cites %>% 102 | #' html_nodes(".gsc_a_at") %>% 103 | #' html_text() 104 | #' 105 | #' pubid <- cites %>% 106 | #' html_nodes(".gsc_a_at") %>% 107 | #' html_attr("href") %>% 108 | #' str_extract(":.*$") %>% 109 | #' str_sub(start=2) 110 | #' 111 | #' doc_id <- cites %>% 112 | #' html_nodes(".gsc_a_ac") %>% 113 | #' html_attr("href") %>% 114 | #' str_extract("cites=.*$") %>% 115 | #' str_sub(start=7) 116 | #' 117 | #' cited_by <- suppressWarnings( 118 | #' cites %>% 119 | #' html_nodes(".gsc_a_ac") %>% 120 | #' html_text() %>% 
121 | #' as.numeric(.) %>% 122 | #' replace(is.na(.), 0)) 123 | #' 124 | #' year <- cites %>% 125 | #' html_nodes(".gsc_a_y") %>% 126 | #' html_text() %>% 127 | #' as.numeric() 128 | #' 129 | #' authors <- cites %>% 130 | #' html_nodes("td .gs_gray") %>% 131 | #' html_text() %>% 132 | #' subset(c(TRUE,FALSE)) 133 | #' 134 | #' details <- cites %>% 135 | #' html_nodes("td .gs_gray") %>% 136 | #' html_text() %>% 137 | #' subset(c(FALSE,TRUE)) 138 | #' 139 | #' first_digit <- as.numeric(regexpr("[\\[\\(]?\\d", details)) - 1 140 | #' journal <- str_trim(str_sub(details, end=first_digit)) %>% 141 | #' str_replace(",$", "") 142 | #' 143 | #' numbers <- str_sub(details, start=first_digit) %>% 144 | #' str_trim() %>% 145 | #' str_sub(end=-5) %>% 146 | #' str_trim() %>% 147 | #' str_replace(",$", "") 148 | #' 149 | #' data <- tibble::as_tibble(cbind( 150 | #' title = title, 151 | #' author=authors, 152 | #' journal=journal, 153 | #' number=numbers, 154 | #' cites=cited_by, 155 | #' year=year, 156 | #' cid=doc_id, 157 | #' pubid=pubid)) 158 | #' return(data) 159 | #' } 160 | #' 161 | -------------------------------------------------------------------------------- /docs/reference/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Function reference • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 51 | 52 | 53 | 54 | 55 | 56 | 57 |
    58 |
    59 | 93 | 94 | 95 | 96 |
    97 | 98 |
    99 |
    100 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 118 | 119 | 120 | 121 | 124 | 125 | 126 | 127 | 130 | 131 | 132 | 133 | 136 | 137 | 138 | 139 | 142 | 143 | 144 | 145 | 148 | 149 | 150 | 151 |
    115 |

    All functions

    116 |

    117 |
    122 |

    doiref()

    123 |

    Fetch Bibliographies via DOI

    128 |

    getgsauthor()

    129 |

    Fetch author's publication from Google Scholar

    134 |

    getpubmed()

    135 |

    Fetch xml file from pubmed

    140 |

    getpubmedtbl()

    141 |

    Get the basic data from xml

    146 |

    getrss()

    147 |

    Fetch rss into tibble

    152 |
    153 | 154 | 160 |
    161 | 162 | 163 |
    164 | 167 | 168 |
    169 |

    Site built with pkgdown 1.4.1.

    170 |
    171 | 172 |
    173 |
    174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | -------------------------------------------------------------------------------- /docs/reference/getrss.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Fetch rss into tibble — getrss • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 53 | 54 | 55 | 56 | 57 | 58 | 59 |
    60 |
    61 | 95 | 96 | 97 | 98 |
    99 | 100 |
    101 |
    102 | 107 | 108 |
    109 |

    Fetch rss into tibble

    110 |
    111 | 112 |
    getrss(feed)
    113 | 114 |

    Arguments

    115 | 116 | 117 | 118 | 119 | 120 | 121 |
    feed

    feed address

    122 | 123 |

    Value

    124 | 125 |

    tibble object

    126 |

    See also

    127 | 128 |

    getpubmed

    129 | 130 |

    Examples

    131 |
    if (FALSE) { 132 | library(scifetch) 133 | feed <- 'yihui.name/index.xml' 134 | z <- getrss(feed) 135 | }
    136 |
    137 | 147 |
    148 | 149 | 150 |
    151 | 154 | 155 |
    156 |

    Site built with pkgdown 1.4.1.

    157 |
    158 | 159 |
    160 |
    161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | -------------------------------------------------------------------------------- /docs/reference/getpubmedtbl.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Get the basic data from xml — getpubmedtbl • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 53 | 54 | 55 | 56 | 57 | 58 | 59 |
    60 |
    61 | 95 | 96 | 97 | 98 |
    99 | 100 |
    101 |
    102 | 107 | 108 |
    109 |

    Get the basic data from xml

    110 |
    111 | 112 |
    getpubmedtbl(xml2)
    113 | 114 |

    Arguments

    115 | 116 | 117 | 118 | 119 | 120 | 121 |
    xml2

    the xml object from getpubmed

    122 | 123 |

    Value

    124 | 125 |

    tibble object

    126 |

    See also

    127 | 128 |

    getpubmed

    129 | 130 |

    Examples

    131 |
    if (FALSE) { 132 | library(scifetch) 133 | query <- 'janusz pawliszyn[AU]' 134 | z <- getpubmed(query) 135 | z2 <- getpubmedtbl(z)}
    136 |
    137 | 147 |
    148 | 149 | 150 |
    151 | 154 | 155 |
    156 |

    Site built with pkgdown 1.4.1.

    157 |
    158 | 159 |
    160 |
    161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | -------------------------------------------------------------------------------- /docs/reference/getgsauthor.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Fetch author's publication from Google Scholar — getgsauthor • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 53 | 54 | 55 | 56 | 57 | 58 | 59 |
    60 |
    61 | 95 | 96 | 97 | 98 |
    99 | 100 |
    101 |
    102 | 107 | 108 |
    109 |

    Fetch author's publication from Google Scholar

    110 |
    111 | 112 |
    getgsauthor(id, start = 1, end = 100)
    113 | 114 |

    Arguments

    115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 |
    id

    the google scholar ID to be searched

    start

    begin item

    end

    finish item

    130 | 131 |

    Value

    132 | 133 |

    tibble object

    134 | 135 |

    Examples

    136 |
    if (FALSE) { 137 | library(scifetch) 138 | id <- "SPNX8oUAAAAJ" 139 | z <- getgsauthor(id) 140 | }
    141 |
    142 | 151 |
    152 | 153 | 154 |
    155 | 158 | 159 |
    160 |

    Site built with pkgdown 1.4.1.

    161 |
    162 | 163 |
    164 |
    165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | -------------------------------------------------------------------------------- /docs/reference/getpubmed.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Fetch xml file from pubmed — getpubmed • scifetch 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 53 | 54 | 55 | 56 | 57 | 58 | 59 |
    60 |
    61 | 95 | 96 | 97 | 98 |
    99 | 100 |
    101 |
    102 | 107 | 108 |
    109 |

    Fetch xml file from pubmed

    110 |
    111 | 112 |
    getpubmed(query, start = 1, end = 100)
    113 | 114 |

    Arguments

    115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 |
    query

    the NCBI Entrez for search

    start

    begin item

    end

    finish item

    130 | 131 |

    Value

    132 | 133 |

    xml object

    134 |

    See also

    135 | 136 |

    getpubmedtbl

    137 | 138 |

    Examples

    139 |
    if (FALSE) { 140 | library(scifetch) 141 | query <- 'janusz pawliszyn[AU]' 142 | z <- getpubmed(query) 143 | }
    144 |
    145 | 155 |
    156 | 157 | 158 |
    159 | 162 | 163 |
    164 |

    Site built with pkgdown 1.4.1.

    165 |
    166 | 167 |
    168 |
    169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | -------------------------------------------------------------------------------- /docs/docsearch.css: -------------------------------------------------------------------------------- 1 | /* Docsearch -------------------------------------------------------------- */ 2 | /* 3 | Source: https://github.com/algolia/docsearch/ 4 | License: MIT 5 | */ 6 | 7 | .algolia-autocomplete { 8 | display: block; 9 | -webkit-box-flex: 1; 10 | -ms-flex: 1; 11 | flex: 1 12 | } 13 | 14 | .algolia-autocomplete .ds-dropdown-menu { 15 | width: 100%; 16 | min-width: none; 17 | max-width: none; 18 | padding: .75rem 0; 19 | background-color: #fff; 20 | background-clip: padding-box; 21 | border: 1px solid rgba(0, 0, 0, .1); 22 | box-shadow: 0 .5rem 1rem rgba(0, 0, 0, .175); 23 | } 24 | 25 | @media (min-width:768px) { 26 | .algolia-autocomplete .ds-dropdown-menu { 27 | width: 175% 28 | } 29 | } 30 | 31 | .algolia-autocomplete .ds-dropdown-menu::before { 32 | display: none 33 | } 34 | 35 | .algolia-autocomplete .ds-dropdown-menu [class^=ds-dataset-] { 36 | padding: 0; 37 | background-color: rgb(255,255,255); 38 | border: 0; 39 | max-height: 80vh; 40 | } 41 | 42 | .algolia-autocomplete .ds-dropdown-menu .ds-suggestions { 43 | margin-top: 0 44 | } 45 | 46 | .algolia-autocomplete .algolia-docsearch-suggestion { 47 | padding: 0; 48 | overflow: visible 49 | } 50 | 51 | .algolia-autocomplete .algolia-docsearch-suggestion--category-header { 52 | padding: .125rem 1rem; 53 | margin-top: 0; 54 | font-size: 1.3em; 55 | font-weight: 500; 56 | color: #00008B; 57 | border-bottom: 0 58 | } 59 | 60 | .algolia-autocomplete .algolia-docsearch-suggestion--wrapper { 61 | float: none; 62 | padding-top: 0 63 | } 64 | 65 | .algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column { 66 | float: none; 67 | width: auto; 68 | padding: 0; 69 | text-align: left 70 | } 71 | 72 | .algolia-autocomplete .algolia-docsearch-suggestion--content { 73 | float: 
none; 74 | width: auto; 75 | padding: 0 76 | } 77 | 78 | .algolia-autocomplete .algolia-docsearch-suggestion--content::before { 79 | display: none 80 | } 81 | 82 | .algolia-autocomplete .ds-suggestion:not(:first-child) .algolia-docsearch-suggestion--category-header { 83 | padding-top: .75rem; 84 | margin-top: .75rem; 85 | border-top: 1px solid rgba(0, 0, 0, .1) 86 | } 87 | 88 | .algolia-autocomplete .ds-suggestion .algolia-docsearch-suggestion--subcategory-column { 89 | display: block; 90 | padding: .1rem 1rem; 91 | margin-bottom: 0.1; 92 | font-size: 1.0em; 93 | font-weight: 400 94 | /* display: none */ 95 | } 96 | 97 | .algolia-autocomplete .algolia-docsearch-suggestion--title { 98 | display: block; 99 | padding: .25rem 1rem; 100 | margin-bottom: 0; 101 | font-size: 0.9em; 102 | font-weight: 400 103 | } 104 | 105 | .algolia-autocomplete .algolia-docsearch-suggestion--text { 106 | padding: 0 1rem .5rem; 107 | margin-top: -.25rem; 108 | font-size: 0.8em; 109 | font-weight: 400; 110 | line-height: 1.25 111 | } 112 | 113 | .algolia-autocomplete .algolia-docsearch-footer { 114 | width: 110px; 115 | height: 20px; 116 | z-index: 3; 117 | margin-top: 10.66667px; 118 | float: right; 119 | font-size: 0; 120 | line-height: 0; 121 | } 122 | 123 | .algolia-autocomplete .algolia-docsearch-footer--logo { 124 | background-image: url("data:image/svg+xml;utf8,"); 125 | background-repeat: no-repeat; 126 | background-position: 50%; 127 | background-size: 100%; 128 | overflow: hidden; 129 | text-indent: -9000px; 130 | width: 100%; 131 | height: 100%; 132 | display: block; 133 | transform: translate(-8px); 134 | } 135 | 136 | .algolia-autocomplete .algolia-docsearch-suggestion--highlight { 137 | color: #FF8C00; 138 | background: rgba(232, 189, 54, 0.1) 139 | } 140 | 141 | 142 | .algolia-autocomplete .algolia-docsearch-suggestion--text .algolia-docsearch-suggestion--highlight { 143 | box-shadow: inset 0 -2px 0 0 rgba(105, 105, 105, .5) 144 | } 145 | 146 | .algolia-autocomplete 
.ds-suggestion.ds-cursor .algolia-docsearch-suggestion--content { 147 | background-color: rgba(192, 192, 192, .15) 148 | } 149 | --------------------------------------------------------------------------------