# File src/library/utils/R/MARC.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
MARC_relator_db <-
structure(list(term = c("Actor", "Adapter", "Annotator", "Applicant",
"Architect", "Arranger", "Artist", "Assignee", "Associated name",
"Attributed name", "Auctioneer", "Author", "Author in quotations or text extracts",
"Author of afterword, colophon, etc.", "Author of dialog", "Author of introduction, etc.",
"Author of screenplay, etc.", "Bibliographic antecedent", "Binder",
"Binding designer", "Book designer", "Book producer", "Bookjacket designer",
"Bookplate designer", "Bookseller", "Calligrapher", "Cartographer",
"Censor", "Choreographer", "Client", "Collaborator", "Collector",
"Collotyper", "Commentator", "Commentator for written text",
"Compiler", "Complainant", "Complainant-appellant", "Complainant-appellee",
"Composer", "Compositor", "Conceptor", "Conductor", "Consultant",
"Consultant to a project", "Contestant", "Contestant-appellant",
"Contestant-appellee", "Contestee", "Contestee-appellant", "Contestee-appellee",
"Contractor", "Contributor", "Copyright claimant", "Copyright holder",
"Corrector", "Correspondent", "Costume designer", "Creator",
"Curator of an exhibition", "Dancer", "Dedicatee", "Dedicator",
"Defendant", "Defendant-appellant", "Defendant-appellee", "Degree grantor",
"Delineator", "Depositor", "Designer", "Director", "Dissertant",
"Distributor", "Donor", "Draftsman", "Dubious author", "Editor",
"Electrotyper", "Engineer", "Engraver", "Etcher", "Expert", "Facsimilist",
"Film editor", "Forger", "Former owner", "Funder", "Honoree",
"Host", "Illuminator", "Illustrator", "Inscriber", "Instrumentalist",
"Interviewee", "Interviewer", "Inventor", "Landscape architect",
"Lender", "Libelant", "Libelant-appellant", "Libelant-appellee",
"Libelee", "Libelee-appellant", "Libelee-appellee", "Librettist",
"Licensee", "Licensor", "Lithographer", "Lyricist", "Metadata contact",
"Metal-engraver", "Moderator", "Monitor", "Musician", "Narrator",
"Opponent", "Organizer of meeting", "Originator", "Other", "Owner",
"Papermaker", "Patent applicant", "Patent holder", "Patron",
"Performer", "Photographer", "Plaintiff", "Plaintiff-appellant",
"Plaintiff-appellee", "Platemaker", "Printer", "Printer of plates",
"Process contact", "Producer", "Production personnel", "Programmer",
"Proofreader", "Publisher", "Publishing director", "Recipient",
"Recording engineer", "Redactor", "Renderer", "Research team head",
"Research team member", "Researcher", "Respondent", "Respondent-appellant",
"Respondent-appellee", "Reviewer", "Rubricator", "Scenarist",
"Scientific advisor", "Scribe", "Sculptor", "Secretary", "Signer",
"Singer", "Speaker", "Sponsor", "Standards body", "Stereotyper",
"Surveyor", "Thesis advisor", "Transcriber", "Translator", "Type designer",
"Typographer", "Vocalist", "Witness", "Wood-engraver", "Woodcutter",
"Writer of accompanying material"), code = c("act", "adp", "ann",
"app", "arc", "arr", "art", "asg", "asn", "att", "auc", "aut",
"aqt", "aft", "aud", "aui", "aus", "ant", "bnd", "bdd ", "bkd",
"bkp", "bjd", "bpd", "bsl", "cll", "ctg", "cns", "chr", "cli",
"clb", "col", "clt", "cmm", "cwt", "com", "cpl", "cpt", "cpe",
"cmp", "cmt ", "ccp", "cnd", "csl", "csp", "cos", "cot", "coe",
"cts", "ctt", "cte", "ctr", "ctb", "cpc", "cph", "crr", "crp",
"cst", "cre", "cur", "dnc", "dte", "dto", "dfd", "dft", "dfe",
"dgg", "dln", "dpt", "dsr", "drt", "dis", "dst", "dnr", "drm",
"dub", "edt", "elt", "eng", "egr", "etr", "exp", "fac", "flm",
"frg ", "fmo", "fnd", "hnr", "hst", "ilu", "ill", "ins", "itr",
"ive", "ivr", "inv", "lsa", "len", "lil", "lit", "lie", "lel",
"let", "lee", "lbt", "lse", "lso", "ltg", "lyr", "mdc", "mte",
"mod", "mon", "mus", "nrt", "opn", "orm", "org", "oth", "own",
"ppm", "pta", "pth", "pat", "prf", "pht", "ptf", "ptt", "pte",
"plt", "prt", "pop", "prc", "pro", "prd", "prg", "pfr", "pbl",
"pbd", "rcp", "rce", "red", "ren", "rth", "rtm", "res", "rsp",
"rst", "rse", "rev", "rbr", "sce", "sad", "scr", "scl", "sec",
"sgn", "sng", "spk", "spn", "stn", "str", "srv", "ths", "trc",
"trl", "tyd", "tyg", "voc", "wit", "wde", "wdc", "wam"), description = c("Use for a person who principally exhibits acting skills in a musical or dramatic presentation or entertainment.",
"Use for a person who 1) reworks a musical composition, usually for a different medium, or 2) rewrites novels or stories for motion pictures or other audiovisual medium.",
"Use for a person who writes manuscript annotations on a printed item.",
"Appraiser (USE: Expert)", "", "Use for a person who transcribes a musical composition, usually for a different medium from that of the original, in an arrangement the musical substance remains essentially unchanged.",
"Use for a person (e.g., a painter) who conceives, and perhaps also implements, an original graphic design or work of art, if specific codes (e.g., [egr], [etr]) are not desired. For book illustrators, prefer Illustrator [ill]. (UF: Graphic technician)",
"Use for a person or organization to whom a license for printing or publishing has been transferred.",
"Use as a general relator for a name associated with or found in an item or collection, or which cannot be determined to be that of a Former owner [fmo] or other designated relator indicative of provenance.",
"Use to relate an author, artist, etc. to a work for which there is or once was substantial authority for designating that person as author, creator, etc. of the work. (UF: Supposed name)",
"Use for a person or corporate body in change or the estimation and public auctioning of goods, particularly books, artistic works, etc.",
"Use for a person or corporate body chiefly responsible for the intellectual or artistic content of a work, usually printed text. This term may also be used when more than one person or body bears such responsibility. (UF: Joint author)",
"Use for a person whose work is largely quoted or extracted in a works to which he or she did not contribute directly. Such quotations are found particularly in exhibition catalogs, collections of photographs, etc.",
"Use for a person or corporate body responsible for an afterword, postface, colophon, etc. but who is not the chief author of a work.",
"Use for a person or corporate body responsible for the dialog or spoken commentary for a screenplay or sound recording.",
"Use for a person or corporate body responsible for an introduction, preface, foreword, or other critical introductory matter, but who is not the chief author.",
"Use for a person or corporate body responsible for a motion picture screenplay, dialog, spoken commentary, etc.",
"Use for the author responsible for a work upon which the work represented by the catalog record is based. This may be appropriate for adaptations, sequels, continuations, indexes, etc.",
"", "(UF: Designer of binding)", "Use for the person or firm responsible for the entire graphic design of a book, including arrangement of type and illustration, choice of materials, and process used. (UF: Designer of book)",
"Use for the person or firm responsible for the production of books and other print media, if specific codes (e.g., [bkd], [egr], [tyd], [prt]) are not desired. (UF: Producer of book)",
"(UF: Designer of bookjacket)", "(UF: Designer of bookplate)",
"Bowdlerizer (USE: Censor)", "", "", "Use for a censor, bowdlerizer, expurgator, etc., official or private. (UF: Bowdlerizer, Expurgator)",
"Use for a person who composes or arranges dances or other movements (e.g., 'master of swords') for a musical or dramatic presentation or entertainment.",
"Use for a person or organization for whom another person or organization is acting.",
"Use for a person or corporate body that takes a limited part in the elaboration of a work of another person or corporate body that brings complements (e.g., appendices, notes) to the work.",
"Use for a person who has brought together material from various sources, which has been arranged, described, and cataloged as a collection. The collector is neither the creator of the material nor the person to whom manuscripts in the collection may have been addressed.",
"", "Use for a person who provides interpretation, analysis, or a discussion of the subject matter on a recording, motion picture, or other audiovisual medium.",
"Use for a person or corporate body responsible for the commentary or explanatory notes about a text. For the writer of manuscript annotations in a printed book, use Annotator [ann].",
"Use for a person who produces a work or publication by selecting and putting together material from the works of various persons or bodies.",
"Use for the party who applies to the courts for redress, usually in an equity proceeding.",
"Use for a complainant who takes an appeal from one court or jurisdiction to another to reverse the judgment, usually in an equity proceeding.",
"Use for a complainant against whom an appeal is taken from one court or jurisdiction to another to reverse the judgment, usually in an equity proceeding.",
"Use for a person who creates a musical work, usually a piece of music in manuscript or printed form.",
"(UF: Typesetter)", "Use for a person or corporate body responsible for the original idea on which a work is based, this includes the scientific author of an audio-visual item and the conceptor of an advertisement.",
"Use for a person who directs a performing group (orchestra, chorus, opera, etc.).",
"Use for the person called upon for professional advice or services in a specialized field of knowledge or training.",
"Use for a person or corporate body engaged specifically to provide an intellectual overview of a strategic or operational task and by analysis, specification, or instruction, to create or propose a cost-effective course of action or solution.",
"Use for the party who opposes, resists, or disputes, in a court of law, a claim, decision, result, etc.",
"Use for a contestant who takes an appeal from one court of law or jurisdiction to another to reverse the judgment.",
"Use for a contestant against whom an appeal is taken from one court of law or jurisdiction to another to reverse the judgment.",
"Use for the party defending a claim, decision, result, etc. being opposed, resisted, or disputed in a court of law.",
"Use for a contestee who takes an appeal from one court or jurisdiction to another to reverse the judgment.",
"Use for a contestee against whom an appeal is taken from one court or jurisdiction to another to reverse the judgment.",
"Use for the person or corporate body who enters into a contract with another person or corporate body to perform a specific task.",
"Use for one whose work has been contributed to a larger work, such as an anthology, serial publication, or other compilation of individual works. Do not use for someone whose sole function in relation to a work is as author, editor, compiler or translator.",
"Use for the person listed as a copyright owner at the time of registration. Copyright can be granted or later transferred to another person or agent, at which time the claimant becomes the copyright holder.",
"", "Use for a corrector of manuscripts, such as the scriptorium official who corrected the work of a scribe. For printed matter, use Proofreader [pfr].",
"Use for a person or organization who was either the writer or recipient of a letter or other communication.",
"Use for a person who designs or makes costumes, fixes hair, etc., for a musical or dramatic presentation or entertainment. // Counterfeiter (USE: Forger)",
"Use for a person or corporate body responsible for the intellectual or artistic content of a work.",
"Use for a person who is responsible for conceiving and organizing an exhibition.",
"Use for a person who principally exhibits dancing skills in a musical or dramatic presentation or entertainment.",
"Use for a person or organization to whom a book, manuscript, etc., is dedicated (not the recipient of a gift).",
"Use for the author of a dedication, which may be a formal statement or in epistolary or verse form.",
"Use for the party defending or denying allegations made in a suit and against whom relief or recovery is sought in the courts, usually in a legal action.",
"Use for a defendant who takes an appeal from one court or jurisdiction to another to reverse the judgment, usually in a legal action.",
"Use for a defendant against whom an appeal is taken from one court or jurisdiction to another to reverse the judgment, usually in a legal action.",
"Use for the corporate body granting a degree for which the thesis or dissertation described was presented.",
"Use for a person or organization executing technical drawings from others' designs. // Deponent (USE: Witness)",
"Use for a person or organization placing material in the physical custody of a library or repository without transferring the legal title.",
"Use for a person or organization responsible for design if specific codes (e.g., [bkd], [tyd]) are not desired. // Designer of binding (USE: Binding designer) // Designer of book (USE: Book designer) // Designer of bookjacket (USE: Bookjacket designer) // Designer of bookplate (USE: Bookplate designer) // Designer of type (USE: Type designer)",
"Use for a person who is responsible for the general management of a work or who supervises the production of a performance for stage, screen, or sound recording.",
"Use for a person who presents a thesis for a university or higher-level educational degree.",
"Use for an agent or agency that has exclusive or shared marketing rights for an item.",
"Use for the donor of a book, manuscript, etc., to its present owner. Donors to previous owners are designated as Former owner [fmo] or Inscriber [ins].",
"Use for the person who prepares technical or mechanical drawings. (UF: Technical draftsman)",
"Use for a person or corporate body to which authorship has been dubiously or incorrectly ascribed.",
"Use for a person who prepares for publication a work not primarily his/her own, such as by elucidating text, adding introductory or other critical matter, or technically directing an editorial staff.",
"", "Use for a person or organization that is responsible for technical planning and design, particularly with construction.",
"", "", "Use for a person in charge of the description and appraisal of the value of goods, particularly rare items, works of art, etc. (UF: Appraiser) // Eyewitness (USE: Witness) // Expurgator (USE: Censor)",
"Use for the person or body that executed the facsimile. (UF: Copier)",
"Use for an editor of a motion picture film. This term is used regardless of the medium upon which the motion picture is produced or manufactured (e.g., acetate film, video tape). (UF: Motion picture editor)",
"(UF: Copier, Counterfeiter)", "Use for the person or organization who owned an item at any time in the past. Includes those to whom the material was once presented. The person or organization giving the item to the present owner is designated as Donor [dnr]",
"Use for the person or agency that furnished financial support for the production of the work. // Graphic technician (USE: Artist) [Relator term 'Graphic technician' (coded [grt]) used before March 1988 only.]",
"Use for the person in memory or honor of whom a book, manuscript, etc. is donated. (UF: Memorial)",
"Use for the person who is invited or regularly leads a program (often broadcast) that includes other guests, performers, etc. (e.g., talk show host).",
"", "Use for the person who conceives, and perhaps also implements, a design or illustration, usually to accompany a written text. // Imprimatur (USE: Licensor)",
"Use for the person who signs a presentation statement.", "Use for a person who principally plays an instrument in a musical or dramatic presentation or entertainment.",
"", "", "// Investigator (USE: Originator) // Joint author (USE: Author)",
"Use for the person or organization whose work involves coordinating the arrangement of existing and proposed land features and structures. ",
"Use for a person or organization permitting the temporary use of a book, manuscript, etc., such as for photocopying or microfilming.",
"Use for the party who files a libel in an ecclesiastical or admiralty case.",
"Use for a libelant who takes an appeal from one ecclesiastical court or admiralty to another to reverse the judgment.",
"Use for a libelant against whom an appeal is taken from one ecclesiastical court or admiralty to another to reverse the judgment.",
"Use for the party against whom a libel has been filed in an ecclesiastical court or admiralty.",
"Use for a libelee who takes an appeal from one ecclesiastical court or admiralty to another to reverse the judgment.",
"Use for a libelee against whom an appeal is taken from one ecclesiastical court or admiralty to another to reverse the judgment.",
"Use for the writer of the text of an opera, oratorio, etc.",
"Use for the original recipient of the right to print or publish.",
"Use for the signer of the license, imprimatur, etc. (UF: Imprimatur)",
"Use for the person who prepares the stone or plate for lithographic printing, including a graphic artist creating a design directly on the surface from which printing will be done.",
"Use for the writer of the text of a song. // Memorial (USE: Honoree)",
"Use for the person or organization primarily responsible for compiling and maintaining the original description of a metadata set (e.g., geospatial metadata set).",
"", "Use for the person who leads a program (often broadcast) where topics are discussed, usually with participation of experts in fields related to the discussion.",
"Use for a person or organization that supervises compliance with the contract and is responsible for the report and controls its distribution. Sometimes referred to as the grantee, or controlling agency. // Motion picture editor (USE: Film editor)",
"Use for the person who performs music or contributes to the musical content of a work when it is not possible or desirable to identify the function more precisely.",
"Use for the speaker who relates the particulars of an act, occurrence, or course of events. // Observer (USE: Witness) // Onlooker (USE: Witness)",
"Use for the person or corporate body responsible for opposing a thesis or dissertation.",
"Use for the person or corporate body responsible for organizing a meeting for which an item is the report or proceedings.",
"Use for the author or agency performing the work, i.e., the name of a person or organization associated with the intellectual content of the work. This category does not include the publisher or personal affiliation, or sponsor except where it is also the corporate author. Includes a person designated in the work as investigator or principal investigator. (UF: Principal investigator)",
"Use for relator codes from other lists which have no equivalent in the MARC list or for terms which have not been assigned a code.",
"Use for the person or organization that currently owns an item or collection.",
"", "Use for the person or corporate body that applied for a patent.",
"Use for the person or corporate body that was granted the patent referred to by the item. (UF: Patentee) // Patentee (USE: Patent holder)",
"Use for the person responsible for commissioning a work. Usually a patron uses his or her means or influence to support the work of artists, writers, etc. This includes those who commission and pay for individual works.",
"User for a person who exhibits musical or acting skills i a musical or dramatic presentation or entertainment, if specific codes for those functions ([act], [dnc], [itr], [voc], etc.) are not used. If specific codes are used, [prf] is used for a person whose principal skill is not known or specified. // Performer of research (USE: Researcher)",
"Use for the person or organization responsible for taking photographs, whether they are used in their original form or as reproductions.",
"Use for the party who complains or sues in court in a personal action, usually in a legal proceeding.",
"Use for a plaintiff who takes an appeal from one court or jurisdiction to another to reverse the judgment, usually in a legal proceeding.",
"Use for a plaintiff against whom an appeal is taken from one court or jurisdiction to another to reverse the judgment, usually in a legal proceeding.",
"// Plates, Printer of (USE: Printer of Plates) // Principal investigator (USE: Originator)",
"Use for the person or organization who prints texts, whether from type or plates.",
"Use for the person or organization who prints illustrations from plates. (UF: Plates, Printer of)",
"Use for a person or organization primarily responsible for performing or initiating a process, such as is done with the collection of metadata sets.",
"Use for a person who is responsible for the making of a motion picture, including business aspects, management of the productions, and the commercial success of the work. // Producer of book (USE: Book producer)",
"Use for a person who is associated with the production (props, lighting, special effects, etc.) of a musical or dramatic presentation or entertainment.",
"Use for a person or corporate body responsible for the creation and/or maintenance of computer program design documents, source code, and machine-executable digital files and supporting documentation. // Promoter (USE: Thesis advisor)",
"Use for a person who corrects printed matter. For manuscripts, use Corrector [crr].",
"", "Use for a person who presides over the elaboration of a collective work to ensure its coherence or continuity. This includes editors-in-chief, literary editors, editors of series, etc.",
"Use for the person to whom correspondence is addressed.", "Use for a person who supervises the technical aspects of a sound or video recording session.",
"Use for a person who writes or develops the framework for an item without being intellectually responsible for its content.",
"Use for the draftsman who prepares drawings of architectural designs (i.e., renderings) in accurate, representational perspective to show what the project will look like when completed.",
"Use for the person or corporate body that directed or managed a research project.",
"Use for the person or corporate body that participated in a research project but whose role did not involve direction or management of it.",
"Use for the person or corporate body responsible for performing research. (UF: Performer of research)",
"Use for the party who makes an answer to the courts pursuant to an application for redress, usually in an equity proceeding.",
"Use for a respondent who takes an appeal from one court or jurisdiction to another to reverse the judgment, usually in an equity proceeding.",
"Use for a respondent against whom an appeal is taken from one court or jurisdiction to another to reverse the judgment, usually in an equity proceeding.",
"Use for a person or corporate body responsible for the review of book, motion picture, performance, etc.",
"", "Use for the author of a motion picture screenplay.", "Use for a person who brings scientific, pedagogical, or historical competence to the conception and realization on a work, particularly in the case of audio-visual items.",
"Use for an amanuensis and for a writer of manuscripts proper. For a person who makes pen-facsimiles, use Facsimilist [fac].",
"Use when the more general term Artist [art] is not desired.",
"Use for a recorder, redactor, or other person responsible for expressing the views of a corporate body.",
"Use for the person whose signature appears without a presentation or other statement indicative of provenance. When there is a presentation statement, use Inscriber [ins].",
"Use for a person who uses his or her voice with or without instrumental accompaniment to produce music. A singer's performance may or may not include actual words.",
"Use for a person who participates in a program (often broadcast) and makes a formalized contribution or presentation generally prepared in advance.",
"Use for the person or agency that issued a contract or under the auspices of which a work has been written, printed, published, etc.",
"Use for a corporate body or agency responsible for the development or enforcement of a standard.",
"// Supposed name (USE: Attributed name)", "Use for a person or organization who does measurements of tracts of land, etc. to determine location, forms, and boundaries. // Technical draftsman (USE: Draftsman) // Testifier (USE: Witness)",
"Use for the person under whose supervision a degree candidate develops and presents a thesis, memoire, or text of a dissertation. (UF: Promoter)",
"Use for a person who prepares a handwritten or typewritten copy from original material, including from dictated or orally recorded material. For makers of pen-facsimiles, use Facsimilist [fac].",
"Use for a person who renders a text from one language into another, or from an older form of a language into the modern form.",
"Use for the person who designed the type face used in a particular item. (UF: Designer of type) // Typesetter (USE: Compositor)",
"Use for the person primarily responsible for choice and arrangement of type used in an item. If the typographer is also responsible for other aspects of the graphic design of a book (e.g., Book designer [bkd]), codes for both functions may be needed.",
"Use for a person who principally exhibits singing skills in a musical or dramatic presentation or entertainment.",
"Use for a person who verifies the truthfulness of an event or action. (UF: Deponent, Eyewitness, Observer, Onlooker, Testifier)",
"User for a person who makes prints by cutting the image in relief on the end-grain of a wood block.",
"User for a person who makes prints by cutting the image in relief on the plank side of a wood block.",
"Use for a person who writes significant material which accompanies a sound recording or other audiovisual material."
), usage = c("", "", "", "", "", "", "", "", "", "", "", "Use for full authors who have made substantial contributions to the package and should show up in the package citation.",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "Use for package maintainers that collected code (potentially in other languages) but did not make further substantial contributions to the package.",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"Use for authors who have made smaller contributions (such as code patches etc.) but should not show up in the package citation.",
"", "Use for all copyright holders.", "", "", "", "Use for the package maintainer.",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "If the package is part of a thesis, use for the thesis advisor.",
"", "If the R code is merely a translation from another language (typically S), use for the translator to R.",
"", "", "", "", "", "", "")), .Names = c("term", "code", "description",
"usage"), class = "data.frame", row.names = c(NA, -173L))
MARC_relator_db_codes_used_with_R <-
c("aut", "com", "ctb", "cph", "cre", "ths", "trl")
# File src/library/utils/R/RShowDoc.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
RShowDoc <- function(what, type=c("pdf", "html", "txt"), package)
{
paste. <- function(x, ext) paste(x, ext, sep=".")
pdf_viewer <- function(path) {
pdfviewer <- getOption("pdfviewer")
if(identical(pdfviewer, "false")) {
} else if(.Platform$OS.type == "windows" &&
identical(pdfviewer, file.path(R.home("bin"), "open.exe")))
shell.exec(path)
else system2(pdfviewer, shQuote(path), wait = FALSE)
}
html_viewer <- function(path) {
## we don't use browseURL under Windows as shell.exec does
## not want an encoded URL.
browser <- getOption("browser")
if(is.null(browser) && .Platform$OS.type == "windows")
shell.exec(chartr("/", "\\", path))
else browseURL(paste0("file://", URLencode(path)))
}
type <- match.arg(type)
if(missing(what) || length(what) != 1L || !is.character(what)) {
message(" RShowDoc() should be used with a character string argument specifying\n a documentation file")
return(invisible())
}
if(!missing(package)) {
pkgpath <- find.package(package)
if(type == "pdf") {
path <- file.path(pkgpath, "doc", paste.(what, "pdf"))
if(file.exists(path)) {
pdf_viewer(path)
return(invisible(path))
}
path <- file.path(pkgpath, paste.(what, "pdf"))
if(file.exists(path)) {
pdf_viewer(path)
return(invisible(path))
}
type <- "html"
}
if(type == "html") {
path <- file.path(pkgpath, "doc", paste.(what, "html"))
if(file.exists(path)) {
html_viewer(path)
return(invisible(path))
}
path <- file.path(pkgpath, paste.(what, "html"))
if(file.exists(path)) {
html_viewer(path)
return(invisible(path))
}
}
path <- file.path(pkgpath, "doc", what)
if(file.exists(path)) {
file.show(path)
return(invisible(path))
}
path <- file.path(pkgpath, what)
if(file.exists(path)) {
file.show(path)
return(invisible(path))
}
stop(gettextf("no documentation for %s found in package %s",
sQuote(what), sQuote(package)), domain = NA)
}
if(what == "FAQ") what <- "R-FAQ"
if(what == "NEWS") {
if(type == "pdf") type <- "html"
if(type == "html") {
path <- file.path(R.home("doc"), "html", paste.(what, "html"))
if(file.exists(path)) {
html_viewer(path)
return(invisible(path))
}
}
## This is in UTF-8 and has a BOM on the first line
path <- file.path(R.home(), what)
tf <- tempfile()
tmp <- readLines(path)
tmp[1] <- ""
writeLines(tmp, tf)
file.show(tf, delete.file = TRUE, encoding = "UTF-8")
return(invisible(path))
} else if(what == "COPYING") {
path <- file.path(R.home("doc"), what)
file.show(path)
return(invisible(path))
} else if(what %in% dir(file.path(R.home("share"), "licenses"))) {
path <- file.path(R.home("share"), "licenses", what)
file.show(path)
return(invisible(path))
} else if(what %in% c("R-admin", "R-data", "R-exts", "R-FAQ", "R-intro",
"R-ints", "R-lang")) {
if(type == "pdf") {
path <- file.path(R.home("doc"), "manual", paste.(what, "pdf"))
if(file.exists(path)) {
pdf_viewer(path)
return(invisible(path))
}
type <- "html"
}
if(type == "html") {
path <- file.path(R.home("doc"), "manual", paste.(what, "html"))
if(file.exists(path)) {
html_viewer(path)
return(invisible(path))
}
}
if(what == "R-FAQ" &&
file.exists(path <- file.path(R.home("doc"), "FAQ"))) {
file.show(path)
return(invisible(path))
}
} else if(.Platform$OS.type == "windows" && what %in% "rw-FAQ") {
if(type == "pdf") type <- "html"
if(type == "html") {
path <- file.path(R.home("doc"), "html", paste.(what, "html"))
if(file.exists(path)) {
html_viewer(path)
return(invisible(path))
}
}
path <- file.path(R.home("doc"), what)
if(file.exists(path)) {
file.show(path)
return(invisible(path))
}
path <- file.path(R.home(), "src", "gnuwin32", what)
if(file.exists(path)) {
file.show(path)
return(invisible(path))
}
} else {
rdocdir <- R.home("doc")
docs <- dir(rdocdir, full.names=TRUE)
docs <- docs[sapply(docs, function(x) file_test("-f", x))]
m <- match(what, basename(docs), 0L)
if(m > 0L) {
file.show(docs[m])
return(invisible(docs[m]))
}
}
stop("document not found")
}
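## Illustrative sketch (a hypothetical wrapper grouping typical interactive
## calls; it assumes a standard R installation with the manuals installed):
.RShowDoc_examples <- function() {
    RShowDoc("R-intro")             # an R manual, as PDF if available, else HTML
    RShowDoc("FAQ", type = "html")  # "FAQ" is mapped to "R-FAQ" above
    RShowDoc("GPL-2")               # a license text from R.home("share")/licenses
    RShowDoc("NEWS")                # release news, as HTML if available
}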
# File src/library/utils/R/RSiteSearch.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
RSiteSearch <- function(string,
restrict = c("functions", "vignettes", "views"),
format = c("normal", "short"),
sortby = c("score", "date:late", "date:early",
"subject", "subject:descending",
"from", "from:descending", "size", "size:descending"),
matchesPerPage = 20)
{
string <- paste0("http://search.r-project.org/cgi-bin/namazu.cgi?query=",
gsub(" ", "+", string))
mpp <- paste0("max=", matchesPerPage)
format <- paste0("result=", match.arg(format))
restrictVALS <- c("functions", "vignettes", "views")
restr <- match.arg(restrict, choices = restrictVALS, several.ok = TRUE)
restr <- paste(paste0("idxname=", restr), collapse = "&")
sortby <- match.arg(sortby)
sortby <- paste0("sort=",
switch(sortby,
"score"=, "date:late"=, "date:early" = sortby,
"subject" = "field:subject:ascending",
"subject:descending" = "field:subject:descending",
"from" = "field:from:ascending",
"from:descending" = "field:from:descending",
"size" = "field:size:ascending",
"size:descending" = "field:size:descending"))
## we know this is a http:// URL, so encoding should be safe.
## it seems that firefox on Mac OS needs it for {...}
## OTOH, Namazu does not decode in, say, sort=date:late.
qstring <- paste(URLencode(string), mpp, format, sortby, restr, sep = "&")
browseURL(qstring)
cat(gettextf("A search query has been submitted to %s",
"http://search.r-project.org"), "\n", sep = "")
cat(gettext("The results page should open in your browser shortly\n"))
invisible(qstring)
}
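## Illustrative sketch (a hypothetical helper; needs network access and a
## browser): restrict the search to help pages and ask for 50 hits per page.
.RSiteSearch_example <- function()
    RSiteSearch("mixed effects models",
                restrict = "functions", matchesPerPage = 50)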
# File src/library/utils/R/Rprof.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
Rprof <- function(filename = "Rprof.out", append = FALSE, interval = 0.02,
memory.profiling = FALSE, gc.profiling = FALSE,
line.profiling = FALSE, numfiles = 100L, bufsize = 10000L)
{
if(is.null(filename)) filename <- ""
invisible(.External(C_Rprof, filename, append, interval, memory.profiling,
gc.profiling, line.profiling, numfiles, bufsize))
}
Rprofmem <- function(filename = "Rprofmem.out", append = FALSE, threshold = 0)
{
if(is.null(filename)) filename <- ""
invisible(.External(C_Rprofmem, filename, append, as.double(threshold)))
}
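## Illustrative sketch (a hypothetical helper; Rprof() requires R to have been
## built with profiling support, and the workload here is arbitrary):
.Rprof_example <- function() {
    tf <- tempfile(fileext = ".out")
    Rprof(tf, interval = 0.01)                  # start sampling the call stack
    invisible(replicate(50, sort(runif(1e5))))  # something to profile
    Rprof(NULL)                                 # stop profiling
    summaryRprof(tf)                            # summarise time by function
}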
# File src/library/utils/R/Sweave.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2014 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### The drivers are now in SweaveDrivers.R
### FIXMEs
### b) It would be nice to allow multiple 'grdevice' options
### Encodings (currently, different from 2.13.0)
###
### SweaveReadFile figures out an encoding, uses it (not currently for
### \SweaveInclude files) and returns it as an attribute. This is
### then passed as an attribute of 'file' to the driver's setup
### routine. Unless it is "" or "ASCII", the RweaveLatex driver
### re-encodes the output back to 'encoding': the Rtangle driver
### leaves it in the encoding of the current locale and records what
### that is in a comment. The "UTF-8" encoding is preserved on
### both input and output in RweaveLatex, but is handled like
### other encodings in Rtangle.
###
### SweaveReadFile first looks for a call to one of the LaTeX packages
### inputen[cx] and deduces the vignette encoding from that, falling
### back to the package encoding, then Latin-1 (with a warning). This
### should work OK provided the package encoding is Latin-1: if it is
### UTF-8 then LaTeX needs to be told what to do. It also assumes
### that R output is in the current locale: a package with a different
### encoding from the current one might have data in that package's
### encoding.
### Correspondence between input and output is maintained in two
### places: Each chunk has a srclines attribute, recording the input
### lines it corresponds to. Each code chunk will have attached
### srcrefs that duplicate the srclines. We don't need srclines for
### code, but we do need it for doc chunks, and it's easiest to just
### keep it for everything.
Stangle <- function(file, driver = Rtangle(),
syntax = getOption("SweaveSyntax"),
encoding = "", ...)
Sweave(file = file, driver = driver, encoding = encoding, ...)
Sweave <- function(file, driver = RweaveLatex(),
syntax = getOption("SweaveSyntax"),
encoding = "", ...)
{
if (is.character(driver)) driver <- get(driver, mode = "function")()
else if (is.function(driver)) driver <- driver()
if (is.null(syntax)) syntax <- SweaveGetSyntax(file) # from the extension
if (is.character(syntax)) syntax <- get(syntax, mode = "list")
if (.Platform$OS.type == "windows") file <- chartr("\\", "/", file)
text <- SweaveReadFile(file, syntax, encoding = encoding)
attr(file, "encoding") <- encoding <- attr(text, "encoding")
srcFilenames <- attr(text, "files")
srcFilenum <- attr(text, "srcFilenum")
srcLinenum <- attr(text, "srcLinenum")
## drobj$options is the current set of options for this file.
drobj <- driver$setup(file = file, syntax = syntax, ...)
on.exit(driver$finish(drobj, error = TRUE))
syntax <- attr(text, "syntax") # this is from the file commands.
if (!is.na(envopts <- Sys.getenv("SWEAVE_OPTIONS", NA)))
drobj$options <-
SweaveParseOptions(envopts, drobj$options, driver$checkopts)
drobj$filename <- file
mode <- "doc"
chunknr <- 0L
chunk <- NULL
chunkopts <- NULL
namedchunks <- list()
prevfilenum <- 0L
prevlinediff <- 0L
for (linenum in seq_along(text)) {
line <- text[linenum]
filenum <- srcFilenum[linenum]
linediff <- srcLinenum[linenum] - linenum
if(nzchar(Sys.getenv("R_DEBUG_Sweave"))) {
## Extensive logging for debugging, needs 'ls' (unix-like or Rtools):
cat(sprintf("l.%3d: %30s -'%4s'- ", linenum, substr(line,1,30), mode))
cat(sprintf("%16s\n", system(paste("ls -s",
summary(drobj$output)$description), intern=TRUE)))
}
if (length(grep(syntax$doc, line))) { # start new documentation chunk
if (mode == "doc") {
if (!is.null(chunk)) drobj <- driver$writedoc(drobj, chunk)
} else {
if (!is.null(chunkopts$label))
namedchunks[[chunkopts$label]] <- chunk
if (!is.null(chunk))
drobj <- driver$runcode(drobj, chunk, chunkopts)
mode <- "doc"
}
chunk <- NULL
} else if (length(grep(syntax$code, line))) { # start new code chunk
if (mode == "doc") {
if (!is.null(chunk)) drobj <- driver$writedoc(drobj, chunk)
} else {
if (!is.null(chunkopts$label))
namedchunks[[chunkopts$label]] <- chunk
if (!is.null(chunk))
drobj <- driver$runcode(drobj, chunk, chunkopts)
}
mode <- "code"
chunkopts <- sub(syntax$code, "\\1", line)
chunkopts <- SweaveParseOptions(chunkopts,
drobj$options,
driver$checkopts)
## these #line directives are used for error messages when parsing
file <- srcFilenames[filenum]
chunk <- paste0("#line ", linenum+linediff+1L, ' "', basename(file), '"')
attr(chunk, "srclines") <- linenum + linediff
attr(chunk, "srcFilenum") <- filenum
attr(chunk, "srcFilenames") <- srcFilenames
chunknr <- chunknr + 1L # this is really 'code chunk number'
chunkopts$chunknr <- chunknr
} else { # continuation of current chunk
if (mode == "code" && length(grep(syntax$coderef, line))) {
chunkref <- sub(syntax$coderef, "\\1", line)
if (!(chunkref %in% names(namedchunks))) {
## omit unknown references
warning(gettextf("reference to unknown chunk %s",
sQuote(chunkref)),
call. = TRUE,domain = NA)
next
} else {
## these #line directives are used for error messages
## when parsing
file <- srcFilenames[filenum]
line <- c(namedchunks[[chunkref]],
paste0("#line ", linenum+linediff+1L,
' "', basename(file), '"'))
}
}
if (mode == "code" &&
(prevfilenum != filenum ||
prevlinediff != linediff)) {
file <- srcFilenames[filenum]
line <- c(paste0("#line ", linenum+linediff, ' "', basename(file), '"'),
line)
}
srclines <- c(attr(chunk, "srclines"), rep(linenum+linediff, length(line)))
srcfilenum <- c(attr(chunk, "srcFilenum"), rep(filenum, length(line)))
chunk <- c(chunk, line)
attr(chunk, "srclines") <- srclines
attr(chunk, "srcFilenum") <- srcfilenum
attr(chunk, "srcFilenames") <- srcFilenames
}
prevfilenum <- filenum
prevlinediff <- linediff
}
if (!is.null(chunk)) { # write out final chunk
drobj <-
if (mode == "doc") driver$writedoc(drobj, chunk)
else driver$runcode(drobj, chunk, chunkopts)
}
on.exit() # clear action to finish with error = TRUE
drobj$srcFilenames <- srcFilenames
driver$finish(drobj)
}
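## Illustrative sketch (a hypothetical helper; "example.Rnw" is an assumed
## noweb-syntax file in the working directory):
.Sweave_example <- function() {
    Sweave("example.Rnw")   # weave: writes example.tex via the RweaveLatex driver
    Stangle("example.Rnw")  # tangle: extracts the code chunks into example.R
}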
SweaveReadFile <- function(file, syntax, encoding = "")
{
## file can be a vector to keep track of recursive calls to
## SweaveReadFile. In this case only the first element is
## tried to read in, the rest are forbidden names for further
## SweaveInput
f <- file[1L]
bf <- basename(f)
df <- dirname(f)
if (!file.exists(f)) {
f <- list.files(df, full.names = TRUE,
pattern = paste0(bf, syntax$extension))
if (length(f) == 0L)
stop(gettextf("no Sweave file with name %s found",
sQuote(file[1L])), domain = NA)
else if (length(f) > 1L)
stop(paste(sprintf(ngettext(length(f), "%d Sweave file for basename %s found",
"%d Sweave files for basename %s found",
domain = "R-utils"),
length(f), sQuote(file[1L])), paste(":\n ", f, collapse = "")),
domain = NA)
}
## An incomplete last line is not a real problem.
text <- readLines(f[1L], warn = FALSE)
srcLinenum <- seq_along(text)
if (encoding != "bytes") {
## now sort out an encoding, if needed.
enc <- tools:::.getVignetteEncoding(text, convert = TRUE)
if (enc == "non-ASCII") {
enc <- if (nzchar(encoding)) {
encoding
} else {
stop(sQuote(basename(file)),
" is not ASCII and does not declare an encoding",
domain = NA, call. = FALSE)
}
} else if (enc == "unknown") {
stop(sQuote(basename(file)),
" declares an encoding that Sweave does not know about",
domain = NA, call. = FALSE)
}
if (enc == "UTF-8")
Encoding(text) <- enc
else {
if (nzchar(enc)) text <- iconv(text, enc, "") else enc <- "ASCII"
}
} else enc <- "bytes"
pos <- grep(syntax$syntaxname, text)
if (length(pos) > 1L)
warning(gettextf("more than one syntax specification found, using the first one"),
domain = NA)
if (length(pos) > 0L) {
sname <- sub(syntax$syntaxname, "\\1", text[pos[1L]])
syntax <- get(sname, mode = "list")
if (!identical(class(syntax), "SweaveSyntax"))
stop(gettextf("object %s does not have class \"SweaveSyntax\"",
sQuote(sname)), domain = NA)
text <- text[-pos]
srcLinenum <- srcLinenum[-pos]
}
srcFilenum <- rep_len(1, length(srcLinenum))
if (!is.null(syntax$input)) {
while(length(pos <- grep(syntax$input, text))) {
pos <- pos[1L]
ifile <- file.path(df, sub(syntax$input, "\\1", text[pos]))
if (any(ifile == file)) {
stop(paste(gettextf("recursive Sweave input %s in stack",
sQuote(ifile)),
paste("\n ", seq_len(file), ": ",
rev(file), collapse="")),
domain = NA)
}
itext <- SweaveReadFile(c(ifile, file), syntax, encoding = encoding)
pre <- seq_len(pos-1L)
post <- seq_len(length(text) - pos) + pos
text <- c(text[pre], itext, text[post])
srcLinenum <- c(srcLinenum[pre], attr(itext, "srcLinenum"),
srcLinenum[post])
srcFilenum <- c(srcFilenum[pre], attr(itext, "srcFilenum")+length(f),
srcFilenum[post])
f <- c(f, attr(itext, "files"))
}
}
attr(text, "syntax") <- syntax
attr(text, "files") <- f
attr(text, "encoding") <- enc
attr(text, "srcLinenum") <- srcLinenum
attr(text, "srcFilenum") <- srcFilenum
text
}
###**********************************************************
## NB: } should not be escaped in [] .
SweaveSyntaxNoweb <-
list(doc = "^@",
code = "^<<(.*)>>=.*",
coderef = "^<<(.*)>>.*",
docopt = "^[[:space:]]*\\\\SweaveOpts\\{([^}]*)\\}",
docexpr = "\\\\Sexpr\\{([^}]*)\\}",
extension = "\\.[rsRS]?nw$",
syntaxname = "^[[:space:]]*\\\\SweaveSyntax\\{([^}]*)\\}",
input = "^[[:space:]]*\\\\SweaveInput\\{([^}]*)\\}",
trans = list(
doc = "@",
code = "<<\\1>>=",
coderef = "<<\\1>>",
docopt = "\\\\SweaveOpts{\\1}",
docexpr = "\\\\Sexpr{\\1}",
extension = ".Snw",
syntaxname = "\\\\SweaveSyntax{SweaveSyntaxNoweb}",
input = "\\\\SweaveInput{\\1}")
)
class(SweaveSyntaxNoweb) <- "SweaveSyntax"
SweaveSyntaxLatex <- SweaveSyntaxNoweb
SweaveSyntaxLatex$doc <- "^[[:space:]]*\\\\end\\{Scode\\}"
SweaveSyntaxLatex$code <- "^[[:space:]]*\\\\begin\\{Scode\\}\\{?([^}]*)\\}?.*"
SweaveSyntaxLatex$coderef <- "^[[:space:]]*\\\\Scoderef\\{([^}]*)\\}.*"
SweaveSyntaxLatex$extension <- "\\.[rsRS]tex$"
SweaveSyntaxLatex$trans$doc <- "\\\\end{Scode}"
SweaveSyntaxLatex$trans$code <- "\\\\begin{Scode}{\\1}"
SweaveSyntaxLatex$trans$coderef <- "\\\\Scoderef{\\1}"
SweaveSyntaxLatex$trans$syntaxname <- "\\\\SweaveSyntax{SweaveSyntaxLatex}"
SweaveSyntaxLatex$trans$extension <- ".Stex"
SweaveGetSyntax <- function(file)
{
synt <- apropos("SweaveSyntax", mode = "list")
for (sname in synt) {
s <- get(sname, mode = "list")
if (!identical(class(s), "SweaveSyntax")) next
if (length(grep(s$extension, file))) return(s)
}
SweaveSyntaxNoweb
}
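## Illustrative sketch (a hypothetical helper; the file names are assumed):
## the syntax object is chosen by matching its 'extension' regexp against the
## file name, falling back to the noweb syntax.
.SweaveGetSyntax_example <- function() {
    noweb <- identical(SweaveGetSyntax("vignette.Rnw"),  SweaveSyntaxNoweb)
    latex <- identical(SweaveGetSyntax("vignette.Rtex"), SweaveSyntaxLatex)
    c(noweb = noweb, latex = latex)  # both TRUE
}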
SweaveSyntConv <- function(file, syntax, output=NULL)
{
if (is.character(syntax)) syntax <- get(syntax)
if (!identical(class(syntax), "SweaveSyntax"))
stop(gettextf("target syntax not of class %s",
dQuote("SweaveSyntax")),
domain = NA)
if (is.null(syntax$trans))
stop("target syntax contains no translation table")
insynt <- SweaveGetSyntax(file)
text <- readLines(file)
if (is.null(output))
output <- sub(insynt$extension, syntax$trans$extension, basename(file))
TN <- names(syntax$trans)
for (n in TN)
if (n != "extension") text <- gsub(insynt[[n]], syntax$trans[[n]], text)
cat(text, file = output, sep = "\n")
cat("Wrote file", output, "\n")
}
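## Illustrative sketch (a hypothetical helper; "example.Rnw" is an assumed
## file): converting a noweb-syntax document to the LaTeX-style Scode syntax.
## With output = NULL the result gets the target extension, here ".Stex".
.SweaveSyntConv_example <- function()
    SweaveSyntConv("example.Rnw", SweaveSyntaxLatex)  # writes "example.Stex"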
###**********************************************************
## parses an option string, from
## - the header of a code chunk
## - an \SweaveOpts{} statement (strangely, left to the drivers)
## - the value of environment variable SWEAVE_OPTIONS
##
## The format is name=value pairs with whitespace being discarded
## (and could have been done all at once).
SweaveParseOptions <- function(text, defaults = list(), check = NULL)
{
x <- sub("^[[:space:]]*(.*)", "\\1", text)
x <- sub("(.*[^[:space:]])[[:space:]]*$", "\\1", x)
x <- unlist(strsplit(x, "[[:space:]]*,[[:space:]]*"))
x <- strsplit(x, "[[:space:]]*=[[:space:]]*")
## only the first option may have no name: the chunk label
if (length(x)) {
if (length(x[[1L]]) == 1L) x[[1L]] <- c("label", x[[1L]])
} else return(defaults)
if (any(sapply(x, length) != 2L))
stop(gettextf("parse error or empty option in\n%s", text), domain = NA)
options <- defaults
for (k in seq_along(x)) options[[ x[[k]][1L] ]] <- x[[k]][2L]
## This is undocumented
if (!is.null(options[["label"]]) && !is.null(options[["engine"]]))
options[["label"]] <-
sub(paste0("\\.", options[["engine"]], "$"),
"", options[["label"]])
if (!is.null(check)) check(options) else options
}
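## Illustrative sketch (a hypothetical helper): a chunk header such as
## <<myfig, fig=TRUE, width=5>>= is parsed into a named list; without a
## 'check' function the values remain character strings.
.SweaveParseOptions_example <- function() {
    ## returns list(fig = "TRUE", width = "5", label = "myfig")
    SweaveParseOptions("myfig, fig=TRUE, width=5",
                       defaults = list(fig = FALSE, width = 6))
}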
## really part of the RweaveLatex and Rtangle drivers
SweaveHooks <- function(options, run = FALSE, envir = .GlobalEnv)
{
if (is.null(SweaveHooks <- getOption("SweaveHooks"))) return(NULL)
z <- character()
for (k in names(SweaveHooks))
if (nzchar(k) && is.logical(options[[k]]) && options[[k]])
if (is.function(SweaveHooks[[k]])) {
z <- c(z, k)
if (run) eval(SweaveHooks[[k]](), envir=envir)
}
z # a character vector.
}
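## Illustrative sketch (a hypothetical helper): a hook is registered under the
## name of a logical chunk option and, with run = TRUE, is evaluated before
## each chunk that sets that option; e.g. shrinking the figure margins for
## every fig=TRUE chunk before calling Sweave():
.SweaveHooks_example <- function()
    options(SweaveHooks = list(fig = function() graphics::par(mar = c(4, 4, 1, 1))))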
### For R CMD xxxx ------------------------------------------
.Sweave <- function(args = NULL)
{
options(warn = 1)
if (is.null(args)) {
args <- commandArgs(TRUE)
args <- paste(args, collapse=" ")
args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
}
Usage <- function() {
cat("Usage: R CMD Sweave [options] file",
"",
"A front-end for Sweave and other vignette engines, via buildVignette()",
"",
"Options:",
" -h, --help print this help message and exit",
" -v, --version print version info and exit",
" --driver=name use named Sweave driver",
" --engine=pkg::engine use named vignette engine",
" --encoding=enc default encoding 'enc' for file",
" --clean corresponds to --clean=default",
" --clean= remove some of the created files:",
' "default" removes those the same initial name;',
' "keepOuts" keeps e.g. *.tex even when PDF is produced',
" --options= comma-separated list of Sweave/engine options",
" --pdf convert to PDF document",
" --compact= try to compact PDF document:",
' "no" (default), "qpdf", "gs", "gs+qpdf", "both"',
" --compact same as --compact=qpdf",
"",
"Report bugs at bugs.r-project.org .",
sep = "\n")
}
do_exit <- function(status = 0L)
q("no", status = status, runLast = FALSE)
if (!length(args)) {
Usage()
do_exit(1L)
}
file <- character()
driver <- encoding <- options <- ""
engine <- NULL
toPDF <- FALSE
compact <- Sys.getenv("_R_SWEAVE_COMPACT_PDF_", "no")
clean <- FALSE ## default!
while(length(args)) {
a <- args[1L]
if (a %in% c("-h", "--help")) {
Usage()
do_exit()
}
else if (a %in% c("-v", "--version")) {
cat("Sweave front-end: ",
R.version[["major"]], ".", R.version[["minor"]],
" (r", R.version[["svn rev"]], ")\n", sep = "")
cat("",
"Copyright (C) 2006-2014 The R Core Team.",
"This is free software; see the GNU General Public License version 2",
"or later for copying conditions. There is NO warranty.",
sep = "\n")
do_exit()
} else if (substr(a, 1, 9) == "--driver=") {
driver <- substr(a, 10, 1000)
} else if (substr(a, 1, 9) == "--engine=") {
engine <- substr(a, 10, 1000)
} else if (substr(a, 1, 11) == "--encoding=") {
encoding <- substr(a, 12, 1000)
} else if (a == "--clean") {
clean <- TRUE
} else if (substr(a, 1, 8) == "--clean=") {
clean. <- substr(a, 9, 1000)
clean <- switch(clean.,
"default" = TRUE,
"keepOuts" = NA,
message(gettextf("Warning: unknown option '--clean='%s",
clean.), domain = NA))
} else if (substr(a, 1, 10) == "--options=") {
options <- substr(a, 11, 1000)
} else if (a == "--pdf") {
toPDF <- TRUE
} else if (substr(a, 1, 10) == "--compact=") {
compact <- substr(a, 11, 1000)
} else if (a == "--compact") {
compact <- "qpdf"
} else if (substr(a, 1, 1) == "-") {
message(gettextf("Warning: unknown option %s", sQuote(a)),
domain = NA)
} else file <- c(file, a)
args <- args[-1L]
}
if(length(file) != 1L) {
Usage()
do_exit(1L)
}
args <- list(file=file, tangle=FALSE, latex=toPDF, engine=engine, clean=clean)
if(nzchar(driver)) args <- c(args, driver = driver)
args <- c(args, encoding = encoding)
if(nzchar(options)) {
opts <- eval(parse(text = paste("list(", options, ")")))
args <- c(args, opts)
}
output <- do.call(tools::buildVignette, args)
message("Output file: ", output)
if (toPDF && compact != "no"
&& length(output) == 1 && grepl(".pdf$", output, ignore.case=TRUE)) {
##
## Same code as used for --compact-vignettes in
## .build_packages() ...
message("Compacting PDF document")
if(compact %in% c("gs", "gs+qpdf", "both")) {
gs_cmd <- tools:::find_gs_cmd(Sys.getenv("R_GSCMD", ""))
gs_quality <- "ebook"
} else {
gs_cmd <- ""
gs_quality <- "none"
}
qpdf <- if(compact %in% c("qpdf", "gs+qpdf", "both"))
Sys.which(Sys.getenv("R_QPDF", "qpdf"))
else ""
res <- tools::compactPDF(output, qpdf = qpdf,
gs_cmd = gs_cmd,
gs_quality = gs_quality)
res <- format(res, diff = 1e5)
if(length(res))
message(paste(format(res), collapse = "\n"))
}
do_exit()
}
.Stangle <- function(args = NULL)
{
options(warn = 1)
if (is.null(args)) {
args <- commandArgs(TRUE)
args <- paste(args, collapse=" ")
args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
}
Usage <- function() {
cat("Usage: R CMD Stangle file",
"",
"A front-end for Stangle and other vignette engines",
"",
"Options:",
" -h, --help print this help message and exit",
" -v, --version print version info and exit",
" --engine=pkg::engine use named vignette engine",
" --encoding=enc assume encoding 'enc' for file",
" --options= comma-separated list of Stangle options",
"",
"Report bugs at bugs@r-project.org .",
sep = "\n")
}
do_exit <- function(status = 0L)
q("no", status = status, runLast = FALSE)
if (!length(args)) {
Usage()
do_exit(1L)
}
file <- character()
encoding <- options <- ""
engine <- NULL
while(length(args)) {
a <- args[1L]
if (a %in% c("-h", "--help")) {
Usage()
do_exit()
}
else if (a %in% c("-v", "--version")) {
cat("Stangle front-end: ",
R.version[["major"]], ".", R.version[["minor"]],
" (r", R.version[["svn rev"]], ")\n", sep = "")
cat("",
"Copyright (C) 2006-2011 The R Core Team.",
"This is free software; see the GNU General Public License version 2",
"or later for copying conditions. There is NO warranty.",
sep = "\n")
do_exit()
} else if (substr(a, 1, 9) == "--engine=") {
engine <- substr(a, 10, 1000)
} else if (substr(a, 1, 11) == "--encoding=") {
encoding <- substr(a, 12, 1000)
} else if (substr(a, 1, 10) == "--options=") {
options <- substr(a, 11, 1000)
} else if (substr(a, 1, 1) == "-") {
message(gettextf("Warning: unknown option %s", sQuote(a)),
domain = NA)
} else file <- c(file, a)
args <- args[-1L]
}
if(length(file) != 1L) {
Usage()
do_exit(1L)
}
args <- list(file=file, tangle=TRUE, weave=FALSE, engine=engine,
encoding=encoding)
if(nzchar(options)) {
opts <- eval(parse(text = paste("list(", options, ")")))
args <- c(args, opts)
}
output <- do.call(tools::buildVignette, args)
message("Output file: ", output)
do_exit()
}
# File src/library/utils/R/SweaveDrivers.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
RweaveLatex <- function()
{
list(setup = RweaveLatexSetup,
runcode = RweaveLatexRuncode,
writedoc = RweaveLatexWritedoc,
finish = RweaveLatexFinish,
checkopts = RweaveLatexOptions)
}
## We definitely do not want '.' in here, to avoid misidentification
## of file extensions. Note that - is used literally here.
.SweaveValidFilenameRegexp <- "^[[:alnum:]/#+_-]+$"
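## Illustrative sketch (a hypothetical helper): which prefix strings pass the
## check above: letters, digits and '/', '#', '+', '_', '-' are allowed.
.SweaveValidFilename_example <- function()
    grepl(.SweaveValidFilenameRegexp,
          c("figs/plot-1",  # TRUE
            "my figure",    # FALSE: contains a space
            "fig.1"))       # FALSE: '.' is deliberately excluded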
RweaveLatexSetup <-
function(file, syntax, output = NULL, quiet = FALSE, debug = FALSE,
stylepath, ...)
{
dots <- list(...)
if (is.null(output)) {
prefix.string <- basename(sub(syntax$extension, "", file))
output <- paste(prefix.string, "tex", sep = ".")
} else prefix.string <- basename(sub("\\.tex$", "", output))
if (!quiet) cat("Writing to file ", output, "\n",
"Processing code chunks with options ...\n", sep = "")
encoding <- attr(file, "encoding")
if (encoding %in% c("ASCII", "bytes")) encoding <- ""
output <- file(output, open = "w", encoding = encoding)
if (missing(stylepath)) {
p <- Sys.getenv("SWEAVE_STYLEPATH_DEFAULT")
stylepath <-
if (length(p) >= 1L && nzchar(p[1L])) identical(p, "TRUE") else FALSE
}
if (stylepath) {
styfile <- file.path(R.home("share"), "texmf", "tex", "latex", "Sweave")
if (.Platform$OS.type == "windows")
styfile <- chartr("\\", "/", styfile)
if (length(grep(" ", styfile)))
warning(gettextf("path to %s contains spaces,\n", sQuote(styfile)),
gettext("this may cause problems when running LaTeX"),
domain = NA)
} else styfile <- "Sweave"
options <- list(prefix = TRUE, prefix.string = prefix.string,
engine = "R", print = FALSE, eval = TRUE, fig = FALSE,
pdf = TRUE, eps = FALSE, png = FALSE, jpeg = FALSE,
grdevice = "", width = 6, height = 6, resolution = 300,
term = TRUE, echo = TRUE, keep.source = TRUE,
results = "verbatim",
split = FALSE, strip.white = "true", include = TRUE,
pdf.version = grDevices::pdf.options()$version,
pdf.encoding = grDevices::pdf.options()$encoding,
pdf.compress = grDevices::pdf.options()$compress,
expand = TRUE, # unused by us, for 'highlight'
concordance = FALSE, figs.only = TRUE)
options$.defaults <- options
options[names(dots)] <- dots
## to be on the safe side: see if defaults pass the check
options <- RweaveLatexOptions(options)
list(output = output, styfile = styfile, havesty = FALSE,
haveconcordance = FALSE, debug = debug, quiet = quiet,
syntax = syntax, options = options,
chunkout = list(), # a list of open connections
srclines = integer())
}
makeRweaveLatexCodeRunner <- function(evalFunc = RweaveEvalWithOpt)
{
## Return a function suitable as the 'runcode' element
## of an Sweave driver. evalFunc will be used for the
## actual evaluation of chunk code.
## FIXME: well, actually not for the figures.
## If there were just one figure option set, we could eval the chunk
## only once.
function(object, chunk, options) {
pdf.Swd <- function(name, width, height, ...)
grDevices::pdf(file = paste(chunkprefix, "pdf", sep = "."),
width = width, height = height,
version = options$pdf.version,
encoding = options$pdf.encoding,
compress = options$pdf.compress)
eps.Swd <- function(name, width, height, ...)
grDevices::postscript(file = paste(name, "eps", sep = "."),
width = width, height = height,
paper = "special", horizontal = FALSE)
png.Swd <- function(name, width, height, options, ...)
grDevices::png(filename = paste(chunkprefix, "png", sep = "."),
width = width, height = height,
res = options$resolution, units = "in")
jpeg.Swd <- function(name, width, height, options, ...)
grDevices::jpeg(filename = paste(chunkprefix, "jpeg", sep = "."),
width = width, height = height,
res = options$resolution, units = "in")
if (!(options$engine %in% c("R", "S"))) return(object)
devs <- devoffs <- list()
if (options$fig && options$eval) {
if (options$pdf) {
devs <- c(devs, list(pdf.Swd))
devoffs <- c(devoffs, list(grDevices::dev.off))
}
if (options$eps) {
devs <- c(devs, list(eps.Swd))
devoffs <- c(devoffs, list(grDevices::dev.off))
}
if (options$png) {
devs <- c(devs, list(png.Swd))
devoffs <- c(devoffs, list(grDevices::dev.off))
}
if (options$jpeg) {
devs <- c(devs, list(jpeg.Swd))
devoffs <- c(devoffs, list(grDevices::dev.off))
}
if (nzchar(grd <- options$grdevice)) {
devs <- c(devs, list(get(grd, envir = .GlobalEnv)))
grdo <- paste(grd, "off", sep = ".")
devoffs <- c(devoffs,
if (exists(grdo, envir = .GlobalEnv))
list(get(grdo, envir = .GlobalEnv))
else list(grDevices::dev.off))
}
}
if (!object$quiet) {
cat(formatC(options$chunknr, width = 2), ":")
if (options$echo) cat(" echo")
if (options$keep.source) cat(" keep.source")
if (options$eval) {
if (options$print) cat(" print")
if (options$term) cat(" term")
cat("", options$results)
if (options$fig) {
if (options$eps) cat(" eps")
if (options$pdf) cat(" pdf")
if (options$png) cat(" png")
if (options$jpeg) cat(" jpeg")
if (!is.null(options$grdevice)) cat("", options$grdevice)
}
}
cat(" (")
if (!is.null(options$label))
cat("label = ", options$label, ", ", sep = "")
filenum <- attr(chunk, "srcFilenum")[1]
filename <- attr(chunk, "srcFilenames")[filenum]
cat(basename(filename), ":", attr(chunk, "srclines")[1], ")", sep = "")
cat("\n")
}
chunkprefix <- RweaveChunkPrefix(options)
if (options$split) {
## [x][[1L]] avoids partial matching of x
chunkout <- object$chunkout[chunkprefix][[1L]]
if (is.null(chunkout)) {
chunkout <- file(paste(chunkprefix, "tex", sep = "."), "w")
if (!is.null(options$label))
object$chunkout[[chunkprefix]] <- chunkout
if(!grepl(.SweaveValidFilenameRegexp, chunkprefix))
warning("file stem ", sQuote(chunkprefix), " is not portable",
call. = FALSE, domain = NA)
}
} else chunkout <- object$output
srcfile <- srcfilecopy(object$filename, chunk, isFile = TRUE)
## Note that we edit the error message below, so change both
## if you change this line:
chunkexps <- try(parse(text = chunk, srcfile = srcfile), silent = TRUE)
if (inherits(chunkexps, "try-error"))
chunkexps[1L] <- sub(" parse(text = chunk, srcfile = srcfile) : \n ",
"", chunkexps[1L], fixed = TRUE)
RweaveTryStop(chunkexps, options)
## Some worker functions used below...
putSinput <- function(dce, leading) {
if (!openSinput) {
if (!openSchunk) {
cat("\\begin{Schunk}\n", file = chunkout)
linesout[thisline + 1L] <<- srcline
filenumout[thisline + 1L] <<- srcfilenum
thisline <<- thisline + 1L
openSchunk <<- TRUE
}
cat("\\begin{Sinput}", file = chunkout)
openSinput <<- TRUE
}
leading <- max(leading, 1L) # safety check
cat("\n", paste(getOption("prompt"), dce[seq_len(leading)],
sep = "", collapse = "\n"),
file = chunkout, sep = "")
if (length(dce) > leading)
cat("\n", paste(getOption("continue"), dce[-seq_len(leading)],
sep = "", collapse = "\n"),
file = chunkout, sep = "")
linesout[thisline + seq_along(dce)] <<- srcline
filenumout[thisline + seq_along(dce)] <<- srcfilenum
thisline <<- thisline + length(dce)
}
trySrcLines <- function(srcfile, showfrom, showto, ce) {
lines <- tryCatch(suppressWarnings(getSrcLines(srcfile, showfrom, showto)),
error = function(e)e)
if (inherits(lines, "error")) {
lines <- if (is.null(ce)) character()
else deparse(ce, width.cutoff = 0.75*getOption("width"))
}
lines
}
echoComments <- function(showto) {
if (options$echo && !is.na(lastshown) && lastshown < showto) {
dce <- trySrcLines(srcfile, lastshown + 1L, showto, NULL)
linedirs <- grepl("^#line ", dce)
dce <- dce[!linedirs]
if (length(dce))
putSinput(dce, length(dce)) # These are all trailing comments
lastshown <<- showto
}
}
openSinput <- FALSE
openSchunk <- FALSE
srclines <- attr(chunk, "srclines")
srcfilenums <- attr(chunk, "srcFilenum")
linesout <- integer() # maintains concordance
filenumout <- integer() # ditto
srcline <- srclines[1L] # current input line
srcfilenum <- srcfilenums[1L] # from this file
thisline <- 0L # current output line
lastshown <- 0L # last line already displayed;
refline <- NA # line containing the current named chunk ref
leading <- 1L # How many lines get the user prompt
srcrefs <- attr(chunkexps, "srcref")
if (length(devs)) {
if(!grepl(.SweaveValidFilenameRegexp, chunkprefix))
warning("file stem ", sQuote(chunkprefix), " is not portable",
call. = FALSE, domain = NA)
if (options$figs.only)
devs[[1L]](name = chunkprefix,
width = options$width, height = options$height,
options)
}
SweaveHooks(options, run = TRUE)
for (nce in seq_along(chunkexps)) {
ce <- chunkexps[[nce]]
if (options$keep.source && nce <= length(srcrefs) &&
!is.null(srcref <- srcrefs[[nce]])) {
showfrom <- srcref[7L]
showto <- srcref[8L]
dce <- trySrcLines(srcfile, lastshown+1L, showto, ce)
leading <- showfrom - lastshown
lastshown <- showto
srcline <- srcref[3L]
linedirs <- grepl("^#line ", dce)
dce <- dce[!linedirs]
# Need to reduce leading lines if some were just removed
leading <- leading - sum(linedirs[seq_len(leading)])
while (length(dce) && length(grep("^[[:blank:]]*$", dce[1L]))) {
dce <- dce[-1L]
leading <- leading - 1L
}
} else {
dce <- deparse(ce, width.cutoff = 0.75*getOption("width"))
leading <- 1L
}
if (object$debug)
cat("\nRnw> ", paste(dce, collapse = "\n+ "),"\n")
if (options$echo && length(dce)) putSinput(dce, leading)
## avoid the limitations (and overhead) of output text connections
if (options$eval) {
tmpcon <- file()
sink(file = tmpcon)
err <- tryCatch(evalFunc(ce, options), finally = {
cat("\n") # make sure final line is complete
sink()
})
output <- readLines(tmpcon)
close(tmpcon)
## delete empty output
if (length(output) == 1L && !nzchar(output[1L])) output <- NULL
RweaveTryStop(err, options)
} else output <- NULL
## or writeLines(output)
if (length(output) && object$debug)
cat(paste(output, collapse = "\n"))
if (length(output) && (options$results != "hide")) {
if (openSinput) {
cat("\n\\end{Sinput}\n", file = chunkout)
linesout[thisline + 1L:2L] <- srcline
filenumout[thisline + 1L:2L] <- srcfilenum
thisline <- thisline + 2L
openSinput <- FALSE
}
if (options$results == "verbatim") {
if (!openSchunk) {
cat("\\begin{Schunk}\n", file = chunkout)
linesout[thisline + 1L] <- srcline
filenumout[thisline + 1L] <- srcfilenum
thisline <- thisline + 1L
openSchunk <- TRUE
}
cat("\\begin{Soutput}\n", file = chunkout)
linesout[thisline + 1L] <- srcline
filenumout[thisline + 1L] <- srcfilenum
thisline <- thisline + 1L
}
output <- paste(output, collapse = "\n")
if (options$strip.white %in% c("all", "true")) {
output <- sub("^[[:space:]]*\n", "", output)
output <- sub("\n[[:space:]]*$", "", output)
if (options$strip.white == "all")
output <- sub("\n[[:space:]]*\n", "\n", output)
}
cat(output, file = chunkout)
count <- sum(strsplit(output, NULL)[[1L]] == "\n")
if (count > 0L) {
linesout[thisline + 1L:count] <- srcline
filenumout[thisline + 1L:count] <- srcfilenum
thisline <- thisline + count
}
remove(output)
if (options$results == "verbatim") {
cat("\n\\end{Soutput}\n", file = chunkout)
linesout[thisline + 1L:2L] <- srcline
filenumout[thisline + 1L:2L] <- srcfilenum
thisline <- thisline + 2L
}
}
} # end of loop over chunkexps.
## Echo remaining comments if necessary
if (options$keep.source) echoComments(length(srcfile$lines))
if (openSinput) {
cat("\n\\end{Sinput}\n", file = chunkout)
linesout[thisline + 1L:2L] <- srcline
filenumout[thisline + 1L:2L] <- srcfilenum
thisline <- thisline + 2L
}
if (openSchunk) {
cat("\\end{Schunk}\n", file = chunkout)
linesout[thisline + 1L] <- srcline
filenumout[thisline + 1L] <- srcfilenum
thisline <- thisline + 1L
}
if (is.null(options$label) && options$split) close(chunkout)
if (options$split && options$include) {
cat("\\input{", chunkprefix, "}\n", sep = "", file = object$output)
linesout[thisline + 1L] <- srcline
filenumout[thisline + 1L] <- srcfilenum
thisline <- thisline + 1L
}
if (length(devs)) {
if (options$figs.only) devoffs[[1L]]()
for (i in seq_along(devs)) {
if (options$figs.only && i == 1) next
devs[[i]](name = chunkprefix, width = options$width,
height = options$height, options)
err <- tryCatch({
SweaveHooks(options, run = TRUE)
eval(chunkexps, envir = .GlobalEnv)
}, error = function(e) {
devoffs[[i]]()
stop(conditionMessage(e), call. = FALSE, domain = NA)
})
devoffs[[i]]()
}
if (options$include) {
cat("\\includegraphics{", chunkprefix, "}\n", sep = "",
file = object$output)
linesout[thisline + 1L] <- srcline
filenumout[thisline + 1L] <- srcfilenum
thisline <- thisline + 1L
}
}
object$linesout <- c(object$linesout, linesout)
object$filenumout <- c(object$filenumout, filenumout)
object
}
}
RweaveLatexRuncode <- makeRweaveLatexCodeRunner()
RweaveLatexWritedoc <- function(object, chunk)
{
linesout <- attr(chunk, "srclines")
filenumout <- attr(chunk, "srcFilenum")
if (length(grep("\\usepackage[^\\}]*Sweave.*\\}", chunk)))
object$havesty <- TRUE
if (!object$havesty) {
begindoc <- "^[[:space:]]*\\\\begin\\{document\\}"
which <- grep(begindoc, chunk)
if (length(which)) {
chunk[which] <- sub(begindoc,
paste("\\\\usepackage{",
object$styfile,
"}\n\\\\begin{document}", sep = ""),
chunk[which])
idx <- c(1L:which, which, seq(from = which+1L,
length.out = length(linesout)-which))
linesout <- linesout[idx]
filenumout <- filenumout[idx]
object$havesty <- TRUE
}
}
while(length(pos <- grep(object$syntax$docexpr, chunk)))
{
cmdloc <- regexpr(object$syntax$docexpr, chunk[pos[1L]])
cmd <- substr(chunk[pos[1L]], cmdloc,
cmdloc + attr(cmdloc, "match.length") - 1L)
cmd <- sub(object$syntax$docexpr, "\\1", cmd)
if (object$options$eval) {
val <- tryCatch(as.character(eval(parse(text = cmd), envir = .GlobalEnv)),
error = function(e) {
filenum <- attr(chunk, "srcFilenum")[pos[1L]]
filename <- attr(chunk, "srcFilenames")[filenum]
location <- paste0(basename(filename), ":", attr(chunk, "srclines")[pos[1L]])
stop("at ",location, ", ", conditionMessage(e), call. = FALSE)
})
## protect against character(), because sub() will fail
if (length(val) == 0L) val <- ""
}
else val <- paste0("\\\\verb#<<", cmd, ">>#")
## it's always debatable what \verb delim-character to use;
## originally had '{' but that really can mess up LaTeX
chunk[pos[1L]] <- sub(object$syntax$docexpr, val, chunk[pos[1L]])
}
## Process \SweaveOpts{} or similar
## Since they are only supposed to affect code chunks, it is OK
## to process all such in a doc chunk at once.
while(length(pos <- grep(object$syntax$docopt, chunk)))
{
opts <- sub(paste0(".*", object$syntax$docopt, ".*"),
"\\1", chunk[pos[1L]])
object$options <- SweaveParseOptions(opts, object$options,
RweaveLatexOptions)
if (isTRUE(object$options$concordance)
&& !object$haveconcordance) {
savelabel <- object$options$label
object$options$label <- "concordance"
prefix <- RweaveChunkPrefix(object$options)
object$options$label <- savelabel
object$concordfile <- paste(prefix, "tex", sep = ".")
chunk[pos[1L]] <- sub(object$syntax$docopt,
paste0("\\\\input{", prefix, "}"),
chunk[pos[1L]])
object$haveconcordance <- TRUE
} else
chunk[pos[1L]] <- sub(object$syntax$docopt, "", chunk[pos[1L]])
}
cat(chunk, sep = "\n", file = object$output)
object$linesout <- c(object$linesout, linesout)
object$filenumout <- c(object$filenumout, filenumout)
object
}
RweaveLatexFinish <- function(object, error = FALSE)
{
outputname <- summary(object$output)$description
if (!object$quiet && !error) {
if(!file.exists(outputname))
stop(gettextf("the output file '%s' has disappeared", outputname))
cat("\n",
sprintf("You can now run (pdf)latex on %s", sQuote(outputname)),
"\n", sep = "")
}
close(object$output)
if (length(object$chunkout))
for (con in object$chunkout) close(con)
if (object$haveconcordance) {
## This output format is subject to change. Currently it contains
## three or four parts, separated by colons:
## 1. The output .tex filename
## 2. The input .Rnw filename
## 3. Optionally, the starting line number of the output coded as "ofs nn",
## where nn is the offset to the first output line. This is omitted if nn is 0.
## 4. The input line numbers corresponding to each output line.
## These are compressed using the following simple scheme:
## The first line number, followed by
## a run-length encoded diff of the rest of the line numbers.
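## As a rough illustration (hypothetical file names and numbers), a record
##   \Sconcordance{concordance:foo.tex:foo.Rnw:%
##   1 5 1 2 3 4 1}
## says that the first output line comes from input line 1 and that the
## diffs of the input line numbers for the following output lines are
## five 1s, then two 3s, then four 1s (run lengths alternate with values).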
linesout <- object$linesout
filenumout <- object$filenumout
filenames <- object$srcFilenames[filenumout]
if (!is.null(filenames)) { # Might be NULL if an error occurred
filegps <- rle(filenames)
offset <- 0L
for (i in seq_along(filegps$lengths)) {
len <- filegps$lengths[i]
inputname <- filegps$values[i]
vals <- rle(diff(linesout[offset + seq_len(len)]))
vals <- c(linesout[offset + 1L], as.numeric(rbind(vals$lengths, vals$values)))
concordance <- paste(strwrap(paste(vals, collapse = " ")), collapse = " %\n")
special <- paste0("\\Sconcordance{concordance:", outputname, ":",
inputname, ":",
if (offset) paste0("ofs ", offset, ":") else "",
"%\n",
concordance,"}\n")
cat(special, file = object$concordfile, append=offset > 0L)
offset <- offset + len
}
}
}
invisible(outputname)
}
## This is the check function for both RweaveLatex and Rtangle drivers
RweaveLatexOptions <- function(options)
{
defaults <- options[[".defaults"]]
## convert a character string to logical
c2l <- function(x)
if (is.null(x)) FALSE else suppressWarnings(as.logical(x))
## numeric
NUMOPTS <- c("width", "height", "resolution")
## character: largely for safety, but 'label' matters as there
## is no default (and someone uses "F")
CHAROPTS <- c("results", "prefix.string", "engine", "label",
"strip.white", "pdf.version", "pdf.encoding", "grdevice")
for (opt in names(options)) {
if(opt == ".defaults") next
oldval <- options[[opt]]
defval <- defaults[[opt]]
if(opt %in% CHAROPTS || is.character(defval)) {
} else if(is.logical(defval))
options[[opt]] <- c2l(oldval)
else if(opt %in% NUMOPTS || is.numeric(defval))
options[[opt]] <- as.numeric(oldval)
else if(!is.na(newval <- c2l(oldval)))
options[[opt]] <- newval
else if(!is.na(newval <- suppressWarnings(as.numeric(oldval))))
options[[opt]] <- newval
if (is.na(options[[opt]]))
stop(gettextf("invalid value for %s : %s", sQuote(opt), oldval),
domain = NA)
}
if (!is.null(options$results)) {
res <- as.character(options$results)
if(tolower(res) != res) # documented as lower-case
warning("value of 'results' option should be lowercase",
call. = FALSE)
options$results <- tolower(res)
}
options$results <- match.arg(options$results, c("verbatim", "tex", "hide"))
if (!is.null(options$strip.white)) {
res <- as.character(options$strip.white)
if(tolower(res) != res)
warning("value of 'strip.white' option should be lowercase",
call. = FALSE)
options$strip.white <- tolower(res)
}
options$strip.white <-
match.arg(options$strip.white, c("true", "false", "all"))
options
}
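## A rough sketch of the coercion (hypothetical chunk options, 'object'
## being the driver state set up in RweaveLatexSetup()):
##   opts <- SweaveParseOptions("fig=TRUE, width=5", object$options,
##                              RweaveLatexOptions)
##   opts$fig    # logical TRUE (was the character string "TRUE")
##   opts$width  # numeric 5    (was the character string "5")
## An unrecognized value such as eval=maybe would signal an error here.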
RweaveChunkPrefix <- function(options)
{
if (!is.null(options$label)) {
if (options$prefix)
paste0(options$prefix.string, "-", options$label)
else
options$label
} else
paste0(options$prefix.string, "-",
formatC(options$chunknr, flag = "0", width = 3))
}
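## E.g. with prefix.string "foo" (hypothetical values): a chunk labelled
## "bar" gives "foo-bar", and an unlabelled chunk number 7 gives "foo-007".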
RweaveEvalWithOpt <- function (expr, options)
{
if (options$eval) {
## Note: try() as opposed to tryCatch() for back compatibility;
## and RweaveTryStop() will work with it
res <- try(withVisible(eval(expr, .GlobalEnv)), silent = TRUE)
if (inherits(res, "try-error")) return(res)
if (options$print || (options$term && res$visible)) {
if (.isMethodsDispatchOn() && isS4(res$value))
methods:::show(res$value) else print(res$value)
}
}
res
}
RweaveTryStop <- function(err, options)
{
if (inherits(err, "try-error")) { ## from RweaveEvalWithOpt()
cat("\n")
msg <- paste(" chunk", options$chunknr)
if (!is.null(options$label))
msg <- paste0(msg, " (label = ", options$label, ")")
msg <- paste(msg, "\n")
stop(msg, err, call. = FALSE)
}
}
###------------------------------------------------------------------------
Rtangle <- function()
{
list(setup = RtangleSetup,
runcode = RtangleRuncode,
writedoc = RtangleWritedoc,
finish = RtangleFinish,
checkopts = RweaveLatexOptions)
}
RtangleSetup <-
function(file, syntax, output = NULL, annotate = TRUE, split = FALSE,
quiet = FALSE, ...)
{
dots <- list(...)
if (is.null(output)) {
prefix.string <- basename(sub(syntax$extension, "", file))
## This is odd, since for split = TRUE it uses the engine name.
output <- paste(prefix.string, "R", sep = ".")
} else
prefix.string <- basename(sub("\\.[rsRS]$", "", output))
if (!split) {
if (identical(output, "stdout")) output <- stdout()
else if (identical(output, "stderr")) output <- stderr()
else {
if (!quiet) cat("Writing to file", output, "\n")
## We could at some future point try to write the file in
## 'encoding'.
output <- file(output, open = "w")
}
lines <- c(sprintf("R code from vignette source '%s'", file),
if(attr(file, "encoding") != "ASCII")
sprintf("Encoding: %s", localeToCharset()[1L])
)
lines <- c(paste("###", lines), "")
writeLines(lines, output)
} else {
if (!quiet) cat("Writing chunks to files ...\n")
output <- NULL
}
options <- list(split = split, prefix = TRUE,
prefix.string = prefix.string,
engine = "R", eval = TRUE,
show.line.nos = FALSE)
options$.defaults <- options
options[names(dots)] <- dots
## to be on the safe side: see if defaults pass the check
options <- RweaveLatexOptions(options)
list(output = output, annotate = annotate, options = options,
chunkout = list(), quiet = quiet, syntax = syntax)
}
RtangleRuncode <- function(object, chunk, options)
{
if (!(options$engine %in% c("R", "S"))) return(object)
chunkprefix <- RweaveChunkPrefix(options)
if (options$split) {
if(!grepl(.SweaveValidFilenameRegexp, chunkprefix))
warning("file stem ", sQuote(chunkprefix), " is not portable",
call. = FALSE, domain = NA)
outfile <- paste(chunkprefix, options$engine, sep = ".")
if (!object$quiet) cat(options$chunknr, ":", outfile,"\n")
## [x][[1L]] avoids partial matching of x
chunkout <- object$chunkout[chunkprefix][[1L]]
if (is.null(chunkout)) {
chunkout <- file(outfile, "w")
if (!is.null(options$label))
object$chunkout[[chunkprefix]] <- chunkout
}
} else
chunkout <- object$output
if (object$annotate) {
lnos <- grep("^#line ", chunk, value = TRUE)
if(length(lnos)) {
srclines <- attr(chunk, "srclines")
srcfilenum <- attr(chunk, "srcFilenum")
## this currently includes the chunk header
lno <- if (length(srclines)) paste(min(srclines), max(srclines), sep = "-") else srclines
fn <- sub('[^"]*"([^"]+).*', "\\1", lnos[1L])
}
cat("###################################################\n",
"### code chunk number ", options$chunknr,
": ",
if(!is.null(options$label)) options$label
else paste(fn, lno, sep = ":"),
ifelse(options$eval, "", " (eval = FALSE)"), "\n",
"###################################################\n",
file = chunkout, sep = "")
}
## The next returns a character vector of the logical options
## which are true and have hooks set.
hooks <- SweaveHooks(options, run = FALSE)
for (k in hooks)
cat("getOption(\"SweaveHooks\")[[\"", k, "\"]]()\n",
file = chunkout, sep = "")
if (!options$show.line.nos)
chunk <- grep("^#line ", chunk, value = TRUE, invert = TRUE)
if (!options$eval) chunk <- paste("##", chunk)
cat(chunk, "\n", file = chunkout, sep = "\n")
if (is.null(options$label) && options$split) close(chunkout)
object
}
RtangleWritedoc <- function(object, chunk)
{
while(length(pos <- grep(object$syntax$docopt, chunk))) {
opts <- sub(paste0(".*", object$syntax$docopt, ".*"),
"\\1", chunk[pos[1L]])
object$options <- SweaveParseOptions(opts, object$options,
RweaveLatexOptions)
chunk[pos[1L]] <- sub(object$syntax$docopt, "", chunk[pos[1L]])
}
object
}
RtangleFinish <- function(object, error = FALSE)
{
## might be stdout() or stderr()
if (!is.null(object$output) && object$output >= 3)
close(object$output)
if (length(object$chunkout))
for (con in object$chunkout) close(con)
}
# File src/library/utils/R/URLencode.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
URLencode <- function(URL, reserved = FALSE)
{
## It is unsafe to use ranges here as collation is locale-dependent.
## We want to do this on characters and not on bytes.
OK <- paste0("[^-ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz0123456789$_.+!*'(),",
if(!reserved) ";/?:@=&", "]")
x <- strsplit(URL, "")[[1L]]
z <- grep(OK, x)
if(length(z)) {
y <- sapply(x[z], function(x)
paste0("%", as.character(charToRaw(x)), collapse = ""))
x[z] <- y
}
paste(x, collapse="")
}
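## A small round-trip illustration:
##   URLencode("a b", reserved = TRUE)  # "a%20b"
##   URLdecode("a%20b")                 # "a b"  (see URLdecode() below)
## Note that the escapes use the lower-case hex digits that charToRaw()
## produces.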
URLdecode <- function(URL)
{
x <- charToRaw(URL)
pc <- charToRaw("%")
out <- raw(0L)
i <- 1L
while(i <= length(x)) {
if(x[i] != pc) {
out <- c(out, x[i])
i <- i + 1L
} else {
y <- as.integer(x[i + 1L:2L])
y[y > 96L] <- y[y > 96L] - 32L # a-f -> A-F
y[y > 57L] <- y[y > 57L] - 7L # A-F
y <- sum((y - 48L) * c(16L, 1L))
out <- c(out, as.raw(as.character(y)))
i <- i + 3L
}
}
rawToChar(out)
}
# File src/library/utils/R/adist.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
adist <-
function(x, y = NULL, costs = NULL, counts = FALSE, fixed = TRUE,
partial = !fixed, ignore.case = FALSE, useBytes = FALSE)
{
bytesToInt <- function(x) {
if(is.na(x)) return(NA_integer_)
as.integer(charToRaw(x))
}
costs <- .amatch_costs(costs)
nmx <- names(x)
x <- as.character(x)
names(x) <- nmx
if(!is.null(y)) {
nmy <- names(y)
y <- as.character(y)
names(y) <- nmy
}
if(!identical(fixed, FALSE) && !identical(partial, TRUE)) {
ex <- Encoding(x)
useBytes <- identical(useBytes, TRUE) || any(ex == "bytes")
if(!is.null(y)) {
ey <- Encoding(y)
useBytes <- useBytes || any(ey == "bytes")
}
if(useBytes) {
x <- lapply(x, bytesToInt)
y <- if(is.null(y)) {
x
} else {
lapply(y, bytesToInt)
}
} else {
ignore.case <- identical(ignore.case, TRUE)
x <- if(ignore.case) {
lapply(tolower(enc2utf8(x)), utf8ToInt)
} else {
lapply(enc2utf8(x), utf8ToInt)
}
y <- if(is.null(y)) {
x
} else if(ignore.case) {
lapply(tolower(enc2utf8(y)), utf8ToInt)
} else {
lapply(enc2utf8(y), utf8ToInt)
}
}
}
else {
if(is.null(y)) {
y <- x
}
## TRE needs integer costs: coerce here for simplicity.
costs <- as.integer(costs)
}
.Internal(adist(x, y, costs, counts, fixed, partial, ignore.case,
useBytes))
}
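## E.g. adist("kitten", "sitting") gives 3 (two substitutions and one
## insertion); with counts = TRUE the result additionally carries a
## "counts" attribute with the insertion/deletion/substitution breakdown.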
aregexec <-
function(pattern, text, max.distance = 0.1, costs = NULL,
ignore.case = FALSE, fixed = FALSE, useBytes = FALSE)
{
## TRE needs integer costs: coerce here for simplicity.
costs <- as.integer(.amatch_costs(costs))
bounds <- .amatch_bounds(max.distance)
.Internal(aregexec(as.character(pattern),
as.character(text),
bounds, costs, ignore.case, fixed, useBytes))
}
## No longer used by adist(), but could be more generally useful ...
regquote <-
function(x)
gsub("([*.?+^&\\[])", "\\\\\\1", x)
# File src/library/utils/R/alarm.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
alarm <- function() {cat("\a"); flush.console()}
# File src/library/utils/R/apropos.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
apropos <- function (what, where = FALSE, ignore.case = TRUE, mode = "any")
{
stopifnot(is.character(what))
x <- character(0L)
check.mode <- mode != "any"
for (i in seq_along(search())) {
li <-
if(ignore.case)
grep(what, ls(pos = i, all.names = TRUE),
ignore.case = TRUE, value = TRUE)
else ls(pos = i, pattern = what, all.names = TRUE)
if(length(li)) {
if(check.mode)
li <- li[sapply(li, exists, where = i,
mode = mode, inherits = FALSE)]
x <- c(x, if(where) structure(li, names = rep.int(i, length(li))) else li)
}
}
sort(x)
}
find <- function(what, mode = "any", numeric = FALSE, simple.words=TRUE)
{
stopifnot(is.character(what))
if(length(what) > 1L) {
warning("elements of 'what' after the first will be ignored")
what <- what[1L]
}
# would need to escape at least + * | as well
# if(simple.words)
# what <- gsub("([.[])", "\\\\\\1", paste0("^",what,"$"))
len.s <- length(sp <- search())
ind <- logical(len.s)
check.mode <- mode != "any"
for (i in 1L:len.s) {
if(simple.words) {
found <- what %in% ls(pos = i, all.names = TRUE)
if(found && check.mode)
found <- exists(what, where = i, mode = mode, inherits=FALSE)
ind[i] <- found
} else {
li <- ls(pos = i, pattern = what, all.names = TRUE)
ll <- length(li)
if(ll > 0 && check.mode) {
mode.ok <- sapply(li, exists, where = i, mode = mode,
inherits = FALSE)
ll <- sum(mode.ok)
if(ll >= 2) # some languages have multiple plurals
warning(sprintf(ngettext(ll,
"%d occurrence in %s",
"%d occurrences in %s"), ll, sp[i]),
domain = NA)
}
ind[i] <- ll > 0L
}
}
## found name in search()[ ind ]
if(numeric) structure(which(ind), names=sp[ind]) else sp[ind]
}
# File src/library/utils/R/aspell.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
aspell <-
function(files, filter, control = list(), encoding = "unknown",
program = NULL, dictionaries = character())
{
## Take the given files and feed them through the spell checker in
## Ispell pipe mode.
## Think about adding more command line options eventually.
program <- aspell_find_program(program)
if(is.na(program))
stop("No suitable spell-checker program found")
## Be nice.
if(inherits(files, "Rd"))
files <- list(files)
files_are_names <- is.character(files)
filter_args <- list()
if(missing(filter) || is.null(filter)) {
filter <- if(!files_are_names) {
function(ifile, encoding) {
if(inherits(ifile, "srcfile"))
readLines(ifile$filename, encoding = encoding,
warn = FALSE)
else if(inherits(ifile, "connection"))
readLines(ifile, encoding = encoding, warn = FALSE)
else {
## What should this do with encodings?
as.character(ifile)
}
}
}
else NULL
}
else if(is.character(filter)) {
## Look up filter in aspell filter db.
filter_name <- filter[1L]
filter <- aspell_filter_db[[filter_name]]
## Warn if the filter was not found in the db.
if(is.null(filter))
warning(gettextf("Filter '%s' is not available.",
filter_name),
domain = NA)
}
else if(is.list(filter)) {
## Support
## list("Rd", drop = "\\references"
## at least for now.
filter_name <- filter[[1L]][1L]
filter_args <- filter[-1L]
filter <- aspell_filter_db[[filter_name]]
## Warn if the filter was not found in the db.
if(is.null(filter))
warning(gettextf("Filter '%s' is not available.",
filter_name),
domain = NA)
}
else if(!is.function(filter))
stop("Invalid 'filter' argument.")
encoding <- rep(encoding, length.out = length(files))
verbose <- getOption("verbose")
db <- data.frame(Original = character(), File = character(),
Line = integer(), Column = integer(),
stringsAsFactors = FALSE)
db$Suggestions <- list()
tfile <- tempfile("aspell")
on.exit(unlink(tfile))
if(length(dictionaries)) {
paths <- aspell_find_dictionaries(dictionaries)
ind <- paths == ""
if(any(ind)) {
warning(gettextf("The following dictionaries were not found:\n%s",
paste(sprintf(" %s", dictionaries[ind]),
collapse = "\n")),
domain = NA)
paths <- paths[!ind]
}
if(length(paths)) {
words <- unlist(lapply(paths, readRDS), use.names = FALSE)
personal <- tempfile("aspell_personal")
on.exit(unlink(personal), add = TRUE)
##
## How can we get the right language set (if needed)?
## Maybe aspell() needs an additional 'language' arg?
aspell_write_personal_dictionary_file(words, personal,
program = program)
##
control <- c(control, "-p", shQuote(personal))
}
}
## No special expansion of control argument for now.
control <- as.character(control)
fnames <- names(files)
files <- as.list(files)
for (i in seq_along(files)) {
file <- files[[i]]
if(files_are_names)
fname <- file
else {
## Try srcfiles and srcrefs ...
fname <- if(inherits(file, "srcfile"))
file$filename
else
attr(attr(file, "srcref"), "srcfile")$filename
## As a last resort, try the names of the files argument.
if(is.null(fname))
fname <- fnames[i]
## If unknown ...
if(is.null(fname))
fname <- ""
}
enc <- encoding[i]
if(verbose)
message(gettextf("Processing file %s", fname),
domain = NA)
lines <- if(is.null(filter))
readLines(file, encoding = enc, warn = FALSE)
else {
## Assume that filter takes an input file (and additional
## arguments) and returns a character vector.
do.call(filter, c(list(file, encoding = enc), filter_args))
}
## Need to escape all lines with carets to ensure Aspell handles
## them as data: the Aspell docs say
## It is recommended that programmatic interfaces prefix every
## data line with an uparrow to protect themselves against
## future changes in Aspell.
writeLines(paste0("^", lines), tfile)
## Note that this re-encodes character strings with marked
## encodings to the current encoding (which is definitely fine
## if this is UTF-8 and Aspell was compiled with full UTF-8
## support). Alternatively, we could try using something along
## the lines of
## writeLines(paste0("^", lines), tfile,
## useBytes = TRUE)
## and pass the encoding info to Aspell in case we know it.
out <- tools:::.system_with_capture(program, c("-a", control),
stdin = tfile)
if(out$status != 0L)
stop(gettextf("Running aspell failed with diagnostics:\n%s",
paste(out$stderr, collapse = "\n")),
domain = NA)
## Hopefully everything worked ok.
lines <- out$stdout[-1L]
pos <- cumsum(lines == "") + 1L
## Format is as follows.
## First line is a header.
## Blank lines separate the results for each line.
## Results for the word on each line are given as follows.
## * If the word was found in the main dictionary, or your personal
## dictionary, then the line contains only a `*'.
## * If the word is not in the dictionary, but there are
## suggestions, then the line contains an `&', a space, the
## misspelled word, a space, the number of near misses, the number
## of characters between the beginning of the line and the
## beginning of the misspelled word, a colon, another space, and a
## list of the suggestions separated by commas and spaces.
## * If the word does not appear in the dictionary, and there are no
## suggestions, then the line contains a `#', a space, the
## misspelled word, a space, and the character offset from the
## beginning of the line.
## This can be summarized as follows:
## OK: *
## Suggestions: & original count offset: miss, miss, ...
## None: # original offset
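## A made-up fragment of checker output for illustration (the actual
## suggestions depend entirely on the dictionary in use):
##   *
##   & mispelled 2 10: misspelled, dispelled
##   # sweaveopts 27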
## Look at words not in dictionary with suggestions.
ind <- grepl("^&", lines)
if(any(ind)) {
info <- strsplit(lines[ind], ": ", fixed = TRUE)
one <- strsplit(sapply(info, `[`, 1L), " ", fixed = TRUE)
two <- strsplit(sapply(info, `[`, 2L), ", ", fixed = TRUE)
db1 <- data.frame(Original =
as.character(sapply(one, `[`, 2L)),
File = fname,
Line = pos[ind],
Column =
as.integer(sapply(one, `[`, 4L)),
stringsAsFactors = FALSE)
db1$Suggestions <- two
db <- rbind(db, db1)
}
## Look at words not in dictionary with no suggestions.
ind <- grepl("^#", lines)
if(any(ind)) {
one <- strsplit(lines[ind], " ", fixed = TRUE)
db1 <- data.frame(Original =
as.character(sapply(one, `[`, 2L)),
File = fname,
Line = pos[ind],
Column =
as.integer(sapply(one, `[`, 3L)),
stringsAsFactors = FALSE)
db1$Suggestions <- vector("list", length(one))
db <- rbind(db, db1)
}
}
class(db) <- c("aspell", "data.frame")
db
}
print.aspell <-
function(x, sort = TRUE, verbose = FALSE, indent = 2L, ...)
{
## A very simple printer ...
if(!(nr <- nrow(x))) return(invisible(x))
if (sort)
x <- x[order(x$Original, x$File, x$Line, x$Column), ]
if (verbose)
out <-
sprintf("%sWord: %s (%s:%d:%d)\n%s",
c("", rep.int("\n", nr - 1L)),
x$Original, x$File, x$Line, x$Column,
formatDL(rep.int("Suggestions", nr),
sapply(x$Suggestions, paste, collapse = " "),
style = "list"))
else {
s <- split(sprintf("%s:%d:%d", x$File, x$Line, x$Column),
x$Original)
sep <- sprintf("\n%s",
paste(rep.int(" ", indent), collapse = ""))
out <- paste(names(s),
sapply(s, paste, collapse = sep),
sep = sep, collapse = "\n\n")
}
writeLines(out)
invisible(x)
}
summary.aspell <-
function(object, ...)
{
words <- sort(unique(object$Original))
if(length(words)) {
writeLines("Possibly mis-spelled words:")
print(words)
}
invisible(words)
}
aspell_filter_db <- new.env(hash = FALSE) # small
aspell_filter_db$Rd <- tools::RdTextFilter
aspell_filter_db$Sweave <- tools::SweaveTeXFilter
aspell_find_program <-
function(program = NULL)
{
check <- !is.null(program) || !is.null(names(program))
if(is.null(program))
program <- getOption("aspell_program")
if(is.null(program))
program <- c("aspell", "hunspell", "ispell")
program <- Filter(nzchar, Sys.which(program))[1L]
if(!is.na(program) && check) {
out <- c(system(sprintf("%s -v", program),
intern = TRUE), "")[1L]
if(grepl("really Aspell", out))
names(program) <- "aspell"
else if(grepl("really Hunspell", out))
names(program) <- "hunspell"
else if(grepl("International Ispell", out))
names(program) <- "ispell"
else
names(program) <- NA_character_
}
program
}
aspell_dictionaries_R <- "en_stats"
aspell_find_dictionaries <-
function(dictionaries, dirnames = character())
{
dictionaries <- as.character(dictionaries)
if(!(n <- length(dictionaries))) return(character())
## Always search the R system dictionary directory first.
dirnames <- c(file.path(R.home("share"), "dictionaries"), dirnames)
## For now, all dictionary files should be .rds files.
ind <- !grepl("\\.rds$", dictionaries)
if(any(ind))
dictionaries[ind] <- sprintf("%s.rds", dictionaries[ind])
out <- character(n)
## Dictionaries with no path separators are looked for in the given
## dictionary directories (by default, the R system dictionary
## directory).
ind <- grepl(.Platform$file.sep, dictionaries, fixed = TRUE)
## (Equivalently, could check where paths == basename(paths).)
if(length(pos <- which(ind))) {
pos <- pos[file_test("-f", dictionaries[pos])]
out[pos] <- normalizePath(dictionaries[pos], "/")
}
if(length(pos <- which(!ind))) {
out[pos] <- find_files_in_directories(dictionaries[pos],
dirnames)
}
out
}
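## E.g. (hypothetical call) aspell_find_dictionaries("en_stats") looks for
## 'en_stats.rds' under file.path(R.home("share"), "dictionaries") and
## returns its full path, or "" if the file cannot be found.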
### Utilities.
aspell_inspect_context <-
function(x)
{
x <- split(x, x$File)
y <- Map(function(f, x) {
lines <- readLines(f, warn = FALSE)[x$Line]
cbind(f,
x$Line,
substring(lines, 1L, x$Column - 1L),
x$Original,
substring(lines, x$Column + nchar(x$Original)))
},
names(x), x)
y <- data.frame(do.call(rbind, y), stringsAsFactors = FALSE)
names(y) <- c("File", "Line", "Left", "Original", "Right")
class(y) <- c("aspell_inspect_context", "data.frame")
y
}
print.aspell_inspect_context <-
function(x, ...)
{
s <- split(x, x$File)
nms <- names(s)
for(i in seq_along(s)) {
e <- s[[i]]
writeLines(c(sprintf("File '%s':", nms[i]),
sprintf(" Line %s: \"%s\", \"%s\", \"%s\"",
format(e$Line),
gsub("\"", "\\\"", e$Left),
e$Original,
gsub("\"", "\\\"", e$Right)),
""))
}
invisible(x)
}
## For spell-checking the R manuals:
## This can really only be done with Aspell as the other checkers have
## no texinfo mode.
aspell_control_R_manuals <-
list(aspell =
c("--master=en_US",
"--add-extra-dicts=en_GB",
"--mode=texinfo",
"--add-texinfo-ignore=acronym",
"--add-texinfo-ignore=deftypefun",
"--add-texinfo-ignore=deftypefunx",
"--add-texinfo-ignore=findex",
"--add-texinfo-ignore=enindex",
"--add-texinfo-ignore=include",
"--add-texinfo-ignore=ifclear",
"--add-texinfo-ignore=ifset",
"--add-texinfo-ignore=math",
"--add-texinfo-ignore=macro",
"--add-texinfo-ignore=multitable",
"--add-texinfo-ignore=node",
"--add-texinfo-ignore=pkg",
"--add-texinfo-ignore=printindex",
"--add-texinfo-ignore=set",
"--add-texinfo-ignore=vindex",
"--add-texinfo-ignore-env=menu",
"--add-texinfo-ignore=CRANpkg"
),
hunspell =
c("-d en_US,en_GB"))
aspell_R_manuals <-
function(which = NULL, dir = NULL, program = NULL,
dictionaries = aspell_dictionaries_R)
{
if(is.null(dir)) dir <- tools:::.R_top_srcdir_from_Rd()
## Allow specifying 'R-exts' and alikes, or full paths.
files <- if(is.null(which)) {
Sys.glob(file.path(dir, "doc", "manual", "*.texi"))
} else {
ind <- which(which ==
basename(tools::file_path_sans_ext(which)))
which[ind] <-
file.path(dir, "doc", "manual",
sprintf("%s.texi", which[ind]))
which
}
program <- aspell_find_program(program)
aspell(files,
control = aspell_control_R_manuals[[names(program)]],
program = program,
dictionaries = dictionaries)
}
## For spell-checking the R Rd files:
aspell_control_R_Rd_files <-
list(aspell =
c("--master=en_US",
"--add-extra-dicts=en_GB"),
hunspell =
c("-d en_US,en_GB"))
aspell_R_Rd_files <-
function(which = NULL, dir = NULL, drop = "\\references",
program = NULL, dictionaries = aspell_dictionaries_R)
{
files <- character()
if(is.null(dir)) dir <- tools:::.R_top_srcdir_from_Rd()
if(is.null(which)) {
which <- tools:::.get_standard_package_names()$base
# CHANGES.Rd could be dropped from checks in the future;
# it will not be updated post 2.15.0
files <- c(file.path(dir, "doc", "NEWS.Rd"),
file.path(dir, "src", "gnuwin32", "CHANGES.Rd"))
files <- files[file_test("-f", files)]
}
files <-
c(files,
unlist(lapply(file.path(dir, "src", "library", which, "man"),
tools::list_files_with_type,
"docs", OS_subdirs = c("unix", "windows")),
use.names = FALSE))
program <- aspell_find_program(program)
aspell(files,
filter = list("Rd", drop = drop),
control = aspell_control_R_Rd_files[[names(program)]],
program = program,
dictionaries = dictionaries)
}
## For spell-checking Rd files in a package:
aspell_package_Rd_files <-
function(dir, drop = c("\\author", "\\references"),
control = list(), program = NULL, dictionaries = character())
{
dir <- normalizePath(dir, "/")
subdir <- file.path(dir, "man")
files <- if(file_test("-d", subdir))
tools::list_files_with_type(subdir,
"docs",
OS_subdirs = c("unix", "windows"))
else character()
meta <- tools:::.get_package_metadata(dir, installed = FALSE)
if(is.na(encoding <- meta["Encoding"]))
encoding <- "unknown"
defaults <- .aspell_package_defaults(dir, encoding)$Rd_files
if(!is.null(defaults)) {
## Direct settings currently override (could add a list add =
## TRUE mechanism eventually).
if(!is.null(d <- defaults$drop))
drop <- d
if(!is.null(d <- defaults$control))
control <- d
if(!is.null(d <- defaults$program))
program <- d
if(!is.null(d <- defaults$dictionaries)) {
dictionaries <-
aspell_find_dictionaries(d, file.path(dir, ".aspell"))
}
##
## Deprecated in favor of specifying R level dictionaries.
## Maybe give a warning (in particular if both are given)?
if(!is.null(d <- defaults$personal))
control <- c(control,
sprintf("-p %s",
shQuote(file.path(dir, ".aspell", d))))
##
}
aspell(files,
filter = list("Rd", drop = drop),
control = control,
encoding = encoding,
program = program,
dictionaries = dictionaries)
}
## For spell-checking the R vignettes:
## This should really be done with Aspell as the other checkers have far
## less powerful TeX modes.
aspell_control_R_vignettes <-
list(aspell =
c("--mode=tex",
"--master=en_US",
"--add-extra-dicts=en_GB",
"--add-tex-command='code p'",
"--add-tex-command='pkg p'",
"--add-tex-command='CRANpkg p'"
),
hunspell =
c("-t", "-d en_US,en_GB"))
aspell_R_vignettes <-
function(program = NULL, dictionaries = aspell_dictionaries_R)
{
files <- Sys.glob(file.path(tools:::.R_top_srcdir_from_Rd(),
"src", "library", "*", "vignettes",
"*.Rnw"))
program <- aspell_find_program(program)
aspell(files,
filter = "Sweave",
control = aspell_control_R_vignettes[[names(program)]],
program = program,
dictionaries = dictionaries)
}
## For spell-checking vignettes in a package:
## This should really be done with Aspell as the other checkers have far
## less powerful TeX modes.
aspell_control_package_vignettes <-
list(aspell =
c("--add-tex-command='citep oop'",
"--add-tex-command='Sexpr p'",
"--add-tex-command='code p'",
"--add-tex-command='pkg p'",
"--add-tex-command='proglang p'",
"--add-tex-command='samp p'"
))
aspell_package_vignettes <-
function(dir,
control = list(), program = NULL, dictionaries = character())
{
dir <- tools::file_path_as_absolute(dir)
vinfo <- tools::pkgVignettes(dir = dir)
files <- vinfo$docs
if(!length(files)) return(aspell(character()))
meta <- tools:::.get_package_metadata(dir, installed = FALSE)
if(is.na(encoding <- meta["Encoding"]))
encoding <- "unknown"
defaults <- .aspell_package_defaults(dir, encoding)$vignettes
if(!is.null(defaults)) {
if(!is.null(d <- defaults$control))
control <- d
if(!is.null(d <- defaults$program))
program <- d
if(!is.null(d <- defaults$dictionaries)) {
dictionaries <-
aspell_find_dictionaries(d, file.path(dir, ".aspell"))
}
##
## Deprecated in favor of specifying R level dictionaries.
## Maybe give a warning (in particular if both are given)?
if(!is.null(d <- defaults$personal))
control <- c(control,
sprintf("-p %s",
shQuote(file.path(dir, ".aspell", d))))
##
}
program <- aspell_find_program(program)
files <- split(files, vinfo$engine)
do.call(rbind,
Map(function(files, engine) {
engine <- tools::vignetteEngine(engine)
aspell(files,
filter = engine$aspell$filter,
control =
c(engine$aspell$control,
aspell_control_package_vignettes[[names(program)]],
control),
program = program,
dictionaries = dictionaries)
},
files,
names(files)
)
)
}
## Spell-checking R files.
aspell_filter_db$R <-
function(ifile, encoding = "unknown", ignore = character())
{
pd <- get_parse_data_for_message_strings(ifile, encoding)
if(is.null(pd) || !NROW(pd)) return(character())
## Strip the string delimiters.
pd$text <- substring(pd$text, 2L, nchar(pd$text) - 1L)
## Replace whitespace C backslash escape sequences by whitespace.
pd$text <- gsub("(^|[^\\])\\\\[fnrt]", "\\1 ", pd$text)
pd$text <- gsub("(^|[^\\])\\\\[fnrt]", "\\1 ", pd$text)
## (Do this twice for now because in e.g.
## \n\t\tInformation on package %s
## the first \t is not matched the first time. Alternatively, we
## could match with
## (^|[^\\])((\\\\[fnrt])+)
## but then computing the replacement (\\1 plus as many blanks as
## the characters in \\2) is not straightforward.)
## For gettextf() calls, replace basic percent escape sequences by
## whitespace.
ind <- pd$caller == "gettextf"
if(any(ind)) {
pd$text[ind] <-
gsub("(^|[^%])%[dioxXfeEgGaAs]", "\\1 ", pd$text[ind])
pd$text[ind] <-
gsub(" ([^%])%[dioxXfeEgGaAs]", "\\1 ", pd$text[ind])
## (See above for doing this twice.)
}
lines <- readLines(ifile, encoding = encoding, warn = FALSE)
## Column positions in the parse data have tabs expanded to tab
## stops using a tab width of 8, so for lines with tabs we need to
## map the column positions back to character positions.
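## E.g. for a (hypothetical) line "\tmsg" the character after the tab is
## reported at column 9 (the tab is expanded to the first tab stop); the
## table entry for that line is c(1, 9, 10, 11, 12), and which(ptab == 9)
## maps column 9 back to character position 2.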
lines_in_pd <- sort(unique(c(pd$line1, pd$line2)))
tab <- Map(function(tp, nc) {
if(tp[1L] == -1L) return(NULL)
widths <- rep.int(1, nc)
for(i in tp) {
cols <- cumsum(widths)
widths[i] <- 8 - (cols[i] - 1) %% 8
}
cumsum(c(1, widths))
},
gregexpr("\t", lines[lines_in_pd], fixed = TRUE),
nchar(lines[lines_in_pd]))
names(tab) <- lines_in_pd
lines[lines_in_pd] <- gsub("[^\t]", " ", lines[lines_in_pd])
lines[-lines_in_pd] <- ""
for(entry in split(pd, seq_len(NROW(pd)))) {
line1 <- entry$line1
line2 <- entry$line2
col1 <- entry$col1 + 1L
col2 <- entry$col2 - 1L
if(line1 == line2) {
if(length(ptab <- tab[[as.character(line1)]])) {
col1 <- which(ptab == col1)
col2 <- which(ptab == col2)
}
substring(lines[line1], col1, col2) <- entry$text
} else {
texts <- unlist(strsplit(entry$text, "\n", fixed = TRUE))
n <- length(texts)
if(length(ptab <- tab[[as.character(line1)]])) {
col1 <- which(ptab == col1)
}
substring(lines[line1], col1) <- texts[1L]
pos <- seq(from = 2, length.out = n - 2)
if(length(pos))
lines[line1 + pos - 1] <- texts[pos]
if(length(ptab <- tab[[as.character(line2)]])) {
col2 <- which(ptab == col2)
}
substring(lines[line2], 1L, col2) <- texts[n]
}
}
for(re in ignore[nzchar(ignore)])
lines <- blank_out_regexp_matches(lines, re)
lines
}
get_parse_data_for_message_strings <-
function(file, encoding = "unknown")
{
## The message strings considered are the string constants subject to
## translation in gettext-family calls (see below for details).
exprs <- parse(file = file, encoding = encoding, keep.source = TRUE)
if(!length(exprs)) return(NULL)
pd <- getParseData(exprs)
## Function for computing grandparent ids.
parents <- pd$parent
names(parents) <- pd$id
gpids <- function(ids)
parents[as.character(parents[as.character(ids)])]
ind <- (pd$token == "SYMBOL_FUNCTION_CALL") &
!is.na(match(pd$text,
c("warning", "stop",
"message", "packageStartupMessage",
"gettext", "gettextf", "ngettext")))
funs <- pd$text[ind]
ids <- gpids(pd$id[ind])
calls <- getParseText(pd, ids)
table <- pd[pd$token == "STR_CONST", ]
pos <- match(gpids(table$id), ids)
ind <- !is.na(pos)
table <- split(table[ind, ], factor(pos[ind], seq_along(ids)))
## We have synopses
## message(..., domain = NULL, appendLF = TRUE)
## packageStartupMessage(..., domain = NULL, appendLF = TRUE)
## warning(..., call. = TRUE, immediate. = FALSE, domain = NULL)
## stop(..., call. = TRUE, domain = NULL)
## gettext(..., domain = NULL)
## ngettext(n, msg1, msg2, domain = NULL)
## gettextf(fmt, ..., domain = NULL)
## For the first five, we simply take all unnamed strings.
## (Could make this more precise, of course.)
## For the latter two, we take the msg1/msg2 and fmt arguments,
## provided these are strings.
##
## Using domain = NA inhibits translation: perhaps it should
## optionally also inhibit spell checking?
##
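## For illustration (hypothetical source line): in
##   stop(gettextf("invalid '%s' argument", "x"), call. = FALSE)
## only the fmt string "invalid '%s' argument" is extracted for spell
## checking; the "%s" escape is later blanked out by the R filter
## (aspell_filter_db$R) above.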
extract_message_strings <- function(fun, call, table) {
## Matching a call containing ... gives
## Error in match.call(message, call) :
## ... used in a situation where it doesn't exist
## so eliminate these.
## (Note that we also drop "..." strings.)
call <- parse(text = call)[[1L]]
call <- call[ as.character(call) != "..." ]
mc <- as.list(match.call(get(fun, envir = .BaseNamespaceEnv),
call))
args <- if(fun == "gettextf")
mc["fmt"]
else if(fun == "ngettext")
mc[c("msg1", "msg2")]
else {
if(!is.null(names(mc)))
mc <- mc[!nzchar(names(mc))]
mc[-1L]
}
strings <- as.character(args[vapply(args, is.character, TRUE)])
## Need to canonicalize to match string constants before and
## after parsing ...
texts <- vapply(parse(text = table$text), as.character, "")
pos <- which(!is.na(match(texts, strings)))
cbind(table[pos, ], caller = rep.int(fun, length(pos)))
}
do.call(rbind,
Map(extract_message_strings,
as.list(funs), as.list(calls), table))
}
## For spell-checking the R R files.
aspell_R_R_files <-
function(which = NULL, dir = NULL,
ignore = c("[ \t]'[^']*'[ \t[:punct:]]",
"[ \t][[:alnum:]_.]*\\(\\)[ \t[:punct:]]"),
program = NULL, dictionaries = aspell_dictionaries_R)
{
if(is.null(dir)) dir <- tools:::.R_top_srcdir_from_Rd()
if(is.null(which))
which <- tools:::.get_standard_package_names()$base
files <-
unlist(lapply(file.path(dir, "src", "library", which, "R"),
tools::list_files_with_type,
"code",
OS_subdirs = c("unix", "windows")),
use.names = FALSE)
program <- aspell_find_program(program)
aspell(files,
filter = list("R", ignore = ignore),
control = aspell_control_R_Rd_files[[names(program)]],
program = program,
dictionaries = dictionaries)
}
## For spell-checking R files in a package.
aspell_package_R_files <-
function(dir, ignore = character(),
control = list(), program = NULL, dictionaries = character())
{
dir <- tools::file_path_as_absolute(dir)
subdir <- file.path(dir, "R")
files <- if(file_test("-d", subdir))
tools::list_files_with_type(subdir,
"code",
OS_subdirs = c("unix", "windows"))
else character()
meta <- tools:::.get_package_metadata(dir, installed = FALSE)
if(is.na(encoding <- meta["Encoding"]))
encoding <- "unknown"
defaults <- .aspell_package_defaults(dir, encoding)$R_files
if(!is.null(defaults)) {
if(!is.null(d <- defaults$ignore))
ignore <- d
if(!is.null(d <- defaults$control))
control <- d
if(!is.null(d <- defaults$program))
program <- d
if(!is.null(d <- defaults$dictionaries)) {
dictionaries <-
aspell_find_dictionaries(d, file.path(dir, ".aspell"))
}
}
program <- aspell_find_program(program)
aspell(files,
filter = list("R", ignore = ignore),
control = control,
encoding = encoding,
program = program,
dictionaries = dictionaries)
}
## Spell-checking pot files.
## (Of course, directly analyzing the message strings would be more
## useful, but would require writing appropriate text filters.)
## See also tools:::checkPoFile().
aspell_filter_db$pot <-
function (ifile, encoding = "unknown", ignore = character())
{
lines <- readLines(ifile, encoding = encoding, warn = FALSE)
ind <- grepl("^msgid[ \t]", lines)
do_entry <- function(s) {
out <- character(length(s))
i <- 1L
out[i] <- blank_out_regexp_matches(s[i], "^msgid[ \t]+\"")
while(grepl("^\"", s[i <- i + 1L]))
out[i] <- sub("^\"", " ", s[i])
if(grepl("^msgid_plural[ \t]", s[i])) {
out[i] <- blank_out_regexp_matches(s[i], "^msgid_plural[ \t]+\"")
while(grepl("^\"", s[i <- i + 1L]))
out[i] <- sub("^\"", " ", s[i])
}
out
}
entries <- split(lines, cumsum(ind))
lines <- c(character(length(entries[[1L]])),
as.character(do.call(c, lapply(entries[-1L], do_entry))))
lines <- sub("\"[ \t]*$", " ", lines)
##
## Could replace backslash escapes for blanks and percent escapes by
## blanks, similar to what the R text filter does.
##
for(re in ignore[nzchar(ignore)])
lines <- blank_out_regexp_matches(lines, re)
lines
}
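## For illustration, a made-up .pot entry such as
##   msgid "invalid argument"
##   msgstr ""
## reaches the checker with the 'msgid "' prefix and the trailing quote
## blanked out, while the msgstr line is replaced by an empty line.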
## For spell-checking all pot files in a package.
aspell_package_pot_files <-
function(dir, ignore = character(),
control = list(), program = NULL, dictionaries = character())
{
dir <- tools::file_path_as_absolute(dir)
subdir <- file.path(dir, "po")
files <- if(file_test("-d", subdir))
Sys.glob(file.path(subdir, "*.pot"))
else character()
meta <- tools:::.get_package_metadata(dir, installed = FALSE)
if(is.na(encoding <- meta["Encoding"]))
encoding <- "unknown"
program <- aspell_find_program(program)
aspell(files,
filter = list("pot", ignore = ignore),
control = control,
encoding = encoding,
program = program,
dictionaries = dictionaries)
}
## For spell-checking the R C files.
aspell_R_C_files <-
function(which = NULL, dir = NULL,
ignore = c("[ \t]'[[:alnum:]_.]*'[ \t[:punct:]]",
"[ \t][[:alnum:]_.]*\\(\\)[ \t[:punct:]]"),
program = NULL, dictionaries = aspell_dictionaries_R)
{
if(is.null(dir)) dir <- tools:::.R_top_srcdir_from_Rd()
if(is.null(which))
which <- tools:::.get_standard_package_names()$base
if(!is.na(pos <- match("base", which)))
which[pos] <- "R"
files <- sprintf("%s.pot",
file.path(dir, "src", "library",
which, "po", which))
files <- files[file_test("-f", files)]
program <- aspell_find_program(program)
aspell(files,
filter = list("pot", ignore = ignore),
control = aspell_control_R_Rd_files[[names(program)]],
program = program,
dictionaries = dictionaries)
}
## For spell-checking package C files.
aspell_package_C_files <-
function(dir, ignore = character(),
control = list(), program = NULL, dictionaries = character())
{
dir <- tools::file_path_as_absolute(dir)
## Assume that the package C message template file is shipped as
## 'po/PACKAGE.pot'.
files <- file.path(dir, "po",
paste(basename(dir), "pot", sep = "."))
files <- files[file_test("-f", files)]
meta <- tools:::.get_package_metadata(dir, installed = FALSE)
if(is.na(encoding <- meta["Encoding"]))
encoding <- "unknown"
defaults <- .aspell_package_defaults(dir, encoding)$C_files
if(!is.null(defaults)) {
if(!is.null(d <- defaults$ignore))
ignore <- d
if(!is.null(d <- defaults$control))
control <- d
if(!is.null(d <- defaults$program))
program <- d
if(!is.null(d <- defaults$dictionaries)) {
dictionaries <-
aspell_find_dictionaries(d, file.path(dir, ".aspell"))
}
}
program <- aspell_find_program(program)
aspell(files,
filter = list("pot", ignore = ignore),
control = control,
encoding = encoding,
program = program,
dictionaries = dictionaries)
}
## Spell-checking DCF files.
aspell_filter_db$dcf <-
function(ifile, encoding, keep = c("Title", "Description"),
ignore = character())
{
lines <- readLines(ifile, encoding = encoding, warn = FALSE)
line_has_tags <- grepl("^[^[:blank:]][^:]*:", lines)
tags <- sub(":.*", "", lines[line_has_tags])
lines <- split(lines, cumsum(line_has_tags))
ind <- is.na(match(tags, keep))
lines[ind] <- lapply(lines[ind], function(s) rep.int("", length(s)))
lines <- unlist(lines, use.names = FALSE)
for(re in ignore[nzchar(ignore)])
lines <- blank_out_regexp_matches(lines, re)
lines
}
## For spell-checking package DESCRIPTION files.
aspell_package_description <-
function(dir, ignore = character(),
control = list(), program = NULL, dictionaries = character())
{
dir <- tools::file_path_as_absolute(dir)
files <- file.path(dir, "DESCRIPTION")
meta <- tools:::.get_package_metadata(dir, installed = FALSE)
if(is.na(encoding <- meta["Encoding"]))
encoding <- "unknown"
program <- aspell_find_program(program)
aspell(files,
filter = list("dcf", ignore = ignore),
control = control,
encoding = encoding,
program = program,
dictionaries = dictionaries)
}
## For writing personal dictionaries:
aspell_write_personal_dictionary_file <-
function(x, out, language = "en", program = NULL)
{
if(inherits(x, "aspell"))
x <- sort(unique(x$Original))
program <- aspell_find_program(program)
if(is.na(program))
stop("No suitable spell check program found.")
##
## Ispell and Hunspell take simple word lists as personal dictionary
## files, but Aspell requires a special format, see e.g.
## http://aspell.net/man-html/Format-of-the-Personal-and-Replacement-Dictionaries.html
## and one has to create these by hand, as
## aspell --lang=en create personal ./foo "a b c"
## gives: Sorry "create/merge personal" is currently unimplemented.
## Encodings are a nightmare.
## Try to canonicalize to UTF-8 for Aspell (which allows recording
## the encoding in the personal dictionary).
##
## What should we do for Hunspell (which can handle UTF-8, but has
## no encoding information in the personal dictionary), or Ispell
## (which cannot handle UTF-8)?
##
if(names(program) == "aspell") {
header <- sprintf("personal_ws-1.1 %s %d UTF-8",
language, length(x))
x <- enc2utf8(x)
}
else {
header <- NULL
}
writeLines(c(header, x), out, useBytes = TRUE)
}
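## Illustration (not run; the words and file name are made up): with
## Aspell as the spell checker found, e.g.
## > aspell_write_personal_dictionary_file(c("colour", "modelling"),
## +                                        out = "mypkg.pws")
## writes a simple word list preceded by the required header line:
##   personal_ws-1.1 en 2 UTF-8
##   colour
##   modelling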
## For reading package defaults:
.aspell_package_defaults <-
function(dir, encoding = "unknown")
{
dfile <- file.path(dir, ".aspell", "defaults.R")
if(!file_test("-f", dfile))
return(NULL)
exprs <- parse(dfile, encoding = encoding)
envir <- new.env()
for(e in exprs) eval(e, envir)
as.list(envir)
}
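## An '.aspell/defaults.R' file consists of top-level assignments, one
## per set of defaults. A minimal (hypothetical) example providing
## defaults for the C files check above:
##   C_files <-
##       list(ignore = c("%s", "%d"),
##            dictionaries = "mypkg")
## where "mypkg" names a dictionary resolved against the package's
## .aspell directory via aspell_find_dictionaries().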
## Utilities.
blank_out_regexp_matches <-
function(s, re)
{
m <- gregexpr(re, s)
regmatches(s, m) <- Map(blanks, lapply(regmatches(s, m), nchar))
s
}
blanks <-
function(n) {
vapply(Map(rep.int, rep.int(" ", length(n)), n, USE.NAMES = FALSE),
paste, "", collapse = "")
}
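## E.g. blanking out single-quoted words keeps the remaining text at
## its original offsets, so positions reported by the spell checker
## still refer to the input line:
## > blank_out_regexp_matches("see 'foo' for details", "'[[:alnum:]]*'")
## [1] "see       for details"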
find_files_in_directories <-
function(basenames, dirnames)
{
dirnames <- dirnames[file_test("-d", dirnames)]
dirnames <- normalizePath(dirnames, "/")
out <- character(length(basenames))
pos <- seq_along(out)
for(dir in dirnames) {
paths <- file.path(dir, basenames[pos])
ind <- file_test("-f", paths)
out[pos[ind]] <- paths[ind]
pos <- pos[!ind]
if(!length(pos)) break
}
out
}
# File src/library/utils/R/browseVignettes.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
browseVignettes <- function(package = NULL, lib.loc = NULL, all = TRUE)
{
vinfo <- tools:::getVignetteInfo(package, lib.loc, all)
pkgs <- unique(vinfo[, "Package"])
db <- lapply(pkgs, function(p) vinfo[vinfo[,"Package"] == p,,drop=FALSE])
names(db) <- pkgs
attr(db, "call") <- sys.call()
attr(db, "footer") <-
if (all) ""
else sprintf(gettext("Use %s \n to list the vignettes in all available packages."),
"browseVignettes(all = TRUE)")
class(db) <- "browseVignettes"
return(db)
}
print.browseVignettes <- function(x, ...)
{
if (length(x) == 0L) {
message(gettextf("No vignettes found by %s",
paste(deparse(attr(x, "call")), collapse=" ")),
domain = NA)
return(invisible(x))
}
oneLink <- function(s) {
if (length(s) == 0L) return(character(0L))
title <- s[, "Title"]
if (tools:::httpdPort > 0L)
prefix <- sprintf("/library/%s/doc", pkg)
else
prefix <- sprintf("file://%s/doc", s[, "Dir"])
src <- s[, "File"]
pdf <- s[, "PDF"]
rcode <- s[, "R"]
pdfext <- sub("^.*\\.", "", pdf)
sprintf("
", attr(x, "footer")))
cat("\n")
sink()
## the first two don't work on Windows with browser=NULL.
## browseURL(URLencode(sprintf("file://%s", file)))
## browseURL(URLencode(file))
if (tools:::httpdPort > 0L)
browseURL(sprintf("http://127.0.0.1:%d/session/%s", tools:::httpdPort, basename(file)))
else
browseURL(sprintf("file://%s", file))
## browseURL(file)
invisible(x)
}
# File src/library/utils/R/unix/bug.report.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
bug.report.info <- function()
c("R Version:",
paste0(" ", names(R.version), " = ", R.version),
if (nzchar(Sys.getenv("R_GUI_APP_VERSION")))
c("", "GUI:",
paste0(" R-GUI ", Sys.getenv("R_GUI_APP_VERSION"),
" (", Sys.getenv("R_GUI_APP_REVISION"),")")),
if (.Platform$OS.type == "windows") c("", win.version()),
"",
"Locale:", paste0(" ", Sys.getlocale()),
"",
"Search Path:",
strwrap(paste(search(), collapse=", "), indent = 1, exdent = 1),
"")
bug.report <- function(subject = "", address,
file = "R.bug.report", package = NULL, lib.loc = NULL,
...)
{
baseR <- function() {
writeLines(c(" Bug reports on R and the base packages need to be submitted",
" to the tracker at http://bugs.r-project.org/ .",
"",
" We will now try to open that website in a browser"))
flush.console()
Sys.sleep(2)
browseURL("https://bugs.r-project.org/bugzilla3/index.cgi")
}
findEmail <- function(x) {
## extract the part within the first < >: the rest may be invalid.
x <- paste(x, collapse = " ") # could be multiple lines
sub("[^<]*<([^>]+)>.*", "\\1", x)
}
if (is.null(package)) return(baseR())
DESC <- packageDescription(package, lib.loc)
if (!inherits(DESC, "packageDescription"))
stop(gettextf("Package %s: DESCRIPTION file not found",
sQuote(package)), domain = NA)
info <- paste0(c("Package", " Version", " Maintainer", " Built"),
": ",
c(DESC$Package, DESC$Version, DESC$Maintainer, DESC$Built))
info <- c(info, "", bug.report.info())
if(identical(DESC$Priority, "base")) return(baseR())
if (!is.null(DESC$BugReports)) {
writeLines(info)
cat("\nThis package has a bug submission web page, which we will now attempt\n",
"to open. The information above may be useful in your report. If the web\n",
"page doesn't work, you should send email to the maintainer,\n",
DESC$Maintainer, ".\n",
sep = "")
flush.console()
Sys.sleep(2)
browseURL(DESC$BugReports)
return(invisible())
}
if (missing(address)) address <- findEmail(DESC$Maintainer)
create.post(instructions = c("", "<>", rep("", 3)),
description = "bug report",
subject = subject, address = address,
filename = file, info = info, ...)
}
# File src/library/utils/R/capture.output.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
capture.output <- function(..., file=NULL, append=FALSE)
{
args <- substitute(list(...))[-1L]
rval <- NULL; closeit <- TRUE
if (is.null(file))
file <- textConnection("rval", "w", local = TRUE)
else if (is.character(file))
file <- file(file, if(append) "a" else "w")
else if (inherits(file, "connection")) {
if (!isOpen(file)) open(file, if(append) "a" else "w")
else closeit <- FALSE
} else
stop("'file' must be NULL, a character string or a connection")
sink(file)
## for error recovery: all output will be lost if file=NULL
on.exit({sink(); if(closeit) close(file)})
pf <- parent.frame()
evalVis <- function(expr)
withVisible(eval(expr, pf))
for(i in seq_along(args)) {
expr <- args[[i]]
tmp <- switch(mode(expr),
"expression" = lapply(expr, evalVis),
"call" =, "name" = list(evalVis(expr)),
stop("bad argument"))
for(item in tmp)
if (item$visible) print(item$value)
}
## we need to close the text connection before returning 'rval'
on.exit()
sink()
if(closeit) close(file)
if(is.null(rval)) invisible(NULL) else rval
}
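## E.g. console output produced while evaluating the arguments is
## collected line by line into a character vector:
## > capture.output(cat("hello\nworld\n"))
## [1] "hello" "world"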
# File src/library/utils/R/changedFiles.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
fileSnapshot <- function(path = ".", file.info = TRUE, timestamp = NULL,
md5sum = FALSE, digest = NULL,
full.names = length(path) > 1, ...) {
if (length(path) > 1 && !full.names)
stop("'full.names' must be TRUE for multiple paths.")
if (length(timestamp) == 1)
file.create(timestamp)
path <- normalizePath(path)
args <- list(...)
fullnames <- names <- character(0)
for (i in seq_along(path)) {
newnames <- do.call(list.files, c(path = path[i], full.names = full.names, args))
names <- c(names, newnames)
if (full.names) fullnames <- names
else fullnames <- c(fullnames, file.path(path[i], newnames))
}
if (file.info) {
info <- file.info(fullnames)
if (!full.names)
rownames(info) <- names
} else
info <- data.frame(row.names = names)
if (md5sum)
info <- data.frame(info, md5sum = suppressWarnings(tools::md5sum(fullnames)),
stringsAsFactors = FALSE)
if (!is.null(digest))
info <- data.frame(info, digest = digest(fullnames), stringsAsFactors = FALSE)
structure(list(info = info, path = path, timestamp = timestamp,
file.info = file.info, md5sum = md5sum, digest = digest,
full.names = full.names, args = args), class = "fileSnapshot")
}
changedFiles <- function(before, after, path = before$path, timestamp = before$timestamp,
check.file.info = c("size", "isdir", "mode", "mtime"),
md5sum = before$md5sum, digest = before$digest,
full.names = before$full.names, ...) {
stopifnot(inherits(before, "fileSnapshot"))
if (missing(after)) {
get.file.info <- length(check.file.info) > 0 && before$file.info
args <- before$args
newargs <- list(...)
args[names(newargs)] <- newargs
after <- do.call(fileSnapshot, c(list(path = path, timestamp = NULL,
file.info = get.file.info, md5sum = md5sum,
digest = digest, full.names = full.names), args))
}
stopifnot(inherits(after, "fileSnapshot"))
preinfo <- before$info
postinfo <- after$info
prenames <- rownames(preinfo)
postnames <- rownames(postinfo)
added <- setdiff(postnames, prenames)
deleted <- setdiff(prenames, postnames)
common <- intersect(prenames, postnames)
if (!before$file.info || !after$file.info)
check.file.info <- NULL
if (length(check.file.info)) {
pre <- preinfo[common, check.file.info, drop = FALSE]
post <- postinfo[common, check.file.info, drop = FALSE]
changes <- pre != post
}
else changes <- matrix(logical(0), nrow = length(common), ncol = 0,
dimnames = list(common, character(0)))
if (length(timestamp))
if (file.exists(timestamp)) {
fullnames <- if (after$full.names) common else file.path(after$path, common)
changes <- cbind(changes, Newer = file_test("-nt", fullnames, timestamp))
} else
warning("Timestamp file no longer exists.")
if (md5sum) {
pre <- preinfo[common, "md5sum"]
post <- postinfo[common, "md5sum"]
changes <- cbind(changes, md5sum = pre != post)
}
if (!is.null(digest)) {
pre <- preinfo[common, "digest"]
post <- postinfo[common, "digest"]
changes <- cbind(changes, digest = pre != post)
}
changed <- rownames(changes)[rowSums(changes, na.rm = TRUE) > 0]
structure(list(added = added, deleted = deleted, changed = changed,
unchanged = setdiff(common, changed), changes = changes),
class = "changedFiles")
}
print.fileSnapshot <- function(x, verbose = FALSE, ...) {
cat("File snapshot:\n path = ", x$path,
"\n timestamp = ", x$timestamp,
"\n file.info = ", x$file.info,
"\n md5sum = ", x$md5sum,
"\n digest = ", deparse(x$digest, control = NULL),
"\n full.names = ", x$full.names,
"\n args = ", deparse(x$args, control = NULL),
"\n ", nrow(x$info), " files recorded.\n", sep="")
if (verbose) {
if (ncol(x$info)) print(x$info)
else cat("Files:", rownames(x$info), sep="\n ")
}
invisible(x)
}
print.changedFiles <- function(x, verbose = FALSE, ...) {
if (length(x$added))
cat("Files added:\n", paste0(" ", x$added, collapse="\n"), "\n", sep="")
if (length(x$deleted))
cat("Files deleted:\n", paste0(" ", x$deleted, collapse="\n"), "\n", sep="")
changes <- x$changes
if (!verbose) {
changes <- changes[rowSums(changes, na.rm = TRUE) > 0, , drop=FALSE]
changes <- changes[, colSums(changes, na.rm = TRUE) > 0, drop=FALSE]
}
if (verbose || nrow(changes)) {
cat("File changes:\n")
print(changes)
}
invisible(x)
}
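## Typical (illustrative) use: snapshot a directory, change some files,
## then compare against the snapshot. The directory, file name and the
## columns shown below are hypothetical:
## > snap <- fileSnapshot("R", md5sum = TRUE)
## > ## ... now edit R/foo.R ...
## > changedFiles(snap)
## File changes:
##       size mtime md5sum
## foo.R TRUE  TRUE   TRUE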
# File src/library/utils/R/citation.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
## What a silly name ...
.is_not_nonempty_text <-
function(x)
is.null(x) || anyNA(x) || all(grepl("^[[:space:]]*$", x))
person <-
function(given = NULL, family = NULL, middle = NULL,
email = NULL, role = NULL, comment = NULL,
first = NULL, last = NULL)
{
## Arrange all arguments in lists of equal length.
args <- list(given = given, family = family, middle = middle,
email = email, role = role, comment = comment,
first = first, last = last)
if(all(sapply(args, is.null))) {
return(structure(list(), class = "person"))
}
args <- lapply(args, .listify)
args_length <- sapply(args, length)
if(!all(args_length_ok <- args_length %in% c(1L, max(args_length))))
warning(gettextf("Not all arguments are of the same length, the following need to be recycled: %s",
paste(names(args)[!args_length_ok],
collapse = ", ")),
domain = NA)
args <- lapply(args, function(x) rep(x, length.out = max(args_length)))
##
## We could do this more elegantly, but let's just go through the
## list processing person by person. I'm just recycling the old
## person() code for this.
##
person1 <-
function(given = NULL, family = NULL, middle = NULL,
email = NULL, role = NULL, comment = NULL,
first = NULL, last = NULL)
{
if(!.is_not_nonempty_text(first)) {
if(!.is_not_nonempty_text(given))
stop(gettextf("Use either %s or %s/%s but not both.",
sQuote("given"),
sQuote("first"), sQuote("middle")),
domain = NA)
##
## Start warning eventually ... maybe use message() for now?
message(gettextf("It is recommended to use %s instead of %s.",
sQuote("given"), sQuote("first")),
domain = NA)
##
given <- first
}
if(!.is_not_nonempty_text(middle)) {
##
## Start warning eventually ... maybe use message() for now?
message(gettextf("It is recommended to use %s instead of %s.",
sQuote("given"), sQuote("middle")),
domain = NA)
##
given <- c(given, unlist(strsplit(middle, "[[:space:]]+")))
}
if(!.is_not_nonempty_text(last)) {
if(!.is_not_nonempty_text(family))
stop(gettextf("Use either %s or %s but not both.",
sQuote("family"), sQuote("last")),
domain = NA)
##
## Start warning eventually ... maybe use message() for now?
message(gettextf("It is recommended to use %s instead of %s.",
sQuote("family"), sQuote("last")),
domain = NA)
##
family <- last
}
## Set all empty arguments to NULL.
if(.is_not_nonempty_text(given)) given <- NULL
if(.is_not_nonempty_text(family)) family <- NULL
if(.is_not_nonempty_text(email)) email <- NULL
if(.is_not_nonempty_text(role)) {
if(!is.null(role))
warning(sprintf(ngettext(length(role),
"Invalid role specification: %s.",
"Invalid role specifications: %s."),
paste(sQuote(role), collapse = ", ")),
domain = NA)
role <- NULL
}
if(.is_not_nonempty_text(comment)) comment <- NULL
##
## Use something along the lines of
## tools:::.valid_maintainer_field_regexp
## to validate given email addresses.
##
if(length(role))
role <- .canonicalize_person_role(role)
rval <- list(given = given, family = family, role = role,
email = email, comment = comment)
## Canonicalize 0-length character arguments to NULL.
if(any(ind <- (sapply(rval, length) == 0L)))
rval[ind] <- vector("list", length = sum(ind))
## Give nothing if there is nothing.
if(all(sapply(rval, is.null)))
rval <- NULL
return(rval)
}
rval <-
lapply(seq_along(args$given),
function(i)
with(args,
person1(given = given[[i]], family = family[[i]],
middle = middle[[i]], email = email[[i]],
role = role[[i]], comment = comment[[i]],
first = first[[i]], last = last[[i]])))
##
## Should we check that for each person there is at least one
## non-NULL entry?
##
## Yes!
rval <- rval[!sapply(rval, is.null)]
class(rval) <- "person"
rval
}
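## Illustrative use (the name and address are made up); roles are given
## as MARC relator codes or matched against the relator terms:
## > p <- person("Ada", "Lovelace", email = "ada@example.org",
## +             role = c("aut", "cre"))
## > format(p)
## [1] "Ada Lovelace <ada@example.org> [aut, cre]"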
.canonicalize_person_role <-
function(role)
{
## Be nice. Given roles must either exactly match the role code,
## or be uniquely pmatchable modulo case against the role terms.
pos <- which(is.na(match(role, MARC_relator_db$code)))
if(length(pos)) {
ind <- pmatch(tolower(role[pos]),
tolower(MARC_relator_db$term),
0L)
role[pos[ind > 0L]] <- MARC_relator_db$code[ind]
if(any(ind <- (ind == 0L))) {
warning(sprintf(ngettext(length(pos[ind]),
"Invalid role specification: %s.",
"Invalid role specifications: %s."),
paste(sQuote(role[pos[ind]]), collapse = ", ")),
domain = NA)
role <- role[-pos[ind]]
}
}
role
}
`[[.person` <-
`[.person` <-
function(x, i)
{
rval <- unclass(x)[i]
class(rval) <- class(x)
return(rval)
}
print.person <-
function(x, ...)
{
if(length(x)) print(format(x, ...))
invisible(x)
}
`$.person` <-
function(x, name)
{
##
## extract internal list elements, return list if length > 1, vector
## otherwise (to mirror the behaviour of the input format for
## person())
##
name <- match.arg(name,
c("given", "family", "role", "email", "comment",
"first", "last", "middle")) # for now ...
##
## Let's be nice and support first/middle/last for now.
##
if(name %in% c("first", "last", "middle")) {
message(gettextf("It is recommended to use %s/%s instead of %s/%s/%s.",
sQuote("given"), sQuote("family"),
sQuote("first"), sQuote("middle"), sQuote("last")),
domain = NA)
oname <- name
name <- switch(name,
"first" = "given",
"middle" = "given",
"last" = "family"
)
} else {
oname <- name
}
rval <- lapply(unclass(x), function(p) p[[name]])
if(oname == "first") rval <- lapply(rval, head, 1L)
if(oname == "middle") {
rval <- lapply(rval, tail, -1L)
if(any(ind <- (sapply(rval, length) == 0L)))
rval[ind] <- vector("list", length = sum(ind))
}
if(length(rval) == 1L) rval <- rval[[1L]]
rval
}
`$<-.person` <-
function(x, name, value)
{
name <- match.arg(name, c("given", "family", "role", "email", "comment"))
x <- .listify(unclass(x))
value <- rep(value, length.out = length(x))
if(name == "role")
value <- lapply(value, .canonicalize_person_role)
for(i in seq_along(x)) {
x[[i]][[name]] <- if(.is_not_nonempty_text(value[[i]]))
NULL
else as.character(value[[i]])
}
class(x) <- "person"
x
}
c.person <-
function(..., recursive = FALSE)
{
args <- list(...)
if(!all(sapply(args, inherits, "person")))
warning(gettextf("method is only applicable to %s objects",
sQuote("person")),
domain = NA)
args <- lapply(args, unclass)
rval <- do.call("c", args)
class(rval) <- "person"
rval
}
as.person <-
function(x)
UseMethod("as.person")
as.person.default <-
function(x)
{
if(inherits(x, "person")) return(x)
x <- as.character(x)
if(!length(x)) return(person())
## Need to split the strings into individual person components.
## We used to split at ',' and 'and', but of course these could be
## contained in roles or comments as well.
## Hence, try the following.
## A. Replace all comment, role and email substrings by all-z
## substrings of the same length.
## B. Tokenize the strings according to the split regexp matches in
## the corresponding z-ified strings.
## C. Extract the persons from the thus obtained tokens.
## Create strings consisting of a given character c repeated the
## given numbers n of times.
strings <- function(n, c = "z") {
vapply(Map(rep.int, rep.int(c, length(n)), n,
USE.NAMES = FALSE),
paste, "", collapse = "")
}
## Replace matches of pattern in x by all-z substrings of the same
## length.
zify <- function(pattern, x) {
if(!length(x)) return(character())
m <- gregexpr(pattern, x)
regmatches(x, m) <-
Map(strings, lapply(regmatches(x, m), nchar))
x
}
## Step A.
y <- zify("\\([^)]*\\)", x)
y <- zify("\\[[^]]*\\]", y)
y <- zify("<[^>]*>", y)
## Step B.
pattern <- "[[:space:]]?(,|,?[[:space:]]and)[[:space:]]+"
x <- do.call("c",
regmatches(x, gregexpr(pattern, y), invert = TRUE))
x <- x[!sapply(x, .is_not_nonempty_text)]
if(!length(x)) return(person())
## Step C.
as_person1 <- function(x) {
comment <- if(grepl("\\(.*\\)", x))
sub(".*\\(([^)]*)\\).*", "\\1", x)
else NULL
x <- sub("[[:space:]]*\\([^)]*\\)", "", x)
email <- if(grepl("<.*>", x))
sub(".*<([^>]*)>.*", "\\1", x)
else NULL
x <- sub("[[:space:]]*<[^>]*>", "", x)
role <- if(grepl("\\[.*\\]", x))
unlist(strsplit(gsub("[[:space:]]*", "",
sub(".*\\[([^]]*)\\].*", "\\1", x)),
",", fixed = TRUE))
else NULL
x <- sub("[[:space:]]*\\[[^)]*\\]", "", x)
x <- unlist(strsplit(x, "[[:space:]]+"))
z <- person(given = x[-length(x)], family = x[length(x)],
email = email, role = role, comment = comment)
return(z)
}
as.list(do.call("c", lapply(x, as_person1)))
}
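## E.g. a plain text author specification (made up here) is parsed back
## into its components:
## > as.person("Ada Lovelace <ada@example.org> [aut, cre]")$email
## [1] "ada@example.org"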
personList <-
function(...)
{
z <- list(...)
if(!all(sapply(z, inherits, "person")))
stop(gettextf("all arguments must be of class %s",
dQuote("person")),
domain = NA)
do.call("c", z)
}
as.personList <-
function(x)
UseMethod("as.personList")
as.personList.person <-
function(x)
x
as.personList.default <-
function(x)
{
if(inherits(x, "person")) return(x)
do.call("c", lapply(x, as.person))
}
format.person <-
function(x,
include = c("given", "family", "email", "role", "comment"),
braces =
list(given = "", family = "", email = c("<", ">"),
role = c("[", "]"), comment = c("(", ")")),
collapse =
list(given = " ", family = " ", email = ", ",
role = ", ", comment = ", "),
...
)
{
if(!length(x)) return(character())
args <- c("given", "family", "email", "role", "comment")
include <- sapply(include, match.arg, args)
## process defaults
braces <- braces[args]
collapse <- collapse[args]
names(braces) <- names(collapse) <- args
if(is.null(braces$given)) braces$given <- ""
if(is.null(braces$family)) braces$family <- ""
if(is.null(braces$email)) braces$email <- c("<", ">")
if(is.null(braces$role)) braces$role <- c("[", "]")
if(is.null(braces$comment)) braces$comment <- c("(", ")")
braces <- lapply(braces, rep, length.out = 2L)
if(is.null(collapse$given)) collapse$given <- " "
if(is.null(collapse$family)) collapse$family <- " "
if(is.null(collapse$email)) collapse$email <- ", "
if(is.null(collapse$role)) collapse$role <- ", "
if(is.null(collapse$comment)) collapse$comment <- ", "
collapse <- lapply(collapse, rep, length.out = 1L)
## extract selected elements
x <- lapply(unclass(x), "[", include)
braces <- braces[include]
collapse <- collapse[include]
## format 1 person
format_person1 <- function(p) {
rval <- lapply(seq_along(p), function(i) if(is.null(p[[i]])) NULL else
paste0(braces[[i]][1L], paste(p[[i]], collapse = collapse[[i]]),
braces[[i]][2L]))
paste(do.call("c", rval), collapse = " ")
}
sapply(x, format_person1)
}
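## E.g. (with 'p' from the person() example above), 'include' selects
## and orders the components and 'braces' controls their markup:
## > format(p, include = c("family", "given"))
## [1] "Lovelace Ada"
## > format(p, include = c("given", "family", "email"),
## +        braces = list(email = c("(", ")")))
## [1] "Ada Lovelace (ada@example.org)"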
as.character.person <-
function(x, ...)
format(x, ...)
toBibtex.person <-
function(object, ...)
paste(format(object, include = c("given", "family")),
collapse = " and ")
######################################################################
bibentry <-
function(bibtype, textVersion = NULL, header = NULL, footer = NULL, key = NULL,
...,
other = list(), mheader = NULL, mfooter = NULL)
{
BibTeX_names <- names(tools:::BibTeX_entry_field_db)
args <- c(list(...), other)
if(!length(args))
return(structure(list(), class = "bibentry"))
if(any(sapply(names(args), .is_not_nonempty_text)))
stop("all fields have to be named")
## arrange all arguments in lists of equal length
args <- c(list(bibtype = bibtype, textVersion = textVersion,
header = header, footer = footer, key = key), list(...))
args <- lapply(args, .listify)
other <- lapply(other, .listify)
max_length <- max(sapply(c(args, other), length))
args_length <- sapply(args, length)
if(!all(args_length_ok <- args_length %in% c(1L, max_length)))
warning(gettextf("Not all arguments are of the same length, the following need to be recycled: %s",
paste(names(args)[!args_length_ok],
collapse = ", ")),
domain = NA)
args <- lapply(args, function(x) rep(x, length.out = max_length))
other_length <- sapply(other, length)
if(!all(other_length_ok <- other_length %in% c(1L, max_length)))
warning(gettextf("Not all arguments are of the same length, the following need to be recycled: %s",
paste(names(other)[!other_length_ok],
collapse = ", ")),
domain = NA)
other <- lapply(other, function(x) rep(x, length.out = max_length))
bibentry1 <-
function(bibtype, textVersion, header = NULL, footer = NULL, key = NULL, ..., other = list())
{
## process bibtype
bibtype <- as.character(bibtype)
stopifnot(length(bibtype) == 1L)
pos <- match(tolower(bibtype), tolower(BibTeX_names))
if(is.na(pos))
stop(gettextf("%s has to be one of %s",
sQuote("bibtype"),
paste(BibTeX_names, collapse = ", ")),
domain = NA)
bibtype <- BibTeX_names[pos]
## process fields
rval <- c(list(...), other)
rval <- rval[!sapply(rval, .is_not_nonempty_text)]
fields <- tolower(names(rval))
names(rval) <- fields
attr(rval, "bibtype") <- bibtype
## check required fields
.bibentry_check_bibentry1(rval)
## canonicalize
pos <- fields %in% c("author", "editor")
if(any(pos)) {
for(i in which(pos)) rval[[i]] <- as.person(rval[[i]])
}
if(any(!pos)) {
for(i in which(!pos)) rval[[i]] <- as.character(rval[[i]])
}
## set attributes
attr(rval, "key") <-
if(is.null(key)) NULL else as.character(key)
if(!is.null(textVersion))
attr(rval, "textVersion") <- as.character(textVersion)
if(!.is_not_nonempty_text(header))
attr(rval, "header") <- paste(header, collapse = "\n")
if(!.is_not_nonempty_text(footer))
attr(rval, "footer") <- paste(footer, collapse = "\n")
return(rval)
}
rval <- lapply(seq_along(args$bibtype),
function(i)
do.call("bibentry1",
c(lapply(args, "[[", i),
list(other = lapply(other, "[[", i)))))
## add main header/footer for overall bibentry vector
if(!.is_not_nonempty_text(mheader))
attr(rval, "mheader") <- paste(mheader, collapse = "\n")
if(!.is_not_nonempty_text(mfooter))
attr(rval, "mfooter") <- paste(mfooter, collapse = "\n")
class(rval) <- "bibentry"
rval
}
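## Illustrative construction of a single entry and its BibTeX rendering
## (the field values are just example data):
## > b <- bibentry(bibtype = "Manual", key = "R",
## +               title = "R: A Language and Environment for Statistical Computing",
## +               author = person("R Core Team"),
## +               organization = "R Foundation for Statistical Computing",
## +               address = "Vienna, Austria",
## +               year = "2013")
## > toBibtex(b)
## @Manual{R,
##   title = {R: A Language and Environment for Statistical Computing},
##   author = {{R Core Team}},
##   organization = {R Foundation for Statistical Computing},
##   address = {Vienna, Austria},
##   year = {2013},
## }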
.bibentry_check_bibentry1 <-
function(x, force = FALSE)
{
fields <- names(x)
if(!force && !.is_not_nonempty_text(x$crossref)) return(NULL)
bibtype <- attr(x, "bibtype")
rfields <-
strsplit(tools:::BibTeX_entry_field_db[[bibtype]], "|",
fixed = TRUE)
if(length(rfields) > 0L) {
ok <- sapply(rfields, function(f) any(f %in% fields))
if(any(!ok))
stop(sprintf(ngettext(sum(!ok),
"A bibentry of bibtype %s has to specify the field: %s",
"A bibentry of bibtype %s has to specify the fields: %s"),
sQuote(bibtype), paste(rfields[!ok], collapse = ", ")),
domain = NA)
}
}
bibentry_attribute_names <-
c("bibtype", "textVersion", "header", "footer", "key")
bibentry_list_attribute_names <-
c("mheader", "mfooter")
.bibentry_get_key <-
function(x)
{
if(!length(x)) return(character())
keys <- lapply(unclass(x), attr, "key")
keys[!vapply(keys, length, 0L)] <- ""
unlist(keys)
}
`[[.bibentry` <-
`[.bibentry` <-
function(x, i, drop = TRUE)
{
if(!length(x)) return(x)
cl <- class(x)
class(x) <- NULL
## For character subscripting, use keys if there are no names.
## Note that creating bibentries does not add the keys as names:
## assuming that both can independently be set, we would need to
## track whether names were auto-generated or not.
## (We could consider providing a names() getter which returns given
## names or keys as used for character subscripting, though).
if(is.character(i) && is.null(names(x)))
names(x) <- .bibentry_get_key(x)
y <- x[i]
if(!all(ok <- sapply(y, length) > 0L)) {
warning("subscript out of bounds")
y <- y[ok]
}
if(!drop)
attributes(y) <- attributes(x)[bibentry_list_attribute_names]
class(y) <- cl
y
}
bibentry_format_styles <-
c("text", "Bibtex", "citation", "html", "latex", "textVersion", "R")
.bibentry_match_format_style <-
function(style)
{
ind <- pmatch(tolower(style), tolower(bibentry_format_styles),
nomatch = 0L)
if(all(ind == 0L))
stop(gettextf("%s should be one of %s",
sQuote("style"),
paste(dQuote(bibentry_format_styles),
collapse = ", ")),
domain = NA)
bibentry_format_styles[ind]
}
format.bibentry <-
function(x, style = "text", .bibstyle = NULL,
citation.bibtex.max = getOption("citation.bibtex.max", 1),
sort = FALSE, ...)
{
if(!length(x)) return(character())
style <- .bibentry_match_format_style(style)
if(sort) x <- sort(x, .bibstyle = .bibstyle)
x$.index <- as.list(seq_along(x))
.format_bibentry_via_Rd <- function(f) {
out <- file()
saveopt <- tools::Rd2txt_options(width = getOption("width"))
on.exit({tools::Rd2txt_options(saveopt); close(out)})
sapply(.bibentry_expand_crossrefs(x),
function(y) {
rd <- tools::toRd(y, style = .bibstyle)
##
## Ensure a closing tag via a final empty line for
## now (PR #15692).
if(style == "html") rd <- paste(rd, "\n")
##
con <- textConnection(rd)
on.exit(close(con))
f(con, fragment = TRUE, out = out, ...)
paste(readLines(out), collapse = "\n")
})
}
.format_bibentry_as_citation <- function(x) {
bibtex <- length(x) <= citation.bibtex.max
c(paste(strwrap(attr(x, "mheader")), collapse = "\n"),
unlist(lapply(x, function(y) {
paste(c(if(!is.null(y$header))
c(strwrap(y$header), ""),
if(!is.null(y$textVersion)) {
strwrap(y$textVersion, prefix = " ")
} else {
format(y)
},
if(bibtex) {
c(gettext("\nA BibTeX entry for LaTeX users is\n"),
paste0(" ", unclass(toBibtex(y))))
},
if(!is.null(y$footer))
c("", strwrap(y$footer))),
collapse = "\n")
})),
paste(strwrap(attr(x, "mfooter")), collapse = "\n")
)
}
out <-
switch(style,
"text" = .format_bibentry_via_Rd(tools::Rd2txt),
"html" = .format_bibentry_via_Rd(tools::Rd2HTML),
"latex" = .format_bibentry_via_Rd(tools::Rd2latex),
"Bibtex" = {
unlist(lapply(x,
function(y)
paste(toBibtex(y), collapse = "\n")))
},
"textVersion" = {
out <- lapply(unclass(x), attr, "textVersion")
out[!sapply(out, length)] <- ""
unlist(out)
},
"citation" = .format_bibentry_as_citation(x),
"R" = .format_bibentry_as_R_code(x, ...)
)
as.character(out)
}
.bibentry_expand_crossrefs <-
function(x, more = list())
{
y <- if(length(more))
do.call(c, c(list(x), more))
else
x
x <- unclass(x)
y <- unclass(y)
crossrefs <- lapply(x, `[[`, "crossref")
pc <- which(vapply(crossrefs, length, 0L) > 0L)
if(length(pc)) {
pk <- match(unlist(crossrefs[pc]), .bibentry_get_key(y))
## If an entry has a crossref we cannot resolve it might still
## be complete: we could warn about the bad crossref ...
ok <- !is.na(pk)
## Merge entries: note that InCollection and InProceedings need
## to remap title to booktitle as needed.
x[pc[ok]] <-
Map(function(u, v) {
add <- setdiff(names(v), names(u))
u[add] <- v[add]
if(!is.na(match(tolower(attr(u, "bibtype")),
c("incollection", "inproceedings"))) &&
is.null(u$booktitle))
u$booktitle <- v$title
u
},
x[pc[ok]],
y[pk[ok]])
## Now check entries with crossrefs for completeness.
## Ignore bad entries with a warning.
status <- lapply(x[pc],
function(e)
tryCatch(.bibentry_check_bibentry1(e, TRUE),
error = identity))
bad <- which(sapply(status, inherits, "error"))
if(length(bad)) {
for(b in bad) {
warning(gettextf("Dropping invalid entry %d:\n%s",
pc[b],
conditionMessage(status[[b]])))
}
x[pc[bad]] <- NULL
}
}
class(x) <- "bibentry"
x
}
print.bibentry <-
function(x, style = "text", .bibstyle = NULL, ...)
{
style <- .bibentry_match_format_style(style)
if(style == "R") {
writeLines(format(x, "R", collapse = TRUE, ...))
} else if(length(x)) {
y <- format(x, style, .bibstyle, ...)
if(style == "citation") {
## Printing in citation style does extra headers/footers
## (which however may be empty), so it is handled
## differently.
n <- length(y)
if(nzchar(header <- y[1L]))
header <- c("", header, "")
if(nzchar(footer <- y[n]))
footer <- c("", footer, "")
writeLines(c(header,
paste(y[-c(1L, n)], collapse = "\n\n"),
footer))
} else {
writeLines(paste(y, collapse = "\n\n"))
}
}
invisible(x)
}
## Not vectorized for now: see ?regmatches for a vectorized version.
.blanks <-
function(n)
paste(rep.int(" ", n), collapse = "")
.format_call_RR <-
function(cname, cargs)
{
## Format call with ragged right argument list (one arg per line).
cargs <- as.list(cargs)
n <- length(cargs)
lens <- sapply(cargs, length)
sums <- cumsum(lens)
starters <- c(sprintf("%s(", cname),
rep.int(.blanks(nchar(cname) + 1L), sums[n] - 1L))
trailers <- c(rep.int("", sums[n] - 1L), ")")
trailers[sums[-n]] <- ","
sprintf("%s%s%s", starters, unlist(cargs), trailers)
}
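## E.g. a two-argument call is laid out one argument per line, with
## continuation lines indented to the opening parenthesis:
## .format_call_RR("person", c('given = "Ada"', 'family = "Lovelace"'))
## gives
## person(given = "Ada",
##        family = "Lovelace")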
.format_bibentry_as_R_code <-
function(x, collapse = FALSE)
{
if(!length(x)) return("bibentry()")
x$.index <- NULL
## There are two subtleties in constructing R calls that reproduce a
## given bibentry object.
## * There can be mheader and mfooter entries.
## If there are, we put them into the first bibentry.
## * There could be field names which clash with the names of the
## bibentry() formals: these would need to be put as a list into
## the 'other' formal.
## The following make it into the attributes of an entry.
anames <- bibentry_attribute_names
## The following make it into the attributes of the object.
manames <- c("mheader", "mfooter")
## Format a single element (person or string, at least for now).
f <- function(e) {
if(inherits(e, "person"))
.format_person_as_R_code(e)
else
deparse(e)
}
g <- function(u, v) {
prefix <- sprintf("%s = ", u)
n <- length(v)
if(n > 1L)
prefix <- c(prefix,
rep.int(.blanks(nchar(prefix)), n - 1L))
sprintf("%s%s", prefix, v)
}
s <- lapply(unclass(x),
function(e) {
a <- Filter(length, attributes(e)[anames])
e <- e[!sapply(e, is.null)]
ind <- !is.na(match(names(e),
c(anames, manames, "other")))
if(any(ind)) {
other <- Map(g,
names(e[ind]),
sapply(e[ind], f))
other <- .format_call_RR("list", other)
e <- e[!ind]
} else {
other <- NULL
}
c(Map(g, names(a), sapply(a, deparse)),
Map(g, names(e), sapply(e, f)),
if(length(other)) list(g("other", other)))
})
if(!is.null(mheader <- attr(x, "mheader")))
s[[1L]] <- c(s[[1L]],
paste("mheader = ", deparse(mheader)))
if(!is.null(mfooter <- attr(x, "mfooter")))
s[[1L]] <- c(s[[1L]],
paste("mfooter = ", deparse(mfooter)))
s <- Map(.format_call_RR, "bibentry", s)
if(collapse && (length(s) > 1L))
paste(.format_call_RR("c", s), collapse = "\n")
else
unlist(lapply(s, paste, collapse = "\n"), use.names = FALSE)
}
.format_person_as_R_code <-
function(x)
{
s <- lapply(unclass(x),
function(e) {
e <- e[!sapply(e, is.null)]
cargs <-
sprintf("%s = %s", names(e), sapply(e, deparse))
.format_call_RR("person", cargs)
})
if(length(s) > 1L)
.format_call_RR("c", s)
else
unlist(s, use.names = FALSE)
}
`$.bibentry` <-
function(x, name)
{
if(!length(x)) return(NULL)
##
## Extract internal list elements, return list if length > 1, vector
## otherwise (to mirror the behaviour of the input format for
## bibentry())
##
is_attribute <- name %in% bibentry_attribute_names
rval <- if(is_attribute) lapply(unclass(x), attr, name)
else lapply(unclass(x), "[[", name)
if(length(rval) == 1L) rval <- rval[[1L]]
rval
}
`$<-.bibentry` <-
function(x, name, value)
{
is_attribute <- name %in% bibentry_attribute_names
x <- unclass(x)
name <- tolower(name)
## recycle value
value <- rep(.listify(value), length.out = length(x))
## check bibtype
if(name == "bibtype") {
stopifnot(all(sapply(value, length) == 1L))
BibTeX_names <- names(tools:::BibTeX_entry_field_db)
value <- unlist(value)
pos <- match(tolower(value), tolower(BibTeX_names))
if(anyNA(pos))
stop(gettextf("%s has to be one of %s",
sQuote("bibtype"),
paste(BibTeX_names, collapse = ", ")),
domain = NA)
value <- as.list(BibTeX_names[pos])
}
## replace all values
for(i in seq_along(x)) {
if(is_attribute) {
attr(x[[i]], name) <-
if(is.null(value[[i]])) NULL else paste(value[[i]])
} else {
x[[i]][[name]] <-
if(is.null(value[[i]])) NULL else {
if(name %in% c("author", "editor"))
as.person(value[[i]])
else paste(value[[i]])
}
}
}
## check whether all elements still have their required fields
for(i in seq_along(x)) .bibentry_check_bibentry1(x[[i]])
class(x) <- "bibentry"
x
}
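## E.g. (with 'b' from the bibentry() example above), fields can be
## read and replaced via $; replacements are re-checked for the
## required fields of the bibtype:
## > b$year
## [1] "2013"
## > b$note <- "Also available in print form"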
c.bibentry <-
function(..., recursive = FALSE)
{
args <- list(...)
if(!all(sapply(args, inherits, "bibentry")))
warning(gettextf("method is only applicable to %s objects",
sQuote("bibentry")),
domain = NA)
args <- lapply(args, unclass)
rval <- do.call("c", args)
class(rval) <- "bibentry"
rval
}
toBibtex.bibentry <-
function(object, ...)
{
format_author <- function(author) paste(sapply(author, function(p) {
fnms <- p$family
only_given_or_family <- is.null(fnms) || is.null(p$given)
fbrc <- if(length(fnms) > 1L ||
any(grepl("[[:space:]]", fnms)) ||
only_given_or_family) c("{", "}") else ""
gbrc <- if(only_given_or_family) c("{", "}") else ""
format(p, include = c("given", "family"),
braces = list(given = gbrc, family = fbrc))
}), collapse = " and ")
format_bibentry1 <- function(object) {
object <- unclass(object)[[1L]]
rval <- paste0("@", attr(object, "bibtype"), "{", attr(object, "key"), ",")
if("author" %in% names(object))
object$author <- format_author(object$author)
if("editor" %in% names(object))
object$editor <- format_author(object$editor)
rval <- c(rval,
sapply(names(object), function (n)
paste0(" ", n, " = {", object[[n]], "},")),
"}", "")
return(rval)
}
if(length(object)) {
object$.index <- NULL
rval <- head(unlist(lapply(object, format_bibentry1)), -1L)
} else
rval <- character()
class(rval) <- "Bibtex"
rval
}
sort.bibentry <-
function(x, decreasing = FALSE, .bibstyle = NULL, drop = FALSE, ...)
{
x[order(tools::bibstyle(.bibstyle)$sortKeys(x),
decreasing = decreasing),
drop = drop]
}
rep.bibentry <-
function(x, ...)
{
y <- NextMethod("rep")
class(y) <- class(x)
y
}
unique.bibentry <-
function(x, ...)
{
y <- NextMethod("unique")
class(y) <- class(x)
y
}
######################################################################
citEntry <-
function(entry, textVersion, header = NULL, footer = NULL, ...)
bibentry(bibtype = entry, textVersion = textVersion,
header = header, footer = footer, ...)
citHeader <-
function(...)
{
rval <- paste(...)
class(rval) <- "citationHeader"
rval
}
citFooter <-
function(...)
{
rval <- paste(...)
class(rval) <- "citationFooter"
rval
}
readCitationFile <-
function(file, meta = NULL)
{
exprs <- tools:::.parse_CITATION_file(file, meta$Encoding)
rval <- list()
mheader <- NULL
mfooter <- NULL
k <- 0L
envir <- new.env(hash = TRUE)
## Make the package metadata available to the citation entries.
assign("meta", meta, envir = envir)
for(expr in exprs) {
x <- eval(expr, envir = envir)
if(inherits(x, "bibentry"))
rval <- c(rval, list(x))
else if(identical(class(x), "citationHeader"))
mheader <- c(mheader, x)
else if(identical(class(x), "citationFooter"))
mfooter <- c(mfooter, x)
}
rval <- if(length(rval) == 1L)
rval[[1L]]
else
do.call("c", rval)
if(!.is_not_nonempty_text(mheader))
attr(rval, "mheader") <- paste(mheader, collapse = "\n")
if(!.is_not_nonempty_text(mfooter))
attr(rval, "mfooter") <- paste(mfooter, collapse = "\n")
.citation(rval)
}
######################################################################
citation <-
function(package = "base", lib.loc = NULL, auto = NULL)
{
## Allow citation(auto = meta) in CITATION files to include
## auto-generated package citation.
if(inherits(auto, "packageDescription")) {
auto_was_meta <- TRUE
meta <- auto
package <- meta$Package
} else {
auto_was_meta <- FALSE
dir <- system.file(package = package, lib.loc = lib.loc)
if(dir == "")
stop(gettextf("package %s not found", sQuote(package)),
domain = NA)
meta <- packageDescription(pkg = package,
lib.loc = dirname(dir))
## if(is.null(auto)): Use default auto-citation if no CITATION
## available.
citfile <- file.path(dir, "CITATION")
if(is.null(auto)) auto <- !file_test("-f", citfile)
## if CITATION is available
if(!auto) {
return(readCitationFile(citfile, meta))
}
}
## Auto-generate citation info.
## Base packages without a CITATION file use the base citation.
if((!is.null(meta$Priority)) && (meta$Priority == "base")) {
cit <- citation("base", auto = FALSE)
attr(cit, "mheader")[1L] <-
paste0("The ", sQuote(package), " package is part of R. ",
attr(cit, "mheader")[1L])
return(.citation(cit))
}
year <- sub("-.*", "", meta$`Date/Publication`)
if(!length(year)) {
year <- sub(".*((19|20)[[:digit:]]{2}).*", "\\1", meta$Date,
perl = TRUE) # may not be needed, but safer
if(is.null(meta$Date)){
warning(gettextf("no date field in DESCRIPTION file of package %s",
sQuote(package)),
domain = NA)
}
else if(!length(year)) {
warning(gettextf("could not determine year for %s from package DESCRIPTION file",
sQuote(package)),
domain = NA)
}
}
author <- meta$`Authors@R`
##
## Older versions took persons with no roles as "implied" authors.
## So for now check whether Authors@R gives any authors; if not fall
## back to the plain text Author field.
if(length(author)) {
author <- .read_authors_at_R_field(author)
## We only want those with author roles.
author <- Filter(.person_has_author_role, author)
}
if(length(author)) {
has_authors_at_R_field <- TRUE
} else {
has_authors_at_R_field <- FALSE
author <- as.personList(meta$Author)
}
##
z <- list(title = paste0(package, ": ", meta$Title),
author = author,
year = year,
note = paste("R package version", meta$Version)
)
z$url <- if(identical(meta$Repository, "CRAN"))
sprintf("http://CRAN.R-project.org/package=%s", package)
else
meta$URL
if(identical(meta$Repository, "R-Forge")) {
z$url <- if(!is.null(rfp <- meta$"Repository/R-Forge/Project"))
sprintf("http://R-Forge.R-project.org/projects/%s/", rfp)
else
"http://R-Forge.R-project.org/"
if(!is.null(rfr <- meta$"Repository/R-Forge/Revision"))
z$note <- paste(z$note, rfr, sep = "/r")
}
header <- if(!auto_was_meta) {
gettextf("To cite package %s in publications use:",
sQuote(package))
} else NULL
## No auto-generation message when 'auto' was given as the package
## metadata, so that maintainers can safely use citation(auto = meta)
## in their CITATION without getting notified about possible needs for
## editing.
footer <- if(!has_authors_at_R_field && !auto_was_meta) {
gettextf("ATTENTION: This citation information has been auto-generated from the package DESCRIPTION file and may need manual editing, see %s.",
sQuote("help(\"citation\")"))
} else NULL
author <- format(z$author, include = c("given", "family"))
if(length(author) > 1L)
author <- paste(paste(head(author, -1L), collapse = ", "),
tail(author, 1L), sep = " and ")
rval <- bibentry(bibtype = "Manual",
textVersion =
paste0(author, " (", z$year, "). ", z$title, ". ",
z$note, ". ", z$url),
header = header,
footer = footer,
other = z
)
.citation(rval)
}
.citation <-
function(x)
{
class(x) <- c("citation", "bibentry")
x
}
.read_authors_at_R_field <-
function(x)
{
out <- eval(parse(text = x))
## Let's be nice ...
## Alternatively, we could throw an error.
if(!inherits(out, "person"))
out <- do.call("c", lapply(x, as.person))
out
}
.person_has_author_role <-
function(x)
{
##
## Earlier versions used
## is.null(r <- x$role) || "aut" %in% r
## using author roles by default.
##
"aut" %in% x$role
}
print.citation <-
function(x, style = "citation", ...)
{
NextMethod("print", x, style = style, ...)
invisible(x)
}
as.bibentry <-
function(x)
UseMethod("as.bibentry")
as.bibentry.bibentry <- identity
as.bibentry.citation <-
function(x)
{
class(x) <- "bibentry"
x
}
.listify <-
function(x)
if(inherits(x, "list")) x else list(x)
.format_person_for_plain_author_spec <-
function(x) {
## Names first.
out <- format(x, include = c("given", "family"))
## Only show roles recommended for usage with R.
role <- x$role
if(!length(role)) return("")
role <- role[role %in% MARC_relator_db_codes_used_with_R]
if(!length(role)) return("")
out <- sprintf("%s [%s]", out, paste(role, collapse = ", "))
if(!is.null(comment <- x$comment))
out <- sprintf("%s (%s)", out,
paste(comment, collapse = "\n"))
out
}
## NB: because of the use of strwrap(), this always outputs
## in the current locale even if the input has a marked encoding.
.format_authors_at_R_field_for_author <-
function(x)
{
if(is.character(x))
x <- .read_authors_at_R_field(x)
header <- attr(x, "header")
footer <- attr(x, "footer")
x <- sapply(x, .format_person_for_plain_author_spec)
## Drop persons with irrelevant roles.
x <- x[x != ""]
## And format.
if(!length(x)) return("")
## We need to ensure that the first line has no indentation, whereas
## all subsequent lines are indented (as .write_description avoids
## folding for Author fields). We use a common indentation of 2,
## with an extra indentation of 2 within single author descriptions.
out <- paste(lapply(strwrap(x, indent = 0L, exdent = 4L,
simplify = FALSE),
paste, collapse = "\n"),
collapse = ",\n ")
if(!is.null(header)) {
header <- paste(strwrap(header, indent = 0L, exdent = 2L),
collapse = "\n")
out <- paste(header, out, sep = "\n ")
}
if(!is.null(footer)) {
footer <- paste(strwrap(footer, indent = 2L, exdent = 2L),
collapse = "\n")
out <- paste(out, footer, sep = ".\n")
}
out
}
## preserves encoding if marked.
.format_authors_at_R_field_for_maintainer <-
function(x)
{
if(is.character(x))
x <- .read_authors_at_R_field(x)
## Maintainers need cre roles and email addresses.
x <- Filter(function(e)
!is.null(e$email) && ("cre" %in% e$role),
x)
## If this leaves nothing ...
if(!length(x)) return("")
paste(format(x, include = c("given", "family", "email")),
collapse = ",\n ")
}
# Cite using the default style (which is usually citeNatbib)
cite <-
function(keys, bib, ...)
{
fn <- tools::bibstyle()$cite
if (is.null(fn))
fn <- citeNatbib
fn(keys, bib, ...)
}
# Cite using natbib-like options. A bibstyle would normally
# choose some of these options and just have a cite(keys, bib, previous)
# function within it.
citeNatbib <-
local({
cited <- c()
function(keys, bib, textual = FALSE, before = NULL, after = NULL,
mode = c("authoryear", "numbers", "super"),
abbreviate = TRUE, longnamesfirst = TRUE,
bibpunct = c("(", ")", ";", "a", "", ","),
previous) {
shortName <- function(person) {
if (length(person$family))
paste(tools:::cleanupLatex(person$family), collapse = " ")
else
paste(tools:::cleanupLatex(person$given), collapse = " ")
}
authorList <- function(paper)
sapply(paper$author, shortName)
if (!missing(previous))
cited <<- previous
if (!missing(mode))
mode <- match.arg(mode)
else
mode <- switch(bibpunct[4L],
n = "numbers",
s = "super",
"authoryear")
numeric <- mode %in% c('numbers', 'super')
if (numeric)
bib <- sort(bib)
keys <- unlist(strsplit(keys, " *, *"))
if (!length(keys)) return("")
n <- length(keys)
first <- !(keys %in% cited)
cited <<- unique(c(cited, keys))
bibkeys <- unlist(bib$key)
# Use year to hold numeric entry; makes things
# simpler below
year <- match(keys, bibkeys)
papers <- bib[year]
if (textual || !numeric) {
auth <- character(n)
if (!numeric)
year <- unlist(papers$year)
authorLists <- lapply(papers, authorList)
lastAuthors <- NULL
for (i in seq_along(keys)) {
authors <- authorLists[[i]]
if (identical(lastAuthors, authors))
auth[i] <- ""
else {
if (length(authors) > 1L)
authors[length(authors)] <- paste("and", authors[length(authors)])
if (length(authors) > 2L) {
if (!abbreviate || (first[i] && longnamesfirst))
auth[i] <- paste(authors, collapse=", ")
else
auth[i] <- paste(authors[1L], "et al.")
} else
auth[i] <- paste(authors, collapse=" ")
}
lastAuthors <- authors
}
suppressauth <- which(!nzchar(auth))
if (length(suppressauth)) {
for (i in suppressauth)
year[i - 1L] <-
paste0(year[i - 1L], bibpunct[6L], " ", year[i])
auth <- auth[-suppressauth]
year <- year[-suppressauth]
}
}
if (!is.null(before))
before <- paste0(before, " ")
if (!is.null(after))
after <- paste0(" ", after)
if (textual) {
result <- paste0(bibpunct[1L], before, year, after, bibpunct[2L])
if (mode == "super")
result <- paste0(auth, "^{", result, "}")
else
result <- paste0(auth, " ", result)
result <- paste(result, collapse = paste0(bibpunct[3L], " "))
} else if (numeric) {
result <- paste(year, collapse=paste0(bibpunct[3L], " "))
result <- paste0(bibpunct[1L], before, result, after, bibpunct[2L])
if (mode == "super")
result <- paste0("^{", result, "}")
} else {
result <- paste0(auth, bibpunct[5L], " ", year)
result <- paste(result, collapse = paste0(bibpunct[3L], " "))
result <- paste0(bibpunct[1L], before, result, after, bibpunct[2L])
}
result
}
})
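## E.g. (with 'b' from the bibentry() example above, whose key is "R"),
## author-year citations should come out along the lines of
## > citeNatbib("R", b)
## [1] "(R Core Team 2013)"
## > citeNatbib("R", b, textual = TRUE)
## [1] "R Core Team (2013)"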
# File src/library/utils/R/combn.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
combn <- function(x, m, FUN = NULL, simplify = TRUE, ...)
{
## DATE WRITTEN: 14 April 1994 LAST REVISED: 10 July 1995
## AUTHOR: Scott Chasalow
##
## DESCRIPTION:
## Generate all combinations of the elements of x taken m at a time.
## If x is a positive integer, returns all combinations
## of the elements of seq(x) taken m at a time.
## If argument "FUN" is not null, applies a function given
## by the argument to each point. If simplify is FALSE, returns
## a list; else returns a vector or an array. "..." are passed
## unchanged to function given by argument FUN, if any.
stopifnot(length(m) == 1L, is.numeric(m))
if(m < 0) stop("m < 0", domain = NA)
if(is.numeric(x) && length(x) == 1L && x > 0 && trunc(x) == x)
x <- seq_len(x)
n <- length(x)
if(n < m) stop("n < m", domain = NA)
x0 <- x
if(simplify) {
if(is.factor(x)) x <- as.integer(x)
}
m <- as.integer(m)
e <- 0
h <- m
a <- seq_len(m)
nofun <- is.null(FUN)
if(!nofun && !is.function(FUN))
stop("'FUN' must be a function or NULL")
# first result : what kind, what length,.. ?
len.r <- length(r <- if(nofun) x[a] else FUN(x[a], ...))
count <- as.integer(round(choose(n, m))) # >= 1
if(simplify) {
dim.use <-
if(nofun)
c(m, count) # matrix also when count = 1
else {
d <- dim(r)
if(length(d) > 1L)
c(d, count)
else if(len.r > 1L)
c(len.r, count)
else # MM: *still* a matrix - a la "drop = FALSE"
c(d, count)
} ## NULL in all 'else' cases
}
if(simplify)
out <- matrix(r, nrow = len.r, ncol = count) # matrix for now
else {
out <- vector("list", count)
out[[1L]] <- r
}
if(m > 0) {
i <- 2L
nmmp1 <- n - m + 1L # using 1L to keep integer arithmetic
while(a[1L] != nmmp1) {
if(e < n - h) {
h <- 1L
e <- a[m]
j <- 1L
}
else {
e <- a[m - h]
h <- h + 1L
j <- 1L:h
}
a[m - h + j] <- e + j
r <- if(nofun) x[a] else FUN(x[a], ...)
if(simplify) out[, i] <- r else out[[i]] <- r
i <- i + 1L
}
}
if(simplify) {
if(is.factor(x0)) {
levels(out) <- levels(x0)
class(out) <- class(x0)
}
dim(out) <- dim.use
}
out
}
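## E.g. all 2-element combinations of 1:4 as columns of a matrix, and
## with a function applied to each combination:
## > combn(4, 2)
##      [,1] [,2] [,3] [,4] [,5] [,6]
## [1,]    1    1    1    2    2    3
## [2,]    2    3    4    3    4    4
## > combn(4, 2, FUN = sum)
## [1] 3 4 5 5 6 7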
# File src/library/utils/R/completion.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2006 Deepayan Sarkar
# 2006-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### Note: By default, we try not to do things that might be slow due
### to network latency (think NFS). For example, retrieving a list of
### available packages is potentially slow, and is thus disabled
### initially.
### Status: I'm mostly happy with things. The only obvious
### improvement I can think of is figuring out when we are in
### continuation mode (R prompt == "+") and making use of previous lines
### in that case. I haven't found a way to do that.
### Note: sprintf seems faster than paste based on naive benchmarking:
## > system.time(for (i in 1L:100000L) sprintf("foo%sbar%d", letters, 1L:26L) )
## user system total user.children system.children
## 4.796 0.088 4.887 0.000 0.000
## > system.time(for (i in 1L:100000L) paste("foo", letters, "bar", 1L:26L) )
## user system total user.children system.children
## 8.300 0.028 8.336 0.000 0.000
### so will change all pastes to sprintf. However, we need to be
### careful because 0 length components in sprintf will cause errors.
## [July 2013] First attempt to support fuzzy matching, if
## rc.settings(fuzzy=TRUE), based on suggestion from Rasmus Baath.
## Initially, this replaces use of grep() to find matches by
## findMatches(), which behaves differently depending on the 'fuzzy'
## setting. This does not affect basic object name completion, which
## is done using apropos(). For that, we need to write a fuzzy
## version of apropos (which is not that difficult; just loop through
## everything in the search path).
findExactMatches <- function(pattern, values)
{
grep(pattern, values, value = TRUE)
}
### agrep() version
##
## findFuzzyMatches <- function(pattern, values)
## {
## ## Try exact matches first, and return them if found
## ans <- findExactMatches(pattern, values)
## if (length(ans) == 0) {
## fuzzies <-
## agrep(pattern, values, max.distance = 2,
## ignore.case = TRUE, fixed = FALSE, value = TRUE)
## ## Multiple inconsistent matches will lead to more deletion
## ## than reasonable. To avoid this, we find distances, and
## ## return the one with minimum distance. However, if minimum
## ## distance is not unique, this will still delete.
## ## E.g., a = list(.foobar = 1, foo.bar = 2) ; a$foob
## if (length(fuzzies) == 0) character(0)
## else {
## fdist <- adist(pattern, fuzzies, ignore.case=TRUE, partial = TRUE, fixed = FALSE)
## fmin <- which(fdist == min(fdist))
## fuzzies[fmin]
## }
## }
## else
## ans
## }
### normalizing version (from Rasmus Baath)
##
findFuzzyMatches <- function(pattern, values) {
## FIXME: option to allow experimentation, remove eventually
if (!is.null(ffun <- getOption("fuzzy.match.fun"))) {
return (ffun(pattern, values))
}
## Try exact matches first, and return them if found
exact.matches <- findExactMatches(pattern, values)
if (length(exact.matches) == 0) {
## Removes "\\." and "_" in the pattern excluding the anchor
## (^) and the first character, but does not remove "\\." and
## "_" if it is the last character.
## (Approximate reconstruction of the normalization regexps.)
normalizedPattern <- gsub("(?<=..)(\\\\\\.|_)(?!$)", "", pattern, perl = TRUE)
normalizedValues <- gsub("(?<=.)(\\.|_)(?!$)", "", values, perl = TRUE)
values[regexpr(normalizedPattern, normalizedValues) > 0]
}
else exact.matches
}
## accessing the help system: should allow anything with an index entry
## this just looks at packages on the search path.
matchAvailableTopics <- function(prefix, text)
{
.readAliases <- function(path) {
if(file.exists(f <- file.path(path, "help", "aliases.rds")))
names(readRDS(f))
else if(file.exists(f <- file.path(path, "help", "AnIndex")))
## aliases.rds was introduced before 2.10.0, so we can phase this out
scan(f, what = list("", ""), sep = "\t", quote = "",
na.strings = "", quiet = TRUE)[[1L]]
else character()
}
if (length(text) != 1L || text == "") return (character())
## Update list of help topics if necessary
pkgpaths <- searchpaths()[substr(search(), 1L, 8L) == "package:"]
if (!identical(basename(pkgpaths), .CompletionEnv[["attached_packages"]])) {
assign("attached_packages",
basename(pkgpaths),
envir = .CompletionEnv)
assign("help_topics",
unique(unlist(lapply(pkgpaths, .readAliases))),
envir = .CompletionEnv)
}
aliases <- .CompletionEnv[["help_topics"]]
ans <- findMatches(sprintf("^%s", makeRegexpSafe(text)), aliases)
if (nzchar(prefix)) {
## FIXME: This is a little unsafe. We are not protecting
## prefix to make sure that we do not get any special
## characters (like ? or + or *). However, these are unlikely
## in practice.
tmp <- grep(sprintf("-%s$", prefix), ans, value = TRUE)
if (length(tmp)) substring(tmp, 1, nchar(tmp) - nchar(prefix) - 1L)
else character(0)
}
else ans
}
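## For illustration (indicative; actual results depend on the packages
## attached in the current session):
## > matchAvailableTopics("", "read.cs")
## [1] "read.csv"  "read.csv2"
## > matchAvailableTopics("class", "numeric")  # completes 'class?numeric'
## [1] "numeric"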
## this is for requests of the form ?suffix[TAB] or prefix?suffix[TAB]
helpCompletions <- function(prefix = "", suffix)
{
## Do not attempt to complete ?? (help.search) or ??? (invalid)
if (prefix %in% c("?", "??")) return (character(0))
nc <-
if (.CompletionEnv$settings[["help"]])
matchAvailableTopics(prefix, suffix)
else
normalCompletions(suffix, check.mode = FALSE)
if (length(nc)) sprintf("%s?%s", prefix, nc)
else character()
}
specialCompletions <- function(text, spl)
{
## we'll only try to complete after the last special operator, and
## assume that everything before is meaningfully complete. A more
## sophisticated version of this function may decide to do things
## differently.
## Note that this will involve evaluations, which may have side
## effects. This (side-effects) would not happen normally (except
## for lazy loaded symbols, which most likely would have been
## evaluated shortly anyway), because explicit function calls
## (with parentheses) are not evaluated. In any case, these
## evaluations won't happen if settings$ops==FALSE
## spl (locations of matches) is guaranteed to be non-empty
wm <- which.max(spl)
op <- names(spl)[wm]
opStart <- spl[wm]
opEnd <- opStart + nchar(op)
if (opStart < 1) return(character()) # shouldn't happen
prefix <- substr(text, 1L, opStart - 1L)
suffix <- substr(text, opEnd, 1000000L)
if (op == "?") return(helpCompletions(prefix, suffix))
if (opStart <= 1) return(character()) # not meaningful
## ( breaks words, so prefix should not involve function calls,
## and thus, hopefully no side-effects.
comps <- specialOpCompletionsHelper(op, suffix, prefix)
if (length(comps) == 0L) comps <- ""
sprintf("%s%s%s", prefix, op, comps)
}
## completions on special keywords (subset of those in gram.c). Some
## issues with parentheses: e.g. mode(get("repeat")) is "function", so
## it is normally completed with a left-paren appended, but that is
## not normal usage. Putting it here means that both 'repeat' and
## 'repeat(' will be valid completions (as they should be)
keywordCompletions <- function(text)
{
## FIXME: Will not allow fuzzy completions, as this adds too much
## noise in normalCompletions. Should revisit later once we
## figure out a way to suppress fuzzy matching if there is at
## least one exact match.
findExactMatches(sprintf("^%s", makeRegexpSafe(text)),
c("NULL", "NA", "TRUE", "FALSE", "Inf", "NaN",
"NA_integer_", "NA_real_", "NA_character_", "NA_complex_",
"repeat ", "in ", "next ", "break ", "else "))
}
## 'package' environments in the search path. These will be completed
## with a ::
attachedPackageCompletions <- function(text, add = rc.getOption("package.suffix"))
{
## FIXME: Will not allow fuzzy completions. See comment in keywordCompletions() above
if (.CompletionEnv$settings[["ns"]])
{
s <- grep("^package", search(), value = TRUE)
comps <-
findExactMatches(sprintf("^%s", makeRegexpSafe(text)),
substr(s, 9L, 1000000L))
if (length(comps) && !is.null(add))
sprintf("%s%s", comps, add)
else
comps
}
else character()
}
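## For illustration, in a vanilla session (indicative):
## > attachedPackageCompletions("sta")
## [1] "stats::"
## > attachedPackageCompletions("gr")
## [1] "graphics::"  "grDevices::"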
## this provides the most basic completion, looking for completions in
## the search path using apropos, plus keywords. Plus completion on
## attached packages if settings$ns == TRUE
normalCompletions <-
function(text, check.mode = TRUE,
add.fun = rc.getOption("function.suffix"))
{
## use apropos or equivalent
if (text == "") character() ## too many otherwise
else
{
comps <-
if (.CompletionEnv$settings[["fuzzy"]])
fuzzyApropos(sprintf("^%s", makeRegexpSafe(text)))
else
apropos(sprintf("^%s", makeRegexpSafe(text)), ignore.case = FALSE)
if (.CompletionEnv$settings[["func"]] && check.mode && !is.null(add.fun))
{
which.function <- sapply(comps, function(s) exists(s, mode = "function"))
if (any(which.function))
comps[which.function] <-
sprintf("%s%s", comps[which.function], add.fun)
##sprintf("\033[31m%s\033[0m%s", comps[which.function], add.fun)
}
c(comps, keywordCompletions(text), attachedPackageCompletions(text))
}
}
## completion on function arguments. This involves the most work (as
## we need to look back in the line buffer to figure out which
## function we are inside, if any), and is also potentially intensive
## when many functions match the function that we are supposedly in
## (which will happen for common generic functions like print (we are
## very optimistic here, erring on the side of
## whatever-the-opposite-of-caution-is (our justification being that
## erring on the side of caution is practically useless and not erring
## at all is expensive to the point of being impossible (we really
## don't want to evaluate the dotplot() call in "print(dotplot(x),
## positi[TAB] )" ))))
## this defines potential function name boundaries
breakRE <- "[^\\.\\w]"
## breakRE <- "[ \t\n \\\" '`><=-%;,&}\\\?\\\+\\\{\\\|\\\(\\\)\\\*]"
## for some special functions like library, data, etc, normal
## completion is rarely meaningful, especially for the first argument.
## Unfortunately, knowing whether the token being completed is the
## first arg of such a function involves more work than we would
## normally want to do. On the other hand, inFunction() below already
## does most of this work, so we will add a piece of code (mostly
## irrelevant to its primary purpose) to indicate this. The following
## two functions are just wrappers to access and modify this
## information.
setIsFirstArg <- function(v)
.CompletionEnv[["isFirstArg"]] <- v
getIsFirstArg <- function()
.CompletionEnv[["isFirstArg"]]
inFunction <-
function(line = .CompletionEnv[["linebuffer"]],
cursor = .CompletionEnv[["start"]])
{
## are we inside a function? Yes if the number of ( encountered
## going backwards exceeds number of ). In that case, we would
## also like to know what function we are currently inside
## (ideally, also what arguments to it have already been supplied,
## but let's not dream that far ahead).
parens <-
sapply(c("(", ")"),
function(s) gregexpr(s, substr(line, 1L, cursor), fixed = TRUE)[[1L]],
simplify = FALSE)
## remove -1's
parens <- lapply(parens, function(x) x[x > 0])
## The naive algo is as follows: set counter = 0; go backwards
## from cursor, set counter-- when a ) is encountered, and
## counter++ when a ( is encountered. We are inside a function
## that starts at the first ( with counter > 0.
temp <-
data.frame(i = c(parens[["("]], parens[[")"]]),
c = rep(c(1, -1), sapply(parens, length)))
if (nrow(temp) == 0) return(character())
temp <- temp[order(-temp$i), , drop = FALSE] ## order backwards
wp <- which(cumsum(temp$c) > 0)
if (length(wp)) # inside a function
{
## return guessed name of function, letting someone else
## decide what to do with that name
index <- temp$i[wp[1L]]
prefix <- substr(line, 1L, index - 1L)
suffix <- substr(line, index + 1L, cursor + 1L)
## note in passing whether we are the first argument (no '='
## and no ',' in suffix)
if ((length(grep("=", suffix, fixed = TRUE)) == 0L) &&
(length(grep(",", suffix, fixed = TRUE)) == 0L))
setIsFirstArg(TRUE)
if ((length(grep("=", suffix, fixed = TRUE))) &&
(length(grep(",", substr(suffix,
tail.default(gregexpr("=", suffix, fixed = TRUE)[[1L]], 1L),
1000000L), fixed = TRUE)) == 0L))
{
## we are on the wrong side of a = to be an argument, so
## we don't care even if we are inside a function
return(character())
}
else ## guess function name
{
possible <- suppressWarnings(strsplit(prefix, breakRE, perl = TRUE))[[1L]]
possible <- possible[possible != ""]
if (length(possible)) return(tail.default(possible, 1))
else return(character())
}
}
else # not inside function
{
return(character())
}
}
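## For illustration (indicative): in the linebuffer below, the only
## unmatched "(" belongs to print(), so that is the guessed function:
## > inFunction("print(dotplot(x), positi", 24L)
## [1] "print"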
argNames <-
function(fname, use.arg.db = .CompletionEnv$settings[["argdb"]])
{
if (use.arg.db) args <- .FunArgEnv[[fname]]
if (!is.null(args)) return(args)
## else
args <- do.call(argsAnywhere, list(fname))
if (is.null(args))
character()
else if (is.list(args))
unlist(lapply(args, function(f) names(formals(f))))
else
names(formals(args))
}
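## For illustration:
## > argNames("rnorm")
## [1] "n"    "mean" "sd"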
specialFunctionArgs <- function(fun, text)
{
## certain special functions have special possible arguments.
## This is primarily applicable to library and require, for which
## rownames(installed.packages()) supplies the candidate
## completions. This is disabled by default,
## because the first call to installed.packages() can be time
## consuming, e.g. on a network file system. However, the results
## are cached, so subsequent calls are not that expensive.
switch(EXPR = fun,
library = ,
require = {
if (.CompletionEnv$settings[["ipck"]])
{
findMatches(sprintf("^%s", makeRegexpSafe(text)),
rownames(installed.packages()))
}
else character()
},
data = {
if (.CompletionEnv$settings[["data"]])
{
findMatches(sprintf("^%s", makeRegexpSafe(text)),
data()$results[, "Item"])
}
else character()
},
## otherwise,
character())
}
functionArgs <-
function(fun, text,
S3methods = .CompletionEnv$settings[["S3"]],
S4methods = FALSE,
add.args = rc.getOption("funarg.suffix"))
{
if (length(fun) < 1L || any(fun == "")) return(character())
specialFunArgs <- specialFunctionArgs(fun, text)
if (S3methods && exists(fun, mode = "function"))
fun <-
c(fun,
tryCatch(methods(fun),
warning = function(w) {},
error = function(e) {}))
if (S4methods) warning("cannot handle S4 methods yet")
allArgs <- unique(unlist(lapply(fun, argNames)))
ans <- findMatches(sprintf("^%s", makeRegexpSafe(text)), allArgs)
if (length(ans) && !is.null(add.args))
ans <- sprintf("%s%s", ans, add.args)
c(specialFunArgs, ans)
}
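## For illustration (with the default settings and suffixes):
## > functionArgs("rnorm", "m")
## [1] "mean="
## With rc.settings(ipck = TRUE), functionArgs("library", "MAS") would
## additionally offer matching installed package names obtained from
## installed.packages().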
## Note: Inside the C code, we redefine
## rl_attempted_completion_function rather than
## rl_completion_entry_function, which means that if
## retrieveCompletions() returns a length-0 result, by default the
## fallback filename completion mechanism will be used. This is not
## quite the best way to go, as in certain (most) situations filename
## completion will definitely be inappropriate even if no valid R
## completions are found. We could return "" as the only completion,
## but that produces an irritating blank line on
## list-possible-completions (or whatever the correct name is).
## Instead (since we don't want to reinvent the wheel), we use the
## following scheme: If the character just preceding our token is " or
## ', we immediately go to file name completion. If not, we do our
## stuff, and disable file name completion (using
## .Call("RCSuppressFileCompletion")) even if we don't find any
## matches.
## Note that under this scheme, filename completion will fail
## (possibly in unexpected ways) if the partial name contains 'unusual
## characters', namely ones that have been set (see C code) to cause a
## word break because doing so is meaningful in R syntax (e.g. "+",
## "-" ("/" is exempt (and treated specially below) because of its
## ubiquitousness in UNIX file names (where this package is most
## likely to be used)))
## decide whether to fall back on filename completion. Yes if the
## number of quotes between the cursor and the beginning of the line
## is an odd number.
## FIXME: should include backtick (`)? May be useful, but needs more
## thought; e.g., should imply not-filename, but rather variable
## names. Must cooperate with the if (isInsideQuotes()) branch in
## .completeToken().
isInsideQuotes <-
fileCompletionPreferred <- function()
{
((st <- .CompletionEnv[["start"]]) > 0 && {
## yes if the number of quote signs to the left is odd
linebuffer <- .CompletionEnv[["linebuffer"]]
lbss <- head.default(unlist(strsplit(linebuffer, "")), .CompletionEnv[["end"]])
((sum(lbss == "'") %% 2 == 1) ||
(sum(lbss == '"') %% 2 == 1))
})
}
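## For illustration: with the linebuffer 'read.csv("dat' and the cursor
## at the end, the number of quote characters to the left is odd, so
## fileCompletionPreferred() is TRUE and filename completion is used.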
## File name completion, used if settings$quotes == TRUE. Front ends
## that can do filename completion themselves should probably not use
## this if they can do a better job.
correctFilenameToken <- function()
{
## Helper function
## If a file name contains spaces, the token will only have the
## part after the last space. This function tries to recover the
## complete initial part.
## Find part between last " or '
linebuffer <- .CompletionEnv[["linebuffer"]]
lbss <- head.default(unlist(strsplit(linebuffer, "")), .CompletionEnv[["end"]])
whichDoubleQuote <- lbss == '"'
whichSingleQuote <- lbss == "'"
insideDoubleQuote <- (sum(whichDoubleQuote) %% 2 == 1)
insideSingleQuote <- (sum(whichSingleQuote) %% 2 == 1)
loc.start <-
if (insideDoubleQuote && insideSingleQuote)
{
## Should not happen, but if it does, should take whichever comes later
max(which(whichDoubleQuote), which(whichSingleQuote))
}
else if (insideDoubleQuote)
max(which(whichDoubleQuote))
else if (insideSingleQuote)
max(which(whichSingleQuote))
else ## should not happen, abort non-intrusively
.CompletionEnv[["start"]]
substring(linebuffer, loc.start + 1L, .CompletionEnv[["end"]])
}
fileCompletions <- function(token)
{
## uses Sys.glob (conveniently introduced in 2.5.0)
## token may not start just after the begin quote, e.g., if spaces
## are included. Get 'correct' partial file name by looking back
## to begin quote
pfilename <- correctFilenameToken()
## Sys.glob doesn't work without expansion. Is that intended?
pfilename.expanded <- path.expand(pfilename)
comps <- Sys.glob(sprintf("%s*", pfilename.expanded), dirmark = TRUE)
## If there is only one completion (and it's a directory), also
## include files inside in list of completions. This is not
## particularly useful, but without this, readline tends to add an
## end-quote (if sole completion) which is irritating if one is
## actually looking for something inside the directory. Note that
## we don't actually test to see if it's a directory, because if
## it is not, list.files() will simply return character(0).
if (length(comps) == 1 && substring(comps, nchar(comps), nchar(comps)) == "/") {
filesInside <- list.files(comps, all.files = TRUE, full.names = FALSE, no.. = TRUE)
if (length(filesInside)) comps <- c(comps, file.path(comps, filesInside))
}
## for things that only extend beyond the cursor, need to
## 'unexpand' path
if (pfilename.expanded != pfilename)
comps <- sub(path.expand("~"), "~", comps, fixed = TRUE)
## for tokens that were non-trivially corrected by adding prefix,
## need to delete extra part
if (pfilename != token)
comps <- substring(comps, nchar(pfilename) - nchar(token) + 1L, 1000L)
comps
}
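## For illustration of the underlying mechanics (indicative; results
## depend on the filesystem):
## > Sys.glob(sprintf("%s*", path.expand("~/Desk")), dirmark = TRUE)
## [1] "/home/user/Desktop/"
## The leading "~" is then restored before the completions are returned.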
## .completeToken() is the primary interface, and does the actual
## completion when called from C code.
.completeToken <- function()
{
text <- .CompletionEnv[["token"]]
if (isInsideQuotes())
{
## If we're in here, that means we think the cursor is inside
## quotes. In most cases, this means that standard filename
## completion is more appropriate, but probably not if we're
## trying to access things of the form x["foo... or x$"foo...
## The following tries to figure this out, but it won't work
## in all cases (e.g. x[, "foo"])
## We assume that whoever determines our token boundaries
## considers quote signs as a breaking symbol.
## If the 'quotes' setting is FALSE, we will make no attempt to
## do filename completion (this is likely to happen with
## front-ends that are capable of doing their own file name
## completion; such front-ends can fall back to their native
## file completion when the "fileName" component of rc.status() is TRUE).
if (.CompletionEnv$settings[["quotes"]])
{
## ## This was used to make a guess whether we are in
## ## special situations like ::, ?, [, etc. But from R
## ## 3.0.0 we re-evaluate the token based from the
## ## begin-quote, so this is postponed. This part can be
## ## deleted once this is stable enough.
## st <- .CompletionEnv[["start"]]
## probablyNotFilename <-
## ((st > 2L &&
## ((prequote <- substr(.CompletionEnv[["linebuffer"]], st-1L, st-1L)) %in% c("?", "[", ":", "$"))) ||
## (st == 2L &&
## ((prequote <- substr(.CompletionEnv[["linebuffer"]], st-1L, st-1L)) %in% c("?")))
## )
## FIXME|TODO: readline (and maybe other backends) will
## usually use a fixed set of breakpoints to detect
## tokens. If we are handling quotes ourselves, the more
## likely correct token is everything from the last
## unclosed quote onwards (which may include spaces,
## punctuations, etc. that would normally cause breaks).
## We already do this when we guess the token ourselves
## (e.g., for Windows) (and also in the fileCompletions()
## call below using correctFilenameToken()), and can
## re-use that here. The problem is that for other
## backends a token may already have been determined, and
## that's what we will need to use. We can still fake it
## by using the correct token but subtracting the extra
## part when providing completions, but that will need
## some work.
## Related to that: if we implement that, should also
## check before for '?' and move to help completion
## if so.
### str(correctFilenameToken())
### str(.guessTokenFromLine(update = FALSE))
## TODO: For extra credit, we could also allow for
## spaces like in 'package ? grid', but will leave
## that for the future (maybe some regexp magic will
## make this simple)
fullToken <- .guessTokenFromLine(update = FALSE)
probablyHelp <- (fullToken$start >= 2L &&
((substr(.CompletionEnv[["linebuffer"]],
fullToken$start-1L,
fullToken$start-1L)) == "?"))
if (probablyHelp) {
fullToken$prefix <- .guessTokenFromLine(end = fullToken$start - 2, update = FALSE)$token
}
probablyName <- ((fullToken$start > 2L &&
((substr(.CompletionEnv[["linebuffer"]],
fullToken$start-1L,
fullToken$start-1L)) == "$"))
||
(fullToken$start > 3L &&
((substr(.CompletionEnv[["linebuffer"]],
fullToken$start-2L,
fullToken$start-1L)) == "[[")))
probablyNamespace <- (fullToken$start > 3L &&
((substr(.CompletionEnv[["linebuffer"]],
fullToken$start-2L,
fullToken$start-1L)) %in% c("::")))
## in anticipation that we will handle this eventually:
probablyBacktick <- (fullToken$start >= 1L &&
((substr(.CompletionEnv[["linebuffer"]],
fullToken$start,
fullToken$start)) %in% c("`")))
probablySpecial <- probablyHelp || probablyName || probablyNamespace
## str(list(probablyHelp = probablyHelp,
## probablyName = probablyName,
## probablyNamespace = probablyNamespace,
## probablyBacktick = probablyBacktick,
## probablySpecial = probablySpecial))
## For now, we only handle probablyHelp, and just decline
## to do filename completion if any of the other special
## situations are detected (but don't try to complete).
tentativeCompletions <-
if (probablyHelp) {
substring(helpCompletions(fullToken$prefix, fullToken$token),
2L + nchar(fullToken$prefix), 1000L) # drop initial "prefix + ?"
}
else if (!probablySpecial)
fileCompletions(fullToken$token) # FIXME: but not if probablyBacktick
.setFileComp(FALSE)
## str(c(fullToken, list(comps = tentativeCompletions)))
## Adjust for self-computed token
.CompletionEnv[["comps"]] <-
substring(tentativeCompletions,
1L + nchar(fullToken$token) - nchar(text),
1000L)
}
else
{
.CompletionEnv[["comps"]] <- character()
.setFileComp(TRUE)
}
}
else
{
.setFileComp(FALSE)
setIsFirstArg(FALSE) # might be changed by inFunction() call
## make a guess at what function we are inside
guessedFunction <-
if (.CompletionEnv$settings[["args"]])
inFunction(.CompletionEnv[["linebuffer"]],
.CompletionEnv[["start"]])
else ""
.CompletionEnv[["fguess"]] <- guessedFunction
## if this is not "", then we want to add possible arguments
## of that function(s) (methods etc). Should be character()
## if nothing matches
fargComps <- functionArgs(guessedFunction, text)
if (getIsFirstArg() && length(guessedFunction) &&
guessedFunction %in%
c("library", "require", "data"))
{
.CompletionEnv[["comps"]] <- fargComps
## don't try anything else
return()
}
## Is there an arithmetic operator in there? If so,
## work on the part after that and append to prefix before
## returning. It would have been easier if these were
## word-break characters, but that potentially interferes with
## filename completion.
## lastArithOp <- tail(gregexpr("/", text, fixed = TRUE)[[1L]], 1)
lastArithOp <- tail.default(gregexpr("[\"'^/*+-]", text)[[1L]], 1)
if (haveArithOp <- (lastArithOp > 0))
{
prefix <- substr(text, 1L, lastArithOp)
text <- substr(text, lastArithOp + 1L, 1000000L)
}
spl <- specialOpLocs(text)
comps <-
if (length(spl))
specialCompletions(text, spl)
else
{
## should we append a left-paren for functions?
## Usually yes, but not when inside certain special
## functions which often take other functions as
## arguments
appendFunctionSuffix <-
!any(guessedFunction %in%
c("help", "args", "formals", "example",
"do.call", "environment", "page", "apply",
"sapply", "lapply", "tapply", "mapply",
"methods", "fix", "edit"))
normalCompletions(text, check.mode = appendFunctionSuffix)
}
if (haveArithOp && length(comps))
{
comps <- paste0(prefix, comps)
}
comps <- c(comps, fargComps)
.CompletionEnv[["comps"]] <- comps
}
}
## support functions that attempt to provide tools useful specifically
## for the Windows Rgui.
## Note: even though these are unexported functions, changes in the
## API should be noted in man/rcompgen.Rd
.win32consoleCompletion <-
function(linebuffer, cursorPosition,
check.repeat = TRUE,
minlength = -1)
{
isRepeat <- ## is TAB being pressed repeatedly with this combination?
if (check.repeat)
(linebuffer == .CompletionEnv[["linebuffer"]] &&
cursorPosition == .CompletionEnv[["end"]])
else TRUE
.assignLinebuffer(linebuffer)
.assignEnd(cursorPosition)
.guessTokenFromLine()
token <- .CompletionEnv[["token"]]
comps <-
if (nchar(token, type = "chars") < minlength) character()
else
{
.completeToken()
.retrieveCompletions()
}
## FIXME: no idea how much of this is MBCS-safe
if (length(comps) == 0L)
{
## no completions
addition <- ""
possible <- character()
}
else if (length(comps) == 1L)
{
## FIXME (maybe): in certain cases the completion may be
## shorter than the token (e.g. when trying to complete on an
## impossible name inside a list). It's debatable what the
## behaviour should be in this case, but readline and Emacs
## actually delete part of the token (at least currently). To
## achieve this in Rgui one would need to do somewhat more
## work than I'm ready to do right now (especially since it's
## not clear that this is the right thing to do to begin
## with). So, in this case, I'll just pretend that no
## completion was found.
addition <- substr(comps, nchar(token, type = "chars") + 1L, 100000L)
possible <- character()
}
else if (length(comps) > 1L)
{
## more than one completion. The right thing to do is to extend
## the line by the unique part if any, and list the multiple
## possibilities otherwise.
additions <- substr(comps, nchar(token, type = "chars") + 1L, 100000L)
if (length(table(substr(additions, 1L, 1L))) > 1L)
{
## no unique substring
addition <- ""
possible <-
if (isRepeat) capture.output(cat(format(comps, justify = "left"), fill = TRUE))
else character()
}
else
{
## need to figure out maximal unique substr
keepUpto <- 1
while (length(table(substr(additions, 1L, keepUpto))) == 1L)
keepUpto <- keepUpto + 1L
addition <- substr(additions[1L], 1L, keepUpto - 1L)
possible <- character()
}
}
list(addition = addition,
possible = possible,
comps = paste(comps, collapse = " "))
}
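## For illustration, in a vanilla session (indicative):
## > .win32consoleCompletion("read.c", 6L)
## $addition
## [1] "sv"
## $possible
## character(0)
## $comps
## [1] "read.csv read.csv2"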
## usage:
## .addFunctionInfo(foo = c("arg1", "arg2"), bar = c("a", "b"))
.addFunctionInfo <- function(...)
{
dots <- list(...)
for (nm in names(dots))
.FunArgEnv[[nm]] <- dots[[nm]]
}
.initialize.argdb <-
function()
{
## lattice
lattice.common <-
c("data", "allow.multiple", "outer", "auto.key", "aspect",
"panel", "prepanel", "scales", "strip", "groups", "xlab",
"xlim", "ylab", "ylim", "drop.unused.levels", "...",
"default.scales", "subscripts", "subset", "formula", "cond",
"aspect", "as.table", "between", "key", "legend", "page",
"main", "sub", "par.strip.text", "layout", "skip", "strip",
"strip.left", "xlab.default", "ylab.default", "xlab",
"ylab", "panel", "xscale.components", "yscale.components",
"axis", "index.cond", "perm.cond", "...", "par.settings",
"plot.args", "lattice.options")
densityplot <-
c("plot.points", "ref", "groups", "jitter.amount",
"bw", "adjust", "kernel", "weights", "window", "width",
"give.Rkern", "n", "from", "to", "cut", "na.rm")
panel.xyplot <-
c("type", "groups", "pch", "col", "col.line",
"col.symbol", "font", "fontfamily", "fontface", "lty",
"cex", "fill", "lwd", "horizontal")
.addFunctionInfo(xyplot.formula = c(lattice.common, panel.xyplot),
densityplot.formula = c(lattice.common, densityplot))
## grid
grid.clip <-
c("x", "y", "width", "height", "just", "hjust", "vjust",
"default.units", "name", "vp")
grid.curve <-
c("x1", "y1", "x2", "y2", "default.units", "curvature",
"angle", "ncp", "shape", "square", "squareShape", "inflect",
"arrow", "open", "debug", "name", "gp", "vp")
grid.polyline <-
c("x", "y", "id", "id.lengths", "default.units", "arrow",
"name", "gp", "vp")
grid.xspline <-
c("x", "y", "id", "id.lengths", "default.units", "shape",
"open", "arrow", "repEnds", "name", "gp", "vp")
.addFunctionInfo(grid.clip = grid.clip,
grid.curve = grid.curve,
grid.polyline = grid.polyline,
grid.xspline = grid.xspline)
## par, options
par <-
c("xlog", "ylog", "adj", "ann", "ask", "bg", "bty", "cex",
"cex.axis", "cex.lab", "cex.main", "cex.sub", "cin", "col",
"col.axis", "col.lab", "col.main", "col.sub", "cra", "crt",
"csi", "cxy", "din", "err", "family", "fg", "fig", "fin",
"font", "font.axis", "font.lab", "font.main", "font.sub",
"gamma", "lab", "las", "lend", "lheight", "ljoin", "lmitre",
"lty", "lwd", "mai", "mar", "mex", "mfcol", "mfg", "mfrow",
"mgp", "mkh", "new", "oma", "omd", "omi", "pch", "pin",
"plt", "ps", "pty", "smo", "srt", "tck", "tcl", "usr",
"xaxp", "xaxs", "xaxt", "xpd", "yaxp", "yaxs", "yaxt")
options <- c("add.smooth", "browser", "check.bounds", "continue",
"contrasts", "defaultPackages", "demo.ask", "device",
"digits", "dvipscmd", "echo", "editor", "encoding",
"example.ask", "expressions", "help.search.types",
"help.try.all.packages", "htmlhelp", "HTTPUserAgent",
"internet.info", "keep.source", "keep.source.pkgs",
"locatorBell", "mailer", "max.print", "menu.graphics",
"na.action", "OutDec", "pager", "papersize",
"par.ask.default", "pdfviewer", "pkgType", "printcmd",
"prompt", "repos", "scipen", "show.coef.Pvalues",
"show.error.messages", "show.signif.stars", "str",
"stringsAsFactors", "timeout", "ts.eps", "ts.S.compat",
"unzip", "verbose", "warn", "warning.length", "width")
.addFunctionInfo(par = par, options = options)
## read.csv etc (... passed to read.table)
}
.CompletionEnv <- new.env(hash = FALSE)
## needed to save some overhead in .win32consoleCompletion
assign("linebuffer", "", env = .CompletionEnv)
assign("end", 1, env = .CompletionEnv)
assign("settings",
list(ops = TRUE, ns = TRUE,
args = TRUE, func = FALSE,
ipck = FALSE, S3 = TRUE, data = TRUE,
help = TRUE, argdb = TRUE, fuzzy = FALSE,
files = TRUE, # FIXME: deprecate in favour of quotes
quotes = TRUE),
env = .CompletionEnv)
assign("options",
list(package.suffix = "::",
funarg.suffix = "=",
function.suffix = "("),
env = .CompletionEnv)
## These keep track of attached packages and available help topics.
## Needs updating only when packages are attached.
assign("attached_packages", character(0), env = .CompletionEnv)
assign("help_topics", character(0), env = .CompletionEnv)
.FunArgEnv <- new.env(hash = TRUE, parent = emptyenv())
.initialize.argdb()
# File src/library/utils/R/data.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
data <-
function(..., list = character(), package = NULL, lib.loc = NULL,
verbose = getOption("verbose"), envir = .GlobalEnv)
{
fileExt <- function(x) {
db <- grepl("\\.[^.]+\\.(gz|bz2|xz)$", x)
ans <- sub(".*\\.", "", x)
ans[db] <- sub(".*\\.([^.]+\\.)(gz|bz2|xz)$", "\\1\\2", x[db])
ans
}
names <- c(as.character(substitute(list(...))[-1L]), list)
## Find the directories of the given packages and maybe the working
## directory.
if(!is.null(package)) {
if(!is.character(package))
stop("'package' must be a character string or NULL")
if(any(package %in% "base"))
warning("datasets have been moved from package 'base' to package 'datasets'")
if(any(package %in% "stats"))
warning("datasets have been moved from package 'stats' to package 'datasets'")
package[package %in% c("base", "stats")] <- "datasets"
}
paths <- find.package(package, lib.loc, verbose = verbose)
if(is.null(lib.loc))
paths <- c(path.package(package, TRUE),
if(!length(package)) getwd(), # ignored if NULL
paths)
paths <- unique(paths[file.exists(paths)])
## Find the directories with a 'data' subdirectory.
paths <- paths[file_test("-d", file.path(paths, "data"))]
dataExts <- tools:::.make_file_exts("data")
if(length(names) == 0L) {
## List all possible data sets.
## Build the data db.
db <- matrix(character(), nrow = 0L, ncol = 4L)
for(path in paths) {
entries <- NULL
## Use "." as the 'package name' of the working directory.
packageName <-
if(file_test("-f", file.path(path, "DESCRIPTION")))
basename(path)
else
"."
## Check for new-style 'Meta/data.rds'
if(file_test("-f", INDEX <- file.path(path, "Meta", "data.rds"))) {
entries <- readRDS(INDEX)
} else {
## No index: should only be true for ./data (packages installed with
## R >= 2.0.0 have 'Meta/data.rds')
dataDir <- file.path(path, "data")
entries <- tools::list_files_with_type(dataDir, "data")
if(length(entries)) {
entries <-
unique(tools::file_path_sans_ext(basename(entries)))
entries <- cbind(entries, "")
}
}
if(NROW(entries)) {
if(is.matrix(entries) && ncol(entries) == 2L)
db <- rbind(db, cbind(packageName, dirname(path), entries))
else
warning(gettextf("data index for package %s is invalid and will be ignored",
sQuote(packageName)),
domain=NA, call.=FALSE)
}
}
colnames(db) <- c("Package", "LibPath", "Item", "Title")
footer <- if(missing(package))
paste0("Use ",
sQuote(paste("data(package =",
".packages(all.available = TRUE))")),
"\n",
"to list the data sets in all *available* packages.")
else
NULL
y <- list(title = "Data sets", header = NULL, results = db,
footer = footer)
class(y) <- "packageIQR"
return(y)
}
paths <- file.path(paths, "data")
for(name in names) {
found <- FALSE
for(p in paths) {
## does this package have "Rdata" databases?
if(file_test("-f", file.path(p, "Rdata.rds"))) {
rds <- readRDS(file.path(p, "Rdata.rds"))
if(name %in% names(rds)) {
## found it, so copy objects from database
found <- TRUE
if(verbose)
message(sprintf("name=%s:\t found in Rdata.rds", name),
domain=NA)
thispkg <- sub(".*/([^/]*)/data$", "\\1", p)
thispkg <- sub("_.*$", "", thispkg) # versioned installs.
thispkg <- paste0("package:", thispkg)
objs <- rds[[name]] # guaranteed an exact match
lazyLoad(file.path(p, "Rdata"), envir = envir,
filter = function(x) x %in% objs)
break
} else if(verbose)
message(sprintf("name=%s:\t NOT found in names() of Rdata.rds, i.e.,\n\t%s\n",
name, paste(names(rds), collapse=",")),
domain=NA)
}
## check for zipped data dir
if(file_test("-f", file.path(p, "Rdata.zip"))) {
warning("zipped data found for package ",
sQuote(basename(dirname(p))),
".\nThat is defunct, so please re-install the package.",
domain = NA)
if(file_test("-f", fp <- file.path(p, "filelist")))
files <- file.path(p, scan(fp, what="", quiet = TRUE))
else {
warning(gettextf("file 'filelist' is missing for directory %s", sQuote(p)), domain = NA)
next
}
} else {
files <- list.files(p, full.names = TRUE)
}
files <- files[grep(name, files, fixed = TRUE)]
if(length(files) > 1L) {
## more than one candidate
o <- match(fileExt(files), dataExts, nomatch = 100L)
paths0 <- dirname(files)
## Next line seems unnecessary to MM (FIXME?)
paths0 <- factor(paths0, levels = unique(paths0))
files <- files[order(paths0, o)]
}
if(length(files)) {
## have a plausible candidate (or more)
for(file in files) {
if(verbose)
message("name=", name, ":\t file= ...",
.Platform$file.sep, basename(file), "::\t",
appendLF = FALSE, domain = NA)
ext <- fileExt(file)
## make sure the match is really for 'name.ext'
if(basename(file) != paste0(name, ".", ext))
found <- FALSE
else {
found <- TRUE
zfile <- file
zipname <- file.path(dirname(file), "Rdata.zip")
if(file.exists(zipname)) {
Rdatadir <- tempfile("Rdata")
dir.create(Rdatadir, showWarnings=FALSE)
topic <- basename(file)
rc <- .External(C_unzip, zipname, topic, Rdatadir, FALSE, TRUE, FALSE, FALSE)
if(rc == 0L) zfile <- file.path(Rdatadir, topic)
}
if(zfile != file) on.exit(unlink(zfile))
switch(ext,
R = , r = {
## ensure utils is visible
library("utils")
sys.source(zfile, chdir = TRUE,
envir = envir)
},
RData = , rdata = , rda =
load(zfile, envir = envir),
TXT = , txt = , tab = ,
tab.gz = , tab.bz2 = , tab.xz = ,
txt.gz = , txt.bz2 = , txt.xz =
assign(name,
## ensure default for as.is has not been
## overridden by options(stringsAsFactors)
read.table(zfile, header = TRUE, as.is = FALSE),
envir = envir),
CSV = , csv = ,
csv.gz = , csv.bz2 = , csv.xz =
assign(name,
read.table(zfile, header = TRUE,
sep = ";", as.is = FALSE),
envir = envir),
found <- FALSE)
}
if (found) break # from files
}
if(verbose) message(if(!found) "*NOT* ", "found", domain = NA)
}
if (found) break # from paths
}
if(!found)
warning(gettextf("data set %s not found", sQuote(name)),
domain = NA)
}
invisible(names)
}
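## Typical usage, for illustration:
## > data(package = "datasets")        # list the data sets in 'datasets'
## > data(mtcars)                      # load 'mtcars' into the global environment
## > data("morley", package = "datasets", verbose = TRUE)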
# File src/library/utils/R/databrowser.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
browseEnv <- function(envir = .GlobalEnv, pattern,
excludepatt = "^last\\.warning",
html = .Platform$GUI != "AQUA",
expanded = TRUE, properties = NULL,
main = NULL, debugMe = FALSE)
{
objlist <- ls(envir = envir, pattern = pattern)#, all.names = FALSE
if(length(iX <- grep(excludepatt, objlist)))
objlist <- objlist[ - iX]
if(debugMe) { cat("envir= "); print(envir)
cat("objlist =\n"); print(objlist) }
n <- length(objlist)
if(n == 0L) {
cat("Empty environment, nothing to do!\n")
return(invisible())
}
str1 <- function(obj) {
md <- mode(obj)
lg <- length(obj)
objdim <- dim(obj)
if(length(objdim) == 0L)
dim.field <- paste("length:", lg)
else{
dim.field <- "dim:"
for(i in seq_along(objdim))
dim.field <- paste(dim.field,objdim[i])
if(is.matrix(obj))
md <- "matrix"
}
obj.class <- oldClass(obj)
if(!is.null(obj.class)) {
md <- obj.class[1L]
if(inherits(obj, "factor"))
dim.field <- paste("levels:",length(levels(obj)))
}
list(type = md, dim.field = dim.field)
}
N <- 0L
M <- n
IDS <- rep.int(NA,n)
NAMES <- rep.int(NA,n)
TYPES <- rep.int(NA,n)
DIMS <- rep.int(NA,n)
IsRoot <- rep.int(TRUE,n)
Container <- rep.int(FALSE,n)
ItemsPerContainer <- rep.int(0,n)
ParentID <- rep.int(-1,n)
for( objNam in objlist ){
N <- N+1L
if(debugMe) cat(" ", N,":", objNam)
obj <- get(objNam, envir = envir)
sOb <- str1(obj)
if(debugMe) cat(", type=", sOb$type,",", sOb$dim.field,"\n")
## Fixme : put these 4 in a matrix or data.frame row:
IDS[N] <- N
NAMES[N] <- objNam
TYPES[N] <- sOb$type
DIMS[N] <- sOb$dim.field
if(is.recursive(obj) && !is.function(obj) && !is.environment(obj)
## includes "list", "expression", also "data.frame", ..
&& (lg <- length(obj))) {
Container[N] <- TRUE
ItemsPerContainer[N] <- lg
nm <- names(obj)
if(is.null(nm)) nm <- paste0("[[", format(1L:lg), "]]")
for(i in 1L:lg) {
M <- M+1
ParentID[M] <- N
if(nm[i] == "") nm[i] <- paste0("[[", i, "]]")
s.l <- str1(obj[[i]])
##cat(" objname:",nm[i],", type=",md.l,",",dim.field.l,"\n")
IDS <- c(IDS,M)
NAMES <- c(NAMES, nm[i])
TYPES <- c(TYPES, s.l$type)
DIMS <- c(DIMS, s.l$dim.field)
}
}## recursive
else if(!is.null(class(obj))) {
## treat some special __non-recursive__ classes:
if(inherits(obj, "table")) {
obj.nms <- attr(obj,"dimnames")
lg <- length(obj.nms)
if(length(names(obj.nms)) >0)
nm <- names(obj.nms)
else
nm <- rep.int("", lg)
Container[N] <- TRUE
ItemsPerContainer[N] <- lg
for(i in seq_len(lg)){
M <- M+1L
ParentID[M] <- N
if(nm[i] == "") nm[i] = paste0("[[",i,"]]")
md.l <- mode(obj.nms[[i]])
objdim.l <- dim(obj.nms[[i]])
if(length(objdim.l) == 0L)
dim.field.l <- paste("length:", length(obj.nms[[i]]))
else{
dim.field.l <- "dim:"
for(j in seq_along(objdim.l))
dim.field.l <- paste(dim.field.l,objdim.l[j])
}
##cat(" objname:",nm[i],", type=",md.l,",",dim.field.l,"\n")
IDS <- c(IDS,M)
NAMES <- c(NAMES, nm[i])
TYPES <- c(TYPES, md.l)
DIMS <- c(DIMS,dim.field.l)
}
}## "table"
else if(inherits(obj, "mts")) {
nm <- dimnames(obj)[[2L]]
lg <- length(nm)
Container[N] <- TRUE
ItemsPerContainer[N] <- lg
for(i in seq_len(lg)){
M <- M+1L
ParentID[M] <- N
md.l <- mode(obj[[i]])
dim.field.l <- paste("length:",dim(obj)[1L])
md.l <- "ts"
##cat(" tseries:",nm[i],", type=",md.l,",",dim.field.l,"\n")
IDS <- c(IDS,M)
NAMES <- c(NAMES, nm[i])
TYPES <- c(TYPES, md.l)
DIMS <- c(DIMS,dim.field.l)
}
}## "mts"
} ## recursive or classed
} ## "for each object"
if(debugMe) cat(" __end {for}\n ")##; browser()
Container <- c(Container, rep.int(FALSE, M-N))
IsRoot <- c(IsRoot, rep.int(FALSE, M-N))
ItemsPerContainer <- c(ItemsPerContainer, rep.int(0, M-N))
if(is.null(main))
main <- paste("R objects in", deparse(substitute(envir)))
if(is.null(properties)) {
properties <- as.list(c(date = format(Sys.time(), "%Y-%b-%d %H:%M"),
local({
si <- Sys.info()
si[c("user","nodename","sysname")]})))
}
if(html)
wsbrowser(IDS, IsRoot, Container, ItemsPerContainer, ParentID,
NAMES, TYPES, DIMS, kind = "HTML", main = main,
properties = properties, expanded)
else if(.Platform$GUI == "AQUA") {
awsbrowser <- get("wsbrowser", envir = as.environment("tools:RGUI"))
awsbrowser(as.integer(IDS), IsRoot, Container,
as.integer(ItemsPerContainer), as.integer(ParentID),
NAMES, TYPES, DIMS)
} else stop("only 'html = TRUE' is supported on this platform")
}
wsbrowser <- function(IDS, IsRoot, IsContainer, ItemsPerContainer,
ParentID, NAMES, TYPES, DIMS, expanded=TRUE,
kind = "HTML",
main = "R Workspace", properties = list(),
browser = getOption("browser"))
{
if(kind != "HTML")
stop(gettextf("kind '%s' not yet implemented", kind), domain = NA)
bold <- function(ch) paste0("<b>", ch, "</b>")
ital <- function(ch) paste0("<i>", ch, "</i>")
entry <- function(ch) paste0("<td>", ch, "</td>")
Par <- function(ch) paste0("<p>", ch, "</p>")
Trow <- function(N, ...) {
if(length(list(...)) != N) stop("wrong number of table row entries")
paste0("<tr>", paste(..., sep = ""), "</tr>")
}
## ... (the HTML page listing the workspace objects is assembled here
## and written to 'Hfile', a connection opened on the temporary file
## 'fname') ...
cat("</body>\n</html>\n", file = Hfile)
close(Hfile)
switch(.Platform$OS.type,
windows = , ## do we need anything here?
unix = { url <- fname },
)
if(substr(url, 1L, 1L) != "/")
url <- paste0("/", url)
url <- paste0("file://", URLencode(url))
browseURL(url = url, browser = browser)
cat(main, "environment is shown in browser",
if(is.character(browser)) sQuote(browser),"\n")
invisible(fname)
}
# File src/library/utils/R/de.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
de.ncols <- function(inlist)
{
ncols <- matrix(0, nrow=length(inlist), ncol=2L)
i <- 1L
for( telt in inlist ) {
if( is.matrix(telt) ) {
ncols[i, 1L] <- ncol(telt)
ncols[i, 2L] <- 2L
}
else if( is.list(telt) ) {
for( telt2 in telt )
if( !is.vector(telt2) ) stop("wrong argument to 'dataentry'")
ncols[i, 1L] <- length(telt)
ncols[i, 2L] <- 3L
}
else if( is.vector(telt) ) {
ncols[i, 1L] <- 1L
ncols[i, 2L] <- 1L
}
else stop("wrong argument to 'dataentry'")
i <- i+1L
}
return(ncols)
}
de.setup <- function(ilist, list.names, incols)
{
ilen <- sum(incols)
ivec <- vector("list", ilen)
inames <- vector("list", ilen)
i <- 1L
k <- 0L
for( telt in ilist ) {
k <- k+1L
if( is.list(telt) ) {
y <- names(telt)
for( j in seq_along(telt) ) {
ivec[[i]] <- telt[[j]]
if( is.null(y) || y[j]=="" )
inames[[i]] <- paste0("var", i)
else inames[[i]] <- y[j]
i <- i+1L
}
}
else if( is.vector(telt) ) {
ivec[[i]] <- telt
inames[[i]] <- list.names[[k]]
i <- i+1
}
else if( is.matrix(telt) ) {
y <- dimnames(telt)[[2L]]
for( j in seq_len(ncol(telt)) ) {
ivec[[i]] <- telt[, j]
if( is.null(y) || y[j]=="" )
inames[[i]] <- paste0("var", i)
else inames[[i]] <- y[j]
i <- i+1L
}
}
else stop("wrong argument to 'dataentry'")
}
names(ivec) <- inames
return(ivec)
}
de.restore <- function(inlist, ncols, coltypes, argnames, args)
{
## take the data in inlist and restore it
## to the format described by ncols and coltypes
p <- length(ncols)
rlist <- vector("list", length=p)
rnames <- vector("character", length=p)
j <- 1L
lnames <- names(inlist)
if(p) for(i in seq_len(p)) {
if(coltypes[i]==2) {
tlen <- length(inlist[[j]])
x <- matrix(0, nrow=tlen, ncol=ncols[i])
cnames <- vector("character", ncol(x))
for( ind1 in seq_len(ncols[i])) {
if(tlen != length(inlist[[j]]) ) {
warning("could not restore type information")
return(inlist)
}
x[, ind1] <- inlist[[j]]
cnames[ind1] <- lnames[j]
j <- j+1L
}
if( nrow(x) == nrow(args[[i]]) )
rn <- dimnames(args[[i]])[[1L]]
else rn <- NULL
if( any(cnames!="") )
dimnames(x) <- list(rn, cnames)
rlist[[i]] <- x
rnames[i] <- argnames[i]
}
else if(coltypes[i]==3) {
x <- vector("list", length=ncols[i])
cnames <- vector("character", ncols[i])
for( ind1 in seq_len(ncols[i])) {
x[[ind1]] <- inlist[[j]]
cnames[ind1] <- lnames[j]
j <- j+1L
}
if( any(cnames!="") )
names(x) <- cnames
rlist[[i]] <- x
rnames[i] <- argnames[i]
}
else {
rlist[[i]] <- inlist[[j]]
j <- j+1
rnames[i] <- argnames[i]
}
}
names(rlist) <- rnames
return(rlist)
}
de <- function(..., Modes=list(), Names=NULL)
{
sdata <- list(...)
snames <- as.character(substitute(list(...))[-1L])
if( is.null(sdata) ) {
if( is.null(Names) ) {
odata <- vector("list", length=max(1,length(Modes)))
}
else {
if( (length(Names) != length(Modes)) && length(Modes) ) {
warning("'modes' argument ignored")
Modes <- list()
}
odata <- vector("list", length=length(Names))
names(odata) <- Names
}
ncols <- rep.int(1, length(odata))
coltypes <- rep.int(1, length(odata))
}
else {
ncols <- de.ncols(sdata)
coltypes <- ncols[, 2L]
ncols <- ncols[, 1]
odata <- de.setup(sdata, snames, ncols)
if(length(Names))
if( length(Names) != length(odata) )
warning("'names' argument ignored")
else names(odata) <- Names
if(length(Modes))
if(length(Modes) != length(odata)) {
warning("'modes' argument ignored")
Modes <- list()
}
}
rdata <- dataentry(odata, as.list(Modes))
if(any(coltypes != 1L)) {
if(length(rdata) == sum(ncols))
rdata <- de.restore(rdata, ncols, coltypes, snames, sdata)
else warning("could not restore variables properly")
}
return(rdata)
}
data.entry <- function(..., Modes=NULL, Names=NULL)
{
tmp1 <- de(..., Modes=Modes, Names=Names)
j <- 1L
nn <- names(tmp1)
for(i in nn) {
assign(i, tmp1[[j]], envir=.GlobalEnv)
j <- j+1L
}
if(j == 1L) warning("did not assign() anything")
invisible(nn)
}
# File src/library/utils/R/debugger.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
dump.frames <- function(dumpto = "last.dump", to.file = FALSE)
{
calls <- sys.calls()
last.dump <- sys.frames()
names(last.dump) <- limitedLabels(calls)
last.dump <- last.dump[-length(last.dump)] # remove this function
attr(last.dump, "error.message") <- geterrmessage()
class(last.dump) <- "dump.frames"
if(dumpto != "last.dump") assign(dumpto, last.dump)
if (to.file) # compress=TRUE is now the default.
save(list=dumpto, file = paste(dumpto, "rda", sep = "."))
else assign(dumpto, last.dump, envir=.GlobalEnv)
invisible()
}
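## Typical usage (as in the documented example), for illustration:
## > options(error = quote(dump.frames("testdump", TRUE)))
## ... later, possibly in another session:
## > load("testdump.rda")
## > debugger(testdump)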
debugger <- function(dump = last.dump)
{
debugger.look <- function(.selection)
{
## allow e.g. '...' to fail
for(.obj in ls(envir=dump[[.selection]], all.names=TRUE))
tryCatch(assign(.obj, get(.obj, envir=dump[[.selection]])),
error=function(e) {})
cat(gettext("Browsing in the environment with call:\n "),
calls[.selection], "\n", sep = "")
rm(.obj, .selection)
browser()
}
if (!inherits(dump, "dump.frames")) {
cat(gettextf("'dump' is not an object of class %s\n",
dQuote("dump.frames")))
return(invisible())
}
err.action <- getOption("error")
on.exit(options(error=err.action))
if (length(msg <- attr(dump, "error.message")))
cat(gettext("Message: "), msg)
n <- length(dump)
if (!n) {
cat(gettextf("'dump' is empty\n"))
return(invisible())
}
calls <- names(dump)
repeat {
cat(gettext("Available environments had calls:\n"))
cat(paste0(1L:n, ": ", calls), sep = "\n")
cat(gettext("\nEnter an environment number, or 0 to exit "))
repeat {
ind <- .Call(C_menu, as.character(calls))
if(ind <= n) break
}
if(ind == 0L) return(invisible())
debugger.look(ind)
}
}
## allow for the numbering by menu here
limitedLabels <- function(value, maxwidth = getOption("width") - 5L)
{
srcrefs <- sapply(value, function(v)
if (!is.null(srcref <- attr(v, "srcref"))) {
srcfile <- attr(srcref, "srcfile")
paste0(basename(srcfile$filename), "#", srcref[1L],": ")
} else "")
value <- paste0(srcrefs, as.character(value))
if(is.null(maxwidth) || maxwidth < 40L) maxwidth <- 40L
maxwidth <- min(maxwidth, 1000L)
strtrim(value, maxwidth)
}
recover <-
function()
{
if(.isMethodsDispatchOn()) {
## turn off tracing
tState <- tracingState(FALSE)
on.exit(tracingState(tState))
}
## find an interesting environment to start from
calls <- sys.calls()
from <- 0L
n <- length(calls)
if(identical(sys.function(n), recover))
## options(error=recover) produces a call to this function as an object
n <- n - 1L
## look for a call inserted by trace() (and don't show frames below)
## this level.
for(i in rev(seq_len(n))) {
calli <- calls[[i]]
fname <- calli[[1L]]
## deparse can use more than one line
if(!is.na(match(deparse(fname)[1L],
c("methods::.doTrace", ".doTrace")))) {
from <- i-1L
break
}
}
## if no trace, look for the first frame from the bottom that is not
## stop or recover
if(from == 0L)
for(i in rev(seq_len(n))) {
calli <- calls[[i]]
fname <- calli[[1L]]
if(!is.name(fname) ||
is.na(match(as.character(fname), c("recover", "stop", "Stop")))) {
from <- i
break
}
}
if(from > 0L) {
if(!interactive()) {
try(dump.frames())
cat(gettext("recover called non-interactively; frames dumped, use debugger() to view\n"))
return(NULL)
}
else if(identical(getOption("show.error.messages"), FALSE)) # from try(silent=TRUE)?
return(NULL)
calls <- limitedLabels(calls[1L:from])
repeat {
which <- menu(calls,
title="\nEnter a frame number, or 0 to exit ")
if(which)
eval(substitute(browser(skipCalls=skip),
list(skip=7-which)), envir = sys.frame(which))
else
break
}
}
else
cat(gettext("No suitable frames for recover()\n"))
}
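## Typical usage, for illustration:
## > options(error = recover)   # browse the active frames when an error occurs
## In non-interactive sessions this falls back to dump.frames(), as
## implemented above.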
# File src/library/utils/R/demo.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
demo <-
function(topic, package = NULL, lib.loc = NULL,
character.only = FALSE, verbose = getOption("verbose"),
echo = TRUE, ask = getOption("demo.ask"),
encoding = getOption("encoding"))
{
paths <- find.package(package, lib.loc, verbose = verbose)
## Find the directories with a 'demo' subdirectory.
paths <- paths[file_test("-d", file.path(paths, "demo"))]
## Earlier versions remembered given packages with no 'demo'
## subdirectory, and warned about them.
if(missing(topic)) {
## List all possible demos.
## Build the demo db.
db <- matrix(character(), nrow = 0L, ncol = 4L)
for(path in paths) {
entries <- NULL
## Check for new-style 'Meta/demo.rds', then for '00Index'.
if(file_test("-f", INDEX <- file.path(path, "Meta", "demo.rds"))) {
entries <- readRDS(INDEX)
}
if(NROW(entries)) {
db <- rbind(db,
cbind(basename(path), dirname(path),
entries))
}
}
colnames(db) <- c("Package", "LibPath", "Item", "Title")
footer <- if(missing(package))
paste0("Use ",
sQuote(paste("demo(package =",
".packages(all.available = TRUE))")),
"\n",
"to list the demos in all *available* packages.")
else
NULL
y <- list(title = "Demos", header = NULL, results = db,
footer = footer)
class(y) <- "packageIQR"
return(y)
}
if(!character.only) {
topic <- substitute(topic)
if (is.call(topic) && (topic[[1L]] == "::" || topic[[1L]] == ":::")) {
package <- as.character(topic[[2L]])
topic <- as.character(topic[[3L]])
} else
topic <- as.character(topic)
}
available <- character()
paths <- file.path(paths, "demo")
for(p in paths) {
files <- basename(tools::list_files_with_type(p, "demo"))
## Files with base names sans extension matching topic
files <- files[topic == tools::file_path_sans_ext(files)]
if(length(files))
available <- c(available, file.path(p, files))
}
if(length(available) == 0L)
stop(gettextf("No demo found for topic %s", sQuote(topic)), domain = NA)
if(length(available) > 1L) {
available <- available[1L]
warning(gettextf("Demo for topic %s' found more than once,\nusing the one found in %s",
sQuote(topic), sQuote(dirname(available[1L]))), domain = NA)
}
## now figure out if the package has an encoding
pkgpath <- dirname(dirname(available))
if (file.exists(file <- file.path(pkgpath, "Meta", "package.rds"))) {
desc <- readRDS(file)$DESCRIPTION
if (length(desc) == 1L) {
enc <- as.list(desc)[["Encoding"]]
if(!is.null(enc)) encoding <- enc
}
}
if(ask == "default")
ask <- echo && grDevices::dev.interactive(orNone = TRUE)
if(.Device != "null device") {
oldask <- grDevices::devAskNewPage(ask = ask)
on.exit(grDevices::devAskNewPage(oldask), add = TRUE)
}
op <- options(device.ask.default = ask)
on.exit(options(op), add = TRUE)
if (echo) {
cat("\n\n",
"\tdemo(", topic, ")\n",
"\t---- ", rep.int("~", nchar(topic, type = "w")), "\n",
sep = "")
if(ask && interactive())
readline("\nType to start : ")
}
source(available, echo = echo, max.deparse.length = Inf,
keep.source = TRUE, encoding = encoding)
}
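## For illustration (indicative):
## > demo()                         # list the demos in attached packages
## > demo(graphics)                 # run the 'graphics' demo
## > demo("lm.glm", package = "stats", character.only = TRUE)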
# File src/library/utils/R/edit.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
dataentry <- function (data, modes)
{
if(!is.list(data) || !length(data) || !all(sapply(data, is.vector)))
stop("invalid 'data' argument")
if(!is.list(modes) ||
(length(modes) && !all(sapply(modes, is.character))))
stop("invalid 'modes' argument")
.External2(C_dataentry, data, modes)
}
View <- function (x, title)
{
## could multi-line deparse with maliciously-designed inputs
if(missing(title)) title <- paste("Data:", deparse(substitute(x))[1])
as.num.or.char <- function(x)
{
if (is.character(x)) x
else if (is.numeric(x)) {storage.mode(x) <- "double"; x}
else as.character(x)
}
x0 <- as.data.frame(x)
x <- lapply(x0, as.num.or.char)
rn <- row.names(x0)
if(any(rn != seq_along(rn))) x <- c(list(row.names = rn), x)
if(!is.list(x) || !length(x) || !all(sapply(x, is.atomic)) ||
!max(sapply(x, length)))
stop("invalid 'x' argument")
invisible(.External2(C_dataviewer, x, title))
}
edit <- function(name,...)UseMethod("edit")
edit.default <-
function (name = NULL, file = "", title = NULL,
editor = getOption("editor"), ...)
{
if (is.null(title)) title <- deparse(substitute(name))
if (is.function(editor)) invisible(editor(name, file, title))
else .External2(C_edit, name, file, title, editor)
}
edit.data.frame <-
function(name, factor.mode = c("character", "numeric"),
edit.row.names = any(row.names(name) != 1L:nrow(name)), ...)
{
if (.Platform$OS.type == "unix" && .Platform$GUI != "AQUA")
if(.Platform$GUI == "unknown" || Sys.getenv("DISPLAY") == "" )
return (edit.default(name, ...))
is.vector.unclass <- function(x) is.vector(unclass(x))
if (length(name) && !all(sapply(name, is.vector.unclass)
| sapply(name, is.factor)))
stop("can only handle vector and factor elements")
factor.mode <- match.arg(factor.mode)
as.num.or.char <- function(x)
{
if (is.numeric(x)) x
else if (is.factor(x) && factor.mode == "numeric") as.numeric(x)
else as.character(x)
}
attrlist <- lapply(name, attributes)
datalist <- lapply(name, as.num.or.char)
factors <- if (length(name))
which(sapply(name, is.factor))
else
numeric()
logicals <- if (length(name))
which(sapply(name, is.logical))
else
numeric()
if(length(name)) {
has_class <-
sapply(name, function(x) (is.object(x) || isS4(x)) && !is.factor(x))
if(any(has_class))
warning(sprintf(ngettext(sum(has_class),
"class discarded from column %s",
"classes discarded from columns %s"),
paste(sQuote(names(name)[has_class]),
collapse=", ")),
domain = NA, call. = FALSE, immediate. = TRUE)
}
modes <- lapply(datalist, mode)
if (edit.row.names) {
datalist <- c(list(row.names = row.names(name)), datalist)
modes <- c(list(row.names = "character"), modes)
}
rn <- attr(name, "row.names")
out <- .External2(C_dataentry, datalist, modes)
if(length(out) == 0L) {
## e.g. started with 0-col data frame or NULL, and created no cols
return (name)
}
lengths <- sapply(out, length)
maxlength <- max(lengths)
if (edit.row.names) rn <- out[[1L]]
for (i in which(lengths != maxlength))
out[[i]] <- c(out[[i]], rep.int(NA, maxlength - lengths[i]))
if (edit.row.names) {
out <- out[-1L]
if((ln <- length(rn)) < maxlength)
rn <- c(rn, paste0("row", (ln+1):maxlength))
} else if(length(rn) != maxlength) rn <- seq_len(maxlength)
for (i in factors) {
if(factor.mode != mode(out[[i]])) next # user might have switched mode
a <- attrlist[[i]]
if (factor.mode == "numeric") {
o <- as.integer(out[[i]])
ok <- is.na(o) | (o > 0 & o <= length(a$levels))
if (any(!ok)) {
warning(gettextf("invalid factor levels in '%s'", names(out)[i]),
domain = NA)
o[!ok] <- NA
}
attributes(o) <- a
} else {
o <- out[[i]]
if (any(new <- is.na(match(o, c(a$levels, NA_integer_))))) {
new <- unique(o[new])
warning(gettextf("added factor levels in '%s'", names(out)[i]),
domain = NA)
o <- factor(o, levels=c(a$levels, new),
ordered = is.ordered(o))
} else {
o <- match(o, a$levels)
attributes(o) <- a
}
}
out[[i]] <- o
}
for (i in logicals) out[[i]] <- as.logical(out[[i]])
attr(out, "row.names") <- rn
attr(out, "class") <- "data.frame"
if (edit.row.names) {
if(anyDuplicated(rn)) {
warning("edited row names contain duplicates and will be ignored")
attr(out, "row.names") <- seq_len(maxlength)
}
}
out
}
edit.matrix <-
function(name, edit.row.names = !is.null(dn[[1L]]), ...)
{
if (.Platform$OS.type == "unix" && .Platform$GUI != "AQUA")
if(.Platform$GUI == "unknown" || Sys.getenv("DISPLAY")=="" )
return (edit.default(name, ...))
if(!is.matrix(name) ||
! mode(name) %in% c("numeric", "character", "logical") ||
any(dim(name) < 1))
stop("invalid input matrix")
## logical matrices will be edited as character
logicals <- is.logical(name)
if (logicals) mode(name) <- "character"
if(is.object(name) || isS4(name))
warning("class of 'name' will be discarded",
call. = FALSE, immediate. = TRUE)
dn <- dimnames(name)
datalist <- split(name, col(name))
if(!is.null(dn[[2L]])) names(datalist) <- dn[[2L]]
else names(datalist) <- paste0("col", 1L:ncol(name))
modes <- as.list(rep.int(mode(name), ncol(name)))
## guard against user error (PR#10500)
if(edit.row.names && is.null(dn[[1L]]))
stop("cannot edit NULL row names")
if (edit.row.names) {
datalist <- c(list(row.names = dn[[1L]]), datalist)
modes <- c(list(row.names = "character"), modes)
}
out <- .External2(C_dataentry, datalist, modes)
lengths <- sapply(out, length)
maxlength <- max(lengths)
if (edit.row.names) rn <- out[[1L]]
for (i in which(lengths != maxlength))
out[[i]] <- c(out[[i]], rep.int(NA, maxlength - lengths[i]))
if (edit.row.names) {
out <- out[-1L]
if((ln <- length(rn)) < maxlength)
rn <- c(rn, paste0("row", (ln+1L):maxlength))
}
out <- do.call("cbind", out)
if (edit.row.names)
rownames(out) <- rn
else if(!is.null(dn[[1L]]) && length(dn[[1L]]) == maxlength)
rownames(out) <- dn[[1L]]
if (logicals) mode(out) <- "logical"
out
}
file.edit <-
function (..., title = file, editor=getOption("editor"), fileEncoding="")
{
file <- path.expand(c(...))
title <- rep(as.character(title), len=length(file))
if(nzchar(fileEncoding) && fileEncoding != "native.enc") {
tfile <- file
for(i in seq_along(file)) {
## We won't know when the editor is done with this file,
## so leave it around for the rest of the R session.
tfile <- tempfile()
con <- file(file[i], encoding = fileEncoding)
writeLines(readLines(con), tfile)
close(con)
file[i] <- tfile
}
}
if (is.function(editor)) invisible(editor(file = file, title = title))
else invisible(.External2(C_fileedit, file, title, editor))
}
vi <- function(name = NULL, file = "")
edit.default(name, file, editor = "vi")
emacs <- function(name = NULL, file = "")
edit.default(name, file, editor = "emacs")
xemacs <- function(name = NULL, file = "")
edit.default(name, file, editor = "xemacs")
xedit <- function(name = NULL, file = "")
edit.default(name, file, editor = "xedit")
pico <- function(name = NULL, file = "")
edit.default(name, file, editor = "pico")
# File src/library/utils/R/example.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
## Examples as from 2.11.0 will always be new-style and hence in UTF-8
example <-
function(topic, package = NULL, lib.loc = NULL,
character.only = FALSE, give.lines = FALSE, local = FALSE,
echo = TRUE, verbose = getOption("verbose"), setRNG = FALSE,
ask = getOption("example.ask"),
prompt.prefix = abbreviate(topic, 6),
run.dontrun = FALSE)
{
if (!character.only) {
topic <- substitute(topic)
if(!is.character(topic)) topic <- deparse(topic)[1L]
}
pkgpaths <- find.package(package, lib.loc, verbose = verbose)
## will return at most one path
file <- index.search(topic, pkgpaths, TRUE)
if(!length(file)) {
warning(gettextf("no help found for %s", sQuote(topic)), domain = NA)
return(invisible())
}
packagePath <- dirname(dirname(file))
pkgname <- basename(packagePath)
lib <- dirname(packagePath)
tf <- tempfile("Rex")
tools::Rd2ex(.getHelpFile(file), tf, commentDontrun = !run.dontrun)
if (!file.exists(tf)) {
if(give.lines) return(character())
warning(gettextf("%s has a help file but no examples", sQuote(topic)),
domain = NA)
return(invisible())
}
on.exit(unlink(tf))
if(give.lines)
return(readLines(tf))
if(pkgname != "base")
library(pkgname, lib.loc = lib, character.only = TRUE)
if(!is.logical(setRNG) || setRNG) {
## save current RNG state:
if((exists(".Random.seed", envir = .GlobalEnv))) {
oldSeed <- get(".Random.seed", envir = .GlobalEnv)
on.exit(assign(".Random.seed", oldSeed, envir = .GlobalEnv),
add = TRUE)
} else {
oldRNG <- RNGkind()
on.exit(RNGkind(oldRNG[1L], oldRNG[2L]), add = TRUE)
}
## set RNG
if(is.logical(setRNG)) { # i.e. == TRUE: use the same as R CMD check
## see share/R/examples-header.R
RNGkind("default", "default")
set.seed(1)
} else eval(setRNG)
}
zz <- readLines(tf, n = 1L)
skips <- 0L
if (echo) {
## skip over header
zcon <- file(tf, open="rt")
while(length(zz) && !length(grep("^### \\*\\*", zz))) {
skips <- skips + 1L
zz <- readLines(zcon, n=1L)
}
close(zcon)
}
if(ask == "default")
ask <- echo && grDevices::dev.interactive(orNone = TRUE)
if(ask) {
if(.Device != "null device") {
oldask <- grDevices::devAskNewPage(ask = TRUE)
if(!oldask) on.exit(grDevices::devAskNewPage(oldask), add = TRUE)
}
##
## This ensures that any device opened by the examples will
## have ask = TRUE set, but it does not return the device to
## the expected 'ask' state if it is left as the current device.
##
op <- options(device.ask.default = TRUE)
on.exit(options(op), add = TRUE)
}
source(tf, local, echo = echo,
prompt.echo = paste0(prompt.prefix, getOption("prompt")),
continue.echo = paste0(prompt.prefix, getOption("continue")),
verbose = verbose, max.deparse.length = Inf, encoding = "UTF-8",
skip.echo = skips, keep.source=TRUE)
}
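## Illustrative usage (requires the relevant help files to be installed;
## not run as part of the package):
##   example(mean)                      # run the examples from ?mean
##   example("glm", package = "stats", setRNG = TRUE)  # RNG set as in R CMD check
##   example(mean, give.lines = TRUE)   # return the extracted example code only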
# File src/library/utils/R/filetest.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### ** file_test
file_test <-
function(op, x, y)
{
## Provide shell-style '-f', '-d', '-x', '-nt' and '-ot' tests.
## Note that file.exists() only tests existence ('test -e' on some
## systems), and that our '-f' tests for existence and not being a
## directory (the GNU variant tests for being a regular file).
## Note: vectorized in x and y.
switch(op,
"-f" = !is.na(isdir <- file.info(x)$isdir) & !isdir,
"-d" = !is.na(isdir <- file.info(x)$isdir) & isdir,
"-nt" = (!is.na(mt.x <- file.info(x)$mtime)
& !is.na(mt.y <- file.info(y)$mtime)
& (mt.x > mt.y)),
"-ot" = (!is.na(mt.x <- file.info(x)$mtime)
& !is.na(mt.y <- file.info(y)$mtime)
& (mt.x < mt.y)),
"-x" = (file.access(x, 1L) == 0L),
stop(gettextf("test '%s' is not available", op),
domain = NA))
}
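## Illustrative usage (results depend on the local file system; "new.R" and
## "old.R" below are hypothetical file names; not run as part of the package):
##   file_test("-d", tempdir())      # TRUE: an existing directory
##   file_test("-f", tempdir())      # FALSE: exists, but is a directory
##   file_test("-x", R.home("bin"))  # TRUE where the bin directory is executable
##   file_test("-nt", "new.R", "old.R")  # TRUE only if both exist and new.R is newer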
# File src/library/utils/R/fineLineNum.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 2009-2014 Duncan Murdoch and the R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
.normalizePath <- function(path, wd) {
if (!missing(wd) && !is.null(wd)) {
oldwd <- setwd(wd)
on.exit(setwd(oldwd))
}
suppressWarnings(normalizePath(path))
}
fnLineNum <- function(f, srcfile, line, nameonly=TRUE) {
stopifnot(length(line) == 1)
targetfilename <- .normalizePath(srcfile$filename)
fnsrc <- attr(f, "srcref")
if (!is.null(fnsrc))
fnsrc <- attr(fnsrc, "srcfile")
else
fnsrc <- attr(body(f), "srcfile")
if (is.null(fnsrc)) return(NULL)
if (missing(srcfile)) {
srcfile <- fnsrc
}
isBrace <- function(expr)
typeof(expr) == "symbol" && identical(as.character(expr), "{")
lineNumInExpr <- function(expr, haveSrcrefs = FALSE) {
if (typeof(expr) == "language") {
srcrefs <- attr(expr, "srcref")
for (i in seq_along(expr)) {
srcref <- srcrefs[[i]]
# Check for non-matching range
if (!is.null(srcref) && (srcref[1] > line || line > srcref[3])) next
# We're in range. See if there's a finer division
finer <- lineNumInExpr(expr[[i]], haveSrcrefs || !is.null(srcrefs))
if (!is.null(finer)) {
return(c(i, finer))
}
# Do we have a srcref? It must point to this expression.
# But do avoid matching the opening brace in a block: match the whole block
# instead.
havebrace <- isBrace(expr[[i]])
if (!is.null(srcref)
&& (!haveSrcrefs || !havebrace)) {
return(i)
}
}
}
return(NULL)
}
perfectMatch <- identical(.normalizePath(fnsrc$filename, fnsrc$wd), targetfilename)
if (perfectMatch ||
(nameonly && !is.null(fnsrc$filename) && basename(fnsrc$filename) == basename(targetfilename))) {
if (!is.na(srcfile$timestamp) && !is.null(fnsrc$timestamp) && fnsrc$timestamp != srcfile$timestamp)
timediff <- fnsrc$timestamp - srcfile$timestamp
else
timediff <- 0
at <- lineNumInExpr(body(f))
if (!is.null(at))
return(list(at=at, filename=.normalizePath(fnsrc$filename, fnsrc$wd), line=line,
timediff=timediff))
}
return(NULL)
}
findLineNum <- function(srcfile, line, nameonly=TRUE, envir=parent.frame(),
lastenv) {
count <- 0
result <- list()
if (!inherits(srcfile, "srcfile")) {
if (missing(line)) {
line <- as.numeric(sub(".*#", "", srcfile))
if (is.na(line)) stop("Line number missing")
srcfile <- sub("#[^#]*", "", srcfile)
}
srcfile <- srcfile(srcfile)
}
if (missing(lastenv)) {
if (missing(envir)) lastenv <- globalenv()
else lastenv <- emptyenv()
}
if (!is.environment(envir))
envir <- environment(envir)
fns <- character()
envirs <- list()
e <- envir
repeat {
fns <- c(fns, lsf.str(envir=e, all=TRUE))
oldlen <- length(envirs)
length(envirs) <- length(fns)
if (length(envirs) > oldlen)
for (i in seq.int(oldlen+1, length(envirs))) envirs[[i]] <- e
if (identical(e, lastenv) || identical(e, emptyenv())) break
e <- parent.env(e)
}
for (i in seq_along(fns)) {
functionName <- fns[i]
fn <- get(functionName, envir=envirs[[i]])
loc <- fnLineNum(fn, srcfile=srcfile, line=line,
nameonly=nameonly)
if (!is.null(loc)) {
count <- count + 1
result[[count]] <- c(list(name=functionName, env=envirs[[i]]), loc)
}
gen <- tryCatch(methods::isGeneric(functionName, envirs[[i]], fdef=fn),
error = identity)
if (isTRUE(gen)) {
e1 <- environment(fn)$.AllMTable
if (!is.null(e1)) {
sigs <- ls(e1)
for (j in seq_along(sigs)) {
sig <- sigs[j]
fn <- get(sig, e1)
if (typeof(fn) != "closure") next
loc <- fnLineNum(fn, srcfile=srcfile, line=line,
nameonly=nameonly)
if (is.null(loc)
&& length(body(fn)) > 1
&& length(body(fn)[[2]]) > 2
&& typeof(body(fn)[[c(2,3)]]) == "closure") {
# desperate try: look for
# .local <- original defn
fn2 <- body(fn)[[c(2,3)]]
loc <- fnLineNum(fn2, srcfile=srcfile, line=line,
nameonly=nameonly)
# FIXME: can trace() set a breakpoint
# within a function like this?
if (!is.null(loc)) loc$at <- c(2,3)
}
if (!is.null(loc)) {
count <- count + 1
result[[count]] <- c(list(name=functionName, env=envirs[[i]],
signature=strsplit(sig, "#")[[1]]), loc)
}
}
}
}
}
return(structure(result, class="findLineNumResult"))
}
print.findLineNumResult <- function(x, steps=TRUE, ...) {
if (!length(x)) cat("No source refs found.\n")
filename <- NULL
line <- 0
for (i in seq_along(x)) {
if (!identical(filename, x[[i]]$filename) ||
!identical(line, x[[i]]$line)) {
filename <- x[[i]]$filename
line <- x[[i]]$line
cat(filename, "#", line, ":\n", sep = "")
}
cat(" ", x[[i]]$name, if (steps) paste(" step ", paste(x[[i]]$at, collapse=",")) else "", sep = "")
if (!is.null(x[[i]]$signature))
cat(" signature ", paste(x[[i]]$signature, collapse=","), sep = "")
cat(" in ", format(x[[i]]$env), "\n", sep = "")
}
}
setBreakpoint <- function(srcfile, line, nameonly=TRUE, envir=parent.frame(), lastenv,
verbose = TRUE, tracer, print=FALSE, clear=FALSE,
...) {
if (missing(lastenv)) {
if (missing(envir)) lastenv <- globalenv()
else lastenv <- emptyenv()
}
locations <- findLineNum(srcfile, line, nameonly, envir, lastenv)
if (verbose) print(locations, steps=!clear)
breakpoint <- missing(tracer)
while (length(locations)) {
what <- locations[[1]]$name
where <- locations[[1]]$env
at <- list(locations[[1]]$at)
signature <- locations[[1]]$signature
if (breakpoint) {
filename <- basename(locations[[1]]$filename)
linenum <- locations[[1]]$line
tracer <- bquote({cat(paste0(.(filename), "#", .(linenum), "\n"))
browser(skipCalls=4L)})
}
locations[[1]] <- NULL
i <- 1
while (i <= length(locations)) {
if (what == locations[[i]]$name &&
identical(where, locations[[i]]$env) &&
identical(signature, locations[[i]]$signature)) {
at <- c(at, list(locations[[i]]))
locations[[i]] <- NULL
} else
i <- i+1
}
if (clear) {
if (is.null(signature))
untrace(what, where=where)
else
untrace(what, signature=signature, where=where)
} else if (is.null(signature))
trace(what, tracer, at=at, where=where, print=print, ...)
else
trace(what, signature=signature, tracer, at=at, where=where, ...)
}
}
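## Illustrative sketch ("code.R" is a hypothetical file sourced with
## keep.source = TRUE and defining a function around line 12; not run here):
##   source("code.R", keep.source = TRUE)
##   findLineNum("code.R#12")                 # functions/steps covering line 12
##   setBreakpoint("code.R#12")               # trace() them with a browser() call
##   setBreakpoint("code.R#12", clear = TRUE) # remove the breakpoints again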
# File src/library/utils/R/fix.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
fix <- function (x, ...)
{
subx <- substitute(x)
if (is.name(subx))
subx <- deparse(subx)
if (!is.character(subx) || length(subx) != 1L)
stop("'fix' requires a name")
parent <- parent.frame()
if (exists(subx, envir=parent, inherits = TRUE))
x <- edit(get(subx, envir=parent), title = subx, ...)
else {
x <- edit(function(){}, title = subx, ...)
environment(x) <- .GlobalEnv
}
assign(subx, x, envir = .GlobalEnv)
}
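## Illustrative usage (interactive only; 'my_fun' is a hypothetical object):
##   fix(my_fun)   # edit a copy of my_fun and assign the result in .GlobalEnv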
# File src/library/utils/R/format.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
formatUL <-
function(x, label = "*", offset = 0,
width = 0.9 * getOption("width"))
{
if(!length(x))
return(character())
.format_rl_table(label, x, offset, width)
}
formatOL <-
function(x, type = "arabic", offset = 0, start = 1,
width = 0.9 * getOption("width"))
{
if(!length(x))
return(character())
type_tokens <- c("1", "A", "a", "I", "i")
type_full_names <- c("arabic", "Alph", "alph", "Roman", "roman")
type <- match.arg(type, c(type_tokens, type_full_names))
if(nchar(type, "b") > 1L)
type <- type_tokens[match(type, type_full_names)]
len <- length(x)
labels <- seq.int(start[1L], length.out = len)
upper <- labels[len]
if(type %in% c("A", "a")) {
if(upper > 26L)
stop(gettextf("too many list items (at most up to %d)", 26L),
domain = NA)
labels <- if(type == "A")
LETTERS[labels]
else
letters[labels]
}
else if(type %in% c("I", "i")) {
if(upper > 3899L)
stop(gettextf("too many list items (at most up to %d)", 3899L),
domain = NA)
labels <- as.character(as.roman(labels))
if(type == "i")
labels <- tolower(labels)
}
.format_rl_table(sprintf("%s.", labels), x, offset, width)
}
.format_rl_table <-
function(labels, x, offset = 0, width = 0.9 * getOption("width"),
sep = " ")
{
## Format a 2-column table with right-justified item labels and
## left-justified text. Somewhat tricky because strwrap() eats up
## leading whitespace ...
.make_empty_string <- function(n) {
paste(rep.int(" ", n), collapse = "")
}
labels <- format(labels, justify = "right")
len <- length(x)
delta <- nchar(labels[1L], "width") + offset
x <- strwrap(x, width = width - delta - nchar(sep, "width"),
simplify = FALSE)
nlines <- cumsum(sapply(x, length))
prefix <- rep.int(.make_empty_string(delta), nlines[len])
prefix[1L + c(0L, nlines[-len])] <-
paste0(.make_empty_string(offset), labels)
paste(prefix, unlist(x), sep = sep)
}
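## Illustrative examples (not run as part of the package):
##   writeLines(formatUL(c("first item", "second item")))
##   ## * first item
##   ## * second item
##   writeLines(formatOL(c("alpha", "beta"), type = "A"))
##   ## A. alpha
##   ## B. beta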
# File src/library/utils/R/frametools.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
stack <- function(x, ...) UseMethod("stack")
stack.data.frame <- function(x, select, ...)
{
if (!missing(select)) {
nl <- as.list(1L:ncol(x))
names(nl) <- names(x)
vars <- eval(substitute(select),nl, parent.frame())
x <- x[, vars, drop=FALSE]
}
keep <- unlist(lapply(x, is.vector))
if(!sum(keep)) stop("no vector columns were selected")
if(!all(keep))
warning("non-vector columns will be ignored")
x <- x[, keep, drop = FALSE]
## need to avoid promotion to factors
data.frame(values = unlist(unname(x)),
ind = factor(rep.int(names(x), lapply(x, length))),
stringsAsFactors = FALSE)
}
stack.default <- function(x, ...)
{
x <- as.list(x)
keep <- unlist(lapply(x, is.vector))
if(!sum(keep)) stop("at least one vector element is required")
if(!all(keep)) warning("non-vector elements will be ignored")
x <- x[keep]
data.frame(values = unlist(unname(x)),
ind = factor(rep.int(names(x), lapply(x, length))),
stringsAsFactors = FALSE)
}
unstack <- function(x, ...) UseMethod("unstack")
unstack.data.frame <- function(x, form, ...)
{
form <- if(missing(form)) stats::formula(x) else stats::as.formula(form)
if (length(form) < 3)
stop("'form' must be a two-sided formula")
res <- c(tapply(eval(form[[2L]], x), eval(form[[3L]], x), as.vector))
if (length(res) >= 2L && any(diff(unlist(lapply(res, length))) != 0L))
return(res)
data.frame(res, stringsAsFactors = FALSE)
}
unstack.default <- function(x, form, ...)
{
x <- as.list(x)
form <- stats::as.formula(form)
if ((length(form) < 3) || (length(all.vars(form))>2))
stop("'form' must be a two-sided formula with one term on each side")
res <- c(tapply(eval(form[[2L]], x), eval(form[[3L]], x), as.vector))
if (length(res) >= 2L && any(diff(unlist(lapply(res, length))) != 0L))
return(res)
data.frame(res, stringsAsFactors = FALSE)
}
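## Illustrative round trip (not run as part of the package):
##   d <- data.frame(x = 1:2, y = 3:4)
##   s <- stack(d)   # columns 'values' (1 2 3 4) and 'ind' (x x y y)
##   unstack(s)      # recovers a data frame with columns 'x' and 'y'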
# File src/library/utils/R/glob2rx.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
glob2rx <- function(pattern, trim.head = FALSE, trim.tail = TRUE)
{
## Purpose: Change 'ls' aka 'wildcard' aka 'globbing' _pattern_ to
## Regular Expression (as in grep, perl, emacs, ...)
## -------------------------------------------------------------------------
## Author: Martin Maechler ETH Zurich, ~ 1991
## New version using [g]sub() : 2004
p <- gsub("\\.","\\\\.", paste0("^", pattern, "$"))
p <- gsub("\\?", ".", gsub("\\*", ".*", p))
## 'Escaping hell' : at least for '(', '[' and '{'
p <- gsub("([^\\])\\(", "\\1\\\\(", p)
p <- gsub("([^\\])\\[", "\\1\\\\[", p)
p <- gsub("([^\\])\\{", "\\1\\\\{", p)
## these are trimming ".*$" and "^.*" - in most cases only for aesthetics
if(trim.tail) p <- sub("\\.\\*\\$$", "", p)
if(trim.head) p <- sub("\\^\\.\\*", "", p)
p
}
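## Illustrative examples (not run as part of the package):
##   glob2rx("abc.*")                    # "^abc\\."
##   glob2rx("a?b.*")                    # "^a.b\\."
##   glob2rx("*.txt")                    # "^.*\\.txt$"
##   glob2rx("*.txt", trim.head = TRUE)  # "\\.txt$"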
# File src/library/utils/R/head.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### placed in the public domain 2002
### Patrick Burns patrick@burns-stat.com
###
### Adapted for negative arguments by Vincent Goulet, 2006
head <- function(x, ...) UseMethod("head")
head.default <- function(x, n = 6L, ...)
{
stopifnot(length(n) == 1L)
n <- if (n < 0L) max(length(x) + n, 0L) else min(n, length(x))
x[seq_len(n)]
}
## head.matrix and tail.matrix are now exported (to be used for other classes)
head.data.frame <- head.matrix <- function(x, n = 6L, ...)
{
stopifnot(length(n) == 1L)
n <- if (n < 0L) max(nrow(x) + n, 0L) else min(n, nrow(x))
x[seq_len(n), , drop=FALSE]
}
head.table <- function(x, n = 6L, ...) {
(if(length(dim(x)) == 2L) head.matrix else head.default)(x, n=n)
}
head.ftable <- function(x, n = 6L, ...) {
r <- format(x)
dimnames(r) <- list(rep.int("", nrow(r)), rep.int("", ncol(r)))
noquote(head.matrix(r, n = n + nrow(r) - nrow(x), ...))
}
head.function <- function(x, n = 6L, ...)
{
lines <- as.matrix(deparse(x))
dimnames(lines) <- list(seq_along(lines),"")
noquote(head(lines, n=n))
}
tail <- function(x, ...) UseMethod("tail")
tail.default <- function(x, n = 6L, ...)
{
stopifnot(length(n) == 1L)
xlen <- length(x)
n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen)
x[seq.int(to = xlen, length.out = n)]
}
tail.data.frame <- function(x, n = 6L, ...)
{
stopifnot(length(n) == 1L)
nrx <- nrow(x)
n <- if (n < 0L) max(nrx + n, 0L) else min(n, nrx)
x[seq.int(to = nrx, length.out = n), , drop = FALSE]
}
tail.matrix <- function(x, n = 6L, addrownums = TRUE, ...)
{
stopifnot(length(n) == 1L)
nrx <- nrow(x)
n <- if (n < 0L) max(nrx + n, 0L) else min(n, nrx)
sel <- seq.int(to = nrx, length.out = n)
ans <- x[sel, , drop = FALSE]
if (addrownums && is.null(rownames(x)))
rownames(ans) <- paste0("[", sel, ",]")
ans
}
tail.table <- function(x, n = 6L, addrownums = TRUE, ...) {
(if(length(dim(x)) == 2L) tail.matrix else tail.default)(x, n=n,
addrownums = addrownums, ...)
}
tail.ftable <- function(x, n = 6L, addrownums = FALSE, ...) {
r <- format(x)
dimnames(r) <- list(if(!addrownums) rep.int("", nrow(r)),
rep.int("", ncol(r)))
noquote(tail.matrix(r, n = n, addrownums = addrownums, ...))
}
tail.function <- function(x, n = 6L, ...)
{
lines <- as.matrix(deparse(x))
dimnames(lines) <- list(seq_along(lines),"")
noquote(tail(lines, n=n))
}
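## Illustrative examples (not run as part of the package):
##   head(letters)              # "a" "b" "c" "d" "e" "f"
##   head(letters, -20)         # the first 6 letters: all but the last 20
##   tail(1:10, 3)              # 8 9 10
##   tail(matrix(1:12, 4), 2)   # last two rows, labelled "[3,]" and "[4,]"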
# File src/library/utils/R/help.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
help <-
function(topic, package = NULL, lib.loc = NULL,
verbose = getOption("verbose"),
try.all.packages = getOption("help.try.all.packages"),
help_type = getOption("help_type"))
{
types <- c("text", "html", "pdf")
if(!missing(package)) # Don't check for NULL; may be nonstandard eval
if(is.name(y <- substitute(package)))
package <- as.character(y)
## If no topic was given ...
if(missing(topic)) {
if(!is.null(package)) { # "Help" on package.
help_type <- if(!length(help_type)) "text"
else match.arg(tolower(help_type), types)
## Carter Butts and others misuse 'help(package=)' in startup
if (interactive() && help_type == "html") {
if (tools:::httpdPort == 0L) tools::startDynamicHelp()
if (tools:::httpdPort <= 0L) # fallback to text help
return(library(help = package, lib.loc = lib.loc,
character.only = TRUE))
browser <- if (.Platform$GUI == "AQUA") {
get("aqua.browser", envir = as.environment("tools:RGUI"))
} else getOption("browser")
browseURL(paste0("http://127.0.0.1:", tools:::httpdPort,
"/library/", package, "/html/00Index.html"),
browser)
return(invisible())
} else return(library(help = package, lib.loc = lib.loc,
character.only = TRUE))
}
if(!is.null(lib.loc)) # text "Help" on library.
return(library(lib.loc = lib.loc))
## ultimate default is to give help on help()
topic <- "help"; package <- "utils"; lib.loc <- .Library
}
ischar <- tryCatch(is.character(topic) && length(topic) == 1L,
error = identity)
if(inherits(ischar, "error")) ischar <- FALSE
## if this was not a length-one character vector, try for the name.
if(!ischar) {
## the reserved words that could be parsed as a help arg:
reserved <-
c("TRUE", "FALSE", "NULL", "Inf", "NaN", "NA", "NA_integer_",
"NA_real_", "NA_complex_", "NA_character_")
stopic <- deparse(substitute(topic))
if(!is.name(substitute(topic)) && ! stopic %in% reserved)
stop("'topic' should be a name, length-one character vector or reserved word")
topic <- stopic
}
help_type <- if(!length(help_type)) "text"
else match.arg(tolower(help_type), types)
paths <- index.search(topic,
find.package(if (is.null(package)) loadedNamespaces() else package,
lib.loc, verbose = verbose))
tried_all_packages <- FALSE
if(!length(paths)
&& is.logical(try.all.packages) && !is.na(try.all.packages)
&& try.all.packages && is.null(package) && is.null(lib.loc)) {
## Try all the remaining packages.
for(lib in .libPaths()) {
packages <- .packages(TRUE, lib)
packages <- packages[is.na(match(packages, .packages()))]
paths <- c(paths, index.search(topic, file.path(lib, packages)))
}
paths <- paths[paths != ""]
tried_all_packages <- TRUE
}
paths <- unique(paths)
attributes(paths) <-
list(call = match.call(), topic = topic,
tried_all_packages = tried_all_packages, type = help_type)
class(paths) <- "help_files_with_topic"
paths
}
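## Illustrative usage (not run as part of the package):
##   help("mean")                                        # topic from loaded packages
##   help("glm", package = "stats", help_type = "text")  # force plain-text help
##   help(package = "utils")                             # package index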
print.help_files_with_topic <- function(x, ...)
{
browser <- getOption("browser")
topic <- attr(x, "topic")
type <- attr(x, "type")
if (.Platform$GUI == "AQUA" && type == "html")
browser <- get("aqua.browser", envir = as.environment("tools:RGUI"))
paths <- as.character(x)
if(!length(paths)) {
writeLines(c(gettextf("No documentation for %s in specified packages and libraries:",
sQuote(topic)),
gettextf("you could try %s",
sQuote(paste0("??", topic)))))
return(invisible(x))
}
if(type == "html")
if (tools:::httpdPort == 0L) tools::startDynamicHelp()
if(attr(x, "tried_all_packages")) {
paths <- unique(dirname(dirname(paths)))
msg <- gettextf("Help for topic %s is not in any loaded package but can be found in the following packages:",
sQuote(topic))
if (type == "html" && tools:::httpdPort > 0L) {
path <- file.path(tempdir(), ".R/doc/html")
dir.create(path, recursive = TRUE, showWarnings = FALSE)
out <- paste0('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n',
'<html><head><title>R: help</title>\n',
'<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n',
'<link rel="stylesheet" type="text/css" href="/doc/html/R.css">\n',
'</head><body>\n\n<hr>\n')
out <- c(out, '<p>', msg, '</p>',
'<table width="100%" summary="R Package list">\n',
'<tr align="left" valign="top">\n',
'<td width="25%">Package</td><td>Library</td></tr>\n')
links <- paste0('<a href="http://127.0.0.1:', tools:::httpdPort, '/library/',
basename(paths), '/html/00Index.html">', basename(paths), '</a>')
out <- c(out, paste0('<tr align="left" valign="top">\n<td>', links,
'</td><td>', dirname(paths), '</td></tr>\n'),
'</table>\n</body></html>')
writeLines(out, file.path(path, "all.available.html"))
browseURL(paste0("http://127.0.0.1:", tools:::httpdPort,
"/doc/html/all.available.html"), browser)
} else {
writeLines(c(strwrap(msg), "",
paste(" ",
formatDL(c(gettext("Package"), basename(paths)),
c(gettext("Library"), dirname(paths)),
indent = 22))))
}
} else {
if(length(paths) > 1L) {
if (type == "html" && tools:::httpdPort > 0L) { # Redo the search if dynamic help is running
browseURL(paste0("http://127.0.0.1:", tools:::httpdPort,
"/library/NULL/help/", topic), browser)
return(invisible(x))
}
file <- paths[1L]
p <- paths
msg <- gettextf("Help on topic %s was found in the following packages:",
sQuote(topic))
paths <- dirname(dirname(paths))
txt <- formatDL(c("Package", basename(paths)),
c("Library", dirname(paths)),
indent = 22L)
writeLines(c(strwrap(msg), "", paste(" ", txt), ""))
if(interactive()) {
fp <- file.path(paths, "Meta", "Rd.rds")
tp <- basename(p)
titles <- tp
if(type == "html" || type == "latex")
tp <- tools::file_path_sans_ext(tp)
for (i in seq_along(fp)) {
tmp <- try(readRDS(fp[i]))
titles[i] <- if(inherits(tmp, "try-error"))
"unknown title" else
tmp[tools::file_path_sans_ext(tmp$File) == tp[i], "Title"]
}
txt <- paste0(titles, " {", basename(paths), "}")
## the default on menu() is currently graphics = FALSE
res <- menu(txt, title = gettext("Choose one"),
graphics = getOption("menu.graphics"))
if(res > 0) file <- p[res]
} else {
writeLines(gettext("\nUsing the first match ..."))
}
}
else
file <- paths
if(type == "html") {
if (tools:::httpdPort > 0L) {
path <- dirname(file)
dirpath <- dirname(path)
pkgname <- basename(dirpath)
browseURL(paste0("http://127.0.0.1:", tools:::httpdPort,
"/library/", pkgname, "/html/", basename(file),
".html"), browser)
} else {
warning("HTML help is unavailable", call. = FALSE)
att <- attributes(x)
xx <- sub("/html/([^/]*)\\.html$", "/help/\\1", x)
attributes(xx) <- att
attr(xx, "type") <- "text"
print(xx)
}
} else if(type == "text") {
pkgname <- basename(dirname(dirname(file)))
temp <- tools::Rd2txt(.getHelpFile(file), out = tempfile("Rtxt"),
package = pkgname)
file.show(temp, title = gettextf("R Help on %s", sQuote(topic)),
delete.file = TRUE)
}
else if(type %in% "pdf") {
path <- dirname(file)
dirpath <- dirname(path)
texinputs <- file.path(dirpath, "help", "figures")
tf2 <- tempfile("Rlatex")
tools::Rd2latex(.getHelpFile(file), out = tf2)
.show_help_on_topic_offline(tf2, topic, type, texinputs)
unlink(tf2)
}
}
invisible(x)
}
.show_help_on_topic_offline <-
function(file, topic, type = "pdf", texinputs = NULL)
{
encoding <-""
lines <- readLines(file)
encpatt <- "^\\\\inputencoding\\{(.*)\\}$"
if(length(res <- grep(encpatt, lines, perl = TRUE, useBytes = TRUE)))
encoding <- sub(encpatt, "\\1", lines[res],
perl = TRUE, useBytes = TRUE)
texfile <- paste0(topic, ".tex")
on.exit(unlink(texfile)) ## ? leave to helper
opt <- if(nzchar(opt <- Sys.getenv("R_RD4PDF"))) opt else "times,inconsolata"
has_figure <- any(grepl("\\Figure", lines))
cat("\\documentclass[", getOption("papersize"), "paper]{article}\n",
"\\usepackage[", opt, "]{Rd}\n",
if(nzchar(encoding)) sprintf("\\usepackage[%s]{inputenc}\n", encoding),
"\\InputIfFileExists{Rhelp.cfg}{}{}\n",
"\\usepackage{graphicx}\n",
"\\begin{document}\n",
file = texfile, sep = "")
file.append(texfile, file)
cat("\\end{document}\n", file = texfile, append = TRUE)
helper <- if (exists("offline_help_helper", envir = .GlobalEnv))
get("offline_help_helper", envir = .GlobalEnv)
else utils:::offline_help_helper
if (has_figure) helper(texfile, type, texinputs)
else helper(texfile, type)
invisible()
}
.getHelpFile <- function(file)
{
path <- dirname(file)
dirpath <- dirname(path)
if(!file.exists(dirpath))
stop(gettextf("invalid %s argument", sQuote("file")), domain = NA)
pkgname <- basename(dirpath)
RdDB <- file.path(path, pkgname)
if(!file.exists(paste(RdDB, "rdx", sep = ".")))
stop(gettextf("package %s exists but was not installed under R >= 2.10.0 so help cannot be accessed", sQuote(pkgname)), domain = NA)
tools:::fetchRdDB(RdDB, basename(file))
}
offline_help_helper <- function(texfile, type, texinputs = NULL)
{
## Some systems have problems with texfile names like ".C.tex"
tf <- tempfile("tex", tmpdir = ".", fileext = ".tex"); on.exit(unlink(tf))
file.copy(texfile, tf)
tools::texi2pdf(tf, clean = TRUE, texinputs = texinputs)
ofile <- sub("tex$", "pdf", tf)
ofile2 <- sub("tex$", "pdf", texfile)
if(!file.exists(ofile))
stop(gettextf("creation of %s failed", sQuote(ofile2)), domain = NA)
if(file.copy(ofile, ofile2, overwrite = TRUE)) {
unlink(ofile)
message(gettextf("Saving help page to %s", sQuote(basename(ofile2))),
domain = NA)
} else {
message(gettextf("Saving help page to %s", sQuote(ofile)), domain = NA)
}
invisible()
}
# File src/library/utils/R/unix/help.request.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
help.request <- function (subject = "", address = "r-help@R-project.org",
file = "R.help.request", ...)
{
no <- function(answer) answer == "n"
yes <- function(answer) answer == "y"
webpage <- "corresponding web page"
catPlease <- function()
cat("Please do this first - the",
webpage,"has been loaded in your web browser\n")
go <- function(url) {
catPlease()
browseURL(url)
}
readMyLine <- function(..., .A. = "(y/n)")
readline(paste(paste(strwrap(paste(...)), collapse="\n"),
.A., "")) # space after question
checkPkgs <- function(pkgDescs,
pkgtxt = paste("packages",
paste(names(pkgDescs), collapse=", ")))
{
cat("Checking if", pkgtxt, "are up-to-date; may take some time...\n")
stopifnot(sapply(pkgDescs, inherits, what="packageDescription"))
fields <- .instPkgFields(NULL)
n <- length(pkgDescs)
iPkgs <- matrix(NA_character_, n, 2L + length(fields),
dimnames=list(NULL, c("Package", "LibPath", fields)))
for(i in seq_len(n)) {
desc <- c(unlist(pkgDescs[[i]]),
"LibPath" = dirname(dirname(dirname(attr(pkgDescs[[i]],
"file")))))
nms <- intersect(names(desc), colnames(iPkgs))
iPkgs[i, nms] <- desc[nms]
}
old <- old.packages(instPkgs = iPkgs)
if (!is.null(old)) {
update <- readMyLine("The following installed packages are out-of-date:\n",
paste(strwrap(rownames(old),
width = 0.7 *getOption("width"),
indent = 0.15*getOption("width")),
collapse="\n"),
"would you like to update now?")
if (yes(update)) update.packages(oldPkgs = old, ask = FALSE)
}
}
cat("Checklist:\n")
post <- readline("Have you read the posting guide? (y/n) ")
if (no(post)) return(go("http://www.r-project.org/posting-guide.html"))
FAQ <- readline("Have you checked the FAQ? (y/n) ")
if (no(FAQ)) return(go("http://cran.r-project.org/faqs.html"))
intro <- readline("Have you checked An Introduction to R? (y/n) ")
if (no(intro))
return(go("http://cran.r-project.org/manuals.html"))
NEWS <- readMyLine("Have you checked the NEWS of the latest development release?")
if (no(NEWS)) return(go("http://cran.r-project.org/doc/manuals/r-devel/NEWS.html"))
rsitesearch <- readline("Have you looked on RSiteSearch? (y/n) ")
if (no(rsitesearch)) {
catPlease()
return(RSiteSearch(subject))
}
inf <- sessionInfo()
if ("otherPkgs" %in% names(inf)) {
oPkgs <- names(inf$otherPkgs)
## FIXME: inf$otherPkgs is a list of packageDescription()s
other <-
readMyLine("You have packages",
paste0("(", paste(sQuote(oPkgs), collapse=", "),")"),
"other than the base packages loaded. ",
"If your query relates to one of these, have you ",
"checked any corresponding books/manuals and",
"considered contacting the package maintainer?",
.A. = "(y/n/NA)")
if(no(other)) return("Please do this first.")
}
page <- url("http://cran.r-project.org/bin/windows/base")
title <- grep("<title>", readLines(page, 10L), fixed = TRUE, value = TRUE)
ver <- sub("^.*R-([^ ]*) for Windows.*$", "\\1", title)
if (getRversion() < numeric_version(ver)) {
update <- readMyLine("Your R version is out-of-date,",
"would you like to update now?")
if(yes(update)) return(go(getOption("repos")))
}
if ("otherPkgs" %in% names(inf)) {
checkPkgs(inf$otherPkgs)
}
## To get long prompt!
cat("Have you written example code that is\n",
"- minimal\n - reproducible\n - self-contained\n - commented",
"\nusing data that is either\n",
"- constructed by the code\n - loaded by data()\n",
"- reproduced using dump(\"mydata\", file = \"\")\n")
code <- readMyLine("have you checked this code in a fresh R session",
"(invoking R with the --vanilla option if possible)",
"and is this code copied to the clipboard?")
if (no(code))
return(cat("\nIf your query is not directly related to code",
"(e.g. a general query \nabout R's capabilities),",
"email R-help@r-project.org directly. ",
"\nOtherwise prepare some example code first.\n"))
change <- readline(paste("Would you like to change your subject line:",
subject, "to something more meaningful? (y/n) ",
sep = "\n"))
if (yes(change))
subject <- readline("Enter subject: \n")
create.post(instructions = paste(
"\\n<>\\n\\n",
"\\n<>",
"\\n<>\\n\\n\\n\\n"),
description = "help request",
subject = subject, address = address,
filename = file, info = bug.report.info(), ...)
}
# File src/library/utils/R/help.search.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
.hsearch_db <- local({
hdb <- NULL
function(new) {
if(!missing(new))
hdb <<- new
else
hdb
}
})
merge.vignette.index <- function(hDB, path, pkg) {
## Vignettes in the hsearch index started in R2.14.0
## Most packages don't have them, so the following should not be
## too inefficient
if(file.exists(v_file <- file.path(path, "Meta", "vignette.rds"))
&& !is.null(vDB <- readRDS(v_file))
&& nrow(vDB)) {
## Make it look like an hDB base matrix and append it
base <- matrix("", nrow=nrow(vDB), ncol=8)
colnames(base) <- colnames(hDB[[1]])
base[,"Package"] <- pkg
base[,"LibPath"] <- path
id <- as.character(1:nrow(vDB) + NROW(hDB[[1]]))
base[,"ID"] <- id
base[,"name"] <- sub("\\.[^.]*$", "", basename(vDB$File))
base[,"topic"] <- base[,"name"]
base[,"title"] <- vDB$Title
base[,"Type"] <- "vignette"
hDB[[1L]] <- rbind(hDB[[1L]], base)
aliases <- matrix("", nrow=nrow(vDB), ncol=3)
colnames(aliases) <- colnames(hDB[[2]])
aliases[,"Aliases"] <- base[,"name"]
aliases[,"ID"] <- id
aliases[,"Package"] <- pkg
hDB[[2L]] <- rbind(hDB[[2L]], aliases)
nkeywords <- sum(sapply(vDB$Keywords, length))
if (nkeywords) {
keywords <- matrix("", nrow=nkeywords, ncol=3)
colnames(keywords) <- colnames(hDB[[4]])
keywords[,"Concepts"] <- unlist(vDB$Keywords)
keywords[,"ID"] <- unlist(lapply(1:nrow(vDB),
function(i) rep(id[i], length(vDB$Keywords[[i]]))))
keywords[,"Package"] <- pkg
hDB[[4L]] <- rbind(hDB[[4L]], keywords)
}
}
hDB
}
merge.demo.index <- function(hDB, path, pkg) {
## Demos in the hsearch index started in R2.14.0
if(file.exists(d_file <- file.path(path, "Meta", "demo.rds"))
&& !is.null(dDB <- readRDS(d_file))
&& nrow(dDB)) {
## Make it look like an hDB base matrix and append it
base <- matrix("", nrow=nrow(dDB), ncol=8)
colnames(base) <- colnames(hDB[[1]])
base[,"Package"] <- pkg
base[,"LibPath"] <- path
id <- as.character(1:nrow(dDB) + NROW(hDB[[1]]))
base[,"ID"] <- id
base[,"name"] <- dDB[,1]
base[,"topic"] <- base[,"name"]
base[,"title"] <- dDB[,2]
base[,"Type"] <- "demo"
hDB[[1L]] <- rbind(hDB[[1L]], base)
aliases <- matrix("", nrow=nrow(dDB), ncol=3)
colnames(aliases) <- colnames(hDB[[2]])
aliases[,"Aliases"] <- base[,"name"]
aliases[,"ID"] <- id
aliases[,"Package"] <- pkg
hDB[[2L]] <- rbind(hDB[[2L]], aliases)
}
hDB
}
## FIXME: use UTF-8, either always or optionally
## (Needs UTF-8-savvy & fast agrep, and PCRE regexps.)
help.search <-
function(pattern, fields = c("alias", "concept", "title"),
apropos, keyword, whatis, ignore.case = TRUE,
package = NULL, lib.loc = NULL,
help.db = getOption("help.db"),
verbose = getOption("verbose"),
rebuild = FALSE, agrep = NULL, use_UTF8 = FALSE,
types = getOption("help.search.types")
)
{
WINDOWS <- .Platform$OS.type == "windows"
### Argument handling.
FIELDS <- c("alias", "concept", "keyword", "name", "title")
TYPES <- c("help", "vignette", "demo")
if (is.logical(verbose)) verbose <- 2*as.integer(verbose)
.wrong_args <- function(args)
gettextf("argument %s must be a single character string", sQuote(args))
fuzzy <- agrep
if(!missing(pattern)) {
if(!is.character(pattern) || (length(pattern) > 1L))
stop(.wrong_args("pattern"), domain = NA)
i <- pmatch(fields, FIELDS)
if(anyNA(i))
stop("incorrect field specification")
else
fields <- FIELDS[i]
} else if(!missing(apropos)) {
if(!is.character(apropos) || (length(apropos) > 1L))
stop(.wrong_args("apropos"), domain = NA)
else {
pattern <- apropos
fields <- c("alias", "title")
}
} else if(!missing(keyword)) {
if(!is.character(keyword) || (length(keyword) > 1L))
stop(.wrong_args("keyword"), domain = NA)
else {
pattern <- keyword
fields <- "keyword"
if(is.null(fuzzy)) fuzzy <- FALSE
}
} else if(!missing(whatis)) {
if(!is.character(whatis) || (length(whatis) > 1))
stop(.wrong_args("whatis"), domain = NA)
else {
pattern <- whatis
fields <- "alias"
}
} else {
stop("do not know what to search")
}
i <- pmatch(types, TYPES)
if (anyNA(i))
stop("incorrect type specification")
else
types <- TYPES[i]
if(is.null(lib.loc))
lib.loc <- .libPaths()
if(!missing(help.db))
warning("argument 'help.db' is deprecated")
### Set up the hsearch db.
db <- eval(.hsearch_db())
if(is.null(db))
rebuild <- TRUE
else if(!rebuild) {
## Need to find out whether this has the info we need.
## Note that when looking for packages in libraries we always
## use the first location found. Hence if the library search
## path changes we might find different versions of a package.
## Thus we need to rebuild the hsearch db in case the specified
## library path is different from the one used when building the
## hsearch db (stored as its "LibPaths" attribute).
if(!identical(lib.loc, attr(db, "LibPaths")) ||
!all(types %in% attr(db, "Types")) ||
## We also need to rebuild the hsearch db in case an existing
## dir in the library path was modified more recently than
## the db, as packages might have been installed or removed.
any(attr(db, "mtime") <
file.info(lib.loc[file.exists(lib.loc)])$mtime) ||
## Or if the user changed the locale character type ...
!identical(attr(db, "ctype"), Sys.getlocale("LC_CTYPE"))
)
rebuild <- TRUE
## We also need to rebuild if 'packages' was used before and has
## changed.
if (!is.null(package) &&
any(! package %in% db$Base[, "Package"]))
rebuild <- TRUE
}
if(rebuild) {
if(verbose > 0L) {
message("Rebuilding the help.search() database", " ", "...",
if(verbose > 1L) "...", domain = NA)
flush.console()
}
if(!is.null(package)) {
packages_in_hsearch_db <- package
package_paths <- NULL
} else {
## local version of .packages(all.available = TRUE),
## recording paths
ans <- character(0L); paths <- character(0L)
lib.loc <- lib.loc[file.exists(lib.loc)]
valid_package_version_regexp <-
.standard_regexps()$valid_package_version
for (lib in lib.loc) {
a <- list.files(lib, all.files = FALSE, full.names = FALSE)
for (nam in a) {
pfile <- file.path(lib, nam, "Meta", "package.rds")
if (file.exists(pfile))
info <- readRDS(pfile)$DESCRIPTION[c("Package", "Version")]
else next
if ( (length(info) != 2L) || anyNA(info) ) next
if (!grepl(valid_package_version_regexp, info["Version"])) next
ans <- c(ans, nam)
paths <- c(paths, file.path(lib, nam))
}
}
un <- !duplicated(ans)
packages_in_hsearch_db <- ans[un]
package_paths <- paths[un]
names(package_paths) <- ans[un]
}
## Create the hsearch db.
np <- 0L
if(verbose >= 2L) {
message("Packages {readRDS() sequentially}:", domain = NA)
flush.console()
}
tot <- length(package_paths)
incr <- 0L
if(verbose && WINDOWS) {
pb <- winProgressBar("R: creating the help.search() DB", max = tot)
on.exit(close(pb))
} else if(verbose == 1L) incr <- ifelse(tot > 500L, 100L, 10L)
## Starting with R 1.8.0, prebuilt hsearch indices are available
## in Meta/hsearch.rds, and the code to build this from the Rd
## contents (as obtained from both new and old style Rd indices)
## has been moved to tools:::.build_hsearch_index() which
## creates a per-package list of base, aliases and keywords
## information. When building the global index, it seems most
## efficient (see e.g. also the code in tools:::Rdcontents()) to
## create a list *matrix* (dbMat below), stuff the individual
## indices into its rows, and finally create the base, alias,
## keyword, and concept information in rbind() calls on the
## columns. This is *much* more efficient than building
## incrementally.
dbMat <- vector("list", length(packages_in_hsearch_db) * 4L)
dim(dbMat) <- c(length(packages_in_hsearch_db), 4L)
defunct_standard_package_names <-
tools:::.get_standard_package_names()$stubs
for(p in packages_in_hsearch_db) {
if(incr && np %% incr == 0L) {
message(".", appendLF = FALSE, domain = NA)
flush.console()
}
np <- np + 1L
if(verbose && WINDOWS) setWinProgressBar(pb, np)
if(verbose >= 2L) {
message(" ", p, appendLF = ((np %% 5L) == 0L), domain=NA)
flush.console()
}
path <- if(!is.null(package_paths)) package_paths[p]
else find.package(p, lib.loc, quiet = TRUE)
if(length(path) == 0L) {
if(is.null(package)) next
else stop(gettextf("could not find package %s", sQuote(p)),
domain = NA)
}
## Hsearch 'Meta/hsearch.rds' indices were introduced in
## R 1.8.0. If they are missing, we really cannot use
## the package (as library() will refuse to load it).
## We always load hsearch.rds to establish the format,
## sometimes vignette.rds.
if(file.exists(hs_file <- file.path(path, "Meta", "hsearch.rds"))) {
hDB <- readRDS(hs_file)
if(!is.null(hDB)) {
## Fill up possibly missing information.
if(is.na(match("Encoding", colnames(hDB[[1L]]))))
hDB[[1L]] <- cbind(hDB[[1L]], Encoding = "")
nh <- NROW(hDB[[1L]])
hDB[[1L]] <- cbind(hDB[[1L]],
Type = rep("help", nh))
if (nh)
hDB[[1L]][, "LibPath"] <- path
if ("vignette" %in% types)
hDB <- merge.vignette.index(hDB, path, p)
if ("demo" %in% types)
hDB <- merge.demo.index(hDB, path, p)
## Put the hsearch index for the np-th package into the
## np-th row of the matrix used for aggregating.
dbMat[np, seq_along(hDB)] <- hDB
} else if(verbose >= 2L) {
message(gettextf("package %s has empty hsearch data - strangely",
sQuote(p)), domain = NA)
flush.console()
}
}
else if(!is.null(package))
warning("no hsearch.rds meta data for package ", p, domain = NA)
}
if(verbose >= 2L) {
message(ifelse(np %% 5L == 0L, "\n", "\n\n"),
sprintf("Built dbMat[%d,%d]", nrow(dbMat), ncol(dbMat)),
domain = NA)
flush.console()
## DEBUG save(dbMat, file="~/R/hsearch_dbMat.rda", compress=TRUE)
}
## workaround methods:::rbind() misbehavior:
if(.isMethodsDispatchOn()) {
bind_was_on <- methods:::bind_activation(FALSE)
if(bind_was_on) on.exit(methods:::bind_activation(TRUE))
}
## Create the global base, aliases, keywords and concepts tables
## via calls to rbind() on the columns of the matrix used for
## aggregating.
db <- list(Base = do.call("rbind", dbMat[, 1]),
Aliases = do.call("rbind", dbMat[, 2]),
Keywords = do.call("rbind", dbMat[, 3]),
Concepts = do.call("rbind", dbMat[, 4]))
if(is.null(db$Concepts))
db$Concepts <-
matrix(character(), ncol = 3L,
dimnames = list(NULL,
c("Concepts", "ID", "Package")))
## Make the IDs globally unique by prefixing them with the
## number of the package in the global index.
for(i in which(sapply(db, NROW) > 0L)) {
db[[i]][, "ID"] <-
paste(rep.int(seq_along(packages_in_hsearch_db),
sapply(dbMat[, i], NROW)),
db[[i]][, "ID"],
sep = "/")
}
## And maybe re-encode ...
if(!identical(Sys.getlocale("LC_CTYPE"), "C")) {
if(verbose >= 2L) {
message("reencoding ...", appendLF=FALSE, domain = NA)
flush.console()
}
encoding <- db$Base[, "Encoding"]
target <- ifelse(use_UTF8 && !l10n_info()$`UTF-8`, "UTF-8", "")
## As iconv is not vectorized in the 'from' argument, loop
## over groups of identical encodings.
for(enc in unique(encoding)) {
if(enc == target) next
IDs <- db$Base[encoding == enc, "ID"]
for(i in seq_along(db)) {
ind <- db[[i]][, "ID"] %in% IDs
db[[i]][ind, ] <- iconv(db[[i]][ind, ], enc, target)
}
}
if(verbose >= 2L) {
message(" ", "done", domain = NA)
flush.console()
}
}
bad_IDs <-
unlist(sapply(db,
function(u)
u[rowSums(is.na(nchar(u, "c", TRUE))) > 0, "ID"]))
## FIXME: drop this fallback
if(length(bad_IDs)) { ## try latin1
for(i in seq_along(db)) {
ind <- db[[i]][, "ID"] %in% bad_IDs
db[[i]][ind, ] <- iconv(db[[i]][ind, ], "latin1", "")
}
bad_IDs <-
unlist(sapply(db,
function(u)
u[rowSums(is.na(nchar(u, "c", TRUE))) > 0, "ID"]))
}
## If any invalid multi-byte character data are left, we simply
## remove all Rd objects with at least one invalid entry, and warn.
if(length(bad_IDs)) {
warning("removing all entries with invalid multi-byte character data")
for(i in seq_along(db)) {
ind <- db[[i]][, "ID"] %in% bad_IDs
db[[i]] <- db[[i]][!ind, ]
}
}
if(verbose >= 2L) {
message("saving the database ...", appendLF=FALSE, domain = NA)
flush.console()
}
attr(db, "LibPaths") <- lib.loc
attr(db, "mtime") <- Sys.time()
attr(db, "ctype") <- Sys.getlocale("LC_CTYPE")
attr(db, "Types") <- unique(c("help", types))
.hsearch_db(db)
if(verbose >= 2L) {
message(" ", "done", domain = NA)
flush.console()
}
if(verbose > 0L) {
message("... database rebuilt", domain = NA)
if(WINDOWS) {
close(pb)
on.exit() # clear closing of progress bar
}
flush.console()
}
}
### Matching.
if(verbose >= 2L) {
message("Database of ",
NROW(db$Base), " help objects (",
NROW(db$Aliases), " aliases, ",
NROW(db$Concepts), " concepts, ",
NROW(db$Keywords), " keywords)",
domain = NA)
flush.console()
}
if(!is.null(package)) {
## Argument 'package' was given. Need to check that all given
## packages exist in the db, and only search the given ones.
pos_in_hsearch_db <-
match(package, unique(db$Base[, "Package"]), nomatch = 0L)
## This should not happen for R >= 2.4.0
if(any(pos_in_hsearch_db == 0L))
stop(gettextf("no information in the database for package %s: need 'rebuild = TRUE'?",
sQuote(package[pos_in_hsearch_db == 0][1L])),
domain = NA)
db <-
lapply(db,
function(x) {
x[x[, "Package"] %in% package, , drop = FALSE]
})
}
## Subset to the requested help types
db$Base <- db$Base[db$Base[,"Type"] %in% types,,drop=FALSE]
##
## No need to continue if there are no objects in the database.
## But shouldn't we return something of class "hsearch"?
if(!length(db$Base)) return(invisible())
##
## If agrep is NULL (default), we want to use fuzzy matching iff
## 'pattern' contains no characters special to regular expressions.
## We use the following crude approximation: if pattern contains
## only alphanumeric characters or whitespace or a '-', it is taken
## 'as is', and fuzzy matching is used unless turned off explicitly,
## or pattern has very few (currently, less than 5) characters.
if(is.null(fuzzy) || is.na(fuzzy))
fuzzy <-
(grepl("^([[:alnum:]]|[[:space:]]|-)+$", pattern)
&& (nchar(pattern, type="c") > 4L))
if(is.logical(fuzzy)) {
if(fuzzy)
max.distance <- 0.1
}
else if(is.numeric(fuzzy) || is.list(fuzzy)) {
max.distance <- fuzzy
fuzzy <- TRUE
}
else
stop("incorrect 'agrep' specification")
searchFun <- function(x) {
if(fuzzy)
agrep(pattern, x, ignore.case = ignore.case,
max.distance = max.distance)
else
grep(pattern, x, ignore.case = ignore.case, perl = use_UTF8)
}
dbBase <- db$Base
searchDbField <- function(field) {
switch(field,
alias = {
aliases <- db$Aliases
match(aliases[searchFun(aliases[, "Aliases"]),
"ID"],
dbBase[, "ID"])
},
concept = {
concepts <- db$Concepts
match(concepts[searchFun(concepts[, "Concepts"]),
"ID"],
dbBase[, "ID"])
},
keyword = {
keywords <- db$Keywords
match(keywords[searchFun(keywords[, "Keywords"]),
"ID"],
dbBase[, "ID"])
},
searchFun(db$Base[, field]))
}
i <- NULL
for(f in fields) i <- c(i, searchDbField(f))
db <- dbBase[sort(unique(i)),
c("topic", "title", "Package", "LibPath", "name", "Type"),
drop = FALSE]
if(verbose>= 2L) {
message(sprintf(ngettext(NROW(db),
"matched %d object.",
"matched %d objects."),
NROW(db)),
domain = NA)
flush.console()
}
## Retval.
y <- list(pattern = pattern, fields = fields,
type = if(fuzzy) "fuzzy" else "regexp",
agrep = agrep,
ignore.case = ignore.case, types = types,
package = package, lib.loc = lib.loc,
matches = db)
class(y) <- "hsearch"
y
}
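## Illustrative usage (not run as part of the package):
##   help.search("linear model")              # fuzzy match on alias/concept/title
##   help.search("^glm$", fields = "alias")   # regexp match on aliases only
##   help.search(keyword = "hplot", package = "graphics")  # keyword search, one package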
## this extra indirection allows the Mac GUI to replace this
## yet call the printhsearchInternal function.
print.hsearch <- function(x, ...)
printhsearchInternal(x, ...)
printhsearchInternal <- function(x, ...)
{
help_type <- getOption("help_type", default="text")
types <- x$types
if (help_type == "html") {
browser <- getOption("browser")
if (tools:::httpdPort == 0L) tools::startDynamicHelp()
if (tools:::httpdPort > 0L) {
url <- paste0("http://127.0.0.1:", tools:::httpdPort,
"/doc/html/Search?pattern=", tools:::escapeAmpersand(x$pattern),
# Only encode non-default values
if (!("title" %in% x$fields)) "&title=0",
if ("keyword" %in% x$fields) "&keyword=1",
if (!("alias" %in% x$fields)) "&alias=0",
if (!("concept" %in% x$fields)) "&concept=0",
if ("name" %in% x$fields) "&name=1",
if (!is.null(x$agrep)) paste0("&agrep=", x$agrep),
if (!x$ignore.case) "&ignore.case=0",
if (!identical(types, getOption("help.search.types")))
paste0("&types=", paste(types, collapse=";")),
if (!is.null(x$package))
paste0("&package=", paste(x$package, collapse=";")),
if (!identical(x$lib.loc, .libPaths()))
paste0("&lib.loc=", paste(x$lib.loc, collapse=";")))
browseURL(url, browser)
return(invisible(x))
}
}
hfields <- paste(x$fields, collapse = " or ")
vfieldnames <- c(alias = "name", concept="keyword", keyword=NA,
name="name", title="title")
vfieldnames <- vfieldnames[x$fields]
vfields <- paste(unique(vfieldnames[!is.na(vfieldnames)]), collapse = " or ")
dfieldnames <- c(alias = "name", concept=NA, keyword=NA,
name = "name", title = "title")
dfieldnames <- dfieldnames[x$fields]
dfields <- paste(unique(dfieldnames[!is.na(dfieldnames)]), collapse = " or ")
fields <- list(help=hfields, vignette=vfields, demo=dfields)
matchtype <- switch(x$type, fuzzy = "fuzzy", "regular expression")
typenames <- c(vignette = "Vignettes", help = "Help files", demo="Demos")
db <- x$matches
if(NROW(db) == 0) {
typenames <- paste(tolower(typenames[types]), collapse=" or ")
writeLines(strwrap(paste("No", typenames, "found with", fields$help,
"matching", sQuote(x$pattern),
"using", matchtype, "matching.")))
return(invisible(x))
}
outFile <- tempfile()
outConn <- file(outFile, open = "w")
typeinstruct <- c(vignette = paste("Type 'vignette(\"FOO\", package=\"PKG\")' to",
"inspect entries 'PKG::FOO'."),
help = paste("Type '?PKG::FOO' to",
"inspect entries 'PKG::FOO',",
"or 'TYPE?PKG::FOO' for entries like",
"'PKG::FOO-TYPE'."),
demo = paste("Type 'demo(PKG::FOO)' to",
"run demonstration 'PKG::FOO'."))
for (type in types) {
if(NROW(dbtemp <- db[db[,"Type"] == type,,drop=FALSE]) > 0) {
writeLines(c(strwrap(paste(typenames[type], "with", fields[[type]],
"matching", sQuote(x$pattern),
"using", matchtype, "matching:")),
"\n"),
outConn)
dbnam <- paste0(dbtemp[, "Package"], "::", dbtemp[ , "topic"])
dbtit <- paste0(dbtemp[ , "title"])
writeLines(formatDL(dbnam, dbtit), outConn)
writeLines(c("\n",
strwrap(typeinstruct[type]),
"\n\n"),
outConn)
}
}
close(outConn)
file.show(outFile, delete.file = TRUE)
invisible(x)
}
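## Minimal sketch of how an "hsearch" object reaches this print routine
## (assumes an interactive session; the option value is an example):
##   old <- options(help_type = "text")
##   print(help.search("regression"))   # dispatches via print.hsearch()
##   options(old)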
# File src/library/utils/R/help.start.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
help.start <-
function (update = FALSE, gui = "irrelevant",
browser = getOption("browser"), remote = NULL)
{
WINDOWS <- .Platform$OS.type == "windows"
if (!WINDOWS) {
## should always be set, but might be empty
if (!is.function(browser) &&
(length(browser) != 1L || !is.character(browser) || !nzchar(browser)))
stop("invalid browser name, check options(\"browser\").")
}
home <- if (is.null(remote)) {
if (tools:::httpdPort == 0L) tools::startDynamicHelp()
if (tools:::httpdPort > 0L) {
if (update) make.packages.html(temp = TRUE)
paste0("http://127.0.0.1:", tools:::httpdPort)
} else stop("help.start() requires the HTTP server to be running",
call. = FALSE)
} else remote
url <- paste0(home, "/doc/html/index.html")
## FIXME: maybe these should use message()?
if (WINDOWS) {
cat(gettextf("If nothing happens, you should open\n'%s' yourself\n", url))
} else if (is.character(browser)) {
writeLines(strwrap(gettextf("If the browser launched by '%s' is already running, it is *not* restarted, and you must switch to its window.",
browser),
exdent = 4L))
writeLines(gettext("Otherwise, be patient ..."))
}
browseURL(url, browser = browser)
invisible()
}
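## Illustrative calls (sketch; both need a usable browser and the dynamic
## HTTP help server):
##   help.start()                # open the local HTML help front page
##   help.start(update = TRUE)   # also refresh the per-library package index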
browseURL <- function(url, browser = getOption("browser"), encodeIfNeeded=FALSE)
{
WINDOWS <- .Platform$OS.type == "windows"
if (!is.character(url) || length(url) != 1L || !nzchar(url))
stop("'url' must be a non-empty character string")
if(identical(browser, "false")) return(invisible())
if(WINDOWS && is.null(browser)) return(shell.exec(url))
else if (is.function(browser))
return(invisible(browser(if(encodeIfNeeded) URLencode(url) else url)))
if (!is.character(browser) || length(browser) != 1L || !nzchar(browser))
stop("'browser' must be a non-empty character string")
if (WINDOWS) {
## No shell used, but spaces are possible
return(system(paste0('"', browser, '" ',
if(encodeIfNeeded) URLencode(url) else url),
wait = FALSE))
}
## Unix-alike, character "browser"
if (.Platform$GUI == "AQUA" ||
length(grep("^(localhost|):", Sys.getenv("DISPLAY"))) )
isLocal <- TRUE
else
isLocal <- FALSE
## escape characters. ' can occur in URLs, so we must use " to
## delimit the URL. We need to escape $, but "`\ do not occur in
## valid URLs (RFC 2396, on the W3C site).
.shQuote <- function(string)
paste0('"', gsub("\\$", "\\\\$", string), '"')
quotedUrl <- .shQuote(if(encodeIfNeeded) URLencode(url) else url)
remoteCmd <- if (isLocal)
switch(basename(browser),
"gnome-moz-remote" =, "open" = quotedUrl,
"galeon" = paste("-x", quotedUrl),
"kfmclient" = paste("openURL", quotedUrl),
"mozilla" =, "opera" =, "firefox" = {
paste0("-remote \"openURL(",
## Quote ',' and ')' ...
gsub("([,)$])", "%\\1", url), ")\"")
}, quotedUrl)
else quotedUrl
system(paste(browser, remoteCmd, "> /dev/null 2>&1 ||",
browser, quotedUrl, "&"))
}
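## Illustrative calls (sketch; the URL, port and browser name are examples):
##   browseURL("http://www.r-project.org")
##   browseURL("http://127.0.0.1:10480/doc/html/index.html", browser = "firefox")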
# File src/library/utils/R/history.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
loadhistory <- function(file = ".Rhistory")
invisible(.External2(C_loadhistory, file))
savehistory <- function(file = ".Rhistory")
invisible(.External2(C_savehistory, file))
history <- function(max.show = 25, reverse = FALSE, pattern, ...)
{
file1 <- tempfile("Rrawhist")
savehistory(file1)
rawhist <- readLines(file1)
unlink(file1)
if(!missing(pattern))
rawhist <- unique(grep(pattern, rawhist, value = TRUE, ...))
nlines <- length(rawhist)
if(nlines) {
inds <- max(1, nlines-max.show):nlines
if(reverse) inds <- rev(inds)
} else inds <- integer()
file2 <- tempfile("hist")
writeLines(rawhist[inds], file2)
file.show(file2, title = "R History", delete.file = TRUE)
}
timestamp <- function(stamp = date(), prefix = "##------ ",
suffix = " ------##", quiet = FALSE)
{
stamp <- paste0(prefix, stamp, suffix)
.External2(C_addhistory, stamp)
if (!quiet) cat(stamp, sep = "\n")
invisible(stamp)
}
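## Illustrative use of the history helpers (sketch; only meaningful in an
## interactive session whose front end supports command history):
##   history(max.show = 10)        # show roughly the last 10 commands
##   history(pattern = "plot")     # only commands matching "plot"
##   timestamp()                   # add a dated marker to the history
##   savehistory("~/.Rhistory")    # the path is an example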
# File src/library/utils/R/iconv.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
## If you were wondering what these language codes stand for, see
## ftp://ftp.ilog.fr/pub/Users/haible/utf8/ISO_639
localeToCharset <- function(locale = Sys.getlocale("LC_CTYPE"))
{
guess <- function(en)
{
if(en %in% c("aa", "af", "an", "br", "ca", "da", "de", "en",
"es", "et", "eu", "fi", "fo", "fr", "ga", "gl",
"gv", "id", "is", "it", "kl", "kw", "ml", "ms",
"nb", "nn", "no", "oc", "om", "pt", "so", "sq",
"st", "sv", "tl", "uz", "wa", "xh", "zu"))
return("ISO8859-1")
if(en %in% c("bs", "cs", "hr", "hu", "pl", "ro", "sk", "sl"))
return("ISO8859-2")
if(en %in% "mt") return("ISO8859-3")
if(en %in% c("mk", "ru")) return("ISO8859-5")
if(en %in% "ar") return("ISO8859-6")
if(en %in% "el") return("ISO8859-7")
if(en %in% c("he", "iw", "yi")) return("ISO8859-8")
if(en %in% "tr") return("ISO8859-9")
if(en %in% "lg") return("ISO8859-10")
if(en %in% c("lt", "lv", "mi")) return("ISO8859-13")
if(en %in% "cy") return("ISO8859-14")
if(en %in% "uk") return("KOI8-U")
if(en %in% "ja") return("EUC-JP")
if(en %in% "ko") return("EUC-KR")
if(en %in% "th") return("TIS-620")
if(en %in% "tg") return("KOI8-T")
if(en %in% "ka") return("GEORGIAN-PS")
if(en %in% "kk") return("PT154")
## not safe to guess for zh
return(NA_character_)
}
if(locale %in% c("C", "POSIX")) return("ASCII")
if(.Platform$OS.type == "windows") {
x <- strsplit(locale, ".", fixed=TRUE)[[1L]]
if(length(x) != 2) return(NA_character_)
## PUTTY suggests mapping Windows code pages as
## 1250 -> ISO 8859-2
## 1251 -> KOI8-U
## 1252 -> ISO 8859-1
## 1253 -> ISO 8859-7
## 1254 -> ISO 8859-9
## 1255 -> ISO 8859-8
## 1256 -> ISO 8859-6
## 1257 -> ISO 8859-13
switch(x[2L],
## these mappings would be quite wrong:
# "1250" = return("ISO8859-2"),
# "1251" = return("KOI8-U"),
"1252" = return("ISO8859-1"),
# "1253" = return("ISO8859-7"),
# "1254" = return("ISO8859-9"),
# "1255" = return("ISO8859-8"),
# "1256" = return("ISO8859-6"),
"1257" = return("ISO8859-13")
)
return(paste0("CP", x[2L]))
} else {
## Assume locales are like en_US[.utf8[@euro]]
x <- strsplit(locale, ".", fixed=TRUE)[[1L]]
enc <- if(length(x) == 2) gsub("@.*$", "", x[2L]) else ""
# AIX uses UTF-8, OS X utf-8
if(toupper(enc) == "UTF-8") enc <- "utf8"
if(nzchar(enc) && enc != "utf8") {
enc <- tolower(enc)
known <-
c("ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-6",
"ISO8859-7", "ISO8859-8", "ISO8859-9", "ISO8859-10",
"ISO8859-13", "ISO8859-14", "ISO8859-15",
"CP1251", "CP1255", "EUC-JP", "EUC-KR", "EUC-TW",
"GEORGIAN-PS", "KOI8-R", "KOI8-U", "TCVN",
"BIG5" , "GB2312", "GB18030", "GBK",
"TIS-620", "SHIFT_JIS", "GB2312", "BIG5-HKSCS")
names(known) <-
c("iso88591", "iso88592", "iso88593", "iso88596",
"iso88597", "iso88598", "iso88599", "iso885910",
"iso885913", "iso885914", "iso885915",
"cp1251", "cp1255", "eucjp", "euckr", "euctw",
"georgianps", "koi8r", "koi8u", "tcvn",
"big5" , "gb2312", "gb18030", "gbk",
"tis-620", "sjis", "eucn", "big5-hkscs")
if (length(grep("darwin",R.version$os))) {
k <- c(known, "ISO8859-1", "ISO8859-2", "ISO8859-4",
"ISO8859-7", "ISO8859-9", "ISO8859-13", "ISO8859-15",
"KOI8-U", "KOI8-R", "PT154", "ASCII", "ARMSCII-8",
"ISCII-DEV", "BIG5-HKCSC")
names(k) <- c(names(known), "iso8859-1", "iso8859-2", "iso8859-4",
"iso8859-7", "iso8859-9", "iso8859-13", "iso8859-15",
"koi8-u", "koi8-r", "pt154", "us-ascii", "armscii-8",
"iscii-dev", "big5hkscs")
known <- k
}
if(enc %in% names(known)) return(unname(known[enc]))
if(length(grep("^cp-", enc))) # old Linux
return(sub("cp-([0-9]+)", "CP\\1", enc))
if(enc == "EUC") {
## let's hope it is a ll_* name.
if(length(grep("^[[:alpha:]]{2}_", x[1L], perl = TRUE))) {
ll <- substr(x[1L], 1L, 2L)
return(switch(ll, "jp"="EUC-JP", "kr"="EUC-KR",
"zh"="GB2312"))
}
}
}
## on Darwin all real locales w/o encoding are UTF-8
## HOWEVER! unlike the C code, we cannot filter out
## invalid locales, so it will be wrong for non-supported
## locales (why is this duplicated in R code anyway?)
if (length(grep("darwin", R.version$os))) return("UTF-8")
## let's hope it is a ll_* name.
if(length(grep("^[[:alpha:]]{2}_", x[1L], perl = TRUE))) {
ll <- substr(x[1L], 1L, 2L)
if(enc == "utf8") return(c("UTF-8", guess(ll)))
else return(guess(ll))
}
return(NA_character_)
}
}
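## Illustrative calls (sketch; results depend on the platform, e.g. on a
## typical Linux system):
##   localeToCharset("en_US.UTF-8")   # c("UTF-8", "ISO8859-1")
##   localeToCharset("ja_JP.eucJP")   # "EUC-JP"
##   localeToCharset("C")             # "ASCII"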
# File src/library/utils/R/indices.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2013 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
packageDescription <-
function(pkg, lib.loc = NULL, fields = NULL, drop = TRUE, encoding = "")
{
retval <- list()
if(!is.null(fields)){
fields <- as.character(fields)
retval[fields] <- NA
}
## If the NULL default for lib.loc is used,
## the loaded packages/namespaces are searched before the libraries.
pkgpath <-
if(is.null(lib.loc)) {
if(pkg == "base")
file.path(.Library, "base")
else if(pkg %in% loadedNamespaces())
getNamespaceInfo(pkg, "path")
else if((envname <- paste0("package:", pkg)) %in% search()) {
attr(as.environment(envname), "path")
## could be NULL if a perverse user has been naming
## environments to look like packages.
}
}
if(is.null(pkgpath)) pkgpath <- ""
if(pkgpath == "") {
libs <- if(is.null(lib.loc)) .libPaths() else lib.loc
for(lib in libs)
if(file.access(file.path(lib, pkg), 5) == 0L) {
pkgpath <- file.path(lib, pkg)
break
}
}
if(pkgpath == "") {
warning(gettextf("no package '%s' was found", pkg), domain = NA)
return(NA)
}
## New in 2.7.0: look for installed metadata first.
## We always need to be able to drop back to the file as this
## is used during package installation.
if(file.exists(file <- file.path(pkgpath, "Meta", "package.rds"))) {
desc <- readRDS(file)$DESCRIPTION
if(length(desc) < 1)
stop(gettextf("metadata of package '%s' is corrupt", pkg),
domain = NA)
desc <- as.list(desc)
} else if(file.exists(file <- file.path(pkgpath,"DESCRIPTION"))) {
dcf <- read.dcf(file=file)
if(NROW(dcf) < 1L)
stop(gettextf("DESCRIPTION file of package '%s' is corrupt", pkg),
domain = NA)
desc <- as.list(dcf[1,])
} else file <- ""
if(file != "") {
## read the Encoding field if any
enc <- desc[["Encoding"]]
if(!is.null(enc) && !is.na(encoding)) {
## Determine encoding and re-encode if necessary and possible.
if (missing(encoding) && Sys.getlocale("LC_CTYPE") == "C")
encoding <- "ASCII//TRANSLIT"
## might have an invalid encoding ...
newdesc <- try(lapply(desc, iconv, from = enc, to = encoding))
if(!inherits(newdesc, "try-error")) desc <- newdesc
else
warning("'DESCRIPTION' file has an 'Encoding' field and re-encoding is not possible", call. = FALSE)
}
if(!is.null(fields)){
ok <- names(desc) %in% fields
retval[names(desc)[ok]] <- desc[ok]
}
else
retval[names(desc)] <- desc
}
if((file == "") || (length(retval) == 0)){
warning(gettextf("DESCRIPTION file of package '%s' is missing or broken", pkg), domain = NA)
return(NA)
}
if(drop && length(fields) == 1L)
return(retval[[1L]])
class(retval) <- "packageDescription"
if(!is.null(fields)) attr(retval, "fields") <- fields
attr(retval, "file") <- file
retval
}
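## Illustrative calls (sketch; "stats" is just an example of an installed
## package):
##   packageDescription("stats")                       # all DESCRIPTION fields
##   packageDescription("stats", fields = "Version")   # single field, dropped to a string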
print.packageDescription <-
function(x, abbrCollate = 0.8 * getOption("width"), ...)
{
xx <- x
xx[] <- lapply(xx, function(x) if(is.na(x)) "NA" else x)
if(abbrCollate > 0 && any(names(xx) == "Collate")) {
## trim a long "Collate" field -- respecting word boundaries
wrds <- strsplit(xx$Collate,"[ \n]")[[1L]]
k <- which.max(cumsum(nchar(wrds)) > abbrCollate) - 1L
xx$Collate <- paste(c(wrds[seq_len(k)], "....."), collapse=" ")
}
write.dcf(as.data.frame.list(xx, optional = TRUE))
cat("\n-- File:", attr(x, "file"), "\n")
if(!is.null(attr(x, "fields"))){
cat("-- Fields read: ")
cat(attr(x, "fields"), sep = ", ")
cat("\n")
}
invisible(x)
}
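## Sketch of the print method's Collate handling (the argument value is an
## example):
##   print(packageDescription("utils"), abbrCollate = 0)  # never abbreviate Collate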
# Simple convenience functions
maintainer <- function(pkg)
{
force(pkg)
desc <- packageDescription(pkg)
if(is.list(desc)) gsub("\n", " ", desc$Maintainer, fixed = TRUE)
else NA_character_
}
packageVersion <- function(pkg, lib.loc = NULL)
{
res <- suppressWarnings(packageDescription(pkg, lib.loc=lib.loc,
fields = "Version"))
if (!is.na(res)) package_version(res) else
stop(gettextf("package %s not found", sQuote(pkg)), domain = NA)
}
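## Illustrative calls (sketch; "utils" is an example package name):
##   maintainer("utils")
##   packageVersion("utils") >= "2.15.0"   # numeric_version comparison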
## used with firstOnly = TRUE for example()
## used with firstOnly = FALSE in help()
index.search <- function(topic, paths, firstOnly = FALSE)
{
res <- character()
for (p in paths) {
if(file.exists(f <- file.path(p, "help", "aliases.rds")))
al <- readRDS(f)
else if(file.exists(f <- file.path(p, "help", "AnIndex"))) {
## aliases.rds was introduced before 2.10.0, so the AnIndex fallback can be phased out
foo <- scan(f, what = list(a="", b=""), sep = "\t", quote = "",
na.strings = "", quiet = TRUE)
al <- structure(foo$b, names = foo$a)
} else next
f <- al[topic]
if(is.na(f)) next
res <- c(res, file.path(p, "help", f))
if(firstOnly) break
}
res
}
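## Internal helper; an illustrative call (sketch -- the topic is an example,
## and the result is the help file path without extension, if any):
##   index.search("mean", find.package("base"), firstOnly = TRUE)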
print.packageIQR <-
function(x, ...)
{
db <- x$results
## Split according to Package.
out <- if(nrow(db) == 0L)
NULL
else
lapply(split(1 : nrow(db), db[, "Package"]),
function(ind) db[ind, c("Item", "Title"),
drop = FALSE])
outFile <- tempfile("RpackageIQR")
outConn <- file(outFile, open = "w")
first <- TRUE
for(pkg in names(out)) {
writeLines(paste0(ifelse(first, "", "\n"), x$title,
" in package ", sQuote(pkg), ":\n"),
outConn)
writeLines(formatDL(out[[pkg]][, "Item"],
out[[pkg]][, "Title"]),
outConn)
first <- FALSE
}
if(first) {
close(outConn)
unlink(outFile)
writeLines(paste("no", tolower(x$title), "found"))
if(!is.null(x$footer))
writeLines(c("", x$footer))
}
else {
if(!is.null(x$footer))
writeLines(c("\n", x$footer), outConn)
close(outConn)
file.show(outFile, delete.file = TRUE,
title = paste("R", tolower(x$title)))
}
invisible(x)
}
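## "packageIQR" objects are returned by e.g. data(), demo() and vignette()
## when called without a topic; an illustrative call (sketch):
##   print(data(package = "datasets"))   # listed via print.packageIQR()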
# File src/library/utils/R/linkhtml.R
# Part of the R package, http://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
make.packages.html <-
function(lib.loc = .libPaths(), temp = FALSE, verbose = TRUE,
docdir = R.home("doc"))
{
add_lib_index <- function(libs)
{
cat('
\n
\n', file = out)
for (i in seq_along(libs)) {
nm <- libs[i]
if (nm == .Library) {
cat('
\n', file = out)
for (a in nm) {
if(use_alpha)
cat("
\n", sep = "", file = out)
for (i in pg[first == a]) {
title <- packageDescription(i, lib.loc = lib, fields = "Title",
encoding = "UTF-8")
if (is.na(title)) title <- "-- Title is missing --"
cat('