├── .Rbuildignore
├── .gitignore
├── DESCRIPTION
├── LICENSE.md
├── NAMESPACE
├── NEWS.md
├── R
├── af.R
├── af_cap.R
├── af_snp.R
├── center.R
├── centerscale.R
├── check_geno.R
├── compute_nulls.R
├── convtests.R
├── covar_BEDMatrix.R
├── covar_basic.R
├── covar_logit_BEDMatrix.R
├── covar_logit_basic.R
├── data.R
├── gof_stat.R
├── gof_stat_snp.R
├── lfa-deprecated.R
├── lfa.R
├── lfa_BEDMatrix.R
├── lfa_matrix.R
├── lreg.R
├── model.gof.R
├── pca_af.R
├── pca_af_BEDMatrix.R
├── pvals_empir.R
├── pvals_empir_brute.R
├── read.bed.R
├── read.tped.recode.R
├── sHWE.R
└── trunc_svd.R
├── README.md
├── data
└── hgdp_subset.rda
├── inst
└── CITATION
├── man
├── af.Rd
├── af_snp.Rd
├── center-deprecated.Rd
├── centerscale.Rd
├── hgdp_subset.Rd
├── lfa-deprecated.Rd
├── lfa.Rd
├── model.gof-deprecated.Rd
├── pca_af.Rd
├── read.bed-deprecated.Rd
├── read.tped.recode-deprecated.Rd
├── sHWE.Rd
└── trunc_svd.Rd
├── src
├── .gitignore
├── Makevars
├── fastmat.c
├── lfa-init.c
├── lfa.c
├── lfa.h
└── lreg.c
├── tests
├── testthat.R
└── testthat
│ └── test-lfa.R
└── vignettes
├── lfa.Rnw
└── lfa.bib
/.Rbuildignore:
--------------------------------------------------------------------------------
1 | ^doc$
2 | ^Meta$
3 | unpub
4 | ^LICENSE\.md$
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | .Rhistory
3 | .RData
4 | .Rproj.user
5 | doc
6 | Meta
7 | logo/
8 | unpub/*
9 | /doc/
10 | /Meta/
11 |
--------------------------------------------------------------------------------
/DESCRIPTION:
--------------------------------------------------------------------------------
1 | Package: lfa
2 | Title: Logistic Factor Analysis for Categorical Data
3 | Version: 2.9.0
4 | Authors@R: c(
5 | person(given = "Wei",
6 | family = "Hao",
7 | role = "aut",
8 | email = "whao@princeton.edu"),
9 | person(given = "Minsun",
10 | family = "Song",
11 | role = "aut"),
12 | person(given = "Alejandro",
13 | family = "Ochoa",
14 | role = c("aut", "cre"),
15 | email = "alejandro.ochoa@duke.edu",
16 | comment = c(ORCID = "0000-0003-4928-3403")),
17 | person(given = "John D.",
18 | family = "Storey",
19 | role = "aut",
20 | email = "jstorey@princeton.edu",
21 | comment = c(ORCID = "0000-0001-5992-402X"))
22 | )
23 | Encoding: UTF-8
24 | LazyData: true
25 | Description: Logistic Factor Analysis is a method for a PCA analogue on Binomial data via estimation of latent structure in the natural parameter. The main method estimates genetic population structure from genotype data. There are also methods for estimating individual-specific allele frequencies using the population structure. Lastly, a structured Hardy-Weinberg equilibrium (HWE) test is developed, which quantifies the goodness of fit of the genotype data to the estimated population structure, via the estimated individual-specific allele frequencies (all of which generalizes traditional HWE tests).
26 | Imports:
27 | utils,
28 | methods,
29 | corpcor,
30 | RSpectra
31 | Depends:
32 | R (>= 4.0)
33 | Suggests:
34 | knitr,
35 | ggplot2,
36 | testthat,
37 | BEDMatrix,
38 | genio
39 | VignetteBuilder: knitr
40 | License: GPL (>= 3)
41 | biocViews: SNP, DimensionReduction, PrincipalComponent, Regression
42 | BugReports: https://github.com/StoreyLab/lfa/issues
43 | URL: https://github.com/StoreyLab/lfa
44 | Roxygen: list(markdown = TRUE)
45 | RoxygenNote: 7.2.3
46 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | GNU General Public License
2 | ==========================
3 |
4 | _Version 3, 29 June 2007_
5 | _Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>_
6 |
7 | Everyone is permitted to copy and distribute verbatim copies of this license
8 | document, but changing it is not allowed.
9 |
10 | ## Preamble
11 |
12 | The GNU General Public License is a free, copyleft license for software and other
13 | kinds of works.
14 |
15 | The licenses for most software and other practical works are designed to take away
16 | your freedom to share and change the works. By contrast, the GNU General Public
17 | License is intended to guarantee your freedom to share and change all versions of a
18 | program--to make sure it remains free software for all its users. We, the Free
19 | Software Foundation, use the GNU General Public License for most of our software; it
20 | applies also to any other work released this way by its authors. You can apply it to
21 | your programs, too.
22 |
23 | When we speak of free software, we are referring to freedom, not price. Our General
24 | Public Licenses are designed to make sure that you have the freedom to distribute
25 | copies of free software (and charge for them if you wish), that you receive source
26 | code or can get it if you want it, that you can change the software or use pieces of
27 | it in new free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you these rights or
30 | asking you to surrender the rights. Therefore, you have certain responsibilities if
31 | you distribute copies of the software, or if you modify it: responsibilities to
32 | respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether gratis or for a fee,
35 | you must pass on to the recipients the same freedoms that you received. You must make
36 | sure that they, too, receive or can get the source code. And you must show them these
37 | terms so they know their rights.
38 |
39 | Developers that use the GNU GPL protect your rights with two steps: **(1)** assert
40 | copyright on the software, and **(2)** offer you this License giving you legal permission
41 | to copy, distribute and/or modify it.
42 |
43 | For the developers' and authors' protection, the GPL clearly explains that there is
44 | no warranty for this free software. For both users' and authors' sake, the GPL
45 | requires that modified versions be marked as changed, so that their problems will not
46 | be attributed erroneously to authors of previous versions.
47 |
48 | Some devices are designed to deny users access to install or run modified versions of
49 | the software inside them, although the manufacturer can do so. This is fundamentally
50 | incompatible with the aim of protecting users' freedom to change the software. The
51 | systematic pattern of such abuse occurs in the area of products for individuals to
52 | use, which is precisely where it is most unacceptable. Therefore, we have designed
53 | this version of the GPL to prohibit the practice for those products. If such problems
54 | arise substantially in other domains, we stand ready to extend this provision to
55 | those domains in future versions of the GPL, as needed to protect the freedom of
56 | users.
57 |
58 | Finally, every program is threatened constantly by software patents. States should
59 | not allow patents to restrict development and use of software on general-purpose
60 | computers, but in those that do, we wish to avoid the special danger that patents
61 | applied to a free program could make it effectively proprietary. To prevent this, the
62 | GPL assures that patents cannot be used to render the program non-free.
63 |
64 | The precise terms and conditions for copying, distribution and modification follow.
65 |
66 | ## TERMS AND CONDITIONS
67 |
68 | ### 0. Definitions
69 |
70 | “This License” refers to version 3 of the GNU General Public License.
71 |
72 | “Copyright” also means copyright-like laws that apply to other kinds of
73 | works, such as semiconductor masks.
74 |
75 | “The Program” refers to any copyrightable work licensed under this
76 | License. Each licensee is addressed as “you”. “Licensees” and
77 | “recipients” may be individuals or organizations.
78 |
79 | To “modify” a work means to copy from or adapt all or part of the work in
80 | a fashion requiring copyright permission, other than the making of an exact copy. The
81 | resulting work is called a “modified version” of the earlier work or a
82 | work “based on” the earlier work.
83 |
84 | A “covered work” means either the unmodified Program or a work based on
85 | the Program.
86 |
87 | To “propagate” a work means to do anything with it that, without
88 | permission, would make you directly or secondarily liable for infringement under
89 | applicable copyright law, except executing it on a computer or modifying a private
90 | copy. Propagation includes copying, distribution (with or without modification),
91 | making available to the public, and in some countries other activities as well.
92 |
93 | To “convey” a work means any kind of propagation that enables other
94 | parties to make or receive copies. Mere interaction with a user through a computer
95 | network, with no transfer of a copy, is not conveying.
96 |
97 | An interactive user interface displays “Appropriate Legal Notices” to the
98 | extent that it includes a convenient and prominently visible feature that **(1)**
99 | displays an appropriate copyright notice, and **(2)** tells the user that there is no
100 | warranty for the work (except to the extent that warranties are provided), that
101 | licensees may convey the work under this License, and how to view a copy of this
102 | License. If the interface presents a list of user commands or options, such as a
103 | menu, a prominent item in the list meets this criterion.
104 |
105 | ### 1. Source Code
106 |
107 | The “source code” for a work means the preferred form of the work for
108 | making modifications to it. “Object code” means any non-source form of a
109 | work.
110 |
111 | A “Standard Interface” means an interface that either is an official
112 | standard defined by a recognized standards body, or, in the case of interfaces
113 | specified for a particular programming language, one that is widely used among
114 | developers working in that language.
115 |
116 | The “System Libraries” of an executable work include anything, other than
117 | the work as a whole, that **(a)** is included in the normal form of packaging a Major
118 | Component, but which is not part of that Major Component, and **(b)** serves only to
119 | enable use of the work with that Major Component, or to implement a Standard
120 | Interface for which an implementation is available to the public in source code form.
121 | A “Major Component”, in this context, means a major essential component
122 | (kernel, window system, and so on) of the specific operating system (if any) on which
123 | the executable work runs, or a compiler used to produce the work, or an object code
124 | interpreter used to run it.
125 |
126 | The “Corresponding Source” for a work in object code form means all the
127 | source code needed to generate, install, and (for an executable work) run the object
128 | code and to modify the work, including scripts to control those activities. However,
129 | it does not include the work's System Libraries, or general-purpose tools or
130 | generally available free programs which are used unmodified in performing those
131 | activities but which are not part of the work. For example, Corresponding Source
132 | includes interface definition files associated with source files for the work, and
133 | the source code for shared libraries and dynamically linked subprograms that the work
134 | is specifically designed to require, such as by intimate data communication or
135 | control flow between those subprograms and other parts of the work.
136 |
137 | The Corresponding Source need not include anything that users can regenerate
138 | automatically from other parts of the Corresponding Source.
139 |
140 | The Corresponding Source for a work in source code form is that same work.
141 |
142 | ### 2. Basic Permissions
143 |
144 | All rights granted under this License are granted for the term of copyright on the
145 | Program, and are irrevocable provided the stated conditions are met. This License
146 | explicitly affirms your unlimited permission to run the unmodified Program. The
147 | output from running a covered work is covered by this License only if the output,
148 | given its content, constitutes a covered work. This License acknowledges your rights
149 | of fair use or other equivalent, as provided by copyright law.
150 |
151 | You may make, run and propagate covered works that you do not convey, without
152 | conditions so long as your license otherwise remains in force. You may convey covered
153 | works to others for the sole purpose of having them make modifications exclusively
154 | for you, or provide you with facilities for running those works, provided that you
155 | comply with the terms of this License in conveying all material for which you do not
156 | control copyright. Those thus making or running the covered works for you must do so
157 | exclusively on your behalf, under your direction and control, on terms that prohibit
158 | them from making any copies of your copyrighted material outside their relationship
159 | with you.
160 |
161 | Conveying under any other circumstances is permitted solely under the conditions
162 | stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
163 |
164 | ### 3. Protecting Users' Legal Rights From Anti-Circumvention Law
165 |
166 | No covered work shall be deemed part of an effective technological measure under any
167 | applicable law fulfilling obligations under article 11 of the WIPO copyright treaty
168 | adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention
169 | of such measures.
170 |
171 | When you convey a covered work, you waive any legal power to forbid circumvention of
172 | technological measures to the extent such circumvention is effected by exercising
173 | rights under this License with respect to the covered work, and you disclaim any
174 | intention to limit operation or modification of the work as a means of enforcing,
175 | against the work's users, your or third parties' legal rights to forbid circumvention
176 | of technological measures.
177 |
178 | ### 4. Conveying Verbatim Copies
179 |
180 | You may convey verbatim copies of the Program's source code as you receive it, in any
181 | medium, provided that you conspicuously and appropriately publish on each copy an
182 | appropriate copyright notice; keep intact all notices stating that this License and
183 | any non-permissive terms added in accord with section 7 apply to the code; keep
184 | intact all notices of the absence of any warranty; and give all recipients a copy of
185 | this License along with the Program.
186 |
187 | You may charge any price or no price for each copy that you convey, and you may offer
188 | support or warranty protection for a fee.
189 |
190 | ### 5. Conveying Modified Source Versions
191 |
192 | You may convey a work based on the Program, or the modifications to produce it from
193 | the Program, in the form of source code under the terms of section 4, provided that
194 | you also meet all of these conditions:
195 |
196 | * **a)** The work must carry prominent notices stating that you modified it, and giving a
197 | relevant date.
198 | * **b)** The work must carry prominent notices stating that it is released under this
199 | License and any conditions added under section 7. This requirement modifies the
200 | requirement in section 4 to “keep intact all notices”.
201 | * **c)** You must license the entire work, as a whole, under this License to anyone who
202 | comes into possession of a copy. This License will therefore apply, along with any
203 | applicable section 7 additional terms, to the whole of the work, and all its parts,
204 | regardless of how they are packaged. This License gives no permission to license the
205 | work in any other way, but it does not invalidate such permission if you have
206 | separately received it.
207 | * **d)** If the work has interactive user interfaces, each must display Appropriate Legal
208 | Notices; however, if the Program has interactive interfaces that do not display
209 | Appropriate Legal Notices, your work need not make them do so.
210 |
211 | A compilation of a covered work with other separate and independent works, which are
212 | not by their nature extensions of the covered work, and which are not combined with
213 | it such as to form a larger program, in or on a volume of a storage or distribution
214 | medium, is called an “aggregate” if the compilation and its resulting
215 | copyright are not used to limit the access or legal rights of the compilation's users
216 | beyond what the individual works permit. Inclusion of a covered work in an aggregate
217 | does not cause this License to apply to the other parts of the aggregate.
218 |
219 | ### 6. Conveying Non-Source Forms
220 |
221 | You may convey a covered work in object code form under the terms of sections 4 and
222 | 5, provided that you also convey the machine-readable Corresponding Source under the
223 | terms of this License, in one of these ways:
224 |
225 | * **a)** Convey the object code in, or embodied in, a physical product (including a
226 | physical distribution medium), accompanied by the Corresponding Source fixed on a
227 | durable physical medium customarily used for software interchange.
228 | * **b)** Convey the object code in, or embodied in, a physical product (including a
229 | physical distribution medium), accompanied by a written offer, valid for at least
230 | three years and valid for as long as you offer spare parts or customer support for
231 | that product model, to give anyone who possesses the object code either **(1)** a copy of
232 | the Corresponding Source for all the software in the product that is covered by this
233 | License, on a durable physical medium customarily used for software interchange, for
234 | a price no more than your reasonable cost of physically performing this conveying of
235 | source, or **(2)** access to copy the Corresponding Source from a network server at no
236 | charge.
237 | * **c)** Convey individual copies of the object code with a copy of the written offer to
238 | provide the Corresponding Source. This alternative is allowed only occasionally and
239 | noncommercially, and only if you received the object code with such an offer, in
240 | accord with subsection 6b.
241 | * **d)** Convey the object code by offering access from a designated place (gratis or for
242 | a charge), and offer equivalent access to the Corresponding Source in the same way
243 | through the same place at no further charge. You need not require recipients to copy
244 | the Corresponding Source along with the object code. If the place to copy the object
245 | code is a network server, the Corresponding Source may be on a different server
246 | (operated by you or a third party) that supports equivalent copying facilities,
247 | provided you maintain clear directions next to the object code saying where to find
248 | the Corresponding Source. Regardless of what server hosts the Corresponding Source,
249 | you remain obligated to ensure that it is available for as long as needed to satisfy
250 | these requirements.
251 | * **e)** Convey the object code using peer-to-peer transmission, provided you inform
252 | other peers where the object code and Corresponding Source of the work are being
253 | offered to the general public at no charge under subsection 6d.
254 |
255 | A separable portion of the object code, whose source code is excluded from the
256 | Corresponding Source as a System Library, need not be included in conveying the
257 | object code work.
258 |
259 | A “User Product” is either **(1)** a “consumer product”, which
260 | means any tangible personal property which is normally used for personal, family, or
261 | household purposes, or **(2)** anything designed or sold for incorporation into a
262 | dwelling. In determining whether a product is a consumer product, doubtful cases
263 | shall be resolved in favor of coverage. For a particular product received by a
264 | particular user, “normally used” refers to a typical or common use of
265 | that class of product, regardless of the status of the particular user or of the way
266 | in which the particular user actually uses, or expects or is expected to use, the
267 | product. A product is a consumer product regardless of whether the product has
268 | substantial commercial, industrial or non-consumer uses, unless such uses represent
269 | the only significant mode of use of the product.
270 |
271 | “Installation Information” for a User Product means any methods,
272 | procedures, authorization keys, or other information required to install and execute
273 | modified versions of a covered work in that User Product from a modified version of
274 | its Corresponding Source. The information must suffice to ensure that the continued
275 | functioning of the modified object code is in no case prevented or interfered with
276 | solely because modification has been made.
277 |
278 | If you convey an object code work under this section in, or with, or specifically for
279 | use in, a User Product, and the conveying occurs as part of a transaction in which
280 | the right of possession and use of the User Product is transferred to the recipient
281 | in perpetuity or for a fixed term (regardless of how the transaction is
282 | characterized), the Corresponding Source conveyed under this section must be
283 | accompanied by the Installation Information. But this requirement does not apply if
284 | neither you nor any third party retains the ability to install modified object code
285 | on the User Product (for example, the work has been installed in ROM).
286 |
287 | The requirement to provide Installation Information does not include a requirement to
288 | continue to provide support service, warranty, or updates for a work that has been
289 | modified or installed by the recipient, or for the User Product in which it has been
290 | modified or installed. Access to a network may be denied when the modification itself
291 | materially and adversely affects the operation of the network or violates the rules
292 | and protocols for communication across the network.
293 |
294 | Corresponding Source conveyed, and Installation Information provided, in accord with
295 | this section must be in a format that is publicly documented (and with an
296 | implementation available to the public in source code form), and must require no
297 | special password or key for unpacking, reading or copying.
298 |
299 | ### 7. Additional Terms
300 |
301 | “Additional permissions” are terms that supplement the terms of this
302 | License by making exceptions from one or more of its conditions. Additional
303 | permissions that are applicable to the entire Program shall be treated as though they
304 | were included in this License, to the extent that they are valid under applicable
305 | law. If additional permissions apply only to part of the Program, that part may be
306 | used separately under those permissions, but the entire Program remains governed by
307 | this License without regard to the additional permissions.
308 |
309 | When you convey a copy of a covered work, you may at your option remove any
310 | additional permissions from that copy, or from any part of it. (Additional
311 | permissions may be written to require their own removal in certain cases when you
312 | modify the work.) You may place additional permissions on material, added by you to a
313 | covered work, for which you have or can give appropriate copyright permission.
314 |
315 | Notwithstanding any other provision of this License, for material you add to a
316 | covered work, you may (if authorized by the copyright holders of that material)
317 | supplement the terms of this License with terms:
318 |
319 | * **a)** Disclaiming warranty or limiting liability differently from the terms of
320 | sections 15 and 16 of this License; or
321 | * **b)** Requiring preservation of specified reasonable legal notices or author
322 | attributions in that material or in the Appropriate Legal Notices displayed by works
323 | containing it; or
324 | * **c)** Prohibiting misrepresentation of the origin of that material, or requiring that
325 | modified versions of such material be marked in reasonable ways as different from the
326 | original version; or
327 | * **d)** Limiting the use for publicity purposes of names of licensors or authors of the
328 | material; or
329 | * **e)** Declining to grant rights under trademark law for use of some trade names,
330 | trademarks, or service marks; or
331 | * **f)** Requiring indemnification of licensors and authors of that material by anyone
332 | who conveys the material (or modified versions of it) with contractual assumptions of
333 | liability to the recipient, for any liability that these contractual assumptions
334 | directly impose on those licensors and authors.
335 |
336 | All other non-permissive additional terms are considered “further
337 | restrictions” within the meaning of section 10. If the Program as you received
338 | it, or any part of it, contains a notice stating that it is governed by this License
339 | along with a term that is a further restriction, you may remove that term. If a
340 | license document contains a further restriction but permits relicensing or conveying
341 | under this License, you may add to a covered work material governed by the terms of
342 | that license document, provided that the further restriction does not survive such
343 | relicensing or conveying.
344 |
345 | If you add terms to a covered work in accord with this section, you must place, in
346 | the relevant source files, a statement of the additional terms that apply to those
347 | files, or a notice indicating where to find the applicable terms.
348 |
349 | Additional terms, permissive or non-permissive, may be stated in the form of a
350 | separately written license, or stated as exceptions; the above requirements apply
351 | either way.
352 |
353 | ### 8. Termination
354 |
355 | You may not propagate or modify a covered work except as expressly provided under
356 | this License. Any attempt otherwise to propagate or modify it is void, and will
357 | automatically terminate your rights under this License (including any patent licenses
358 | granted under the third paragraph of section 11).
359 |
360 | However, if you cease all violation of this License, then your license from a
361 | particular copyright holder is reinstated **(a)** provisionally, unless and until the
362 | copyright holder explicitly and finally terminates your license, and **(b)** permanently,
363 | if the copyright holder fails to notify you of the violation by some reasonable means
364 | prior to 60 days after the cessation.
365 |
366 | Moreover, your license from a particular copyright holder is reinstated permanently
367 | if the copyright holder notifies you of the violation by some reasonable means, this
368 | is the first time you have received notice of violation of this License (for any
369 | work) from that copyright holder, and you cure the violation prior to 30 days after
370 | your receipt of the notice.
371 |
372 | Termination of your rights under this section does not terminate the licenses of
373 | parties who have received copies or rights from you under this License. If your
374 | rights have been terminated and not permanently reinstated, you do not qualify to
375 | receive new licenses for the same material under section 10.
376 |
377 | ### 9. Acceptance Not Required for Having Copies
378 |
379 | You are not required to accept this License in order to receive or run a copy of the
380 | Program. Ancillary propagation of a covered work occurring solely as a consequence of
381 | using peer-to-peer transmission to receive a copy likewise does not require
382 | acceptance. However, nothing other than this License grants you permission to
383 | propagate or modify any covered work. These actions infringe copyright if you do not
384 | accept this License. Therefore, by modifying or propagating a covered work, you
385 | indicate your acceptance of this License to do so.
386 |
387 | ### 10. Automatic Licensing of Downstream Recipients
388 |
389 | Each time you convey a covered work, the recipient automatically receives a license
390 | from the original licensors, to run, modify and propagate that work, subject to this
391 | License. You are not responsible for enforcing compliance by third parties with this
392 | License.
393 |
394 | An “entity transaction” is a transaction transferring control of an
395 | organization, or substantially all assets of one, or subdividing an organization, or
396 | merging organizations. If propagation of a covered work results from an entity
397 | transaction, each party to that transaction who receives a copy of the work also
398 | receives whatever licenses to the work the party's predecessor in interest had or
399 | could give under the previous paragraph, plus a right to possession of the
400 | Corresponding Source of the work from the predecessor in interest, if the predecessor
401 | has it or can get it with reasonable efforts.
402 |
403 | You may not impose any further restrictions on the exercise of the rights granted or
404 | affirmed under this License. For example, you may not impose a license fee, royalty,
405 | or other charge for exercise of rights granted under this License, and you may not
406 | initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging
407 | that any patent claim is infringed by making, using, selling, offering for sale, or
408 | importing the Program or any portion of it.
409 |
410 | ### 11. Patents
411 |
412 | A “contributor” is a copyright holder who authorizes use under this
413 | License of the Program or a work on which the Program is based. The work thus
414 | licensed is called the contributor's “contributor version”.
415 |
416 | A contributor's “essential patent claims” are all patent claims owned or
417 | controlled by the contributor, whether already acquired or hereafter acquired, that
418 | would be infringed by some manner, permitted by this License, of making, using, or
419 | selling its contributor version, but do not include claims that would be infringed
420 | only as a consequence of further modification of the contributor version. For
421 | purposes of this definition, “control” includes the right to grant patent
422 | sublicenses in a manner consistent with the requirements of this License.
423 |
424 | Each contributor grants you a non-exclusive, worldwide, royalty-free patent license
425 | under the contributor's essential patent claims, to make, use, sell, offer for sale,
426 | import and otherwise run, modify and propagate the contents of its contributor
427 | version.
428 |
429 | In the following three paragraphs, a “patent license” is any express
430 | agreement or commitment, however denominated, not to enforce a patent (such as an
431 | express permission to practice a patent or covenant not to sue for patent
432 | infringement). To “grant” such a patent license to a party means to make
433 | such an agreement or commitment not to enforce a patent against the party.
434 |
435 | If you convey a covered work, knowingly relying on a patent license, and the
436 | Corresponding Source of the work is not available for anyone to copy, free of charge
437 | and under the terms of this License, through a publicly available network server or
438 | other readily accessible means, then you must either **(1)** cause the Corresponding
439 | Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the
440 | patent license for this particular work, or **(3)** arrange, in a manner consistent with
441 | the requirements of this License, to extend the patent license to downstream
442 | recipients. “Knowingly relying” means you have actual knowledge that, but
443 | for the patent license, your conveying the covered work in a country, or your
444 | recipient's use of the covered work in a country, would infringe one or more
445 | identifiable patents in that country that you have reason to believe are valid.
446 |
447 | If, pursuant to or in connection with a single transaction or arrangement, you
448 | convey, or propagate by procuring conveyance of, a covered work, and grant a patent
449 | license to some of the parties receiving the covered work authorizing them to use,
450 | propagate, modify or convey a specific copy of the covered work, then the patent
451 | license you grant is automatically extended to all recipients of the covered work and
452 | works based on it.
453 |
454 | A patent license is “discriminatory” if it does not include within the
455 | scope of its coverage, prohibits the exercise of, or is conditioned on the
456 | non-exercise of one or more of the rights that are specifically granted under this
457 | License. You may not convey a covered work if you are a party to an arrangement with
458 | a third party that is in the business of distributing software, under which you make
459 | payment to the third party based on the extent of your activity of conveying the
460 | work, and under which the third party grants, to any of the parties who would receive
461 | the covered work from you, a discriminatory patent license **(a)** in connection with
462 | copies of the covered work conveyed by you (or copies made from those copies), or **(b)**
463 | primarily for and in connection with specific products or compilations that contain
464 | the covered work, unless you entered into that arrangement, or that patent license
465 | was granted, prior to 28 March 2007.
466 |
467 | Nothing in this License shall be construed as excluding or limiting any implied
468 | license or other defenses to infringement that may otherwise be available to you
469 | under applicable patent law.
470 |
471 | ### 12. No Surrender of Others' Freedom
472 |
473 | If conditions are imposed on you (whether by court order, agreement or otherwise)
474 | that contradict the conditions of this License, they do not excuse you from the
475 | conditions of this License. If you cannot convey a covered work so as to satisfy
476 | simultaneously your obligations under this License and any other pertinent
477 | obligations, then as a consequence you may not convey it at all. For example, if you
478 | agree to terms that obligate you to collect a royalty for further conveying from
479 | those to whom you convey the Program, the only way you could satisfy both those terms
480 | and this License would be to refrain entirely from conveying the Program.
481 |
482 | ### 13. Use with the GNU Affero General Public License
483 |
484 | Notwithstanding any other provision of this License, you have permission to link or
485 | combine any covered work with a work licensed under version 3 of the GNU Affero
486 | General Public License into a single combined work, and to convey the resulting work.
487 | The terms of this License will continue to apply to the part which is the covered
488 | work, but the special requirements of the GNU Affero General Public License, section
489 | 13, concerning interaction through a network will apply to the combination as such.
490 |
491 | ### 14. Revised Versions of this License
492 |
493 | The Free Software Foundation may publish revised and/or new versions of the GNU
494 | General Public License from time to time. Such new versions will be similar in spirit
495 | to the present version, but may differ in detail to address new problems or concerns.
496 |
497 | Each version is given a distinguishing version number. If the Program specifies that
498 | a certain numbered version of the GNU General Public License “or any later
499 | version” applies to it, you have the option of following the terms and
500 | conditions either of that numbered version or of any later version published by the
501 | Free Software Foundation. If the Program does not specify a version number of the GNU
502 | General Public License, you may choose any version ever published by the Free
503 | Software Foundation.
504 |
505 | If the Program specifies that a proxy can decide which future versions of the GNU
506 | General Public License can be used, that proxy's public statement of acceptance of a
507 | version permanently authorizes you to choose that version for the Program.
508 |
509 | Later license versions may give you additional or different permissions. However, no
510 | additional obligations are imposed on any author or copyright holder as a result of
511 | your choosing to follow a later version.
512 |
513 | ### 15. Disclaimer of Warranty
514 |
515 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
516 | EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
517 | PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER
518 | EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
519 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE
520 | QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
521 | DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
522 |
523 | ### 16. Limitation of Liability
524 |
525 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY
526 | COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS
527 | PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
528 | INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
529 | PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
530 | OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE
531 | WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
532 | POSSIBILITY OF SUCH DAMAGES.
533 |
534 | ### 17. Interpretation of Sections 15 and 16
535 |
536 | If the disclaimer of warranty and limitation of liability provided above cannot be
537 | given local legal effect according to their terms, reviewing courts shall apply local
538 | law that most closely approximates an absolute waiver of all civil liability in
539 | connection with the Program, unless a warranty or assumption of liability accompanies
540 | a copy of the Program in return for a fee.
541 |
542 | _END OF TERMS AND CONDITIONS_
543 |
544 | ## How to Apply These Terms to Your New Programs
545 |
546 | If you develop a new program, and you want it to be of the greatest possible use to
547 | the public, the best way to achieve this is to make it free software which everyone
548 | can redistribute and change under these terms.
549 |
550 | To do so, attach the following notices to the program. It is safest to attach them
551 | to the start of each source file to most effectively state the exclusion of warranty;
552 | and each file should have at least the “copyright” line and a pointer to
553 | where the full notice is found.
554 |
555 |
    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>
557 |
558 | This program is free software: you can redistribute it and/or modify
559 | it under the terms of the GNU General Public License as published by
560 | the Free Software Foundation, either version 3 of the License, or
561 | (at your option) any later version.
562 |
563 | This program is distributed in the hope that it will be useful,
564 | but WITHOUT ANY WARRANTY; without even the implied warranty of
565 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
566 | GNU General Public License for more details.
567 |
568 | You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.
570 |
571 | Also add information on how to contact you by electronic and paper mail.
572 |
573 | If the program does terminal interaction, make it output a short notice like this
574 | when it starts in an interactive mode:
575 |
    <program> Copyright (C) <year> <name of author>
577 | This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
578 | This is free software, and you are welcome to redistribute it
579 | under certain conditions; type 'show c' for details.
580 |
581 | The hypothetical commands `show w` and `show c` should show the appropriate parts of
582 | the General Public License. Of course, your program's commands might be different;
583 | for a GUI interface, you would use an “about box”.
584 |
585 | You should also get your employer (if you work as a programmer) or school, if any, to
586 | sign a “copyright disclaimer” for the program, if necessary. For more
587 | information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
589 |
590 | The GNU General Public License does not permit incorporating your program into
591 | proprietary programs. If your program is a subroutine library, you may consider it
592 | more useful to permit linking proprietary applications with the library. If this is
593 | what you want to do, use the GNU Lesser General Public License instead of this
594 | License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
596 |
--------------------------------------------------------------------------------
/NAMESPACE:
--------------------------------------------------------------------------------
1 | # Generated by roxygen2: do not edit by hand
2 |
3 | export(af)
4 | export(af_snp)
5 | export(center)
6 | export(centerscale)
7 | export(lfa)
8 | export(model.gof)
9 | export(pca_af)
10 | export(read.bed)
11 | export(read.tped.recode)
12 | export(sHWE)
13 | export(trunc_svd)
14 | useDynLib(lfa, .registration = TRUE)
15 |
--------------------------------------------------------------------------------
/NEWS.md:
--------------------------------------------------------------------------------
1 | # lfa 2.0.0.9000 (2020-09-18)
2 |
3 | Major overhaul from last version (1.9.0, last updated 2018-02-11).
4 | Overall, added unit testing to all functions, which resulted in the identification and fixing of several edge-case bugs, and also minor improvements.
5 |
6 | - User-facing changes
7 | - Removed redundant/obsolete exported functions:
8 | - `model.gof`: entirely redundant with `sHWE` in this same package
9 | - `read.tped.recode`: obsolete file format; instead, use `plink` for file conversions!
10 | - `read.bed`: instead, use `read_plink` from the `genio` package!
11 | - `center`: only worked for matrices without missingness (useless for real data), no dependencies in code, plus centering is trivial in R
12 | - Renamed remaining functions, replacing periods with underscores.
13 | - Only specific change: `trunc.svd` -> `trunc_svd`
14 | - NOTE `af_snp` and `pca_af` already had underscores instead of periods (unchanged).
15 | All other functions unchanged.
16 | - Function `trunc_svd`
17 | - Debugged `d=1` case (output matrices were dropped to vectors, which was a fatal error in `lfa` after it called `trunc_svd`).
18 | - Added option `force` that, when `TRUE`, forces the Lanczos algorithm to be used in all cases (most useful for testing purposes).
19 | - Function `lfa`
20 | - Improved documentation.
21 | - Functions `af_snp` and `af`
22 | - Fixed a bug in which large logistic factor coefficients resulted in `NaN` allele frequencies instead of 1 as they should be in the limit.
23 | - Improved code to "impute" allele frequencies for `NA` genotypes.
24 | Original version preserved `NA` values (a genotype that was `NA` for a particular individual and locus combination resulted in an `NA` in the corresponding allele frequency only, and conversely non-`NA` genotypes never resulted in `NA` allele frequencies).
25 | The new version takes advantage of knowing the LFs of all individuals (regardless of genotype missingness), and LFs and their coefficients are never `NA`, permitting allele frequencies to always be imputed into non-`NA` values!
26 | - Function `pca_af`
27 | - Similarly imputes allele frequencies for `NA` genotypes (internal change was trivial)
28 | - Debugged `d=1` case, which incorrectly returned an intercept column matrix instead of an allele frequency matrix.
29 | - Function `check_geno`
30 | - Debugged testing for matrix class (original code when run in R 4.0 generated warning "the condition has length > 1 and only the first element will be used")
31 | - Function `sHWE` (through internal `inverse_2x2`)
32 | - When a test was "singular" at a single SNP, function used to die; now that SNP gets an `NA` p-value.
33 | - Other previous `NA` cases here are avoided now that `af` never returns `NA` values.
34 |
35 | - Internal changes
36 | - Separated R functions into one source file each.
37 | - Added `.gitignore` files from another project.
38 | - Removed `src/lfa.so` from version control tracking.
39 | - Added unit tests for all functions using `testthat`.
40 | - Updates to C code solely to pass latest `R CMD check` requirements.
41 |
42 | # lfa 2.0.1.9000 (2020-11-11)
43 |
44 | - Function `lfa` added support for BEDMatrix objects for the genotype matrix `X`.
45 | - This consumes lower memory when the number of loci `m` is very large, so it enables analysis of larger datasets.
46 | - Algorithm for BEDMatrix case is different: instead of Lanczos truncated SVD, covariance matrices are computed explicitly and truncated eigendecomposition performed. This means runtime and memory complexity are very different here as the number of individuals `n` gets larger.
47 | - Added `RSpectra` package dependency (for fast truncated eigendecomposition).
48 |
49 | # lfa 2.0.2.9000 (2020-11-12)
50 |
51 | - More functions updated to support BEDMatrix inputs for the genotype matrix `X`. Although BEDMatrix is supported, in these cases there are minimal memory reduction advantages as outputs or intermediate matrices are necessarily as large as the input genotype data.
- Function `af`. Although there is memory saving by not loading `X` entirely into memory, the output individual-specific allele frequency matrix `P` has the same dimensions, so memory usage may still be excessive in large datasets, negating the BEDMatrix advantage.
53 | - Function `pca_af`. Note same memory usage issue as `af`.
54 | - Function `sHWE`. A worse memory problem is present, since internal code calculates the entire individual-specific allele frequency matrix `P`, then simulates `B` random genotype matrices of the same dimensions as input (each in memory) from which `LF` and ultimately HWE statistics are obtained.
55 |
56 | # lfa 2.0.3.9000 (2020-12-16)
57 |
58 | - Fixed an integer overflow error that occurred in `sHWE` (in internal function `compute_nulls`), which only occurred if the number of individuals times the number of loci exceeded the maximum integer size in R (the value of `.Machine$integer.max`, which is 2,147,483,647 in my 64-bit machine).
59 | - Function `lfa` added `rspectra` option (`FALSE` by default), as an alternative way of calculating SVD internally (for experimental use only).
60 | - Function `trunc_svd` is now exported.
61 | - Minor, user-imperceptible changes in other functions.
62 |
63 | # lfa 2.0.4.9000 (2020-12-22)
64 |
65 | - Function `sHWE` fixed bug: an error could occur when internal statistics vector included `NA` values.
66 | - Original error gave this obscure message, which occurred because an index went out of bounds due to a discrepancy in vector lengths due to the presence of `NA` values:
67 | ```
68 | Error in while ((i2 <= B0) & (obs_stat[i1] >= stat0[i2])) { :
69 | missing value where TRUE/FALSE needed
70 | ```
- Now empirical p-value code is separated into new internal function `pvals_empir`, and it's tested against a new naive version `pvals_empir_brute` (slower brute-force algorithm, used to validate outputs only) in unit tests including simulated data with `NA` values.
72 | - Also refactored other internal `sHWE` code into a new internal function `gof_stat`, which by itself better handles BEDMatrix files (though overall memory savings are not yet there on the broader `sHWE`).
73 | - Spell-checked this news file (edited earlier entries).
74 |
75 | # lfa 2.0.5.9000 (2021-02-16)
76 |
77 | * Documentation updates:
78 | - Fixed links to functions, in many cases these were broken because of incompatible mixed Rd and markdown syntax (now markdown is used more fully).
79 |
80 | # lfa 2.0.6.9000 (2021-03-01)
81 |
82 | * Functions `af_snp`, `af`, and `sHWE` added parameters `max_iter` (default 100) and `tol` (default 1e-10).
83 | - Previous version of code had these parameters hardcoded.
84 | - NOTE: `max_iter = 20` used to be the default value, which in downstream tests was not routinely sufficient to converge with comparable numerical accuracy to `glm` fits (not in this package `lfa`, but in downstream packages `gcatest` and `jackstraw`, which require calculating deviances).
85 |
86 | # lfa 2.0.7 (2021-06-16)
87 |
88 | * Lots of minor changes for Bioconductor update.
89 | - Function `trunc_svd`:
90 | - Removed `seed`, `ltrace`, and `V` options.
91 | - Added `maxit` option.
92 | - Reduced default `tol` from 1e-10 to `.Machine$double.eps` (about 2e-16)
93 | - Function `lfa`:
94 | - Reduced default `tol` from 1e-13 to `.Machine$double.eps` (about 2e-16)
95 | - Added more examples in function docs.
96 | - DESCRIPTION:
97 | - Updated to `Authors@R`.
98 | - Lengthened "Description" paragraph.
99 | - Increased R dependency from 3.2 to 4.0.
100 | - Updated `README.md`.
101 | - Reformatted this `NEWS.md` slightly to improve its automatic parsing.
102 | - Added published paper citation to vignette, `README.md`, `inst/CITATION`.
103 | - First two used to point to arXiv preprint, last one didn't exist.
104 | - Updated vignette to reflect that `read.bed` has been removed.
105 | - Corrected spelling.
106 | - Resurrected and deprecated functions that were exported in last Bioconductor release but deleted on GitHub:
107 | - `center`
108 | - `model.gof`
109 | - `read.bed`
110 | - `read.tped.recode`
111 | - Internal changes:
112 | - All unexported functions are now prefixed with a period.
- Replaced `1:x` with `seq_len(x)` in several functions.
- Reformatted all code with package `formatR` and otherwise matched Bioconductor guidelines.
115 | - Split some functions up so individual functions have less than 50 lines.
116 | - Removed unexported function `inverse_2x2`, probably speeding up `sHWE` slightly.
117 | - Removed unexported function `mv` (all instances called C code directly instead of this R wrapper).
118 | - Cleaned up `trunc_svd` source considerably.
119 |
120 | # lfa 2.0.8 (2021-06-18)
121 |
122 | - Minor updates:
123 | - Added `LICENSE.md`.
124 | - Edits to `README.md`.
125 | - Vignette now also suggests `BEDMatrix` for loading data.
126 |
127 | # lfa 2.0.9 (2022-11-11)
128 |
129 | - Fixed critical bug that prevented compilation of C code in latest R-devel.
130 | Documenting here path that led to debugging as it may be informative to maintainers of other packages that have written similar code.
131 | - Here's error message, abbreviated:
132 | ```
133 | fastmat.c: In function ‘mv’:
134 | fastmat.c:22:14: error: too few arguments to function ‘dgemv_’
135 | 22 | F77_CALL(dgemv)(&tr,dimA,dimA+1,&alpha,A,dimA,v,&one,&zero,ret,&one);
136 | | ^~~~~
137 | /home/biocbuild/bbs-3.17-bioc/R/include/R_ext/RS.h:77:25: note: in definition of macro ‘F77_CALL’
138 | 77 | # define F77_CALL(x) x ## _
139 | | ^
140 | /home/biocbuild/bbs-3.17-bioc/R/include/R_ext/BLAS.h:107:10: note: declared here
141 | 107 | F77_NAME(dgemv)(const char *trans, const int *m, const int *n,
142 | | ^~~~~
143 | ...
144 | make: *** [/home/biocbuild/bbs-3.17-bioc/R/etc/Makeconf:176: fastmat.o] Error 1
145 | ERROR: compilation failed for package ‘lfa’
146 | ```
147 | - Bug manifested after R-devel commit r82062 (2022-04-02): `R CMD check --as-cran now uses FC_LEN_T` (I was testing locally using `--as-cran`, perhaps it manifests later otherwise.)
148 | - Googling for `FC_LEN_T` led me to R news, which pointed me to [Writing R Extensions: 6.6.1 Fortran character strings](https://cran.r-project.org/doc/manuals/R-exts.html#Fortran-character-strings), which shows that an argument of type `FC_LEN_T` now has to be added to specify the length of a string passed to Fortran code.
149 | - Eventually text-searched for `dgemv` in the R source code and came across `array.c` examples where it sufficed to append the C macro `FCONE` to my existing `dgemv` call, and that solves it!
150 | (`FCONE`, defined in `R_ext/BLAS.h`, equal to `,(FC_LEN_T)1` if `FC_LEN_T` has been defined, otherwise it is blank.)
151 |
152 | # lfa 2.0.10 (2022-11-11)
153 |
154 | - Minor non-code updates to fix check `--as-cran` notes:
155 | - Package description cannot start with package name.
156 | - `README.md` updated an `http` link to `https` to which it redirects.
157 | - Function `sHWE` documentation used `\doi` instead of direct link.
158 |
159 | # lfa 2.1.10 (2023-05-25)
160 |
161 | - Version bump for bioconductor devel.
162 |
163 | # lfa 2.1.11 (2023-06-20)
164 |
165 | - Commented out excessive test for internal function `.lreg` against `glm`, which differ more often than expected due to poor or lack of convergence.
166 | - Removed unused LaTeX package dependencies from vignette to prevent errors restricted to specific testing platforms.
167 |
--------------------------------------------------------------------------------
/R/af.R:
--------------------------------------------------------------------------------
1 | #' @title Allele frequencies
2 | #' @description Compute matrix of individual-specific allele frequencies
3 | #' @inheritParams lfa
4 | #' @param LF Matrix of logistic factors, with intercept.
5 | #' Pass in the return value from [lfa()]!
6 | #' @param max_iter Maximum number of iterations for logistic regression
7 | #' @param tol Numerical tolerance for convergence of logistic regression
8 | #' @details Computes the matrix of individual-specific allele
9 | #' frequencies, which has the same dimensions of the genotype matrix.
10 | #' Be warned that this function could use a ton of memory, as the
11 | #' return value is all doubles. It could be wise to pass only a
12 | #' selection of the SNPs in your genotype matrix to get an idea for
13 | #' memory usage. Use [gc()] to check memory usage!
14 | #' @examples
15 | #' LF <- lfa( hgdp_subset, 4 )
16 | #' allele_freqs <- af( hgdp_subset, LF )
17 | #' @return Matrix of individual-specific allele frequencies.
18 | #' @export
19 | af <- function(X, LF, safety = FALSE, max_iter = 100, tol = 1e-10) {
20 | if (missing(X))
21 | stop("Genotype matrix `X` is required!")
22 | if (missing(LF))
23 | stop("`LF` matrix is required!")
24 |
25 | # check class
26 | if (!is.matrix(X)) # BEDMatrix returns TRUE here
27 | stop("`X` must be a matrix!")
28 |
29 | # get dimensions
30 | if (methods::is(X, "BEDMatrix")) {
31 | m <- ncol(X)
32 | n <- nrow(X)
33 | } else {
34 | n <- ncol(X)
35 | # m not used in this case
36 | }
37 |
38 | # dimensions should agree
39 | if (n != nrow(LF))
40 | stop("Number of individuals in `X` and `LF` disagree!")
41 |
42 | if (!methods::is(X, "BEDMatrix")) {
43 | # usual R object behavior
44 | if (safety)
45 | .check_geno(X)
46 | return(t(apply(X, 1, af_snp, LF, max_iter=max_iter, tol=tol)))
47 | } else {
48 | # BEDMatrix case.
49 | P <- matrix(0, m, n) # init output matrix
50 | for (i in seq_len(m)) {
51 | # get locus i genotype vector
52 | xi <- X[, i]
53 | # calculate and store result
54 | P[i, ] <- af_snp(xi, LF, max_iter=max_iter, tol=tol)
55 | }
56 | # done!
57 | return(P)
58 | }
59 | }
60 |
61 |
--------------------------------------------------------------------------------
/R/af_cap.R:
--------------------------------------------------------------------------------
# For the PCA method: clamps individual-specific allele frequencies (IAFs)
# to lie at least 1/(2n) away from the [0, 1] boundary, where n is the
# number of individuals. Preserves input type (matrix vs vector).
.af_cap <- function(P) {
    if (missing(P))
        stop("Individual-specific allele frequency matrix `P` is required!")

    # sample size: columns of a matrix, or the length of a vector
    if (is.matrix(P)) {
        n_ind <- ncol(P)
    } else {
        n_ind <- length(P)
    }

    # symmetric caps at 1/(2n) from each boundary
    cap_lo <- 1 / (2 * n_ind)
    cap_hi <- 1 - cap_lo

    # clamp elementwise; pmin/pmax keep dimensions and propagate NAs
    P <- pmin(P, cap_hi)
    P <- pmax(P, cap_lo)
    P
}
22 |
23 |
--------------------------------------------------------------------------------
/R/af_snp.R:
--------------------------------------------------------------------------------
#' @title Allele frequencies for SNP
#' @description Computes individual-specific allele frequencies for a
#' single SNP.
#' @inheritParams af
#' @param snp vector of 0's, 1's, and 2's
#' @return vector of allele frequencies
#' @examples
#' LF <- lfa(hgdp_subset, 4)
#' # pick one SNP only
#' snp <- hgdp_subset[ 1, ]
#' # allele frequency vector for that SNP only
#' allele_freqs_snp <- af_snp(snp, LF)
#' @seealso [af()]
#' @export
af_snp <- function(snp, LF, max_iter = 100, tol = 1e-10) {
    if (missing(snp))
        stop("`snp` is required!")
    if (missing(LF))
        stop("`LF` matrix is required!")

    # dimensions should agree
    if (length(snp) != nrow(LF))
        stop("Number of individuals in `snp` and `LF` disagree!")

    # can only regress with non-NA individuals
    indexes_keep <- !is.na(snp)
    snp <- snp[indexes_keep] # overwrite
    LF2 <- LF[indexes_keep, , drop = FALSE] # don't overwrite LF
    # get coefficients from logistic regression
    betas <- .lreg(snp, LF2, max_iter, tol)

    # Linear predictor for *all* individuals: `snp` may have NAs but `LF`
    # and `betas` never do, so missing genotypes get imputed frequencies.
    est <- .Call("mv_c", LF, betas)
    # stats::plogis() is a numerically stable inverse logit: it returns 1
    # in the limit of large `est`, unlike exp(est) / (1 + exp(est)), which
    # overflows to NaN and previously required a magic est > 100 cutoff.
    return(stats::plogis(est))
}
40 |
--------------------------------------------------------------------------------
/R/center.R:
--------------------------------------------------------------------------------
#' @title Matrix centering
#'
#' @description
#' Deprecated C-backed routine to row-center a matrix (subtract each
#' row's mean). See `lfa-deprecated` for the replacement idiom.
#'
#' @param A matrix
#' @return `A` but row centered
#' @name center-deprecated
#' @usage center(A)
#' @seealso [lfa-deprecated()]
#' @keywords internal
NULL
13 |
#' @rdname lfa-deprecated
#' @section `center`:
#' For `center`, use `function(x) x - rowMeans(x)`.
#' @export
center <- function(A) {
    # deprecated: centering is trivial in plain R
    .Deprecated('function(x) x - rowMeans(x)')
    # subtract each row's mean from that row
    sweep(A, 1, rowMeans(A))
}
22 |
--------------------------------------------------------------------------------
/R/centerscale.R:
--------------------------------------------------------------------------------
#' @title Matrix centering and scaling
#'
#' @description
#' C routine to row-center and scale a matrix. Doesn't work with missing data.
#'
#' @param A matrix
#' @examples
#' Xc <- centerscale(hgdp_subset)
#' @return matrix same dimensions `A` but row centered and scaled
#' @export
centerscale <- function(A) {
    # all the work happens in compiled code; coerce result back to matrix
    result <- .Call("centerscale_c", A)
    as.matrix(result)
}
14 |
--------------------------------------------------------------------------------
/R/check_geno.R:
--------------------------------------------------------------------------------
# Stops if the genotype matrix `X` is malformed in some way: not a matrix,
# not integer-typed, values outside {0, 1, 2}, loci without variation, or
# more individuals than loci (matrix not "tall"). Returns NULL invisibly
# when all checks pass.
.check_geno <- function(X) {
    if (!is.matrix(X))
        stop("The input must be genotypes in a matrix class.")

    # integer storage is what the rest of the package expects
    if (!is.integer(X[1]))
        stop("Elements of the genotype matrix should be integer.")

    classes <- names(table(as.vector(X)))
    if (!all(classes %in% c("0", "1", "2")))
        stop("Expecting genotypes to be 0, 1, and 2.")

    # count loci (rows) that are constant across all samples
    uniqLen <- apply(X, 1, function(x) length(unique(x)))
    n_fixed <- sum(uniqLen == 1)
    # bugfix: condition was `> 1` (let a single fixed locus slip through)
    # and the message pasted the whole `uniqLen` vector instead of a count
    if (n_fixed > 0)
        stop("Remove ", n_fixed, " loci without variation across samples.")

    m <- nrow(X)
    n <- ncol(X)

    if (m <= n)
        stop("The genotype matrix should be tall.")
}
24 |
--------------------------------------------------------------------------------
/R/compute_nulls.R:
--------------------------------------------------------------------------------
# Simulates `B` null genotype matrices from the individual-specific allele
# frequency matrix `P` and returns an m x B matrix of goodness-of-fit
# statistics, one column per null replicate.
.compute_nulls <- function(P, d, B, max_iter = 100, tol = 1e-10) {
    m <- nrow(P)
    n <- ncol(P)

    # `n * m` would overflow as integer arithmetic for large data;
    # promote to double before multiplying
    n_data <- as.numeric(n) * as.numeric(m)

    stats0 <- matrix(0, m, B)
    for (rep_i in seq_len(B)) {
        # draw one binomial genotype matrix under the null
        X0 <- matrix(stats::rbinom(n_data, 2, P), nrow = m, ncol = n)
        LF0 <- lfa(X0, d)
        # computes stats correctly, even when X0 is BEDMatrix!
        stats0[, rep_i] <- .gof_stat(X0, LF0, max_iter = max_iter, tol = tol)
    }

    stats0
}
19 |
--------------------------------------------------------------------------------
/R/convtests.R:
--------------------------------------------------------------------------------
# Convergence test for the truncated-SVD iteration: counts how many of the
# first `d_org` residuals fall below `tol * Smax`. If all have converged,
# reports success; otherwise enlarges the working dimension `d`, capped at
# `Bsz - 3` to keep room in the Lanczos basis.
.convtests <- function(Bsz, tol, d_org, residuals, d, Smax) {
    n_ok <- sum(residuals[seq_len(d_org)] < tol * Smax)

    if (n_ok == d_org) {
        # all requested singular values have converged
        return(list(converged = TRUE, d = d))
    }

    # not converged: bump up d, but never beyond Bsz - 3
    d_new <- min(max(d, d_org + n_ok), Bsz - 3)
    list(converged = FALSE, d = d_new)
}
13 |
--------------------------------------------------------------------------------
/R/covar_BEDMatrix.R:
--------------------------------------------------------------------------------
# Based on popkinsuppl::kinship_std, limited to the BEDMatrix case.
# Returns the n x n covariance (cross-product of centered genotypes) plus
# the per-locus mean values (needed by LFA). Does not use popkin for
# memory control, does not normalize by p(1-p) to match how LFA does it,
# and sums (rather than averages) over non-NA cases.
# `m_chunk = 1000` gave good performance in tests.
.covar_BEDMatrix <- function(X, m_chunk = 1000) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (!methods::is(X, "BEDMatrix"))
        stop("`X` must be a BEDMatrix object!")

    # BEDMatrix: individuals are rows, loci are columns
    n <- nrow(X)
    m <- ncol(X)

    covar <- matrix(0, nrow = n, ncol = n)
    X_mean <- vector("numeric", m)

    # walk the loci in chunks of up to `m_chunk` columns to bound memory
    for (chunk_start in seq(1, m, by = m_chunk)) {
        loci <- chunk_start:min(chunk_start + m_chunk - 1, m)
        # transpose so loci are rows, which makes centering easiest
        Xi <- t(X[, loci, drop = FALSE])
        # per-locus means, stored for the caller
        mean_i <- rowMeans(Xi, na.rm = TRUE)
        X_mean[loci] <- mean_i
        # center, then zero out NAs so they drop out of the cross product
        Xi <- Xi - mean_i
        Xi[is.na(Xi)] <- 0
        # accumulate the running cross-product sum
        covar <- covar + crossprod(Xi)
    }

    list(covar = covar, X_mean = X_mean)
}
43 |
44 |
--------------------------------------------------------------------------------
/R/covar_basic.R:
--------------------------------------------------------------------------------
# basic covariance formula for an R genotype matrix X, to check the more
# elaborate .covar_BEDMatrix against this
.covar_basic <- function(X) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (!is.matrix(X))
        stop("`X` must be a matrix!")

    # center each locus (row) at its mean, ignoring missing genotypes
    locus_means <- rowMeans(X, na.rm = TRUE)
    X_centered <- X - locus_means

    # zero out missing entries so they contribute nothing to the cross
    # product (equivalent to imputing the locus mean before centering)
    X_centered[is.na(X_centered)] <- 0

    # the n-by-n cross-product matrix is what we desire
    crossprod(X_centered)
}
25 |
26 |
--------------------------------------------------------------------------------
/R/covar_logit_BEDMatrix.R:
--------------------------------------------------------------------------------
# Based on .covar_BEDMatrix / popkinsuppl::kinship_std.  Computes the n-by-n
# covariance matrix for the second SVD step of LFA: each chunk of genotypes is
# projected onto the span of the first-pass eigenvectors `V`, thresholded to
# the valid logit domain, logit-transformed, centered/scaled, and its cross
# product accumulated.
#
# X       BEDMatrix genotypes (individuals x loci)
# X_mean  per-locus means precomputed by .covar_BEDMatrix
# V       truncated eigenvector matrix from the first SVD pass
# ploidy  ploidy of the data (2 for bi-allelic unphased SNPs)
# m_chunk number of loci to read per chunk (memory control)
#
# Returns the accumulated n x n cross-product (covariance) matrix.
.covar_logit_BEDMatrix <- function(X, X_mean, V, ploidy = 2, m_chunk = 1000) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(X_mean))
        stop("Mean locus frequency `X_mean` is required!")
    if (missing(V))
        stop("Truncated eigenvector matrix `V` is required!")
    if (!("BEDMatrix" %in% class(X)))
        stop("`X` must be a BEDMatrix object!")
    # get dimensions (BEDMatrix: individuals x loci)
    n <- nrow(X)
    m <- ncol(X)
    # turn eigenvector matrix into a projection matrix onto its column span
    P_V <- tcrossprod(V)
    # initialize running cross-product sum
    covar <- matrix(0, nrow = n, ncol = n)
    # navigate chunks
    i_chunk <- 1 # start of first chunk
    while (TRUE) {
        # start an infinite loop, break inside as needed
        if (i_chunk > m)
            break # reached end
        # range of SNPs to extract in this chunk
        loci_chunk <- i_chunk:min(i_chunk + m_chunk - 1, m)
        # transpose for our usual setup (makes centering easiest)
        Xi <- t(X[, loci_chunk, drop = FALSE])
        # update for next chunk! (overshoots at end, that's ok)
        i_chunk <- i_chunk + m_chunk
        Xi_mean <- X_mean[loci_chunk] # precomputed data
        Xi <- Xi - Xi_mean # center
        if (anyNA(Xi))
            Xi[is.na(Xi)] <- 0 # set NAs to zero ('impute')
        # project data using first-pass eigenvecs (P_V), add the mean back,
        # and rescale so entries are allele-frequency estimates in [0, 1]
        Zi <- (Xi %*% P_V + Xi_mean)/ploidy
        # C helper flags rows whose values all stay inside the open logit
        # domain (margin 1/(ploidy*n)); rows that fail are dropped
        loci_keep <- as.logical(.Call("lfa_threshold", Zi, 1/(ploidy * n)))
        if (!any(loci_keep))
            next # move on if nothing passed
        Zi <- Zi[loci_keep, , drop = FALSE] # subset loci
        Zi <- log(Zi/(1 - Zi)) # logit transform whole matrix
        Zi <- centerscale(Zi)
        covar <- covar + crossprod(Zi) # add to running sum.
    }
    return(covar)
}
48 |
49 | # projection trick. Full rank version: (X is centered matrix though!): X = U D
50 | # t(V); X V = U D t(V) V = U D; X V D^(-1) = U; limited rank now: Z = U_r D_r
51 | # t(V_r); Z = (X V D^(-1))_r D_r t(V_r); Z = (X V)_r D_r^(-1) D_r t(V_r); Z = X
52 | # V_r t(V_r)
53 |
--------------------------------------------------------------------------------
/R/covar_logit_basic.R:
--------------------------------------------------------------------------------
# Basic (in-memory) covariance for the second (logit) SVD step of LFA; the
# reference implementation that .covar_logit_BEDMatrix is checked against.
#
# X      genotype matrix, loci x individuals
# V      truncated eigenvector matrix from the first SVD pass
# ploidy ploidy of the data (2 for bi-allelic unphased SNPs)
#
# Returns the n x n cross-product (covariance) matrix of the thresholded,
# logit-transformed, centered/scaled projected data.
.covar_logit_basic <- function(X, V, ploidy = 2) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(V))
        stop("Truncated eigenvector matrix `V` is required!")
    if (!is.matrix(X))
        stop("`X` must be a matrix!")

    # get dimensions (individuals are columns)
    n <- ncol(X)

    # standard per-locus mean
    X_mean <- rowMeans(X, na.rm=TRUE)

    # center data
    X <- X - X_mean

    # set NAs to zero ('impute' with the locus mean)
    if (anyNA(X))
        X[is.na(X)] <- 0

    # project data to rowspace of V (first-pass eigenvectors), add the mean
    # back, rescale to allele-frequency estimates in [0, 1]
    Z <- X %*% tcrossprod(V) + X_mean
    Z <- Z/ploidy

    # apply LFA threshold: C helper flags rows whose values all stay inside
    # the open logit domain (margin 1/(ploidy * n))
    ind <- as.logical(.Call("lfa_threshold", Z, 1/(ploidy * n)))
    # subset loci
    Z <- Z[ind, , drop=FALSE]
    # logit transformation of whole matrix
    Z <- log(Z/(1 - Z))

    # center and scale this reduced matrix
    Z <- centerscale(Z)

    # cross product matrix
    covar <- crossprod(Z)

    return(covar)
}
43 |
44 |
--------------------------------------------------------------------------------
/R/data.R:
--------------------------------------------------------------------------------
1 | #' @name hgdp_subset
2 | #' @title HGDP subset
3 | #' @description Subset of the HGDP dataset.
4 | #' @docType data
5 | #' @usage hgdp_subset
6 | #' @format a matrix of 0's, 1's and 2's.
7 | #' @return genotype matrix
8 | #' @source Stanford HGDP
9 |
10 | NULL
11 |
--------------------------------------------------------------------------------
/R/gof_stat.R:
--------------------------------------------------------------------------------
.gof_stat <- function(X, LF, max_iter = 100, tol = 1e-10) {
    # Dispatch .gof_stat_snp across every locus of `X`, handling both plain R
    # matrices (loci x individuals) and BEDMatrix objects (individuals x loci).

    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(LF))
        stop("`LF` matrix is required!")

    # check class
    if (!is.matrix(X)) # BEDMatrix returns TRUE
        stop("`X` must be a matrix!")

    is_bedmatrix <- methods::is(X, "BEDMatrix")
    # BEDMatrix stores individuals along rows, loci along columns
    n <- if (is_bedmatrix) nrow(X) else ncol(X)
    m <- if (is_bedmatrix) ncol(X) else nrow(X)

    # dimensions should agree
    if (n != nrow(LF))
        stop("Number of individuals in `X` and `LF` disagree!")

    if (is_bedmatrix) {
        # BEDMatrix case: loop one locus at a time to keep memory low.
        # (Chunked reads might be faster, but `af_snp` dominates runtime.)
        gof_stats <- vapply(
            seq_len(m),
            function(i) {
                # as.numeric collapses the 1x1 matrix (or NA) result
                as.numeric(.gof_stat_snp(X[, i], LF, max_iter = max_iter,
                    tol = tol))
            },
            numeric(1)
        )
    } else {
        # usual R object behavior: apply across loci (rows)
        gof_stats <- apply(X, 1, .gof_stat_snp, LF, max_iter=max_iter, tol=tol)
    }
    return(gof_stats)
}
47 |
--------------------------------------------------------------------------------
/R/gof_stat_snp.R:
--------------------------------------------------------------------------------
.gof_stat_snp <- function(snp, LF, max_iter = 100, tol = 1e-10) {
    # Goodness-of-fit statistic for one SNP: a quadratic form comparing
    # observed genotype-class counts to those expected under the fitted
    # individual-specific allele frequencies.  Returns NA when the 2x2
    # covariance matrix is singular.

    # drop missing genotypes (and the matching LF rows)
    ok <- !is.na(snp)
    snp <- snp[ok]
    LF <- LF[ok, , drop = FALSE]
    # individual-specific allele frequencies at this SNP
    p <- af_snp(snp, LF, max_iter = max_iter, tol = tol)
    # per-individual probabilities of genotype 0 and genotype 1
    prob0 <- (1 - p)^2
    prob1 <- 2 * p * (1 - p)
    # expected vs observed counts in the two genotype classes
    expected <- c(sum(prob0), sum(prob1))
    observed <- c(sum(snp == 0), sum(snp == 1))
    # entries of the 2x2 covariance matrix Sigma of the counts
    s11 <- sum(prob0 * (1 - prob0))
    s12 <- -sum(prob0 * prob1)
    s22 <- sum(prob1 * (1 - prob1))
    # Sigma is not invertible if its determinant is zero
    det_sigma <- s11 * s22 - s12^2
    if (det_sigma == 0)
        return(NA)
    # explicit 2x2 inverse
    sigma_inv <- matrix(c(s22, -s12, -s12, s11), nrow=2, ncol=2)/det_sigma
    # quadratic form (a 1x1 matrix, as callers expect)
    t(observed - expected) %*% sigma_inv %*% (observed - expected)
}
28 |
--------------------------------------------------------------------------------
/R/lfa-deprecated.R:
--------------------------------------------------------------------------------
1 | #' @title Deprecated functions in package `lfa`.
2 | #' @description The functions listed below are deprecated and will be defunct in
3 | #' the near future. When possible, alternative functions with similar
4 | #' functionality are also mentioned. Help pages for deprecated functions are
5 | #' available at `help("-deprecated")`.
6 | #' @name lfa-deprecated
7 | #' @return Function-dependent
8 | #' @keywords internal
9 | NULL
10 |
--------------------------------------------------------------------------------
/R/lfa.R:
--------------------------------------------------------------------------------
1 | #' Logistic factor analysis
2 | #'
3 | #' Fit logistic factor model of dimension `d` to binomial data.
4 | #' Computes `d - 1` singular vectors followed by intercept.
5 | #'
6 | #' Genotype matrix should have values in 0, 1, 2, or `NA`.
7 | #' The coding of the SNPs (which case is 0 vs 2) does not change the output.
8 | #'
9 | #' @param X A matrix of SNP genotypes, i.e. an integer matrix of 0's,
10 | #' 1's, 2's and `NA`s.
11 | #' BEDMatrix is supported.
12 | #' Sparse matrices of class Matrix are not supported (yet).
13 | #' @param d Number of logistic factors, including the intercept
14 | #' @param adjustments A matrix of adjustment variables to hold fixed during
15 | #' estimation. Number of rows must equal number of individuals in `X`.
16 | #' These adjustments take the place of LFs in the output, so the number of
17 | #' columns must not exceed `d-2` to allow for the intercept and at least one
18 | #' proper LF to be included.
19 | #' When present, these adjustment variables appear in the first columns of the
20 | #' output.
21 | #' Not supported when `X` is a BEDMatrix object.
22 | #' @param rspectra If `TRUE`, use
23 | #' [RSpectra::svds()] instead of default
24 | #' [trunc_svd()] or
25 | #' [corpcor::fast.svd()] options.
26 | #' Ignored if `X` is a BEDMatrix object.
27 | #' @param override Optional boolean passed to [trunc_svd()]
28 | #' to bypass its Lanczos bidiagonalization SVD, instead using
29 | #' [corpcor::fast.svd()].
30 | #' Usually not advised unless encountering a bug in the SVD code.
31 | #' Ignored if `X` is a BEDMatrix object.
32 | #' @param safety Optional boolean to bypass checks on the genotype
33 | #' matrices, which require a non-trivial amount of computation.
34 | #' Ignored if `X` is a BEDMatrix object.
35 | #' @param ploidy Ploidy of data, defaults to 2 for bi-allelic unphased SNPs
36 | #' @param tol Tolerance value passed to [trunc_svd()]
37 | #' Ignored if `X` is a BEDMatrix object.
38 | #' @param m_chunk If `X` is a BEDMatrix object, number of loci to read per
39 | #' chunk (to control memory usage).
40 | #'
41 | #' @return The matrix of logistic factors, with individuals along rows and
42 | #' factors along columns.
43 | #' The intercept appears at the end of the columns, and adjustments in the
44 | #' beginning if present.
45 | #'
46 | #' @examples
47 | #' LF <- lfa(hgdp_subset, 4)
48 | #' dim(LF)
49 | #' head(LF)
50 | #' @useDynLib lfa, .registration = TRUE
51 | #' @export
lfa <- function(X, d, adjustments = NULL, override = FALSE, safety = FALSE,
    rspectra = FALSE, ploidy = 2, tol = .Machine$double.eps, m_chunk = 1000) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(d))
        stop("Dimension number `d` is required!")
    # check class
    if (!is.matrix(X)) # BEDMatrix returns TRUE
        stop("`X` must be a matrix!")
    is_bedmatrix <- methods::is(X, "BEDMatrix")
    # number of individuals (BEDMatrix is transposed)
    n <- if (is_bedmatrix) nrow(X) else ncol(X)
    # validate `d` with guard clauses
    if (!is.numeric(d))
        stop("d must be numeric")
    if (d != as.integer(d))
        stop("d should be integer")
    if (d < 1)
        stop("d should be at least 1")
    if (d == 1)
        return(matrix(1, n, 1)) # intercept-only: return the 1s column vector
    d <- d - 1 # remaining factors come from the SVD steps
    # validate adjustment variables, if given
    if (!is.null(adjustments)) {
        if (is_bedmatrix)
            stop("`adjustments` are not supported when `X` is class BEDMatrix!")
        if (!is.matrix(adjustments))
            stop("`adjustments` must be a matrix!")
        if (nrow(adjustments) != n)
            stop("`adjustments` and `X` number of individuals disagree!")
        if (ncol(adjustments) >= d)
            stop("need to estimate at least one non-adjustment logistic factor")
        if (anyNA(adjustments))
            stop("`adjustments` must not have missing values!")
    }
    if (is_bedmatrix)
        return(.lfa_BEDMatrix(X, d, ploidy=ploidy, m_chunk=m_chunk))
    # regular R matrix from here on
    if (safety)
        .check_geno(X) # check data if asked to
    # use the in-memory version of the code, return those LFs
    return(.lfa_matrix(X, d, adjustments, override, rspectra, ploidy, tol))
}
96 |
--------------------------------------------------------------------------------
/R/lfa_BEDMatrix.R:
--------------------------------------------------------------------------------
# Internal version of lfa for BEDMatrix data.  `d` should be `d-1` from `lfa`
# input!  `adjustments` not supported yet.  `lfa`'s checks are not repeated.
#
# X       BEDMatrix genotypes (individuals x loci)
# d       number of non-intercept logistic factors to estimate
# ploidy  ploidy of data (2 for bi-allelic unphased SNPs)
# m_chunk number of loci to read per chunk (memory control)
#
# Returns the n x (d+1) matrix of logistic factors, intercept column last.
.lfa_BEDMatrix <- function(X, d, ploidy = 2, m_chunk = 1000) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(d))
        stop("Dimension number `d` is missing!")
    if (!methods::is(X, "BEDMatrix"))
        stop("`X` must be a BEDMatrix object!")

    # calculate covariance matrix and loci means
    obj <- .covar_BEDMatrix(X, m_chunk = m_chunk)
    covar <- obj$covar
    X_mean <- obj$X_mean

    # get truncated eigendecomposition
    obj <- RSpectra::eigs_sym(covar, d)
    V <- obj$vectors

    # calculate covariance matrix for second step (after logit
    # filter/transform).
    # BUG FIX: forward `ploidy` and `m_chunk`; previously the defaults of
    # .covar_logit_BEDMatrix were silently used regardless of these arguments
    covar <- .covar_logit_BEDMatrix(X, X_mean, V, ploidy = ploidy,
        m_chunk = m_chunk)

    # get truncated eigendecomposition of second level, which yields the
    # logistic factors
    obj <- RSpectra::eigs_sym(covar, d)
    V <- obj$vectors

    # add intercept column last
    V <- cbind(V, 1)

    return(V)
}
34 |
35 |
36 |
--------------------------------------------------------------------------------
/R/lfa_matrix.R:
--------------------------------------------------------------------------------
# LFA for in-memory R matrices only (as opposed to BEDMatrix).
# Skips validations already performed in [lfa()].
#
# X           genotype matrix, loci x individuals
# d           number of non-intercept factors (already `d-1` from lfa())
# adjustments adjustment-variable matrix or NULL
# override    bypass Lanczos SVD in trunc_svd() in favor of corpcor::fast.svd
# rspectra    use RSpectra::svds instead of trunc_svd
# ploidy      ploidy of the data (2 for bi-allelic unphased SNPs)
# tol         tolerance passed to trunc_svd
#
# Returns the n x (d+1) matrix of logistic factors: adjustments first (if
# any), then the estimated factors, then the intercept column.
.lfa_matrix <- function(X, d, adjustments, override, rspectra, ploidy, tol) {
    n <- ncol(X) # number of individuals
    if (!rspectra) {
        adjust <- 8 # extra Lanczos dimensions for trunc_svd
        if (n - d < 10)
            adjust <- n - d - 1 # shrink so it fits small problems
    }
    NA_IND <- is.na(X) # index the missing values
    mean_X <- rowMeans(X, na.rm=TRUE)
    norm_X <- X - mean_X # center
    norm_X[NA_IND] <- 0 # then 'impute'
    # first SVD of the centered genotype matrix
    if (rspectra) {
        mysvd <- RSpectra::svds(norm_X, d)
    } else {
        mysvd <- trunc_svd(norm_X, d, adjust, tol, override=override)
    }
    rm(norm_X) # free memory as we go; these objects are large
    D <- diag(mysvd$d, nrow=d) # pass `d` so `diag` gets `d=1` right
    U <- mysvd$u
    V <- mysvd$v
    rm(mysvd)
    # form the rank-d reconstruction, shift back by the mean, and rescale to
    # allele-frequency estimates in [0, 1]
    z <- U %*% D %*% t(V)
    z <- z + mean_X
    z <- z/ploidy
    rm(U); rm(D); rm(V)
    # remove rows that exceed logit (0,1) domain (C helper flags rows that
    # stay within margin 1/(ploidy * n) of the open interval)
    z <- z[as.logical(.Call("lfa_threshold", z, 1/(ploidy * n))), ]
    z <- log(z/(1 - z)) # logit
    norm_z <- centerscale(z) # center/scale in logit scale now
    # regress out adjustment vars, if relevant, and reduce the factor count
    # accordingly (their columns are re-attached at the end)
    if (!is.null(adjustments)) {
        norm_z <- t(stats::residuals(stats::lm(t(norm_z) ~ adjustments - 1)))
        d <- d - ncol(adjustments)
    }
    # second SVD yields the logistic factors
    if (rspectra) {
        v <- RSpectra::svds(norm_z, d)$v
    } else {
        v <- trunc_svd(norm_z, d, adjust, tol, override=override)$v
    }
    v <- cbind(v, 1) # add intercept column last
    if (!is.null(adjustments))
        v <- cbind(adjustments, v) # add adjustment variables first
    return(v)
}
50 |
--------------------------------------------------------------------------------
/R/lreg.R:
--------------------------------------------------------------------------------
# C-based logistic regression of one SNP's genotypes on the logistic factors,
# with a glm() fallback when the C routine returns NA coefficients.
#
# x        genotype vector (values in 0/1/2, no NAs allowed)
# LF       matrix of logistic factors (individuals x factors, incl. intercept)
# max_iter maximum iterations for the C solver
# tol      convergence tolerance for the C solver
#
# Returns the vector of regression coefficients (one per LF column).
.lreg <- function(x, LF, max_iter = 100, tol = 1e-10) {
    if (missing(x))
        stop("Genotype vector `x` is required!")
    if (is.null(LF))
        stop("`LF` matrix is required!")

    # make sure the data is NA-free. Focus on x only, that's a more common
    # issue (it'd be wasteful to test LFs repeatedly (for each locus))
    if (anyNA(x))
        stop("Genotype vector `x` must not have NA values!")

    # Doubling: each genotype is recoded as two binary observations sharing
    # the same covariate row (x>=1 and x==2) -- presumably so the Bernoulli
    # logistic solver can fit binomial(2) data; confirm against lreg_c in
    # src/lreg.c
    LF2 <- rbind(LF, LF)
    x1 <- as.numeric((x == 1) | (x == 2))
    x2 <- as.numeric(x == 2)
    x2 <- c(x1, x2)
    # get the desired coefficients
    betas <- .Call("lreg_c", LF2, x2, max_iter, tol)

    # if coefficients are NA, use glm
    if (anyNA(betas)) {
        # `-1` is because LF already has intercept. NOTE: this reduces betas
        # by 1 as well, we don't match `lreg_c` otherwise!
        # suppressWarnings: because sometimes we get warning 'glm.fit: fitted
        # probabilities numerically 0 or 1 occurred'. Occurs on randomly
        # simulated data, nothing particularly problematic, so meh
        suppressWarnings(betas <- stats::glm(cbind(x, 2 - x) ~ -1 + LF,
            family = "binomial")$coef)
        names(betas) <- NULL
    }
    return(betas)
}
34 |
--------------------------------------------------------------------------------
/R/model.gof.R:
--------------------------------------------------------------------------------
1 | #' @title LFA model goodness of fit
2 | #'
3 | #' @description
4 | #' Compute SNP-by-SNP goodness-of-fit when compared to population
5 | #' structure. This can be aggregated to determine genome-wide
6 | #' goodness-of-fit for a particular value of `d`.
7 | #'
8 | #' @details
9 | #' This function returns p-values for LFA model goodness of fit based
10 | #' on a simulated null.
11 | #'
12 | #' @note Genotype matrix is expected to be a matrix of integers with
13 | #' values 0, 1, and 2. Currently no support for missing values. Note
14 | #' that the coding of the SNPs does not affect the algorithm.
15 | #'
16 | #' @param X A matrix of SNP genotypes, i.e. an integer matrix of 0's,
17 | #' 1's, 2's and `NA`s.
18 | #' BEDMatrix is supported.
19 | #' @param LF matrix of logistic factors
20 | #' @param B number of null datasets to generate, `B = 1` is usually
21 | #' sufficient. If computational time/power allows, a few extra
22 | #' `B` could be helpful
23 | #' @return vector of p-values for each SNP.
24 | #' @name model.gof-deprecated
25 | #' @usage model.gof(X, LF, B)
26 | #' @seealso [lfa-deprecated()]
27 | #' @keywords internal
28 | NULL
29 |
30 | #' @rdname lfa-deprecated
31 | #' @section `model.gof`:
32 | #' For `model.gof`, use [sHWE()].
33 | #' @export
model.gof <- function(X, LF, B) {
    # deprecated alias: warn, then delegate straight to the replacement
    .Deprecated("sHWE")
    sHWE(X, LF, B)
}
38 |
--------------------------------------------------------------------------------
/R/pca_af.R:
--------------------------------------------------------------------------------
1 | #' @title PCA Allele frequencies
2 | #' @description Compute matrix of individual-specific allele frequencies
3 | #' via PCA
4 | #' @inheritParams lfa
5 | #' @details This corresponds to algorithm 1 in the paper. Only used for
6 | #' comparison purposes.
7 | #' @return Matrix of individual-specific allele frequencies.
8 | #' @examples
9 | #' LF <- lfa(hgdp_subset, 4)
10 | #' allele_freqs_lfa <- af(hgdp_subset, LF)
11 | #' allele_freqs_pca <- pca_af(hgdp_subset, 4)
12 | #' summary(abs(allele_freqs_lfa-allele_freqs_pca))
13 | #' @export
pca_af <- function(X, d, override = FALSE, ploidy = 2, tol = 1e-13,
    m_chunk = 1000) {
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(d))
        stop("Principal components number `d` is required!")
    # check class
    if (!is.matrix(X)) # returns true for BEDMatrix
        stop("`X` must be a matrix!")
    # check for d validity (same chain as lfa()).
    # CONSISTENCY FIX: reject non-numeric `d` up front like lfa() does;
    # previously this fell through to a cryptic downstream error
    if (!is.numeric(d)) {
        stop("d must be numeric")
    } else if (d != as.integer(d)) {
        stop("d should be integer")
    } else if (d < 1) {
        stop("d should be at least 1")
    }
    d <- d - 1 # remove the intercept dimension for the svd stuff
    if (methods::is(X, "BEDMatrix"))
        return(.pca_af_BEDMatrix(X, d, ploidy, m_chunk))
    # else below is regular X (R matrix, not BEDMatrix)
    m <- nrow(X) # data dimensions
    n <- ncol(X)
    adjust <- 8 # extra Lanczos dimensions for trunc_svd
    if (n - d < 10)
        adjust <- n - d - 1
    X_mean <- rowMeans(X, na.rm=TRUE)
    if (d == 0) {
        # this is 'intercept only': all allele frequencies are just the mean.
        # NOTE(review): unlike the d > 0 branch, this is not divided by
        # `ploidy` (matches .pca_af_BEDMatrix's d == 0 branch); verify intent
        P <- matrix(rep.int(X_mean, n), nrow=m, ncol=n)
        return(P)
    }
    norm_X <- X - X_mean # center
    norm_X[is.na(X)] <- 0 # then 'impute'
    mysvd <- trunc_svd(norm_X, d=d, adjust=adjust, tol=tol, override=override)
    rm(norm_X)
    D <- mysvd$d
    U <- mysvd$u
    V <- mysvd$v
    rm(mysvd)
    # rank-d reconstruction, shifted back by the mean and rescaled to
    # allele-frequency estimates
    P <- U %*% diag(D, d, d) %*% t(V)
    P <- P + X_mean
    P <- P/ploidy
    P <- .af_cap(P) # cap allele frequencies (they could be out of range)
    return(P)
}
59 |
--------------------------------------------------------------------------------
/R/pca_af_BEDMatrix.R:
--------------------------------------------------------------------------------
# PCA-based individual-specific allele frequencies for BEDMatrix data;
# internal backend of pca_af().  `d` is the number of PCs after removing the
# intercept (may be 0).  Returns the m x n matrix of allele frequencies.
.pca_af_BEDMatrix <- function(X, d, ploidy, m_chunk) {
    # data dimensions (transposed for BEDMatrix)
    m <- ncol(X)
    n <- nrow(X)
    # Calculate covariance matrix and loci means.
    # NOTE: inefficient for d=0 (no PCs, just mean).
    obj <- .covar_BEDMatrix(X, m_chunk=m_chunk)
    covar <- obj$covar
    X_mean <- obj$X_mean
    # this is 'intercept only', all allele frequencies are just the mean.
    # NOTE(review): not divided by `ploidy` here, unlike the d > 0 branch --
    # matches pca_af's in-memory d == 0 path; verify intent
    if (d == 0)
        return(matrix(rep.int(X_mean, n), nrow=m, ncol=n))
    # get truncated eigendecomposition
    obj <- RSpectra::eigs_sym(covar, d)
    V <- obj$vectors
    P_V <- tcrossprod(V) # turn eigenvectors into projection matrix
    # Form P in parts, so X is not in memory all at once.
    # P is fully in memory, potentially negating the BEDMatrix advantage
    P <- matrix(0, nrow = m, ncol = n) # initialize
    # navigate chunks
    i_chunk <- 1 # start of first chunk
    while (TRUE) {
        # start an infinite loop, break inside as needed.
        if (i_chunk > m)
            break # reached end
        # range of SNPs to extract in this chunk
        indexes_loci_chunk <- i_chunk:min(i_chunk + m_chunk - 1, m)
        # transpose for our usual setup (makes centering easiest)
        Xi <- t(X[, indexes_loci_chunk, drop = FALSE])
        # update for next chunk! (overshoots at end, that's ok)
        i_chunk <- i_chunk + m_chunk
        # get row means from precomputed data
        Xi_mean <- X_mean[indexes_loci_chunk]
        Xi <- Xi - Xi_mean # center
        if (anyNA(Xi))
            Xi[is.na(Xi)] <- 0 # set NAs to zero ('impute')
        # project data to V rowspace, add mean back, rescale by ploidy
        Pi <- (Xi %*% P_V + Xi_mean)/ploidy
        # cap allele frequencies, store in output matrix
        P[indexes_loci_chunk, ] <- .af_cap(Pi)
    }
    return(P)
}
44 |
--------------------------------------------------------------------------------
/R/pvals_empir.R:
--------------------------------------------------------------------------------
# Empirical p-values: for each observed statistic in `stats1`, the proportion
# of null statistics in `stats0` that are strictly larger.
#
# stats1 vector of observed statistics (NAs allowed; they yield NA p-values)
# stats0 vector or matrix of null statistics (NAs allowed; they are dropped)
#
# Returns a vector of p-values, same length and order as `stats1`.
.pvals_empir <- function(stats1, stats0) {
    if (missing(stats1))
        stop("`stats1` observed statistics vector is required!")
    if (missing(stats0))
        stop("`stats0` null statistics (vector or matrix) is required!")

    # Flatten stats0 to a sorted vector; original order doesn't matter and
    # sort() drops NAs
    stats0 <- sort(as.vector(stats0))
    # number of non-NA null statistics (p-value denominator)
    m0 <- length(stats0)

    # initialize to NA so missing observed stats keep NA p-values
    pvals <- rep(NA, length(stats1))
    keep <- !is.na(stats1)
    # findInterval(x, v) on sorted `v` returns, per element of x, the count
    # of values in `v` that are <= x (ties included).  This replaces the
    # previous hand-rolled two-pointer merge with identical semantics: the
    # old loop advanced its null index while stat >= stats0[i0].
    pvals[keep] <- 1 - findInterval(stats1[keep], stats0)/m0

    return(pvals)
}
47 |
--------------------------------------------------------------------------------
/R/pvals_empir_brute.R:
--------------------------------------------------------------------------------
# Brute-force empirical p-values (clear reference implementation).  Used only
# to validate `.pvals_empir`, which is much faster on large data but harder
# to follow.  Both versions handle NAs in their inputs.
.pvals_empir_brute <- function(stats1, stats0) {
    # null stats with NAs dropped; observed NAs are preserved as NA p-values
    stats0 <- stats0[!is.na(stats0)]
    null_count <- length(stats0)

    # output vector, NA by default
    pvals <- rep.int(NA, length(stats1))
    for (j in seq_along(stats1)) {
        obs <- stats1[j]
        # p-value = proportion of null stats strictly larger than observed
        if (!is.na(obs))
            pvals[j] <- sum(stats0 > obs)/null_count
    }
    return(pvals)
}
25 |
26 |
--------------------------------------------------------------------------------
/R/read.bed.R:
--------------------------------------------------------------------------------
1 | #' @title File input: .bed
2 | #' @description Reads in genotypes in .bed format with corresponding bim
3 | #' and fam files
4 | #' @details Use plink with --make-bed
5 | #' @return Genotype matrix
6 | #' @param bed.prefix Path leading to the bed, bim, and fam files.
7 | #' @name read.bed-deprecated
8 | #' @usage read.bed(bed.prefix)
9 | #' @seealso [lfa-deprecated()]
10 | #' @keywords internal
11 | NULL
12 |
13 | #' @rdname lfa-deprecated
14 | #' @section `read.bed`:
15 | #' For `read.bed`, use [genio::read_plink()].
16 | #' @export
read.bed <- function(bed.prefix) {
    .Deprecated("genio::read_plink")
    # build the three PLINK file names from the shared prefix
    bed.filename <- paste(bed.prefix, ".bed", sep = "")
    bim.filename <- paste(bed.prefix, ".bim", sep = "")
    fam.filename <- paste(bed.prefix, ".fam", sep = "")
    if (!file.exists(bed.filename))
        stop("need .bed file")
    if (!file.exists(bim.filename))
        stop("need .bim file")
    if (!file.exists(fam.filename))
        stop("need .fam file")
    # individuals = rows of .fam; loci = rows of .bim
    buffer <- utils::read.table(fam.filename, colClasses = "character")
    n <- nrow(buffer)
    buffer <- utils::read.table(bim.filename, colClasses = "character")
    m <- nrow(buffer)
    rm(buffer)
    # output genotype matrix: loci x individuals
    X <- matrix(0, m, n)
    # 4 x 256 lookup table: column b+1 decodes the four genotypes in byte b
    snp.map <- binary.genotype.map()
    bed <- file(bed.filename, "rb")
    # PLINK .bed magic bytes: 0x6c (108) then 0x1b (27)
    if (readBin(bed, what = "integer", n = 1, size = 1) != 108)
        stop("not valid bed file (magic number fail)")
    if (readBin(bed, what = "integer", n = 1, size = 1) != 27)
        stop("not valid bed file (magic number fail)")
    # third byte selects the layout: 1 = SNP-major, 0 = individual-major
    buffer <- readBin(bed, what = "integer", n = 1, size = 1)
    if (buffer == 0) {
        stop("individual major mode not yet supported")
    } else if (buffer == 1) {
        print("snp major mode")
    } else {
        stop("bed mode problem")
    }
    # each byte packs four 2-bit genotypes, so one SNP spans ceiling(n/4) bytes
    numbytes <- ceiling(n/4)
    for (i in seq_len(m)) {
        # read one SNP's bytes; +1 turns byte values into 1-based map columns
        indices <- readBin(bed, what = "int", n = numbytes, size = 1,
            signed = FALSE) + 1
        # decode via lookup table; keep only the first n genotypes since the
        # last byte may carry padding
        snp.in <- snp.map[, indices]
        X[i, ] <- as.vector(snp.in[seq_len(n)])
    }
    close(bed)
    X
}
58 |
# Build the 4 x 256 PLINK .bed byte decoder: column b+1 holds the four
# genotype dosages packed into byte value b (SNP-major mode, 2 bits/sample).
# FIX: removed a dead `snp.map <- matrix(0, 4, 256)` initialization that was
# unconditionally overwritten, and replaced the bitstring list with a plain
# character vector.
binary.genotype.map <- function() {
    # all 4^4 combinations of the four 2-bit codes in one byte;
    # Var1 varies fastest across rows
    combinations <- as.matrix(expand.grid(0:3, 0:3, 0:3, 0:3))
    colnames(combinations) <- NULL
    # two-bit string for each code value 0..3
    bitstring <- c("00", "01", "10", "11")
    # byte value encoded by each row (+1 for 1-based indexing);
    # Var1 occupies the most significant bit pair in the pasted string
    indices <- apply(combinations, 1, function(x) {
        strtoi(paste(bitstring[x[1] + 1], bitstring[x[2] + 1],
            bitstring[x[3] + 1], bitstring[x[4] + 1], sep = ""), base = 2)
    }) + 1
    # translate the 2-bit codes to genotype dosages ("PLINK IS BACKWARDS"):
    # code 1 -> NA (missing), 2 -> 1, 0 -> 2, 3 -> 0
    combinations[combinations == 1] <- NA
    combinations[combinations == 2] <- 1
    combinations[combinations == 0] <- 2
    combinations[combinations == 3] <- 0
    # within a byte, samples are stored least-significant pair first, so
    # reverse each row's genotypes
    snp.map <- apply(combinations, 1, rev)
    # permute columns so that column (byte value + 1) decodes that byte
    # (RHS is evaluated before the assignment, so this is a pure permutation)
    snp.map[, indices] <- snp.map
    snp.map
}
81 |
--------------------------------------------------------------------------------
/R/read.tped.recode.R:
--------------------------------------------------------------------------------
1 | #' @title Read .tped
2 | #' @description Reads a .tped format genotype matrix and returns the R
3 | #' object needed by \code{\link{lfa}}.
4 | #' @details Use --transpose and --recode12 on your plink formatted genotypes
5 | #' to generate the proper tped file. This is a pretty terrible function
6 | #' that uses a growing matrix for the genotypes so it is to your
7 | #' benefit to have as large a \code{buffer.size} as possible.
#' @param tped.filename Path to your .tped file after transposing and recoding.
9 | #' @param buffer.size Number of characters to keep in the buffer
10 | #' @examples
11 | #' #assuming you have a .tped file in the right directory
12 | #' x = NULL
13 | #' \dontrun{x = read.tped.recode('file.tped')}
14 | #' @return genotype matrix with elements 0, 1, 2, and NA.
15 | #' @name read.tped.recode-deprecated
16 | #' @usage read.tped.recode(tped.filename, buffer.size=5e8)
17 | #' @seealso [lfa-deprecated()]
18 | #' @keywords internal
19 | NULL
20 |
21 | #' @rdname lfa-deprecated
22 | #' @section `read.tped.recode`:
23 | #' For `read.tped.recode`, use `plink` (external binary) to convert to
24 | #' BED/BIM/FAM, then parse with
25 | #' [genio::read_plink()].
26 | #' @export
read.tped.recode <- function(tped.filename, buffer.size = 5e+08) {
    .Deprecated(msg = "Use `plink` (external binary) for file conversions!")
    # peek at the first line to infer the number of individuals
    tped.line <- readLines(tped.filename, n = 1)
    if (nchar(tped.line) > buffer.size/10)
        warning("recommend increasing buffer")
    tped.line <- strsplit(tped.line, " ")[[1]]
    if (length(tped.line) <= 4)
        stop("expecting SNPs in tped (line length <= 4)")
    if (!(as.integer(tped.line[5]) %in% 0:2))
        stop("expecting -recode12")
    # 4 metadata columns, then 2 allele columns per individual
    n <- (length(tped.line) - 4)/2
    message("reading in", n, "individuals")
    X <- NULL
    buffer <- NULL
    con <- file(tped.filename, "r")
    m <- 0
    while (TRUE) {
        # append the next chunk of raw characters to any leftover partial line
        buffer <- paste(buffer, readChar(con, buffer.size), sep = "")
        # NOTE(review): paste() only yields character(0) when `buffer` is
        # NULL AND readChar() hits EOF, so clean termination relies on the
        # file ending with a newline (which resets `buffer` to NULL below);
        # a file without a trailing newline appears to hit the
        # "increase buffer" stop() instead -- verify
        if (identical(buffer, character(0)))
            break
        in.lines <- strsplit(buffer, "\n")[[1]]
        new.m <- length(in.lines) - 1
        if (new.m < 2)
            stop("probably should increase buffer")
        if (substr(buffer, nchar(buffer), nchar(buffer)) == "\n") {
            # chunk ended exactly on a line boundary: all lines are complete
            new.m <- new.m + 1
            snps <- in.lines
            buffer <- NULL
        } else {
            # last element is a partial line; carry it into the next iteration
            snps <- in.lines[seq_len(new.m)]
            buffer <- in.lines[new.m + 1]
        }
        # parse the complete lines into genotype rows; X grows by rbind
        # (inefficient, as noted in the roxygen help)
        geno.tmp <- matrix(0, new.m, n)
        for (i in seq_len(new.m)) geno.tmp[i, ] <- .tped_line(in.lines[i])
        X <- rbind(X, geno.tmp)
        m <- m + new.m
        message("finished snp ", m)
    }

    close(con)
    X
}
69 |
# Parse one --recode12 tped line into a genotype vector: skip the 4 metadata
# fields, then sum each individual's two allele codes (1/2) minus 2, so
# genotypes are 0/1/2 and any 0 (missing) allele yields NA.
.tped_line <- function(tped.line) {
    fields <- strsplit(tped.line, " ")[[1]]
    if (length(fields) <= 4)
        stop("invalid tped (line length <= 4)")
    # allele codes come after the 4 metadata columns
    alleles <- as.integer(fields[-(1:4)])
    if (length(alleles) %% 2 == 1)
        stop("snp length error")
    # pair up consecutive alleles via logical recycling
    first_allele <- alleles[c(TRUE, FALSE)]
    second_allele <- alleles[c(FALSE, TRUE)]
    geno <- first_allele + second_allele - 2
    # any 0 allele makes the sum negative -> missing genotype
    geno[geno < 0] <- NA
    geno
}
83 |
--------------------------------------------------------------------------------
/R/sHWE.R:
--------------------------------------------------------------------------------
#' @title Hardy-Weinberg Equilibrium in structured populations
2 | #'
3 | #' @description
4 | #' Compute structural Hardy-Weinberg Equilibrium (sHWE) p-values
5 | #' on a SNP-by-SNP basis. These p-values can be aggregated to
6 | #' determine genome-wide goodness-of-fit for a particular value
7 | #' of `d`. See \doi{10.1101/240804} for more
8 | #' details.
9 | #'
10 | #' @param LF matrix of logistic factors
11 | #' @param B number of null datasets to generate, `B = 1` is usually
12 | #' sufficient. If computational time/power allows, a few extra
13 | #' `B` could be helpful
14 | #' @inheritParams lfa
15 | #' @inheritParams af
16 | #' @examples
17 | #' # get LFs
18 | #' LF <- lfa(hgdp_subset, 4)
19 | #' # look at a small (300) number of SNPs for rest of this example:
20 | #' hgdp_subset_small <- hgdp_subset[ 1:300, ]
21 | #' gof_4 <- sHWE(hgdp_subset_small, LF, 3)
22 | #' LF <- lfa(hgdp_subset, 10)
23 | #' gof_10 <- sHWE(hgdp_subset_small, LF, 3)
24 | #' hist(gof_4)
25 | #' hist(gof_10)
26 | #' @return a vector of p-values for each SNP.
27 | #' @export
sHWE <- function(X, LF, B, max_iter = 100, tol = 1e-10) {
    # All three primary arguments are required.
    if (missing(X))
        stop("Genotype matrix `X` is required!")
    if (missing(LF))
        stop("`LF` matrix is required!")
    if (missing(B))
        stop("`B` scalar is required!")

    # BEDMatrix objects also pass is.matrix(), so both kinds are admitted.
    if (!is.matrix(X))
        stop("`X` must be a matrix!")

    # BEDMatrix is individuals-by-loci; plain matrices are loci-by-individuals.
    if (methods::is(X, "BEDMatrix")) {
        n <- nrow(X)
        m <- ncol(X)
    } else {
        n <- ncol(X)
        m <- nrow(X)
    }

    # individuals must line up between genotypes and logistic factors
    if (n != nrow(LF))
        stop("Number of individuals in `X` and `LF` disagrees!")

    # observed per-SNP goodness-of-fit statistics
    stats_obs <- .gof_stat(X, LF, max_iter = max_iter, tol = tol)

    # build the null distribution: estimate allele frequencies, then
    # simulate B null datasets from them
    d <- ncol(LF)
    P <- af(X, LF)  # dense and potentially very large, even for BEDMatrix input
    rm(X)  # free the genotypes before simulating
    stats_null <- .compute_nulls(P, d, B, max_iter = max_iter, tol = tol)

    # empirical p-values of observed statistics against the null
    .pvals_empir(stats_obs, stats_null)
}
68 |
--------------------------------------------------------------------------------
/R/trunc_svd.R:
--------------------------------------------------------------------------------
1 | #' @title Truncated singular value decomposition
2 | #'
3 | #' @description
4 | #' Truncated SVD
5 | #'
6 | #' @details
7 | #' Performs singular value decomposition but only returns the first `d`
8 | #' singular vectors/values.
9 | #' The truncated SVD utilizes Lanczos bidiagonalization.
10 | #' See references.
11 | #'
12 | #' This function was modified from the package irlba 1.0.1 under GPL.
13 | #' Replacing the [crossprod()] calls with the C wrapper to
14 | #' `dgemv` is a dramatic difference in larger datasets.
15 | #' Since the wrapper is technically not a matrix multiplication function, it
16 | #' seemed wise to make a copy of the function.
17 | #'
18 | #' @param A matrix to decompose
19 | #' @param d number of singular vectors
20 | #' @param adjust extra singular vectors to calculate for accuracy
21 | #' @param tol convergence criterion
22 | #' @param override `TRUE` means we use
23 | #' [corpcor::fast.svd()] instead of the
24 | #' iterative algorithm (useful for small data or very high `d`).
25 | #' @param force If `TRUE`, forces the Lanczos algorithm to be used on all
26 | #' datasets (usually
27 | #' [corpcor::fast.svd()]
28 | #' is used on small datasets or large `d`)
29 | #' @param maxit Maximum number of iterations
30 | #' @return list with singular value decomposition. Has elements 'd', 'u', 'v',
31 | #' and 'iter'
32 | #' @examples
33 | #' obj <- trunc_svd( hgdp_subset, 4 )
34 | #' obj$d
35 | #' obj$u
36 | #' obj$v
37 | #' obj$iter
38 | #' @export
trunc_svd <- function(A, d, adjust = 3, tol = .Machine$double.eps,
    override = FALSE, force = FALSE, maxit = 1000) {
    # Validate required arguments.
    if (missing(A))
        stop("Input matrix `A` is required!")
    if (missing(d))
        stop("Dimension number `d` is required!")
    if (d <= 0)
        stop("d must be positive")
    m <- nrow(A)
    n <- ncol(A)
    if (d > min(m, n))
        stop("d must be less than min(m,n)")
    if (!force) { # uses fast.svd() for small matrices or large `d`
        if ((log10(m) + log10(n)) <= 6 || m < 1000 || n < 100 || d > n/20 ||
            override) {
            # Small problem (or explicit override): compute the full SVD
            # and truncate to the first d components; `iter` is NA since
            # the iterative algorithm was not used.
            mysvd <- corpcor::fast.svd(A)
            indexes <- seq_len(d)
            return(list(d = mysvd$d[indexes], u = mysvd$u[, indexes,
                drop = FALSE], v = mysvd$v[, indexes, drop = FALSE], iter = NA))
        }
    }
    d_org <- d # remember original value
    d <- d + adjust # *adjust* d
    if (m < n)
        stop("expecting tall or sq matrix")
    if (d > min(m, n))
        stop("d must be less than min(m,n)-adjust")
    # W holds the left Lanczos vectors (m x (d+1)), V the right ones
    # (n x (d+1)); V starts from a random unit column.
    W <- matrix(0, m, d + 1)
    V <- matrix(0, n, d + 1)
    V <- .new_col_ortho_unit(V, 1)
    dat <- list()
    iter <- 1
    # Restarted Lanczos bidiagonalization: each .trunc_svd_iter() call runs
    # one sweep and reports convergence of the leading d_org triplets.
    while (iter <= maxit) {
        dat <- .trunc_svd_iter(A, V, W, dat$B, dat$Smax, d, d_org, iter, tol)
        V <- dat$V
        W <- dat$W
        Bsvd <- dat$Bsvd
        if (dat$converged || iter >= maxit) break # break criterion
        d <- dat$d
        # Restart: rotate V and W into the current singular basis of the
        # small bidiagonal matrix, and append the new direction G to V.
        V[, seq_len(d + 1)] <- cbind(V[, seq_len(nrow(Bsvd$v))] %*%
            Bsvd$v[, seq_len(d)], dat$G)
        dat$B <- cbind(diag(Bsvd$d[seq_len(d)]), dat$R[seq_len(d)])
        W[, seq_len(d)] <- W[, seq_len(nrow(Bsvd$u))] %*% Bsvd$u[, seq_len(d)]
        iter <- iter + 1
    }
    # Map the small-problem singular vectors back to the original space,
    # keeping only the d_org components that were requested.
    d <- Bsvd$d[seq_len(d_org)]
    u <- W[, seq_len(nrow(Bsvd$u))] %*% Bsvd$u[, seq_len(d_org)]
    v <- V[, seq_len(nrow(Bsvd$v))] %*% Bsvd$v[, seq_len(d_org)]
    return(list(d = d, u = u, v = v, iter = iter))
}
89 |
# One restart sweep of Lanczos bidiagonalization for trunc_svd().
# Extends the (V, W, B) bidiagonal system with Lanczos steps, computes the
# SVD of the small bidiagonal matrix B, and checks convergence of the
# leading d_org singular triplets via .convtests(). Returns the convergence
# result combined with the updated state (V, W, B, G, R, Bsvd, Smax).
# On the first call, B and Smax are NULL (max(NULL, ...) below handles it).
.trunc_svd_iter <- function(A, V, W, B, Smax, d, d_org, iter, tol) {
    j <- 1
    if (iter != 1)
        j <- d + 1  # on restarts, resume after the d retained columns
    W[, j] <- .Call("mv_c", A, V[, j]) # W=AV
    if (iter != 1)
        W[, j] <- .orthog(W[, j], W[, seq_len(j) - 1])  # cols 1..j-1 (index 0 drops)
    S <- .norm(W[, j])
    if (S < tol) { # normalize W and check for dependent vectors
        W <- .new_col_ortho_unit(W, j)
        S <- 0
    } else W[, j] <- W[, j]/S
    # lanczos steps
    while (j <= ncol(W)) {
        # G = t(A) %*% W[,j] - S * V[,j], then fully reorthogonalized
        G <- .Call("tmv_c", A, W[, j]) - S * V[, j]
        G <- .orthog(G, V[, seq_len(j)])
        if (j + 1 <= ncol(W)) { # while not the 'edge' of the bidiag matrix
            R <- .norm(G)
            if (R <= tol) { # check for dependence
                V <- .new_col_ortho_unit(V, j + 1)
                G <- V[, j + 1]
                R <- 0
            } else V[, j + 1] <- G/R
            if (is.null(B)) {
                B <- cbind(S, R) # make block diag matrix
            } else B <- rbind(cbind(B, 0), c(rep(0, j - 1), S, R))
            W[, j + 1] <- .Call("mv_c", A, V[, j + 1]) - W[, j] * R
            if (iter == 1)
                W[, j + 1] <- .orthog(W[, j + 1], W[, seq_len(j)])
            S <- .norm(W[, j + 1])
            if (S <= tol) {
                W <- .new_col_ortho_unit(W, j + 1)
                S <- 0
            } else W[, j + 1] <- W[, j + 1]/S
        } else B <- rbind(B, c(rep(0, j - 1), S)) # add block
        j <- j + 1
    }
    Bsz <- nrow(B)
    R_F <- .norm(G)  # norm of the final residual direction
    G <- G/R_F
    Bsvd <- svd(B) # SVD of bidiag matrix
    Smax <- max(Smax, Bsvd$d[1], tol^(2/3))  # largest singular value seen so far
    R <- R_F * Bsvd$u[Bsz, ] # compute residuals
    ct <- .convtests(Bsz, tol, d_org, abs(R), d, Smax) # check convergence
    return(c(ct, list(V=V, W=W, B=B, G=G, R=R, Bsvd=Bsvd, Smax=Smax)))
}
136 |
137 | # replace column with random data!
# Fill column j of W with a random unit vector orthogonal to columns 1..j-1.
# Returns the whole matrix with only column j replaced.
.new_col_ortho_unit <- function(W, j) {
    # random candidate direction
    candidate <- stats::rnorm(nrow(W))
    # remove its projection onto the existing columns (none when j == 1)
    if (j > 1)
        candidate <- .orthog(candidate, W[, seq_len(j - 1)])
    # unit-normalize into the target column
    W[, j] <- candidate/.norm(candidate)
    W
}
149 |
# Small linear-algebra helpers; both accept plain vectors as well as
# single-column matrices.
# Euclidean norm of x (crossprod gives a 1x1 matrix; drop makes it scalar).
.norm <- function(x) {
    sqrt(drop(crossprod(x)))
}
# Remove from Y its projection onto the column span of X.
.orthog <- function(Y, X) {
    coefs <- crossprod(X, Y)
    Y - X %*% coefs
}
153 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # lfa
2 |
3 | Logistic factor analysis
4 |
5 | ## Installation
6 |
7 | To install latest version on Bioconductor, open R and type:
8 |
9 | ```R
10 | if (!requireNamespace("BiocManager", quietly = TRUE))
11 | install.packages("BiocManager")
12 |
13 | BiocManager::install("lfa")
14 | ```
15 |
16 | You can also install development version from GitHub this way:
17 | ```R
18 | install.packages("devtools")
19 | library("devtools")
20 | install_github("Storeylab/lfa")
21 | ```
22 | Apple OS X users, see Troubleshooting below.
23 |
24 | ## Data input
25 |
26 | We recommend using the `genio` or `BEDMatrix` packages to read genotype data into an R matrix.
27 |
28 | Be warned that genotype matrices from `genio` and some `lfa` functions require a lot of memory.
29 | As a rule of thumb, the in memory sizes of a few relevant genotype matrices:
30 |
31 | - 431345 SNPs by 940 individuals: 1.5 GB needed for genotype matrix, about 9 GB to run `lfa`.
32 | - 339100 SNPs by 1500 individuals: 1.9 GB needed for genotype matrix, about 11.5 GB to run `lfa`.
33 |
34 | `BEDMatrix` inputs consume much less memory but can be slower otherwise.
35 |
36 | ## Troubleshooting
37 |
Apple OS X users may experience a problem due to Fortran code that is included in this package. You must install the Xcode command line tools (Xcode CLI) and `gfortran`. Try the following commands on terminal:
39 |
40 | ```
41 | xcode-select --install
42 | brew install gcc
43 | ```
44 |
45 | If XCode installation fails, you may have to sign up on Apple Developer: https://www.ics.uci.edu/~pattis/common/handouts/macmingweclipse/allexperimental/macxcodecommandlinetools.html
46 |
47 | Alternatively, this Installer Package for macOS R toolchain may work https://github.com/rmacoslib/r-macos-rtools/
48 |
49 | ## Citations
50 |
51 | Hao, Wei, Minsun Song, and John D. Storey. "Probabilistic Models of Genetic Variation in Structured Populations Applied to Global Human Studies." Bioinformatics 32, no. 5 (March 1, 2016): 713–21. [doi:10.1093/bioinformatics/btv641](https://doi.org/10.1093/bioinformatics/btv641). [arXiv](https://arxiv.org/abs/1312.2041).
52 |
53 |
--------------------------------------------------------------------------------
/data/hgdp_subset.rda:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/StoreyLab/lfa/10695b0fac4de7ce0bc099dd94c472cbd6b10d8e/data/hgdp_subset.rda
--------------------------------------------------------------------------------
/inst/CITATION:
--------------------------------------------------------------------------------
# Citation entry for the lfa package.
# Fixed: personList() is deprecated (R >= 4.4); person objects are now
# combined with c(). Also normalized the doi field to double quotes for
# consistency with the other fields.
bibentry(
    bibtype = "Article",
    title = "Probabilistic Models of Genetic Variation in Structured Populations Applied to Global Human Studies",
    author = c(
        person("Wei", "Hao"),
        person("Minsun", "Song"),
        person("John D.", "Storey")
    ),
    journal = "Bioinformatics",
    year = "2016",
    doi = "10.1093/bioinformatics/btv641",
    volume = "32",
    number = "5",
    pages = "713-21",
    issn = "1367-4811"
)
17 |
--------------------------------------------------------------------------------
/man/af.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/af.R
3 | \name{af}
4 | \alias{af}
5 | \title{Allele frequencies}
6 | \usage{
7 | af(X, LF, safety = FALSE, max_iter = 100, tol = 1e-10)
8 | }
9 | \arguments{
10 | \item{X}{A matrix of SNP genotypes, i.e. an integer matrix of 0's,
11 | 1's, 2's and \code{NA}s.
12 | BEDMatrix is supported.
13 | Sparse matrices of class Matrix are not supported (yet).}
14 |
15 | \item{LF}{Matrix of logistic factors, with intercept.
16 | Pass in the return value from \code{\link[=lfa]{lfa()}}!}
17 |
18 | \item{safety}{Optional boolean to bypass checks on the genotype
19 | matrices, which require a non-trivial amount of computation.
20 | Ignored if \code{X} is a BEDMatrix object.}
21 |
22 | \item{max_iter}{Maximum number of iterations for logistic regression}
23 |
24 | \item{tol}{Numerical tolerance for convergence of logistic regression}
25 | }
26 | \value{
27 | Matrix of individual-specific allele frequencies.
28 | }
29 | \description{
30 | Compute matrix of individual-specific allele frequencies
31 | }
32 | \details{
33 | Computes the matrix of individual-specific allele
34 | frequencies, which has the same dimensions of the genotype matrix.
35 | Be warned that this function could use a ton of memory, as the
36 | return value is all doubles. It could be wise to pass only a
37 | selection of the SNPs in your genotype matrix to get an idea for
38 | memory usage. Use \code{\link[=gc]{gc()}} to check memory usage!
39 | }
40 | \examples{
41 | LF <- lfa( hgdp_subset, 4 )
42 | allele_freqs <- af( hgdp_subset, LF )
43 | }
44 |
--------------------------------------------------------------------------------
/man/af_snp.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/af_snp.R
3 | \name{af_snp}
4 | \alias{af_snp}
5 | \title{Allele frequencies for SNP}
6 | \usage{
7 | af_snp(snp, LF, max_iter = 100, tol = 1e-10)
8 | }
9 | \arguments{
10 | \item{snp}{vector of 0's, 1's, and 2's}
11 |
12 | \item{LF}{Matrix of logistic factors, with intercept.
13 | Pass in the return value from \code{\link[=lfa]{lfa()}}!}
14 |
15 | \item{max_iter}{Maximum number of iterations for logistic regression}
16 |
17 | \item{tol}{Numerical tolerance for convergence of logistic regression}
18 | }
19 | \value{
20 | vector of allele frequencies
21 | }
22 | \description{
23 | Computes individual-specific allele frequencies for a
24 | single SNP.
25 | }
26 | \examples{
27 | LF <- lfa(hgdp_subset, 4)
28 | # pick one SNP only
29 | snp <- hgdp_subset[ 1, ]
# allele frequency vector for that SNP only
31 | allele_freqs_snp <- af_snp(snp, LF)
32 | }
33 | \seealso{
34 | \code{\link[=af]{af()}}
35 | }
36 |
--------------------------------------------------------------------------------
/man/center-deprecated.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/center.R
3 | \name{center-deprecated}
4 | \alias{center-deprecated}
5 | \title{Matrix centering}
6 | \usage{
7 | center(A)
8 | }
9 | \arguments{
10 | \item{A}{matrix}
11 | }
12 | \value{
13 | \code{A} but row centered
14 | }
15 | \description{
16 | C routine to row-center a matrix
17 | }
18 | \seealso{
19 | \code{\link[=lfa-deprecated]{lfa-deprecated()}}
20 | }
21 | \keyword{internal}
22 |
--------------------------------------------------------------------------------
/man/centerscale.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/centerscale.R
3 | \name{centerscale}
4 | \alias{centerscale}
5 | \title{Matrix centering and scaling}
6 | \usage{
7 | centerscale(A)
8 | }
9 | \arguments{
10 | \item{A}{matrix}
11 | }
12 | \value{
13 | matrix same dimensions \code{A} but row centered and scaled
14 | }
15 | \description{
16 | C routine to row-center and scale a matrix. Doesn't work with missing data.
17 | }
18 | \examples{
19 | Xc <- centerscale(hgdp_subset)
20 | }
21 |
--------------------------------------------------------------------------------
/man/hgdp_subset.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/data.R
3 | \docType{data}
4 | \name{hgdp_subset}
5 | \alias{hgdp_subset}
6 | \title{HGDP subset}
7 | \format{
8 | a matrix of 0's, 1's and 2's.
9 | }
10 | \source{
11 | Stanford HGDP \url{http://www.hagsc.org/hgdp/files.html}
12 | }
13 | \usage{
14 | hgdp_subset
15 | }
16 | \value{
17 | genotype matrix
18 | }
19 | \description{
20 | Subset of the HGDP dataset.
21 | }
22 |
--------------------------------------------------------------------------------
/man/lfa-deprecated.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/center.R, R/lfa-deprecated.R, R/model.gof.R,
3 | % R/read.bed.R, R/read.tped.recode.R
4 | \name{center}
5 | \alias{center}
6 | \alias{lfa-deprecated}
7 | \alias{model.gof}
8 | \alias{read.bed}
9 | \alias{read.tped.recode}
10 | \title{Deprecated functions in package \code{lfa}.}
11 | \usage{
12 | center(A)
13 |
14 | model.gof(X, LF, B)
15 |
16 | read.bed(bed.prefix)
17 |
18 | read.tped.recode(tped.filename, buffer.size = 5e+08)
19 | }
20 | \value{
21 | Function-dependent
22 | }
23 | \description{
24 | The functions listed below are deprecated and will be defunct in
25 | the near future. When possible, alternative functions with similar
26 | functionality are also mentioned. Help pages for deprecated functions are
27 | available at \code{help("-deprecated")}.
28 | }
29 | \section{\code{center}}{
30 |
31 | For \code{center}, use \code{function(x) x - rowMeans(x)}.
32 | }
33 |
34 | \section{\code{model.gof}}{
35 |
36 | For \code{model.gof}, use \code{\link[=sHWE]{sHWE()}}.
37 | }
38 |
39 | \section{\code{read.bed}}{
40 |
41 | For \code{read.bed}, use \code{\link[genio:read_plink]{genio::read_plink()}}.
42 | }
43 |
44 | \section{\code{read.tped.recode}}{
45 |
46 | For \code{read.tped.recode}, use \code{plink} (external binary) to convert to
47 | BED/BIM/FAM, then parse with
48 | \code{\link[genio:read_plink]{genio::read_plink()}}.
49 | }
50 |
51 | \keyword{internal}
52 |
--------------------------------------------------------------------------------
/man/lfa.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/lfa.R
3 | \name{lfa}
4 | \alias{lfa}
5 | \title{Logistic factor analysis}
6 | \usage{
7 | lfa(
8 | X,
9 | d,
10 | adjustments = NULL,
11 | override = FALSE,
12 | safety = FALSE,
13 | rspectra = FALSE,
14 | ploidy = 2,
15 | tol = .Machine$double.eps,
16 | m_chunk = 1000
17 | )
18 | }
19 | \arguments{
20 | \item{X}{A matrix of SNP genotypes, i.e. an integer matrix of 0's,
21 | 1's, 2's and \code{NA}s.
22 | BEDMatrix is supported.
23 | Sparse matrices of class Matrix are not supported (yet).}
24 |
25 | \item{d}{Number of logistic factors, including the intercept}
26 |
27 | \item{adjustments}{A matrix of adjustment variables to hold fixed during
28 | estimation. Number of rows must equal number of individuals in \code{X}.
29 | These adjustments take the place of LFs in the output, so the number of
30 | columns must not exceed \code{d-2} to allow for the intercept and at least one
31 | proper LF to be included.
32 | When present, these adjustment variables appear in the first columns of the
33 | output.
34 | Not supported when \code{X} is a BEDMatrix object.}
35 |
36 | \item{override}{Optional boolean passed to \code{\link[=trunc_svd]{trunc_svd()}}
37 | to bypass its Lanczos bidiagonalization SVD, instead using
38 | \code{\link[corpcor:fast.svd]{corpcor::fast.svd()}}.
39 | Usually not advised unless encountering a bug in the SVD code.
40 | Ignored if \code{X} is a BEDMatrix object.}
41 |
42 | \item{safety}{Optional boolean to bypass checks on the genotype
43 | matrices, which require a non-trivial amount of computation.
44 | Ignored if \code{X} is a BEDMatrix object.}
45 |
46 | \item{rspectra}{If \code{TRUE}, use
47 | \code{\link[RSpectra:svds]{RSpectra::svds()}} instead of default
48 | \code{\link[=trunc_svd]{trunc_svd()}} or
49 | \code{\link[corpcor:fast.svd]{corpcor::fast.svd()}} options.
50 | Ignored if \code{X} is a BEDMatrix object.}
51 |
52 | \item{ploidy}{Ploidy of data, defaults to 2 for bi-allelic unphased SNPs}
53 |
54 | \item{tol}{Tolerance value passed to \code{\link[=trunc_svd]{trunc_svd()}}
55 | Ignored if \code{X} is a BEDMatrix object.}
56 |
57 | \item{m_chunk}{If \code{X} is a BEDMatrix object, number of loci to read per
58 | chunk (to control memory usage).}
59 | }
60 | \value{
61 | The matrix of logistic factors, with individuals along rows and
62 | factors along columns.
63 | The intercept appears at the end of the columns, and adjustments in the
64 | beginning if present.
65 | }
66 | \description{
67 | Fit logistic factor model of dimension \code{d} to binomial data.
68 | Computes \code{d - 1} singular vectors followed by intercept.
69 | }
70 | \details{
71 | Genotype matrix should have values in 0, 1, 2, or \code{NA}.
72 | The coding of the SNPs (which case is 0 vs 2) does not change the output.
73 | }
74 | \examples{
75 | LF <- lfa(hgdp_subset, 4)
76 | dim(LF)
77 | head(LF)
78 | }
79 |
--------------------------------------------------------------------------------
/man/model.gof-deprecated.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/model.gof.R
3 | \name{model.gof-deprecated}
4 | \alias{model.gof-deprecated}
5 | \title{LFA model goodness of fit}
6 | \usage{
7 | model.gof(X, LF, B)
8 | }
9 | \arguments{
10 | \item{X}{A matrix of SNP genotypes, i.e. an integer matrix of 0's,
11 | 1's, 2's and \code{NA}s.
12 | BEDMatrix is supported.}
13 |
14 | \item{LF}{matrix of logistic factors}
15 |
16 | \item{B}{number of null datasets to generate, \code{B = 1} is usually
17 | sufficient. If computational time/power allows, a few extra
18 | \code{B} could be helpful}
19 | }
20 | \value{
21 | vector of p-values for each SNP.
22 | }
23 | \description{
24 | Compute SNP-by-SNP goodness-of-fit when compared to population
25 | structure. This can be aggregated to determine genome-wide
26 | goodness-of-fit for a particular value of \code{d}.
27 | }
28 | \details{
29 | This function returns p-values for LFA model goodness of fit based
30 | on a simulated null.
31 | }
32 | \note{
33 | Genotype matrix is expected to be a matrix of integers with
34 | values 0, 1, and 2. Currently no support for missing values. Note
35 | that the coding of the SNPs does not affect the algorithm.
36 | }
37 | \seealso{
38 | \code{\link[=lfa-deprecated]{lfa-deprecated()}}
39 | }
40 | \keyword{internal}
41 |
--------------------------------------------------------------------------------
/man/pca_af.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/pca_af.R
3 | \name{pca_af}
4 | \alias{pca_af}
5 | \title{PCA Allele frequencies}
6 | \usage{
7 | pca_af(X, d, override = FALSE, ploidy = 2, tol = 1e-13, m_chunk = 1000)
8 | }
9 | \arguments{
10 | \item{X}{A matrix of SNP genotypes, i.e. an integer matrix of 0's,
11 | 1's, 2's and \code{NA}s.
12 | BEDMatrix is supported.
13 | Sparse matrices of class Matrix are not supported (yet).}
14 |
15 | \item{d}{Number of logistic factors, including the intercept}
16 |
17 | \item{override}{Optional boolean passed to \code{\link[=trunc_svd]{trunc_svd()}}
18 | to bypass its Lanczos bidiagonalization SVD, instead using
19 | \code{\link[corpcor:fast.svd]{corpcor::fast.svd()}}.
20 | Usually not advised unless encountering a bug in the SVD code.
21 | Ignored if \code{X} is a BEDMatrix object.}
22 |
23 | \item{ploidy}{Ploidy of data, defaults to 2 for bi-allelic unphased SNPs}
24 |
25 | \item{tol}{Tolerance value passed to \code{\link[=trunc_svd]{trunc_svd()}}
26 | Ignored if \code{X} is a BEDMatrix object.}
27 |
28 | \item{m_chunk}{If \code{X} is a BEDMatrix object, number of loci to read per
29 | chunk (to control memory usage).}
30 | }
31 | \value{
32 | Matrix of individual-specific allele frequencies.
33 | }
34 | \description{
35 | Compute matrix of individual-specific allele frequencies
36 | via PCA
37 | }
38 | \details{
39 | This corresponds to algorithm 1 in the paper. Only used for
40 | comparison purposes.
41 | }
42 | \examples{
43 | LF <- lfa(hgdp_subset, 4)
44 | allele_freqs_lfa <- af(hgdp_subset, LF)
45 | allele_freqs_pca <- pca_af(hgdp_subset, 4)
46 | summary(abs(allele_freqs_lfa-allele_freqs_pca))
47 | }
48 |
--------------------------------------------------------------------------------
/man/read.bed-deprecated.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/read.bed.R
3 | \name{read.bed-deprecated}
4 | \alias{read.bed-deprecated}
5 | \title{File input: .bed}
6 | \usage{
7 | read.bed(bed.prefix)
8 | }
9 | \arguments{
10 | \item{bed.prefix}{Path leading to the bed, bim, and fam files.}
11 | }
12 | \value{
13 | Genotype matrix
14 | }
15 | \description{
16 | Reads in genotypes in .bed format with corresponding bim
17 | and fam files
18 | }
19 | \details{
20 | Use plink with --make-bed
21 | }
22 | \seealso{
23 | \code{\link[=lfa-deprecated]{lfa-deprecated()}}
24 | }
25 | \keyword{internal}
26 |
--------------------------------------------------------------------------------
/man/read.tped.recode-deprecated.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/read.tped.recode.R
3 | \name{read.tped.recode-deprecated}
4 | \alias{read.tped.recode-deprecated}
5 | \title{Read .tped}
6 | \usage{
7 | read.tped.recode(tped.filename, buffer.size=5e8)
8 | }
9 | \arguments{
\item{tped.filename}{Path to your .tped file after transposing and recoding.}
11 |
12 | \item{buffer.size}{Number of characters to keep in the buffer}
13 | }
14 | \value{
15 | genotype matrix with elements 0, 1, 2, and NA.
16 | }
17 | \description{
18 | Reads a .tped format genotype matrix and returns the R
19 | object needed by \code{\link{lfa}}.
20 | }
21 | \details{
22 | Use --transpose and --recode12 on your plink formatted genotypes
23 | to generate the proper tped file. This is a pretty terrible function
24 | that uses a growing matrix for the genotypes so it is to your
25 | benefit to have as large a \code{buffer.size} as possible.
26 | }
27 | \examples{
28 | #assuming you have a .tped file in the right directory
29 | x = NULL
30 | \dontrun{x = read.tped.recode('file.tped')}
31 | }
32 | \seealso{
33 | \code{\link[=lfa-deprecated]{lfa-deprecated()}}
34 | }
35 | \keyword{internal}
36 |
--------------------------------------------------------------------------------
/man/sHWE.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/sHWE.R
3 | \name{sHWE}
4 | \alias{sHWE}
\title{Hardy-Weinberg Equilibrium in structured populations}
6 | \usage{
7 | sHWE(X, LF, B, max_iter = 100, tol = 1e-10)
8 | }
9 | \arguments{
10 | \item{X}{A matrix of SNP genotypes, i.e. an integer matrix of 0's,
11 | 1's, 2's and \code{NA}s.
12 | BEDMatrix is supported.
13 | Sparse matrices of class Matrix are not supported (yet).}
14 |
15 | \item{LF}{matrix of logistic factors}
16 |
17 | \item{B}{number of null datasets to generate, \code{B = 1} is usually
18 | sufficient. If computational time/power allows, a few extra
19 | \code{B} could be helpful}
20 |
21 | \item{max_iter}{Maximum number of iterations for logistic regression}
22 |
23 | \item{tol}{Tolerance value passed to \code{\link[=trunc_svd]{trunc_svd()}}
24 | Ignored if \code{X} is a BEDMatrix object.}
25 | }
26 | \value{
27 | a vector of p-values for each SNP.
28 | }
29 | \description{
30 | Compute structural Hardy-Weinberg Equilibrium (sHWE) p-values
31 | on a SNP-by-SNP basis. These p-values can be aggregated to
32 | determine genome-wide goodness-of-fit for a particular value
33 | of \code{d}. See \doi{10.1101/240804} for more
34 | details.
35 | }
36 | \examples{
37 | # get LFs
38 | LF <- lfa(hgdp_subset, 4)
39 | # look at a small (300) number of SNPs for rest of this example:
40 | hgdp_subset_small <- hgdp_subset[ 1:300, ]
41 | gof_4 <- sHWE(hgdp_subset_small, LF, 3)
42 | LF <- lfa(hgdp_subset, 10)
43 | gof_10 <- sHWE(hgdp_subset_small, LF, 3)
44 | hist(gof_4)
45 | hist(gof_10)
46 | }
47 |
--------------------------------------------------------------------------------
/man/trunc_svd.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/trunc_svd.R
3 | \name{trunc_svd}
4 | \alias{trunc_svd}
5 | \title{Truncated singular value decomposition}
6 | \usage{
7 | trunc_svd(
8 | A,
9 | d,
10 | adjust = 3,
11 | tol = .Machine$double.eps,
12 | override = FALSE,
13 | force = FALSE,
14 | maxit = 1000
15 | )
16 | }
17 | \arguments{
18 | \item{A}{matrix to decompose}
19 |
20 | \item{d}{number of singular vectors}
21 |
22 | \item{adjust}{extra singular vectors to calculate for accuracy}
23 |
24 | \item{tol}{convergence criterion}
25 |
26 | \item{override}{\code{TRUE} means we use
27 | \code{\link[corpcor:fast.svd]{corpcor::fast.svd()}} instead of the
28 | iterative algorithm (useful for small data or very high \code{d}).}
29 |
30 | \item{force}{If \code{TRUE}, forces the Lanczos algorithm to be used on all
31 | datasets (usually
32 | \code{\link[corpcor:fast.svd]{corpcor::fast.svd()}}
33 | is used on small datasets or large \code{d})}
34 |
35 | \item{maxit}{Maximum number of iterations}
36 | }
37 | \value{
38 | list with singular value decomposition. Has elements 'd', 'u', 'v',
39 | and 'iter'
40 | }
41 | \description{
42 | Truncated SVD
43 | }
44 | \details{
45 | Performs singular value decomposition but only returns the first \code{d}
46 | singular vectors/values.
47 | The truncated SVD utilizes Lanczos bidiagonalization.
48 | See references.
49 |
50 | This function was modified from the package irlba 1.0.1 under GPL.
51 | Replacing the \code{\link[=crossprod]{crossprod()}} calls with the C wrapper to
52 | \code{dgemv} is a dramatic difference in larger datasets.
53 | Since the wrapper is technically not a matrix multiplication function, it
54 | seemed wise to make a copy of the function.
55 | }
56 | \examples{
57 | obj <- trunc_svd( hgdp_subset, 4 )
58 | obj$d
59 | obj$u
60 | obj$v
61 | obj$iter
62 | }
63 |
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | *.o
2 | *.so
3 | *.dll
4 |
--------------------------------------------------------------------------------
/src/Makevars:
--------------------------------------------------------------------------------
1 | PKG_LIBS = $(SUBLIBS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
2 |
3 |
--------------------------------------------------------------------------------
/src/fastmat.c:
--------------------------------------------------------------------------------
1 | #include "lfa.h"
2 |
// Matrix-vector product A %*% v via BLAS dgemv.
// RA: numeric matrix (m x n); Rv: numeric vector (length n, not checked).
// Returns a new length-m numeric vector.
SEXP mv_c(SEXP RA, SEXP Rv){
    int *dimA;
    double *v, *A;

    // NOTE(review): dims are read before coercion via the getDims macro;
    // assumes RA arrives with a dim attribute (true for R matrices)
    dimA = getDims(RA);
    PROTECT(RA=coerceVector(RA, REALSXP));
    PROTECT(Rv=coerceVector(Rv, REALSXP));
    A = REAL(RA);
    v = REAL(Rv);

    SEXP Rret;
    double *ret;
    PROTECT(Rret = allocVector(REALSXP, dimA[0])); // one entry per row of A
    ret = REAL(Rret);

    double alpha = 1.0;
    double zero = 0.0;
    char tr = 'N'; // no transpose: ret = 1.0 * A %*% v + 0.0 * ret
    int one = 1;
    F77_CALL(dgemv)(&tr,dimA,dimA+1,&alpha,A,dimA,v,&one,&zero,ret,&one FCONE);

    UNPROTECT(3);

    return Rret;
}
28 |
// Transposed matrix-vector product t(A) %*% v via BLAS dgemv.
// RA: numeric matrix (m x n); Rv: numeric vector (length m, not checked).
// Returns a new length-n numeric vector.
SEXP tmv_c(SEXP RA, SEXP Rv){
    int *dimA;
    double *v, *A;

    // same PROTECT/coercion pattern as mv_c above
    dimA = getDims(RA);
    PROTECT(RA=coerceVector(RA, REALSXP));
    PROTECT(Rv=coerceVector(Rv, REALSXP));
    A = REAL(RA);
    v = REAL(Rv);

    SEXP Rret;
    double *ret;
    PROTECT(Rret = allocVector(REALSXP, dimA[1])); // one entry per column of A
    ret = REAL(Rret);

    double alpha = 1.0;
    double zero = 0.0;
    char tr = 'T'; // transpose: ret = t(A) %*% v
    int one = 1;
    F77_CALL(dgemv)(&tr,dimA,dimA+1,&alpha,A,dimA,v,&one,&zero,ret,&one FCONE);

    UNPROTECT(3);

    return Rret;
}
54 |
--------------------------------------------------------------------------------
/src/lfa-init.c:
--------------------------------------------------------------------------------
1 | #include "lfa.h"
2 | #include <R.h>
3 | #include <Rinternals.h>
4 | #include <R_ext/Rdynload.h>
5 |
6 | static const R_CallMethodDef callMethods[] = {
7 | {"lfa_threshold", (DL_FUNC) &lfa_threshold, 2},
8 | {"lfa_scaling", (DL_FUNC) &lfa_scaling, 2},
9 | {"centerscale_c", (DL_FUNC) ¢erscale_c, 1},
10 | {"lreg_c", (DL_FUNC) &lreg_c, 4},
11 | {"mv_c", (DL_FUNC) &mv_c, 2},
12 | {"tmv_c", (DL_FUNC) &tmv_c, 2},
13 | {NULL, NULL, 0}
14 | };
15 |
16 | void R_init_lfa(DllInfo *info) {
17 | R_registerRoutines(info, NULL, callMethods, NULL, NULL);
18 | R_useDynamicSymbols(info, TRUE);
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/src/lfa.c:
--------------------------------------------------------------------------------
1 | #include "lfa.h"
2 |
// For each row of RX, test whether every value lies strictly inside the
// open interval (thresh, 1 - thresh).  Returns a length-nrow numeric
// vector of 0/1 flags (1 = row passes).  Used to drop SNPs whose fitted
// values get too close to 0 or 1 before the logit transform.
SEXP lfa_threshold(SEXP RX, SEXP Rthresh){
    int *dimX, n, i, ind;
    double *X, max, min;
    double thresh = (double)(*REAL(Rthresh));

    dimX = getDims(RX);
    PROTECT(RX = coerceVector(RX, REALSXP));
    X = REAL(RX);

    // NOTE(review): this only prints a warning and proceeds; with 0 columns
    // the inner scan below simply does not run
    if(dimX[1] <1)
        Rprintf("dimension problem in lfa_threshold...");

    SEXP Rret; //returns boolean list of valid rows
    double *ret;
    PROTECT(Rret = allocVector(REALSXP, dimX[0]));
    ret = REAL(Rret);

    // column-major storage: row n is visited with stride dimX[0]
    for(n = 0; n < dimX[0]; n++){
        min = X[n]; //set min/max to first element
        max = X[n];
        ind = n + dimX[0]; //start from second element
        for(i = 1; i < dimX[1]; i++){
            if(X[ind] > max)
                max = X[ind];
            else if(X[ind] < min)
                min = X[ind];
            ind += dimX[0]; //iterate across loops of course
        }
        //Rprintf("%f %f\n", max, min);
        // strict inequalities: boundary values fail the row
        if((max < (1-thresh)) && (min > thresh))
            ret[n] = 1;
        else
            ret[n] = 0;
    }

    UNPROTECT(2);
    return Rret;
}
41 |
42 |
43 | //This function seeks to do the following lines of R code:
44 | // mean_x = apply(x,1,mean)
45 | // sd_x = apply(x,1,sd)
46 | // z = (z*sd_x) + mean_x
47 | // z = z/2
48 | //except be really efficient by taking full advantage of passing by
49 | //reference.
50 | SEXP lfa_scaling(SEXP RX, SEXP RZ){
51 | int *dimX, n, i, ind;
52 | double *X, *Z, mean, sd;
53 |
54 | dimX = getDims(RX);
55 | PROTECT(RX = coerceVector(RX, REALSXP));
56 | X = REAL(RX);
57 |
58 | PROTECT(RZ = coerceVector(RZ, REALSXP));
59 | Z = REAL(RZ);
60 |
61 | for(n = 0; n < dimX[0]; n++){
62 | mean = 0;
63 | sd = 0;
64 |
65 | ind = n;
66 | for(i = 0; i < dimX[1]; i++){
67 | mean += X[ind];
68 | ind += dimX[0]; //looping over rows...
69 | }
70 | mean = mean/dimX[1];
71 |
72 | ind=n;
73 | for(i = 0; i < dimX[1]; i++){
74 | Z[ind] *= sd;
75 | Z[ind] += mean;
76 | Z[ind] /= 2;
77 | ind += dimX[0]; //looping over rows...
78 | }
79 | }
80 |
81 | UNPROTECT(2);
82 | return R_NilValue;
83 | }
84 |
85 |
86 | //two utility functions for centerscale
87 | double sd(double* A, int n, int inc){
88 | int i, ind=0;
89 | double sum = 0;
90 | for(i = 0; i < n; i++){
91 | sum += A[ind];
92 | ind += inc;
93 | }
94 |
95 | double mean = sum/n;
96 | sum = 0;
97 | ind = 0;
98 |
99 | for(i = 0; i < n; i++) {
100 | sum += (A[ind]-mean) * (A[ind]-mean);
101 | ind += inc;
102 | }
103 |
104 | return sqrt(sum/(n-1));
105 | }
106 |
// Arithmetic mean of n values read from A with stride inc.
double mean(double* A, int n, int inc){
    double total = 0.0;
    int k;

    for(k = 0; k < n; k++)
        total += A[k * inc];

    return total / n;
}
118 |
// Row-wise center and scale: returns a copy of RA in which each row has
// its mean subtracted and is divided by its sample standard deviation.
// Constant rows (sd == 0) become all zeroes rather than NaN.
SEXP centerscale_c(SEXP RA){
    int *dimA;
    double *A;

    dimA = getDims(RA);
    // if(dimA[0] <= 1) error("er, first dimension is 1? that's weird."); // let's allow single locus processing!
    // need at least 2 columns so the (n-1) sd denominator is nonzero
    if(dimA[1] <= 1) error("er, second dimension is 1? that's weird.");
    PROTECT(RA=coerceVector(RA, REALSXP));
    A = REAL(RA);

    SEXP Rret = PROTECT(duplicate(RA));
    double *ret = REAL(Rret);

    int i, j, ind;
    double m, s;
    for(i = 0; i < dimA[0]; i++){
        ind = i;
        // column-major layout: row i starts at A+i with stride dimA[0]
        m = mean(A+i, dimA[1], dimA[0]);
        s = sd(A+i, dimA[1], dimA[0]);

        for(j = 0; j < dimA[1]; j++){
            if (s != 0) {
                ret[ind] = (A[ind] - m)/s;
                ind += dimA[0];
            }
            else {
                // constant row: define the standardized value as 0
                ret[ind] = 0;
                ind += dimA[0];
            }
        }
    }

    UNPROTECT(2);
    return Rret;
}
154 |
155 |
--------------------------------------------------------------------------------
/src/lfa.h:
--------------------------------------------------------------------------------
1 | #include <R.h>
2 | #include <Rinternals.h>
3 | #include <Rmath.h>
4 | #include <R_ext/BLAS.h>
5 | #include <R_ext/Lapack.h>
6 | #include <R_ext/Print.h>
7 | #include <stdlib.h>
8 | #include <math.h>
9 |
10 | #define getDims(A) INTEGER(coerceVector(getAttrib(A, R_DimSymbol), INTSXP))
11 |
12 | SEXP lfa_threshold(SEXP, SEXP);
13 | SEXP lfa_scaling(SEXP, SEXP);
14 | SEXP centerscale_c(SEXP);
15 | SEXP lreg_c(SEXP, SEXP, SEXP, SEXP);
16 | SEXP mv_c(SEXP, SEXP);
17 | SEXP tmv_c(SEXP, SEXP);
18 |
--------------------------------------------------------------------------------
/src/lreg.c:
--------------------------------------------------------------------------------
1 | #include "lfa.h"
2 |
//logistic regression via iteratively reweighted least squares (IRLS)
//you MUST add the constant (intercept) column to X before this
//
// RX:   n x k design matrix (column-major doubles)
// Ry:   length-n response vector
// Rmi:  maximum number of IRLS iterations (numeric scalar)
// Rtol: relative convergence tolerance on the coefficients (numeric scalar)
//
// Returns a length-k REALSXP of coefficient estimates; every entry is NA
// if the weighted cross-product matrix fails to factorize or invert at
// any iteration.
SEXP lreg_c(SEXP RX, SEXP Ry, SEXP Rmi, SEXP Rtol){
    int *dimX, maxiter = (int)(*REAL(Rmi));
    double *X, *y, tol = (double)(*REAL(Rtol));
    int i, j, k, ind1, ind2, ind;
    int flag;

    dimX = getDims(RX);
    PROTECT(Ry = coerceVector(Ry, REALSXP));
    PROTECT(RX = coerceVector(RX, REALSXP));
    y = REAL(Ry);
    X = REAL(RX);

    SEXP Rret;
    double *ret;
    PROTECT(Rret = allocVector(REALSXP, dimX[1]));
    ret = REAL(Rret);

    // NOTE(review): malloc return values are not checked for NULL
    int numblock = 64*dimX[1]; // dgetri work-array size
    double *b = (double*) malloc(sizeof(double)*dimX[1]); //beta
    double *bl = (double*) malloc(sizeof(double)*dimX[1]); //beta last
    double *f = (double*) malloc(sizeof(double)*dimX[1]); //tmp
    double *p = (double*) malloc(sizeof(double)*dimX[0]); //mle

    double *w = (double*) malloc(sizeof(double)*dimX[1]*dimX[1]);
    int* ipiv = (int*) malloc(sizeof(int) *dimX[1]); //for inverting
    double *wo = (double*) malloc(sizeof(double)*numblock);
    double max; // check convergence

    int iter = 1;
    double alpha = -1.0, zero = 0.0, one = 1.0;
    int ione = 1;
    int info=0;
    char tr = 'n';
    double tmp;
    // start IRLS from beta = 0
    for(i = 0; i < dimX[1]; i++) {
        b[i] = 0;
        bl[i] = 0;
    }

    //IRLS
    flag = 0;
    while(iter <= maxiter){
        ///////////////////////////////////////////////////////////////////////
        //p <- as.vector(1/(1 + exp(-X %*% b)))
        // alpha is -1, so dgemv puts -X %*% b into p; the next line then
        // evaluates the logistic function 1/(1 + exp(-Xb))
        F77_CALL(dgemv)(&tr,dimX,dimX+1,&alpha,X,dimX,b,&ione,&zero,p,&ione FCONE);
        for(i = 0; i < dimX[0]; i++)
            p[i] = 1/(1+exp(p[i]));

        ///////////////////////////////////////////////////////////////////////
        //var.b <- solve(crossprod(X, p * (1 - p) * X))
        //
        //here, solve is inverting the matrix.
        //p*(1-p) is applied to cols of X.
        //at the moment I am manually computing the crossprod
        //which is guaranteed to be symmetric
        for(i = 0; i < dimX[1]; i++){ //rows
            for(j = i; j < dimX[1]; j++){ //columns
                ind1 = i*dimX[0]; //i-th col of X
                ind2 = j*dimX[0]; //j-th col of X
                ind = dimX[1]*i + j; //position on w (symmetric, so
                                     //row/col-major distinction is moot)
                w[ind] = 0;
                for(k = 0; k < dimX[0]; k++){ //loop over X'p(1-p)X
                    w[ind]+=X[ind1]*X[ind2]*p[k]*(1-p[k]);
                    ind1++;
                    ind2++;
                }
                if(i != j) //reflect it
                    w[dimX[1]*j+i] = w[ind];
            }
        }

        //actually inverting here. remember to pay attention to includes
        // LU factorization, then inversion in place; either failing sets
        // flag and the whole fit is reported as NA below
        F77_CALL(dgetrf)(dimX+1,dimX+1,w,dimX+1,ipiv,&info);
        if(info != 0) {
            //Rprintf("warning: dgetrf error, NA used\n");
            //Rprintf("info:%i iter:%i\n", info, iter);
            //error("dgetrf error\n");
            flag = 1;
        }
        F77_CALL(dgetri)(dimX+1,w,dimX+1,ipiv,wo,&numblock,&info);
        if(info != 0) {
            //Rprintf("warning: dgetri error, NA used\n");
            //Rprintf("info:%i iter:%i\n", info, iter);
            //error("dgetri error\n");
            flag = 1;
        }

        //if a failure, skip outta here.
        if(flag == 1){
            for(i = 0; i < dimX[1]; i++) ret[i] = R_NaReal;
            free(b);
            free(bl);
            free(f);
            free(p);
            free(w);
            free(ipiv);
            free(wo);
            UNPROTECT(3);
            return Rret;
        }


        ///////////////////////////////////////////////////////////////////////
        //b <- b + var.b %*% crossprod(X, y - p)
        //use f to calculate crossprod(X,y-p) first.
        //then use dgemv
        ind = 0; //since we are iterating over X in order
        for(i = 0; i < dimX[1]; i++){ //cols of X, values of f
            f[i] = 0;
            for(j = 0; j < dimX[0]; j++){ //rows of X, values of y-p
                f[i] += X[ind] * (y[j] - p[j]);
                ind++;
            }
        }

        // beta (the last argument) of 1.0 accumulates: b = w %*% f + b
        F77_CALL(dgemv)(&tr,dimX+1,dimX+1,&one,w,dimX+1,f,&ione,&one,b,&ione FCONE);


        ///////////////////////////////////////////////////////////////////////
        //if (max(abs(b - b.last)/(abs(b.last) + 0.01*tol)) < tol) break
        //check to see if we need to break
        max = 0.0;
        for(i = 0; i < dimX[1]; i++) {
            tmp = fabs(b[i] - bl[i])/(fabs(bl[i]) + 0.01*tol);
            if(tmp > max) max = tmp;
        }

        if(max < tol)
            break;


        ///////////////////////////////////////////////////////////////////////
        //b.last <- b
        //it <- it + 1
        for(i = 0; i < dimX[1]; i++) bl[i] = b[i];

        iter++;
    }

    //if(iter > maxiter) printf("warning: max iterations exceeded\n");
    // NOTE(review): hitting maxiter returns the last iterate silently

    //set the return...
    for(i = 0; i < dimX[1]; i++) ret[i] = b[i];

    free(b);
    free(bl);
    free(f);
    free(p);
    free(w);
    free(ipiv);
    free(wo);
    UNPROTECT(3);
    return Rret;
}
159 |
160 |
161 |
--------------------------------------------------------------------------------
/tests/testthat.R:
--------------------------------------------------------------------------------
# testthat driver script: discovers and runs every test file under
# tests/testthat/ for the lfa package (R CMD check entry point)
library(testthat)
library(lfa)

test_check("lfa")
5 |
--------------------------------------------------------------------------------
/tests/testthat/test-lfa.R:
--------------------------------------------------------------------------------
# generate random data for tests

# data dimensions
n_ind <- 10
m_loci <- 300
# total data size
n_data <- n_ind * m_loci
# add missingness
miss <- 0.1

# completely unstructured genotypes
# create ancestral allele frequencies
p_anc <- runif( m_loci )
# create genotypes
X <- rbinom( n_data, 2, p_anc )
# add missing values
X[ sample( n_data, n_data * miss ) ] <- NA
# turn into matrix
X <- matrix( X, nrow = m_loci, ncol = n_ind )

# to have a reasonable dataset always, remove fixed loci and all NA loci
# first remove loci that are entirely NA (with just 10 individuals, very possible)
loci_keep <- rowSums( !is.na(X) ) > 0
X <- X[ loci_keep, ]
# now identify fixed loci
# FIX: genotypes are on the 0/1/2 scale, so divide the row mean by 2 to get
# an allele frequency in [0, 1]; the previous `p_anc_hat < 1` cutoff on the
# raw row mean also discarded valid polymorphic loci with frequency >= 0.5
p_anc_hat <- rowMeans( X, na.rm = TRUE ) / 2
loci_keep <- (0 < p_anc_hat) & (p_anc_hat < 1)
X <- X[ loci_keep, ]
# update number of loci and data size
m_loci <- nrow( X )
n_data <- n_ind * m_loci

# also create a matrix A != X without missingness, can be continuous values
# same dimensions as X
A <- matrix(
    rnorm( n_data ),
    nrow = m_loci,
    ncol = n_ind
)
40 |
test_that( "trunc_svd works, matches base::svd", {
    # expect errors when things are missing (both A and d are required)
    expect_error( trunc_svd() )
    expect_error( trunc_svd( A = A ) )
    expect_error( trunc_svd( d = 1 ) )

    # NOTE: since all dimensions are small, internally this defaults to fast.svd
    # test all d values, for completeness
    # `force = TRUE` exercises the Lanczos code path instead of fast.svd
    for ( force in c(FALSE, TRUE) ) {
        # Lanczos works best for small d (accuracy declines dramatically as d gets closer to n_ind)
        d_max <- if (force) n_ind / 2 else n_ind

        for ( d in 1 : d_max ) {
            # try to run successfully
            expect_silent(
                obj <- trunc_svd(
                    A = A,
                    d = d,
                    force = force
                )
            )
            # test return values
            expect_true( is.list(obj) )
            expect_equal( length(obj), 4 )
            expect_equal( names(obj), c('d', 'u', 'v', 'iter') )
            # these must be matrices
            expect_true( is.matrix( obj$u ) )
            expect_true( is.matrix( obj$v ) )
            # dimensions, these are all different but obviously related
            expect_equal( length( obj$d ), d )
            expect_equal( nrow( obj$u ), m_loci )
            expect_equal( ncol( obj$u ), d )
            expect_equal( nrow( obj$v ), n_ind )
            expect_equal( ncol( obj$v ), d )

            # ultimate test is to compare to R's vanilla SVD (must agree!)
            obj2 <- svd( A, nu = d, nv = d )
            # svd's d is always length n_ind, must subset
            expect_equal( obj$d, obj2$d[ 1:d ] )
            # signs differ randomly, just compare absolute values
            expect_equal( abs(obj$u), abs(obj2$u) )
            expect_equal( abs(obj$v), abs(obj2$v) )

            # NOTE: though this would have been more precise, for some reason sign alignments didn't work well
            # signs differ randomly, align using first column of `u`
            ## # sgn has length m_loci
            ## sgn <- sign( obj$u[ , 1 ] * obj2$u[ , 1 ] )
            ## sgn[ sgn == 0 ] <- 1 # never use zeroes, just preserve (probably extremely rare)
            ## # this fixes signs, multiplies down columns, which is what we want
            ## expect_equal( obj$u, sgn * obj2$u )
            ## # sign flips are the same here, but only for a smaller number of rows
            ## expect_equal( obj$v, sgn[ 1 : n_ind ] * obj2$v )
        }
    }

    # run on HGDP data (way bigger than my other toy examples)
    # NOTE: `A` is rebound locally to hgdp_subset from here on
    A <- hgdp_subset
    d <- 4
    expect_silent(
        obj <- trunc_svd(A, d, force = TRUE)
    )
    # jump straight into comparison to R's vanilla SVD
    obj2 <- svd( A, nu = d, nv = d )
    # svd's d is always length n_ind, must subset
    expect_equal( obj$d, obj2$d[ 1:d ] )
    # signs differ randomly, just compare absolute values
    expect_equal( abs(obj$u), abs(obj2$u) )
    expect_equal( abs(obj$v), abs(obj2$v) )

})
111 |
test_that("lfa works", {
    # expect errors when things are missing (both X and d are required)
    expect_error( lfa() )
    expect_error( lfa( X = X ) )
    expect_error( lfa( d = 3 ) )
    # and when d is invalid
    expect_error( lfa( X = X, d = 'a' ) )
    expect_error( lfa( X = X, d = 5.9 ) ) # d must be integer
    expect_error( lfa( X = X, d = 0 ) ) # require d >= 1

    # test several d values, for completeness
    # NOTES:
    # - due to `lfa_threshold` removing too many SNPs in our toy examples, d can't be too large
    # - there's no "force" version here for `trunc_svd` (essentially only fast.svd outputs are tested, though they've all been verified to agree)
    for ( d in 1 : (n_ind/2) ) {
        # test run overall
        expect_silent(
            LFs <- lfa( X = X, d = d )
        )
        expect_true( is.matrix( LFs ) )
        # test dimensions: one row per individual, d logistic factors
        expect_equal( nrow( LFs ), n_ind )
        expect_equal( ncol( LFs ), d )
        # last column should always be intercept
        expect_equal( LFs[, d], rep.int(1, n_ind) )
        # nothing should be NA
        expect_true( !anyNA( LFs ) )

        # repeat with RSpectra, should get the same LFs!
        expect_silent(
            LFs2 <- lfa( X = X, d = d, rspectra = TRUE )
        )
        # ignore sign flips
        expect_equal( abs(LFs), abs(LFs2) )
    }
})
148 |
test_that("lfa works with adjustments", {
    # weird thing is that adjustments take the place of LFs, so d >= ncol(adjustments) + 2!
    # (this ensures there is at least the intercept and one proper LF)
    # (below we try 1 and 2 adjustments, so smallest d to test is 4)
    d <- 4

    # trigger errors when adjustments are the wrong type/dimensions
    # adjustments must be a matrix
    expect_error( lfa( X = X, d = d, adjustments = 1:n_ind ) )
    # adjustments rows must equal n_ind
    expect_error( lfa( X = X, d = d, adjustments = cbind( 2:n_ind ) ) )
    # adjustments columns must not equal or exceed `d-1`
    expect_error( lfa( X = X, d = d, adjustments = cbind( 1:n_ind, 1:n_ind, 1:n_ind ) ) )
    # adjustments aren't allowed to have NAs
    expect_error( lfa( X = X, d = d, adjustments = cbind( c(2:n_ind, NA) ) ) )

    # create random data for test
    # adjustments are matrices in general
    # try 1-column adjustments
    adjustments1 <- cbind( rnorm( n_ind ) )
    # and 2 columns
    adjustments2 <- cbind( adjustments1, rnorm( n_ind ) )

    # repeat all tests for both
    for (adjustments in list( adjustments1, adjustments2 ) ) {
        # test run overall
        expect_silent(
            LFs <- lfa( X = X, d = d, adjustments = adjustments )
        )
        expect_true( is.matrix( LFs ) )
        # test dimensions
        expect_equal( nrow( LFs ), n_ind )
        expect_equal( ncol( LFs ), d ) # always d columns, regardless of adjustments size
        # last column should always be intercept
        expect_equal( LFs[ , d ], rep.int(1, n_ind) )
        # adjustment variables are repeated in first columns
        # (attributes differ, so use *_equivalent instead of *_equal)
        # NOTE(review): expect_equivalent() is deprecated in testthat 3e;
        # the modern form is expect_equal(..., ignore_attr = TRUE) --
        # confirm the package's testthat edition before changing
        expect_equivalent( LFs[ , 1:ncol(adjustments) ], adjustments )
        # nothing should be NA
        expect_true( !anyNA( LFs ) )
    }
})
191 |
test_that( ".lreg works", {
    # this core function is for data without missingness only!

    # get LFs from the full data with missingness (that's ok)
    d <- 3
    LFs <- lfa( X = X, d = d )
    # now generate a new unstructured genotype vector without missingness
    p_anc <- 0.5
    # create genotypes
    x <- rbinom( n_ind, 2, p_anc )

    # expect errors when key data is missing
    expect_error( .lreg( ) )
    expect_error( .lreg( x = x ) )
    expect_error( .lreg( LF = LFs ) )

    # begin test!
    expect_silent(
        betas <- .lreg( x = x, LF = LFs )
    )
    # test that coefficients are as expected: one per LF column, none NA
    expect_true( is.numeric( betas ) )
    expect_equal( length( betas ), d )
    expect_true( !anyNA( betas ) )

    ## # compare to GLM
    ## # compared to internal code, here we don't double things (looks more like jackstraw code)
    ## suppressWarnings(
    ##     obj_glm <- glm(
    ##         cbind( x, 2 - x ) ~ -1 + LFs,
    ##         family = "binomial"
    ##     )
    ## )
    ## betas_glm <- obj_glm$coef
    ## names( betas_glm ) <- NULL
    ## # compare
    ## expect_equal( betas, betas_glm )
})
230 |
test_that( "af_snp works", {
    # like .lreg, except NAs are handled and returns allele frequencies instead of coefficients

    # get LFs from the full data
    d <- 3
    LFs <- lfa( X = X, d = d )

    # expect errors when key data is missing
    expect_error( af_snp( ) )
    expect_error( af_snp( snp = X[ 1, ] ) )
    expect_error( af_snp( LF = LFs ) )
    # expect errors for mismatched dimensions
    # here number of individuals disagrees
    expect_error( af_snp( snp = X[ 1, ], LF = LFs[ 2:n_ind, ] ) )

    # begin test!
    # test a few SNPs in the same data (not all, that'd be overkill)
    m_loci_max <- 10
    for ( i in 1 : m_loci_max ) {
        xi <- X[ i, ]
        expect_silent(
            af <- af_snp( snp = xi, LF = LFs )
        )
        # test that AFs are as expected: one frequency per individual, no NAs
        expect_true( is.numeric( af ) )
        expect_equal( length( af ), n_ind )
        expect_true( !anyNA( af ) )
    }
})
260 |
test_that( "af works", {
    # this is a boring wrapper around af_snp, applying it to the whole genome

    # get LFs from the full data
    d <- 3
    LFs <- lfa( X = X, d = d )

    # expect errors when key data is missing
    expect_error( af( ) )
    expect_error( af( X = X ) )
    expect_error( af( LF = LFs ) )
    # expected error if X is not a matrix
    expect_error( af( X = as.numeric(X), LF = LFs ) )
    # expect errors for mismatched dimensions
    # here number of individuals disagrees
    expect_error( af( X = X, LF = LFs[ 2:n_ind, ] ) )

    # begin test!
    expect_silent(
        P <- af( X = X, LF = LFs )
    )
    # test that AFs are as expected: complete loci-by-individuals matrix
    expect_true( is.numeric( P ) )
    expect_true( is.matrix( P ) )
    expect_equal( nrow( P ), m_loci )
    expect_equal( ncol( P ), n_ind )
    expect_true( !anyNA( P ) )
})
289 |
test_that( '.af_cap works', {
    # .af_cap takes a single, mandatory argument
    expect_error( .af_cap() )

    # matrix version: any continuous data works, so reuse A
    expect_silent( af_mat <- .af_cap( A ) )
    # capped frequencies must form a complete numeric matrix with the
    # same dimensions as the input
    expect_true( is.numeric( af_mat ) )
    expect_true( is.matrix( af_mat ) )
    expect_equal( nrow( af_mat ), m_loci )
    expect_equal( ncol( af_mat ), n_ind )
    expect_true( !anyNA( af_mat ) )

    # vector version: a single row in, a plain numeric vector out
    # (local renamed from `pi`, which shadowed the base constant)
    expect_silent( af_vec <- .af_cap( A[ 1, ] ) )
    expect_true( is.numeric( af_vec ) )
    expect_true( !is.matrix( af_vec ) )
    expect_equal( length( af_vec ), n_ind )
    expect_true( !anyNA( af_vec ) )
})
312 |
313 | test_that( "pca_af works", {
314 | # expect errors when key data is missing
315 | expect_error( pca_af( ) )
316 | expect_error( pca_af( X = X ) )
317 | expect_error( pca_af( d = d ) )
318 |
319 | # in all these cases dimensions are so small only fast.svd version is run, so all d possible values should work
320 | for ( d in 1 : n_ind ) {
321 | # try a successful run
322 | expect_silent(
323 | P <- pca_af( X = X, d = d )
324 | )
325 | # test that AFs are as expected
326 | expect_true( is.numeric( P ) )
327 | expect_true( is.matrix( P ) )
328 | expect_equal( nrow( P ), m_loci )
329 | expect_equal( ncol( P ), n_ind )
330 | expect_true( !anyNA( P ) )
331 | }
332 | })
333 |
test_that( "centerscale works", {
    # centerscale standardizes each row; it requires complete data,
    # so use the continuous matrix A (no NAs)
    expect_silent(
        observed <- centerscale( A )
    )
    # build the expected answer from scratch: subtract each row's mean,
    # then divide by each row's sample standard deviation
    row_means <- rowMeans( A )
    centered <- A - row_means
    row_sds <- sqrt( rowSums( centered^2 ) / ( n_ind - 1 ) )
    expected <- centered / row_sds
    expect_equal( observed, expected )
})
348 |
test_that( ".check_geno works", {
    # our simulated data should pass this check
    expect_silent( .check_geno( X ) )

    # now create expected failures
    # this tests all cases implemented
    # ... if encoding is different this way (values outside {0, 1, 2})
    expect_error( .check_geno( X - 1 ) )
    # ... if matrix is not tall
    expect_error( .check_geno( t(X) ) )
    # ... if it's a vector instead of a matrix
    expect_error( .check_geno( 0:2 ) )
    # ... if there is a fixed locus
    # (create a 4x3 matrix, so it is tall, and with data in correct range otherwise)
    expect_error( .check_geno( rbind(0:2, c(0,0,0), 2:0, 0:2 ) ) )
    # ... with the other continuous matrix
    expect_error( .check_geno( A ) )
})
367 |
test_that( ".gof_stat_snp works", {
    # get LFs for test
    d <- 3
    LFs <- lfa( X = X, d = d )

    # begin test!
    # test a few SNPs in the same data (not all, that'd be overkill)
    m_loci_max <- 10
    for ( i in 1 : m_loci_max ) {
        xi <- X[ i, ]
        expect_silent(
            stat <- .gof_stat_snp( snp = xi, LF = LFs )
        )
        # validate features of the stat, which should be a scalar
        expect_equal( length(stat), 1 )
    }
})
385 |
test_that( ".compute_nulls works", {
    d <- 3
    B <- 2 # number of null replicates
    # first compute LFs
    LFs <- lfa( X = X, d = d )
    # then compute allele frequencies
    P <- af( X = X, LF = LFs )
    # now test begins
    expect_silent(
        stat0 <- .compute_nulls(P = P, d = d, B = B)
    )
    # test return value: one null statistic per locus per replicate
    expect_true( is.matrix( stat0 ) )
    expect_equal( nrow( stat0 ), m_loci )
    expect_equal( ncol( stat0 ), B )
})
402 |
test_that( ".pvals_empir works", {
    # generate some small random data with NAs
    # these don't need the same lengths, so let's make it funky
    m0 <- 100 # total null (separate from observed)
    m <- 40 # total observed
    m1 <- 10 # observed which are truly alternative
    # null is N(0,1)
    stats0 <- rnorm( m0 )
    # data is also mostly null, but a few alternatives N(1, 1)
    stats1 <- c( rnorm( m - m1 ), rnorm( m1, mean = 1 ) )
    # scramble them
    stats1 <- sample( stats1 )
    # sprinkle NAs in both
    stats0[ sample.int( m0, 5 ) ] <- NA
    stats1[ sample.int( m, 5 ) ] <- NA
    # compute p-values with naive, brute-force, clear formula
    pvals <- .pvals_empir_brute( stats1, stats0 )

    # another random dataset with discrete statistics, to make sure ties are handled correctly (are inequalities strict?)
    # replace Normal with Poisson
    stats0_discr <- rpois( m0, lambda = 10 )
    # data is also mostly null, but a few alternatives Pois(30)
    stats1_discr <- c( rpois( m - m1, lambda = 10 ), rpois( m1, lambda = 30 ) )
    # scramble them
    stats1_discr <- sample( stats1_discr )
    # sprinkle NAs in both
    stats0_discr[ sample.int( m0, 5 ) ] <- NA
    stats1_discr[ sample.int( m, 5 ) ] <- NA
    # compute p-values with naive, brute-force, clear formula
    pvals_discr <- .pvals_empir_brute( stats1_discr, stats0_discr )

    # cause errors on purpose
    # all have missing arguments
    expect_error( .pvals_empir( ) )
    expect_error( .pvals_empir( stats1 ) )
    expect_error( .pvals_empir( stats0 = stats0 ) )

    # first direct test of Normal data: the fast version must match brute force
    expect_equal(
        pvals,
        .pvals_empir( stats1, stats0 )
    )
    # now discrete data
    expect_equal(
        pvals_discr,
        .pvals_empir( stats1_discr, stats0_discr )
    )
})
451 |
test_that( "sHWE works", {
    # get LFs from the full data
    d <- 3
    LFs <- lfa( X = X, d = d )
    # just use default suggestion
    B <- 1

    # expect errors when key data is missing (X, LF and B are all required)
    expect_error( sHWE( ) )
    expect_error( sHWE( X = X ) )
    expect_error( sHWE( LF = LFs ) )
    expect_error( sHWE( B = B ) )
    expect_error( sHWE( LF = LFs, B = B ) )
    expect_error( sHWE( X = X, B = B ) )
    expect_error( sHWE( X = X, LF = LFs ) )
    # expected error if X is not a matrix
    expect_error( sHWE( X = as.numeric(X), LF = LFs, B = B ) )
    # expect errors for mismatched dimensions
    # here number of individuals disagrees
    expect_error( sHWE( X = X, LF = LFs[ 2:n_ind, ], B = B ) )

    # now a successful run
    expect_silent(
        pvals <- sHWE( X = X, LF = LFs, B = B )
    )
    # test output dimensions, etc: one p-value per locus, all in [0, 1]
    expect_equal( length( pvals ), m_loci )
    expect_true( max( pvals, na.rm = TRUE ) <= 1 )
    expect_true( min( pvals, na.rm = TRUE ) >= 0 )
})
482 |
483 | ### BEDMatrix tests
484 |
485 | # require external packages for this...
486 |
487 | if (
488 | suppressMessages(suppressWarnings(require(BEDMatrix))) &&
489 | suppressMessages(suppressWarnings(require(genio)))
490 | ) {
491 | context('lfa_BEDMatrix')
492 |
493 | # write the same data we simulated onto a temporary file
494 | file_bed <- tempfile('delete-me-random-test') # output name without extensions!
495 | genio::write_plink( file_bed, X )
496 |
497 | # load as a BEDMatrix object
498 | X_BEDMatrix <- suppressMessages(suppressWarnings( BEDMatrix( file_bed ) ))
499 |
500 | test_that( ".covar_BEDMatrix and .covar_logit_BEDMatrix work", {
501 | # computes not only covariance structure, but also mean vector
502 |
503 | # first compute data from ordinary R matrix, standard methods
504 | covar_direct <- .covar_basic( X )
505 | X_mean <- rowMeans(X, na.rm = TRUE)
506 |
507 | # now compute from BEDMatrix object!
508 | expect_silent(
509 | obj <- .covar_BEDMatrix(X_BEDMatrix)
510 | )
511 | # used "equivalent" because attributes differ, doesn't matter
512 | expect_equivalent( covar_direct, obj$covar )
513 | expect_equal( X_mean, obj$X_mean )
514 |
515 | # get eigendecomposition, make sure it agrees as expected with vanilla SVD
516 | # this is a test for whether the last `obj$covar` is scaled correctly or not
517 | d <- 3
518 | obj2 <- RSpectra::eigs_sym( obj$covar, d )
519 | V <- obj2$vectors
520 | # ultimate test is to compare to R's vanilla SVD (must agree!)
521 | # but have to transform X the same way as is normal
522 | Xc <- X - X_mean
523 | Xc[ is.na(Xc) ] <- 0
524 | obj3 <- svd( Xc, nu = d, nv = d )
525 | # sqrt(eigenvalues) should be singular values
526 | expect_equal( sqrt(obj2$values), obj3$d[ 1:d ] )
527 | # signs differ randomly, just compare absolute values
528 | expect_equal( abs(V), abs(obj3$v) )
529 |
530 | ## # this is a test of recovering U when it's not available
531 | ## expect_equal( abs( Xc %*% V %*% diag( 1/sqrt(obj2$values), d )), abs(obj3$u) )
532 |
533 | ## # construct projected data with proper SVD (truncated)
534 | ## Z <- obj3$u %*% diag( obj3$d[ 1:d ], d ) %*% t( obj3$v )
535 | ## # match it up with my prediction
536 | ## Z2 <- Xc %*% tcrossprod( V )
537 | ## expect_equal( Z, Z2 )
538 |
539 | # now test that subsequent step is also as desired
540 | expect_silent(
541 | covar_Z <- .covar_logit_BEDMatrix( X_BEDMatrix, X_mean, V )
542 | )
543 | expect_silent(
544 | covar_Z_basic <- .covar_logit_basic( X, V )
545 | )
546 | expect_equal( covar_Z, covar_Z_basic )
547 |
548 | # repeat with edge case m_chunk
549 | expect_silent(
550 | obj <- .covar_BEDMatrix(X_BEDMatrix, m_chunk = 1)
551 | )
552 | expect_equivalent( covar_direct, obj$covar )
553 | expect_equal( X_mean, obj$X_mean )
554 | expect_silent(
555 | covar_Z <- .covar_logit_BEDMatrix( X_BEDMatrix, X_mean, V, m_chunk = 1 )
556 | )
557 | expect_equal( covar_Z, covar_Z_basic )
558 | })
559 |
560 | test_that( "lfa works with BEDMatrix", {
561 | # large d doesn't work in toy data (see first `lfa` tests above for notes)
562 | for ( d in 1 : (n_ind/2) ) {
563 | # essentially the previously-tested version, no need to retest
564 | LFs <- lfa( X = X, d = d )
565 | # new version for BEDMatrix
566 | expect_silent(
567 | LFs2 <- lfa( X = X_BEDMatrix, d = d )
568 | )
569 | # signs vary randomly, but otherwise should match!
570 | expect_equal( abs(LFs), abs(LFs2) )
571 | }
572 | })
573 |
574 | test_that( "af works with BEDMatrix", {
575 | for ( d in 1 : (n_ind/2) ) {
576 | # setup data
577 | #d <- 3
578 | LFs <- lfa( X = X, d = d )
579 | # get ordinary `af` output
580 | P_basic <- af( X = X, LF = LFs )
581 | # get BEDMatrix version
582 | expect_silent(
583 | P_BM <- af( X = X_BEDMatrix, LF = LFs )
584 | )
585 | expect_equal( P_basic, P_BM )
586 | }
587 | })
588 |
test_that( "pca_af works with BEDMatrix", {
    # in all these cases dimensions are so small only fast.svd version is run,
    # so all d possible values should work.
    # `seq_len(n_ind)` is the safe equivalent of `1 : n_ind`
    for ( d in seq_len( n_ind ) ) {
        # get ordinary `pca_af` output
        P_basic <- pca_af( X = X, d = d )
        # get BEDMatrix version
        expect_silent(
            P_BM <- pca_af( X = X_BEDMatrix, d = d )
        )
        # both backends must agree exactly
        expect_equal( P_basic, P_BM )
    }
})
601 |
test_that( "sHWE works with BEDMatrix", {
    # number of latent factors to estimate from the full data
    n_factors <- 3
    lf_mat <- lfa( X = X, d = n_factors )
    # a single null replicate suffices for this comparison
    n_null <- 1

    # in-memory version, seeded so the random draws are reproducible
    set.seed( 1 )
    pv_mat <- sHWE( X = X, LF = lf_mat, B = n_null )

    # BEDMatrix version; reset the seed so the same draws are reproduced
    set.seed( 1 )
    expect_silent(
        pv_bm <- sHWE( X = X_BEDMatrix, LF = lf_mat, B = n_null )
    )
    # both backends must yield identical p-values
    expect_equal( pv_mat, pv_bm )

    # re-randomize the RNG when done
    set.seed( NULL )
})
623 |
624 | # delete temporary data when done
625 | genio::delete_files_plink( file_bed )
626 | }
627 |
--------------------------------------------------------------------------------
/vignettes/lfa.Rnw:
--------------------------------------------------------------------------------
1 | \documentclass[10pt]{article}
2 |
3 | %\VignetteEngine{knitr::knitr}
4 | %\VignetteIndexEntry{lfa Package}
5 |
6 | \usepackage{fullpage}
7 | \usepackage{hyperref}
8 |
9 | \title{Logistic Factor Analysis Vignette}
10 | \author{Wei Hao, Minsun Song, John D. Storey}
11 | \date{\today}
12 |
13 | \begin{document}
14 | \maketitle
15 |
16 | \section{Introduction}
17 |
This package implements Logistic Factor Analysis
(LFA)~\cite{hao_probabilistic_2016}. Briefly, LFA fits a latent variable model
on categorical data (i.e.\ SNP genotypes coded as 0, 1, and 2) by modeling the
logit-transformed binomial parameters in terms of latent variables. The
resulting ``logistic factors'' are analogous to principal components, but fit
into a convenient likelihood-based model. As a result, the logistic factors can
power a number of other analyses.
24 |
25 | \section{Sample usage}
26 |
27 | We include a sample real dataset with the package as the variable
28 | \texttt{hgdp\_subset}---a small subset of the HGDP genotypes. The row
29 | names are the rsids for the SNPs and the column names are coarse
30 | geographical labels for the individuals.
31 |
<<>>=
33 | library(lfa)
34 | dim(hgdp_subset)
35 | @
36 |
37 | \subsection{\texttt{lfa}}
38 |
39 | The \texttt{lfa} function has two required arguments. The first is the
40 | genotype matrix, and the second is the number of logistic factors
41 | including the intercept.
42 |
<<>>=
44 | LF <- lfa(hgdp_subset, 4)
45 | dim(LF)
46 | head(LF)
47 | @
48 |
49 | We can plot the first two logistic factors and color by geographical
50 | information:
51 |
<<>>=
53 | dat <- data.frame(LF[,1], LF[,2], colnames(hgdp_subset))
54 | colnames(dat) = c("LF1", "LF2", "geo")
55 | library(ggplot2)
56 | ggplot(dat, aes(LF1, LF2, color=geo)) + geom_point() + theme_bw() +
57 | coord_fixed(ratio=(max(dat[,1])-min(dat[,1]))/(max(dat[,2])-min(dat[,2])))
58 | @
59 |
60 | One aspect of \texttt{lfa} is that the return value is a matrix of
61 | logistic factors, thus, an important part of subsequent analysis is to
62 | keep your matrix of logistic factors to pass as an argument.
63 |
64 | \subsection{\texttt{af}}
65 |
66 | Given a genotype matrix and logistic factors, the \texttt{af} function
67 | computes the individual-specific allele frequencies
68 |
<<>>=
70 | allele_freqs <- af(hgdp_subset, LF)
71 | allele_freqs[1:5, 1:5]
72 | @
73 |
74 | Since the calculation is independent at each locus, you can pass a
75 | subset of the genotype matrix as an argument if you aren't interested
76 | in all the SNPs.
77 |
<<>>=
79 | subset <- af(hgdp_subset[15:25,], LF)
80 | subset[1:5,1:5]
81 | @
82 |
Given the allele frequencies, you can do some other interesting
calculations---for example, compute the negative log-likelihood for each SNP.
85 |
<<>>=
87 | ll <- function(snp, af){
88 | -sum(snp*log(af) + (2-snp)*log(1-af))
89 | }
90 | log_lik <- sapply(1:nrow(hgdp_subset), function(i) {ll(hgdp_subset[i,],
91 | allele_freqs[i,])})
92 | which(max(log_lik) == log_lik)
93 | @
94 |
95 | \section{Data Input}
96 |
97 | The best way to load genotypes is by using the function \texttt{read\_plink},
98 | from the \texttt{genio} package, which assumes that you have binary PLINK
99 | formatted genotypes. The binary PLINK format uses files: a \texttt{.bed} for
100 | the genotypes, a \texttt{.bim} for the genotype information, and a
101 | \texttt{.fam} for the individuals information.
102 | \texttt{read\_plink} takes as an argument the prefix for your three files.
103 | A \texttt{BEDMatrix} object (from the eponymous function and package) is also
104 | supported, and can result in reduced memory usage (at a small runtime penalty).
105 |
106 | \bibliographystyle{plain}
107 | \bibliography{lfa}
108 |
109 | \end{document}
110 |
111 |
--------------------------------------------------------------------------------
/vignettes/lfa.bib:
--------------------------------------------------------------------------------
1 |
2 | @article{hao_probabilistic_2016,
3 | title = {Probabilistic models of genetic variation in structured populations applied to global human studies},
4 | volume = {32},
5 | issn = {1367-4811},
6 | doi = {10.1093/bioinformatics/btv641},
7 | language = {eng},
8 | number = {5},
9 | journal = {Bioinformatics},
10 | author = {Hao, Wei and Song, Minsun and Storey, John D.},
11 | month = mar,
12 | year = {2016},
13 | pmid = {26545820},
14 | pmcid = {PMC4795615},
15 | pages = {713--721}
16 | }
17 |
--------------------------------------------------------------------------------