@article{Tizno2019,
abstract = {Whilst the different forms of conventional (charge-based) memories are well suited to their individual roles in computers and other electronic devices, flaws in their properties mean that intensive research into alternative, or emerging, memories continues. In particular, the goal of simultaneously achieving the contradictory requirements of non-volatility and fast, low-voltage (low-energy) switching has proved challenging. Here, we report an oxide-free, floating-gate memory cell based on III-V semiconductor heterostructures with a junctionless channel and non-destructive read of the stored data. Non-volatile data retention of at least 10$^{4}$ s in combination with switching at ≤2.6 V is achieved by use of the extraordinary 2.1 eV conduction band offsets of InAs/AlSb and a triple-barrier resonant tunnelling structure. The combination of low-voltage operation and small capacitance implies intrinsic switching energy per unit area that is 100 and 1000 times smaller than dynamic random access memory and Flash respectively. The device may thus be considered as a new emerging memory with considerable potential.},
author = {Tizno, Ofogh and Marshall, Andrew R J and Fern{\'{a}}ndez-Delgado, Natalia and Herrera, Miriam and Molina, Sergio I and Hayne, Manus},
doi = {10.1038/s41598-019-45370-1},
issn = {2045-2322},
journal = {Scientific Reports},
keywords = {Electrical and electronic engineering,Electronics,Semiconductors,photonics and device physics},
number = {1},
pages = {8950},
publisher = {Nature Publishing Group},
title = {{Room-temperature Operation of Low-voltage, Non-volatile, Compound-semiconductor Memory Cells}},
url = {http://www.nature.com/articles/s41598-019-45370-1},
volume = {9},
year = {2019}
}
@book{Spiegel2008,
abstract = {This third edition covers elementary concepts in algebra, geometry, etc. and more advanced concepts in differential equations and vector analysis. It also expands its section on Probability and Statistics and includes a new section on Financial Mathematics to keep up with the current developments in finance studies as well as in the studies of math and the sciences.},
author = {Spiegel, Murray and Lipschutz, Seymour and Liu, John},
edition = {3},
isbn = {0071548564},
pages = {312},
publisher = {McGraw Hill Professional},
title = {{Schaum's Mathematical Handbook Of Formulas And Tables}},
year = {2008}
}
@article{Tang2007,
author = {Tang, Jonathan},
keywords = {Haskell,Programming,Scheme interpreter},
pages = {138},
title = {{Write Yourself a Scheme in 48 Hours: An Introduction to Haskell through Example}},
url = {https://upload.wikimedia.org/wikipedia/commons/a/aa/Write{\_}Yourself{\_}a{\_}Scheme{\_}in{\_}48{\_}Hours.pdf},
year = {2007}
}
@article{Eriksson2004,
author = {Eriksson, Kenneth and Johnson, Claes and Estep, Donald},
doi = {10.1007/978-3-662-05800-8_11},
journal = {Applied Mathematics: Body and Soul},
pages = {911--928},
title = {{Double Integrals}},
year = {2004}
}
@article{Lacroix2018,
abstract = {The problem of Knowledge Base Completion can be framed as a 3rd-order binary tensor completion problem. In this light, the Canonical Tensor Decomposition (CP) (Hitchcock, 1927) seems like a natural solution; however, current implementations of CP on standard Knowledge Base Completion benchmarks are lagging behind their competitors. In this work, we attempt to understand the limits of CP for knowledge base completion. First, we motivate and test a novel regularizer, based on tensor nuclear {\$}p{\$}-norms. Then, we present a reformulation of the problem that makes it invariant to arbitrary choices in the inclusion of predicates or their reciprocals in the dataset. These two methods combined allow us to beat the current state of the art on several datasets with a CP decomposition, and obtain even better results using the more advanced ComplEx model.},
archivePrefix = {arXiv},
arxivId = {1806.07297},
author = {Lacroix, Timoth{\'{e}}e and Usunier, Nicolas and Obozinski, Guillaume},
eprint = {1806.07297},
month = {jun},
title = {{Canonical Tensor Decomposition for Knowledge Base Completion}},
url = {http://arxiv.org/abs/1806.07297},
year = {2018}
}
@article{Ungar1991,
author = {Ungar, David},
title = {{Organizing Programs Without Classes}},
year = {1991}
}
@article{Aron1997,
abstract = {A practical methodology is presented for creating closeness in an experimental context. Whether or not an individual is in a relationship, particular pairings of individuals in the relationship, an...},
author = {Aron, Arthur and Melinat, Edward and Aron, Elaine N and Vallone, Robert Darrin and Bator, Renee J},
doi = {10.1177/0146167297234003},
issn = {0146-1672},
journal = {Personality and Social Psychology Bulletin},
number = {4},
pages = {363--377},
publisher = {Sage Publications},
title = {{The Experimental Generation of Interpersonal Closeness: A Procedure and Some Preliminary Findings}},
url = {http://journals.sagepub.com/doi/10.1177/0146167297234003},
volume = {23},
year = {1997}
}
@inproceedings{Kodratoff1990,
abstract = {Among the several misunderstandings about Program Synthesis (PS), we particularly examine the one relative to Logic Programming alleged to have solved this problem. We exemplify how it is indeed quite possible to write down specifications in PROLOG. Nevertheless, well-known theoretical reasons limit this possibility, and we provide a detailed analysis of the practical reasons why a formal specification may be hard to program in PROLOG. All that contributes to the clarification of the exact role of PS in AI and in Software Engineering, and its possible application to software certification.},
author = {Kodratoff, Yves and Franova, Marta and Partridge, Derek},
booktitle = {Systems Integration '90. Proceedings of the First International Conference on Systems Integration},
doi = {10.1109/ICSI.1990.138700},
isbn = {0-8186-9027-5},
keywords = {certification cycle,inductive theorem proving,program synthesis from formal specifications},
pages = {346--355},
publisher = {IEEE Comput. Soc. Press},
title = {{Logic Programming and Program Synthesis}},
url = {http://ieeexplore.ieee.org/document/138700/},
year = {1990}
}
@book{Diehl2020,
author = {Diehl, Stephen},
title = {{What I Wish I Knew When Learning Haskell}},
url = {https://github.com/sdiehl/wiwinwlh},
year = {2020}
}
@article{Segal2019,
abstract = {The prevalence of e-learning systems and on-line courses has made educational material widely accessible to students of varying abilities and backgrounds. There is thus a growing need to accommodate for individual differences in e-learning systems. This paper presents an algorithm called EduRank for personalizing educational content to students that combines a collaborative filtering algorithm with voting methods. EduRank constructs a difficulty ranking for each student by aggregating the rankings of similar students using different aspects of their performance on common questions. These aspects include grades, number of retries, and time spent solving questions. It infers a difficulty ranking directly over the questions for each student, rather than ordering them according to the student's predicted score. The EduRank algorithm was tested on two data sets containing thousands of students and a million records. It was able to outperform the state-of-the-art ranking approaches as well as a domain expert. EduRank was used by students in a classroom activity, where a prior model was incorporated to predict the difficulty rankings of students with no prior history in the system. It was shown to lead students to solve more difficult questions than an ordering by a domain expert, without reducing their performance.},
archivePrefix = {arXiv},
arxivId = {1907.12047},
author = {Segal, Avi and Gal, Kobi and Shani, Guy and Shapira, Bracha},
doi = {10.1016/j.ijhcs.2019.07.002},
eprint = {1907.12047},
month = {jul},
title = {{A difficulty ranking approach to personalization in E-learning}},
url = {http://arxiv.org/abs/1907.12047 http://dx.doi.org/10.1016/j.ijhcs.2019.07.002},
year = {2019}
}
@article{soulmachine2017,
abstract = {This cheat sheet is a condensed version of machine learning manual, which contains many classical equations and diagrams on machine learning, and aims to help you quickly recall knowledge and ideas in machine learning. This cheat sheet has two significant advantages: 1. Clearer symbols. Mathematical formulas use quite a lot of confusing symbols. For example, X can be a set, a random variable, or a matrix. This is very confusing and makes it very difficult for readers to understand the meaning of math formulas. This cheat sheet tries to standardize the usage of symbols, and all symbols are clearly pre-defined, see section {\S}. 2. Less thinking jumps. In many machine learning books, authors omit some intermediary steps of a mathematical proof process, which may save some space but causes difficulty for readers to understand this formula and readers get lost in the middle way of the derivation process. This cheat sheet tries to keep important intermediary steps as where as possible},
pages = {135},
title = {{Machine Learning Cheat Sheet: classical equations, diagrams and tricks in machine learning}},
url = {https://github.com/soulmachine/machine-learning-cheat-sheet},
year = {2017}
}
@article{Bose2006,
abstract = {We study the problem of reconstruction of a high-resolution image from several blurred low-resolution image frames. The image frames consist of blurred, decimated, and noisy versions of a high-resolution image. The high-resolution image is modeled as a Markov random field (MRF), and a maximum a posteriori (MAP) estimation technique is used for the restoration. We show that with the periodic boundary condition, a high-resolution image can be restored efficiently by using fast Fourier transforms. We also apply the preconditioned conjugate gradient method to restore high-resolution images in the aperiodic boundary condition. Computer simulations are given to illustrate the effectiveness of the proposed approach.},
author = {Bose, Nirmal K and Ng, Michael K and Yau, Andy C},
doi = {10.1155/ASP/2006/35726},
issn = {11108657},
journal = {Eurasip Journal on Applied Signal Processing},
pages = {1--14},
title = {{A fast algorithm for image super-resolution from blurred observations}},
volume = {2006},
year = {2006}
}
@article{Turek1999,
author = {Turek, Stefan},
doi = {10.1007/978-3-642-58393-3_3},
pages = {97--280},
title = {{Other mathematical components}},
year = {1999}
}
@book{Ayres,
author = {Ayres, F and Jaisingh, L R},
edition = {2},
isbn = {0-07-140327-2},
title = {{Schaum's outline of theory and problems of abstract algebra}},
url = {https://books.google.com.uy/books?id=U0r4{\%}5C{\_}S7eeDYC}
}
@article{Ly2018,
abstract = {Atrophy of neurons in the prefrontal cortex (PFC) plays a key role in the pathophysiology of depression and related disorders. The ability to promote both structural and functional plasticity in the PFC has been hypothesized to underlie the fast-acting antidepressant properties of the dissociative anesthetic ketamine. Here, we report that, like ketamine, serotonergic psychedelics are capable of robustly increasing neuritogenesis and/or spinogenesis both in vitro and in vivo. These changes in neuronal structure are accompanied by increased synapse number and function, as measured by fluorescence microscopy and electrophysiology. The structural changes induced by psychedelics appear to result from stimulation of the TrkB, mTOR, and 5-HT2A signaling pathways and could possibly explain the clinical effectiveness of these compounds. Our results underscore the therapeutic potential of psychedelics and, importantly, identify several lead scaffolds for medicinal chemistry efforts focused on developing plasticity-promoting compounds as safe, effective, and fast-acting treatments for depression and related disorders. Ly et al. demonstrate that psychedelic compounds such as LSD, DMT, and DOI increase dendritic arbor complexity, promote dendritic spine growth, and stimulate synapse formation. These cellular effects are similar to those produced by the fast-acting antidepressant ketamine and highlight the potential of psychedelics for treating depression and related disorders.},
author = {Ly, Calvin and Greb, Alexandra C and Cameron, Lindsay P and Wong, Jonathan M and Barragan, Eden V and Wilson, Paige C and Burbach, Kyle F and {Soltanzadeh Zarandi}, Sina and Sood, Alexander and Paddy, Michael R and Duim, Whitney C and Dennis, Megan Y and McAllister, A Kimberley and Ori-McKenney, Kassandra M and Gray, John A and Olson, David E},
doi = {10.1016/j.celrep.2018.05.022},
issn = {22111247},
journal = {Cell Reports},
keywords = {DMT,LSD,MDMA,depression,ketamine,neural plasticity,noribogaine,psychedelic,spinogenesis,synaptogenesis},
number = {11},
pages = {3170--3182},
publisher = {Elsevier},
title = {{Psychedelics Promote Structural and Functional Neural Plasticity}},
url = {https://doi.org/10.1016/j.celrep.2018.05.022},
volume = {23},
year = {2018}
}
@article{Margalit2019,
author = {Margalit, Dan and Rabinoff, Joseph},
pages = {1--9},
title = {{Interactive Linear Algebra}},
year = {2019}
}
@book{Tanenbaum2008,
abstract = {The widely anticipated revision of this worldwide best-seller incorporates the latest developments in operating systems (OS) technologies. The Third Edition includes up-to-date materials on relevant OS such as Linux, Windows, and embedded real-time and multimedia systems. Tanenbaum also provides information on current research based on his experience as an operating systems researcher.},
author = {Tanenbaum, Andrew S},
edition = {3},
isbn = {9780135013014},
pages = {1076},
publisher = {Pearson College Division},
title = {{Modern Operating Systems}},
year = {2008}
}
@article{Chlipala,
author = {Chlipala, A},
title = {{An Introduction to Programming and Proving with Dependent Types in Coq}}
}
@book{Siyavulab,
editor = {Siyavula},
title = {{Mathematics Grade 11 Teachers}}
}
@article{Morse2009,
abstract = {Trace diagrams are structured graphs with edges labeled by matrices. Each diagram has an interpretation as a particular multilinear function. We provide a rigorous combinatorial definition of these diagrams using a notion of signed graph coloring, and prove that they may be efficiently represented in terms of matrix minors. Using this viewpoint, we provide new proofs of several standard determinant formulas and a new generalization of the Jacobi determinant theorem.},
archivePrefix = {arXiv},
arxivId = {0903.1373v1},
author = {Morse, Steven and Peterson, Elisha},
doi = {10.2140/involve.2010.3.33},
eprint = {0903.1373v1},
journal = {arXiv preprint arXiv:0903.1373},
keywords = {combinatorics},
number = {1},
pages = {1--39},
title = {{Trace Diagrams, Matrix Minors, and Determinant Identities}},
url = {http://arxiv.org/abs/0903.1373},
year = {2009}
}
@book{Tanenbaum2012,
abstract = {Structured Computer Organization, specifically written for undergraduate students, is a best-selling guide that provides an accessible introduction to computer hardware and architecture. This text will also serve as a useful resource for all computer professionals and engineers who need an overview or introduction to computer architecture. This book takes a modern structured, layered approach to understanding computer systems. It's highly accessible - and it's been thoroughly updated to reflect today's most critical new technologies and the latest developments in computer organization and architecture. Tanenbaum's renowned writing style and painstaking research make this one of the most accessible and accurate books available, maintaining the author's popular method of presenting a computer as a series of layers, each one built upon the ones below it, and understandable as a separate entity.},
author = {Tanenbaum, Andrew S and Austin, Todd},
edition = {6},
isbn = {9780133061796},
pages = {800},
publisher = {Pearson Education},
title = {{Structured Computer Organization}},
year = {2012}
}
@book{Seitz2014,
abstract = {Python hacker. Those are two words you really could use to describe me. At Immunity, I am lucky enough to work with people who actually, really, know how to code Python. I am not one of those people. I spend a great deal of my time penetration testing, and that requires rapid Python tool development, with a focus on execution and delivering results (not necessarily on prettiness, optimization, or even stability). Throughout this book you will learn that this is how I code, but I also feel as though it is part of what makes me a strong pentester. I hope that this philosophy and style helps you as well. As you progress through the book, you will also realize that I don't take deep dives on any single topic. This is by design. I want to give you the bare minimum, with a little flavor, so that you have some foundational knowledge. With that in mind, I've sprinkled ideas and homework assignments throughout the book to kickstart you in your own direction. I encourage you to explore these ideas, and I would love to hear back any of your own implementations, tooling, or homework assignments that you have done. As with any technical book, readers at different skill levels with Python (or information security in general) will experience this book differently. Some of you may simply grab it and nab chapters that are pertinent to a consulting gig you are on, while others may read it cover to cover. I would recommend that if you are a novice to intermediate Python programmer that you start at the beginning of the book and read it straight through in order. You will pick up some good building blocks along the way.},
author = {Seitz, Justin},
doi = {10.1016/S1353-4858(15)30025-8},
isbn = {1593275900},
pages = {192},
title = {{Black Hat Python: Python Programming for Hackers and Pentesters}},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1353485815300258},
year = {2014}
}
@book{Daschner2017,
abstract = {Find out how to craft effective, business-oriented Java EE 8 applications that target customer's demands in the age of Cloud platforms and container technology. About This Book Understand the principles of modern Java EE and how to realize effective architectures Gain knowledge of how to design enterprise software in the age of automation, Continuous Delivery and Cloud platforms Learn about the reasoning and motivations behind state-of-the-art enterprise Java technology, that focuses on business Who This Book Is For This book is for experienced Java EE developers who are aspiring to become the architects of enterprise-grade applications, or software architects who would like to leverage Java EE to create effective blueprints of applications. What You Will Learn What enterprise software engineers should focus on Implement applications, packages, and components in a modern way Design and structure application architectures Discover how to realize technical and cross-cutting aspects Get to grips with containers and container orchestration technology Realize zero-dependency, 12-factor, and Cloud-native applications Implement automated, fast, reliable, and maintainable software tests Discover distributed system architectures and their requirements In Detail Java EE 8 brings with it a load of features, mainly targeting newer architectures such as microservices, modernized security APIs, and cloud deployments. This book will teach you to design and develop modern, business-oriented applications using Java EE 8. It shows how to structure systems and applications, and how design patterns and Domain Driven Design aspects are realized in the age of Java EE 8. You will learn about the concepts and principles behind Java EE applications, and how to effect communication, persistence, technical and cross-cutting concerns, and asynchronous behavior. This book covers Continuous Delivery, DevOps, infrastructure-as-code, containers, container orchestration technologies, such as Docker and Kubernetes, and why and especially how Java EE fits into this world. It also covers the requirements behind containerized, zero-dependency applications and how modern Java EE application servers support these approaches. You will also learn about automated, fast, and reliable software tests, in different test levels, scopes, and test technologies. This book covers the prerequisites and challenges of distributed systems that lead to microservice, shared-nothing architectures. The challenges and solutions of consistency versus scalability will further lead us to event sourcing, event-driven architectures, and the CQRS principle. This book also includes the nuts and bolts of application performance as well as how to realize resilience, logging, monitoring and tracing in a modern enterprise world. Last but not least the demands of securing enterprise systems are covered. By the end, you will understand the ins and outs of Java EE so that you can make critical design decisions that not only live up to, but also surpass your clients' expectations. Style and approach This book focuses on solving business problems and meeting customer demands in the enterprise world. It covers how to create enterprise applications with reasonable technology choices, free of cargo-cult and over-engineering. The aspects shown in this book not only demonstrate how to realize a certain solution, but also explain its motivations and reasoning.},
author = {Daschner, Sebastian},
isbn = {1788393856},
pages = {442},
publisher = {Packt Publishing},
title = {{Architecting modern Java EE applications}},
year = {2017}
}
@book{Bhargava2016a,
abstract = {Grokking Algorithms is a fully illustrated, friendly guide that teaches you how to apply common algorithms to the practical problems you face every day as a programmer. You'll start with sorting and searching and, as you build up your skills in thinking algorithmically, you'll tackle more complex concerns such as data compression and artificial intelligence. Each carefully presented example includes helpful diagrams and fully annotated code samples in Python.},
author = {Bhargava, Aditya},
isbn = {9781617292231},
keywords = {Breadth-first search,Dijkstra's algorithm,Dynamic programming,Greedy algorithms,Hash tables,Introduction to algorithms,K-nearest neighbors,Quicksort,Recursion,Selection sort},
mendeley-tags = {Breadth-first search,Dijkstra's algorithm,Dynamic programming,Greedy algorithms,Hash tables,Introduction to algorithms,K-nearest neighbors,Quicksort,Recursion,Selection sort},
pages = {256},
publisher = {Manning Publications},
title = {{Grokking Algorithms: An illustrated guide for programmers and other curious people}},
year = {2016}
}
@article{Ashmore2016,
abstract = {Given published success stories from Netflix, Amazon, and many others; many companies are adopting microservices architecture. In fact, the published success at microservices for some major companies has started a fad. For organizations that are heavily invested in Java technologies, writing microservices using Java is a natural progression.},
author = {Ashmore, Derek C},
pages = {105},
publisher = {DVT Press},
title = {{Microservices for Java EE Architects: Addendum for The Java EE Architect's Handbook}},
year = {2016}
}
@article{Herbert1999,
abstract = {Software architectures address the high-level specification, design and analysis of software systems. Formal models can provide the essential underpinning for architectural description languages (ADLs), and formal techniques can play an important role in systems analysis. While formal models and formal analysis may always enhance conventional notations and methods, they are of the greatest benefit when they employ tractable models and efficient, mechanizable techniques. The novelty in our work has been in our effort to find and mechanize a general semantic framework for software architectures that can provide tractable models and support architectural formal analysis. The resultant semantic framework is a layered one: the core is a simple model of elements and topology, which provides the basis for general architectural theorems and proof techniques; the structural core is augmented by semantic layers representing the semantics of relevant properties of the design. The model has been implemented in the higher-order logic proof tool PVS (Prototype Verification System), and has been used in correctness proofs during a case study of a distributed transaction protocol.},
author = {Herbert, John and Dutertre, Bruno and Riemenschneider, Robert and Stavridou, Victoria},
doi = {10.1007/3-540-48119-2_9},
isbn = {3540665870},
issn = {16113349},
pages = {116--133},
title = {{A formalization of Software Architecture}},
year = {1999}
}
@techreport{Bottino1994,
abstract = {The interest in the use of Logic Programming in education stems from the great enthusiasm for Prolog that arose in the late seventies. However, Prolog is neither the purest Logic Programming language nor the easiest language for novices to come to terms with-but the opportunity to introduce very powerful ideas by teaching students to program in Prolog has been seized by many teachers at all levels of education. The continued success of Logic Programming depends in part on improved support for learning Prolog. Therefore, we concentrate here on issues connected with the development of a more integrated and complete view of learning and teaching Prolog.},
author = {Bottino, R M and Forcheri, P and Molfino, M T},
title = {{Logic Programming in Education: a Perspective on the State of the Art}},
year = {1994}
}
@article{Turner2018,
abstract = {We introduce the Metropolis-Hastings generative adversarial network (MH-GAN), which combines aspects of Markov chain Monte Carlo and GANs. The MH-GAN draws samples from the distribution implicitly defined by a GAN's discriminator-generator pair, as opposed to sampling in a standard GAN which draws samples from the distribution defined by the generator. It uses the discriminator from GAN training to build a wrapper around the generator for improved sampling. With a perfect discriminator, this wrapped generator samples from the true distribution on the data exactly even when the generator is imperfect. We demonstrate the benefits of the improved generator on multiple benchmark datasets, including CIFAR-10 and CelebA, using DCGAN and WGAN.},
archivePrefix = {arXiv},
arxivId = {1811.11357},
author = {Turner, Ryan and Hung, Jane and Saatci, Yunus and Yosinski, Jason},
eprint = {1811.11357},
number = {NeurIPS},
pages = {1--10},
title = {{Metropolis-Hastings Generative Adversarial Networks}},
url = {http://arxiv.org/abs/1811.11357},
year = {2018}
}
@article{Curtis,
author = {Curtis, Sharon and Lowe, Gavin},
title = {{A Graphical Calculus}}
}
@book{Kernighan1988,
abstract = {Introduces the features of the C programming language, discusses data types, variables, operators, control flow, functions, pointers, arrays, and structures, and looks at the UNIX system interface.},
author = {Kernighan, Brian W and Ritchie, Dennis M},
edition = {2},
isbn = {0131103709},
keywords = {alloc argc argument argv arithmetic array assignme},
mendeley-tags = {alloc argc argument argv arithmetic array assignme},
publisher = {Prentice Hall},
title = {{C Programming Language}},
year = {1988}
}
@article{Williams2010,
author = {Williams, Michael Peretzian},
journal = {Johns Hopkins APL Technical Digest},
number = {4},
pages = {354--363},
title = {{Solving Polynomial Equations Using Linear Algebra}},
url = {https://www.jhuapl.edu/Content/techdigest/pdf/V28-N04/28-04-Williams.pdf},
volume = {28},
year = {2010}
}
@article{Kriesel2007,
abstract = {Originally, this work has been prepared in the framework of a seminar of the University of Bonn in Germany, but it has been and will be extended (after being presented and published online under www.dkriesel.com on 5/27/2005). First and foremost, to provide a comprehensive overview of the subject of neural networks and, second, just to acquire more and more knowledge about LATEX. And who knows – maybe one day this summary will become a real preface!},
author = {Kriesel, David},
edition = {2},
title = {{A Brief Introduction to Neural Networks}},
url = {http://www.dkriesel.com},
year = {2007}
}
@book{Shapiro2015,
abstract = {Treatment of hair disorders has progressed considerably over time. More patients are now interested in hair care and some are bombarded by promising advertisements. In reality, hair disorders may be complex and require accurate diagnosis for suitable treatment. Hair Loss and Restoration provides an extensive look at the practical management, both medical and surgical, of all forms of hair loss. Proper examination of the patient with hair loss is discussed in depth as is androgenetic alopecia, the most common cause of hair loss. The autoimmune disease alopecia areata is examined comprehensively, including its pathogenesis, clinical features, differential diagnosis, and treatment. This edition also covers new developments on the diagnosis and treatment of the disease. Hair loss from drugs and radiation is reviewed along with other topics such as telogen effluvium and frontal fibrosing alopecia—an increasing concern in scarring hair loss. Hair restoration surgery is described in detail and an excellent review of what is available from a nonmedical approach to hair loss is provided. Extensively referenced and illustrated with more than 300 clinical color photographs, this compact and easy-to-read book is a valuable resource for both doctors and patients.},
author = {Shapiro, Jerry and Otberg, Nina},
booktitle = {Academic Medicine},
doi = {10.1201/b18330},
edition = {2},
isbn = {9780429160592},
pages = {232},
publisher = {CRC Press},
title = {{Hair Loss and Restoration}},
url = {https://www.taylorfrancis.com/books/9781482231991},
year = {2015}
}
@techreport{Goss,
author = {Nuzzo-Jones, Goss and Walonoski, Jason A and Heffernan, Neil T and Livak, Tom},
title = {{The eXtensible Tutor Architecture: A New Foundation for ITS}}
}
@article{Ousterhout2010,
abstract = {Disk-oriented approaches to online storage are becoming increasingly problematic: they do not scale gracefully to meet the needs of large-scale Web applications, and improvements in disk capacity have far out-stripped improvements in access latency and bandwidth. This paper argues for a new approach to datacenter storage called RAMCloud, where information is kept entirely in DRAM and large-scale systems are created by aggregating the main memories of thousands of commodity servers. We believe that RAMClouds can provide durable and available storage with 100-1000x the throughput of disk-based systems and 100-1000x lower access latency. The combination of low latency and large scale will enable a new breed of data-intensive applications.},
author = {Ousterhout, John and Rosenblum, Mendel and Rumble, Stephen M and Stratmann, Eric and Stutsman, Ryan and Agrawal, Parag and Erickson, David and Kozyrakis, Christos and Leverich, Jacob and Mazi{\`{e}}res, David and Mitra, Subhasish and Narayanan, Aravind and Parulkar, Guru},
doi = {10.1145/1713254.1713276},
issn = {01635980},
journal = {ACM SIGOPS Operating Systems Review},
number = {4},
pages = {92},
title = {{The case for RAMClouds}},
url = {http://portal.acm.org/citation.cfm?doid=1713254.1713276},
volume = {43},
year = {2010}
}
@techreport{Eisenstein,
abstract = {Design problems involve issues of stylistic preference and flexible standards of success; human designers often proceed by intuition and are unaware of following any strict rule-based procedures. These features make design tasks especially difficult to automate. Adaptation is proposed as a means to overcome these challenges. We describe a system that applies an adaptive algorithm to automated user interface design within the framework of the MOBI-D (Model-Based Interface Designer) interface development environment. Preliminary experiments indicate that adaptation improves the performance of the automated user interface design system.},
author = {Eisenstein, Jacob and Puerta, Angel},
keywords = {Model-based interface development,machine learning,decision trees,theory refinement,user interface development tools,interface models},
title = {{Adaptation in Automated User-Interface Design}}
}
@book{Cometti2000a,
abstract = {Starting from a statement common to the human and biological sciences, the individual has a structure that brings energy into play and mobilizes it. This structure is made up of levers, joints and muscles, but only the muscles are elements on which training can act directly. When a muscle works it produces force, which depends on its stretch. The book, Los M{\'{e}}todos Modernos de Musculaci{\'{o}}n, is divided into two parts. The first, covering the theoretical foundations, deals with: the mechanisms of force (structural and nervous factors); the methods for developing force (maximum-force methods, the repetition method, the dynamic method, the pyramid method); and the contraction regimes (isometric, anisometric, eccentric and plyometric). The second part, covering practical material, deals with: methods for developing muscle mass (post-fatigue methods, planning of the methods); concentric methods (Zatsiorsky's logic, the Bulgarian method, the principle of ascending and descending load); isometric methods (particular features of the isometric method, planning of the isometric method); eccentric methods (the contrast principle, pre-fatigue); plyometric methods (particular features and planning); electrostimulation (training by electrostimulation, immediate effect, delayed effect); combined methods; and concrete examples applied to different sports. The whole book is clearly illustrated with numerous tables, diagrams and graphs.},
author = {Cometti, Gilles},
isbn = {8499108563},
pages = {294},
publisher = {Paidotribo},
title = {{Los M{\'{e}}todos Modernos de Musculaci{\'{o}}n}},
year = {2000}
}
@article{Kohler2018,
abstract = {Normalization helps us find a database schema at design time that can process the most frequent updates efficiently at run time. Unfortunately, relational normalization only works for idealized database instances in which duplicates and null markers are not present. On one hand, these features occur frequently in real-world data compliant with the industry standard SQL, and especially in modern application domains. On the other hand, the features impose challenges that make it difficult to extend the existing forty year old normalization framework to SQL, and any current extensions are fairly limited. We introduce a new class of functional dependencies and show that they provide the right notion for SQL schema design. Axiomatic and linear-time algorithmic characterizations of the associated implication problem are established. These foundations enable us to propose a Boyce–Codd normal form for SQL. We justify the normal form by showing that it permits precisely those SQL instances which are free from data redundancy. Unlike the relational case, there are SQL schemata that cannot be converted into Boyce–Codd normal form. Nevertheless, for an expressive sub-class of our functional dependencies we establish a normalization algorithm that always produces a schema in Value-Redundancy free normal form. This normal form permits precisely those instances which are free from any redundant data value occurrences other than the null marker. Experiments show that our functional dependencies occur frequently in real-world data and that they are effective in eliminating redundant values from these data sets without loss of information.},
author = {K{\"{o}}hler, Henning and Link, Sebastian},
doi = {10.1016/j.is.2018.04.001},
isbn = {9781450335317},
issn = {03064379},
journal = {Information Systems},
keywords = {Armstrong database,Axioms,Boyce–Codd normal form,Data redundancy,Database schema design,Functional dependency,Normalization,Reasoning,Update anomaly},
pages = {88--113},
title = {{SQL Schema Design: Foundations, Normal Forms, and Normalization}},
volume = {76},
year = {2018}
}
@book{Milewski2019,
author = {Milewski, Bartosz},
title = {{Category Theory for Programmers}},
url = {https://github.com/hmemcpy/milewski-ctfp-pdf},
year = {2019}
}
@book{Martin2011,
author = {Martin, John C},
isbn = {9780073191461},
title = {{Introduction to Languages and the Theory of Computation}},
year = {2011}
}
@book{Alcock2013,
author = {Alcock, Lara},
isbn = {0199661316},
publisher = {Oxford University Press},
title = {{How To Study As A Mathematics Major}},
year = {2013}
}
@book{MacKay2003,
abstract = {Information theory and inference, often taught separately, are here united in one entertaining textbook. These topics lie at the heart of many exciting areas of contemporary science and engineering - communication, signal processing, data mining, machine learning, pattern recognition, computational neuroscience, bioinformatics, and cryptography. This textbook introduces theory in tandem with applications. Information theory is taught alongside practical communication systems, such as arithmetic coding for data compression and sparse-graph codes for error-correction. A toolbox of inference techniques, including message-passing algorithms, Monte Carlo methods, and variational approximations, are developed alongside applications of these tools to clustering, convolutional codes, independent component analysis, and neural networks. The final part of the book describes the state of the art in error-correcting codes, including low-density parity-check codes, turbo codes, and digital fountain codes -- the twenty-first century standards for satellite communications, disk drives, and data broadcast. Richly illustrated, filled with worked examples and over 400 exercises, some with detailed solutions, David MacKay's groundbreaking book is ideal for self-learning and for undergraduate or graduate courses. Interludes on crosswords, evolution, and sex provide entertainment along the way. In sum, this is a textbook on information, communication, and coding for a new generation of students, and an unparalleled entry point into these subjects for professionals in areas as diverse as computational biology, financial engineering, and machine learning.},
author = {MacKay, David J C},
booktitle = {Advanced Science Letters},
isbn = {0521642981},
pages = {628},
publisher = {Cambridge University Press},
title = {{Information Theory, Inference and Learning Algorithms}},
year = {2003}
}
@article{Kaliszyk2018,
abstract = {We introduce a theorem proving algorithm that uses practically no domain heuristics for guiding its connection-style proof search. Instead, it runs many Monte-Carlo simulations guided by reinforcement learning from previous proof attempts. We produce several versions of the prover, parameterized by different learning and guiding algorithms. The strongest version of the system is trained on a large corpus of mathematical problems and evaluated on previously unseen problems. The trained system solves within the same number of inferences over 40{\%} more problems than a baseline prover, which is an unusually high improvement in this hard AI domain. To our knowledge this is the first time reinforcement learning has been convincingly applied to solving general mathematical problems on a large scale.},
archivePrefix = {arXiv},
arxivId = {1805.07563},
author = {Kaliszyk, Cezary and Michalewski, Henryk and Urban, Josef and Ol{\v{s}}{\'{a}}k, Mirek},
eprint = {1805.07563},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
title = {{Reinforcement learning of theorem proving}},
year = {2018}
}
@book{Bona2017,
author = {B{\'{o}}na, Mikl{\'{o}}s},
isbn = {9789813148840},
title = {{A Walk Through Combinatorics}},
year = {2017}
}
@book{Ashmore,
author = {Ashmore, Derek C},
title = {{The Java EE Architect's Handbook}}
}
@article{Lample2019,
abstract = {Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.},
archivePrefix = {arXiv},
arxivId = {1912.01412},
author = {Lample, Guillaume and Charton, Fran{\c{c}}ois},
eprint = {1912.01412},
title = {{Deep Learning for Symbolic Mathematics}},
url = {http://arxiv.org/abs/1912.01412},
year = {2019}
}
@techreport{Adiwardana,
abstract = {We present Meena, a multi-turn open-domain chatbot trained end-to-end on data mined and filtered from public domain social media conversations. This 2.6B parameter neural network is trained to minimize perplexity, an automatic metric that we compare against human judgement of multi-turn conversation quality. To capture this judgement, we propose a human evaluation metric called Sensibleness and Specificity Average (SSA), which captures key elements of good conversation. Interestingly , our experiments show strong correlation between perplexity and SSA. The fact that the best perplexity end-to-end trained Meena scores high on SSA (72{\%} on multi-turn evaluation) suggests that a human-level SSA of 86{\%} is potentially within reach if we can better optimize perplexity. Additionally, the full version of Meena (with a filtering mechanism and tuned decoding) scores 79{\%} SSA, 23{\%} higher in absolute SSA than existing chatbots that we evaluated.},
archivePrefix = {arXiv},
arxivId = {2001.09977v1},
author = {Adiwardana, Daniel and Luong, Minh-Thang and So, David R and Hall, Jamie and Fiedel, Noah and Thoppilan, Romal and Yang, Zi and Kulshreshtha, Apoorv and Nemade, Gaurav and Lu, Yifeng and Le, Quoc V},
eprint = {2001.09977v1},
title = {{Towards a Human-like Open-Domain Chatbot}},
year = {2020}
}
@article{Schmidt2018,
author = {Schmidt, Andreas},
number = {1},
pages = {1--66},
title = {{How to build a Search-Engine with Common Unix-Tools}},
url = {http://www.smiffy.de/dbkda-2018/},
year = {2018}
}
@phdthesis{Rosario2017,
abstract = {Our focus is to test a new preprocessing approach that uses resampling, inspired by the bootstrap, combined with data augmentation, by treating each short text as a population and sampling similar words from a semantic space to create a longer text. We use blog post titles collected from the Technorati blog aggregator as experimental data with each title appearing in one of ten categories. We first test how well the raw short texts are classified using a variant of SVM designed specifically for short texts as well as a supervised topic model and an SVM model that uses semantic vectors as features. We then build a semantic space and augment each short text with related terms under a variety of experimental conditions. We test the classifiers on the augmented data and compare performance to the aforementioned baselines. The classifier performance on augmented test sets outperformed the baseline classifiers in most cases.},
author = {Rosario, Ryan Robert},
school = {University of California},
title = {{A Data Augmentation Approach to Short Text Classification}},
year = {2017}
}
@book{Smola,
author = {Smola, A and Vishwanathan, S V N},
isbn = {0 521 82583 0},
title = {{Introduction to Machine Learning}}
}
@article{Schilling2019,
abstract = {The cooling of boiling water all the way down to freezing, by thermally connecting it to a thermal bath held at ambient temperature without external intervention, would be quite unexpected. We describe the equivalent of a “thermal inductor,” composed of a Peltier element and an electric inductance, which can drive the temperature difference between two bodies to change sign by imposing inertia on the heat flowing between them, and enable continuing heat transfer from the chilling body to its warmer counterpart without the need of an external driving force. We demonstrate its operation in an experiment and show that the process can pass through a series of quasi-equilibrium states while fully complying with the second law of thermodynamics. This thermal inductor extends the analogy between electrical and thermal circuits and could serve, with further progress in thermoelectric materials, to cool hot materials well below ambient temperature without external energy supplies or moving parts.},
author = {Schilling, A and Zhang, X and Bossen, O},
doi = {10.1126/sciadv.aat9953},
issn = {2375-2548},
journal = {Science Advances},
number = {4},
pages = {eaat9953},
publisher = {American Association for the Advancement of Science},
title = {{Heat flowing from cold to hot without external intervention by using a “thermal inductor”}},
url = {http://advances.sciencemag.org/lookup/doi/10.1126/sciadv.aat9953},
volume = {5},
year = {2019}
}
@article{Buot2005,
author = {Buot, Max and Richards, Donald},
number = {June},
title = {{Homotopy Continuation Computational Algorithms}},
year = {2005}
}
@article{Meijer1991,
abstract = {We develop a calculus for lazy functional programming based on recursion operators associated with data type definitions. For these operators we derive various algebraic laws that are useful in deriving and manipulating programs. We shall show that all example functions in Bird and Wadler's “Introduction to Functional Programming” can be expressed using these operators.},
author = {Meijer, Erik and Fokkinga, Maarten and Paterson, Ross},
doi = {10.1007/3540543961_7},
isbn = {9783540475996},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {124--144},
title = {{Functional programming with bananas, lenses, envelopes and barbed wire}},
volume = {523 LNCS},
year = {1991}
}
@article{Lamport2009,
abstract = {Summary form only given. Algorithms are different from programs and should not be described with programming languages. For example, algorithms are usually best described in terms of mathematical objects like sets and graphs instead of the primitive objects like bytes and integers provided by programming languages. +CAL is an algorithm language based on TLA+. A +CAL algorithm is translated to a TLA+ specification that can then be checked with the TLC model checker},
author = {Lamport, Leslie},
doi = {10.1007/978-3-642-03466-4_2},
isbn = {3642034659},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {October},
pages = {36--60},
title = {{The PlusCal algorithm language}},
volume = {5684 LNCS},
year = {2009}
}
@inproceedings{Gray1996,
author = {Gray, Jim and Bosworth, A and Layman, A and Pirahesh, H},
booktitle = {Proceedings of the Twelfth International Conference on Data Engineering},
doi = {10.1109/ICDE.1996.492099},
isbn = {0-8186-7240-4},
pages = {152--159},
publisher = {IEEE Comput. Soc. Press},
title = {{Data cube: a relational aggregation operator generalizing GROUP-BY, CROSS-TAB, and SUB-TOTALS}},
url = {http://ieeexplore.ieee.org/document/492099/},
year = {1996}
}
@incollection{Devai2010,
author = {D{\'{e}}vai, Gergely},
doi = {10.1007/978-3-642-17685-2_10},
pages = {354--371},
title = {{Embedding a Proof System in Haskell}},
url = {http://link.springer.com/10.1007/978-3-642-17685-2{\_}10},
year = {2010}
}
@article{Gray,
abstract = {Data analysis applications typically aggregate data across many dimensions looking for anomalies or unusual patterns. The SQL aggregate functions and the GROUP BY operator produce zero-dimensional or one-dimensional aggregates. Applications need the N -dimensional generalization of these operators. This paper defines that operator, called the data cube or simply cube. The cube operator generalizes the histogram, cross-tabulation, roll-up, drill-down, and sub-total constructs found in most report writers. The novelty is that cubes are relations. Consequently, the cube operator can be imbedded in more complex non-procedural data analysis programs. The cube operator treats each of the N aggregation attributes as a dimension of N -space. The aggregate of a particular set of attribute values is a point in this space. The set of points forms an N -dimensional cube. Super-aggregates are computed by aggregating the N -cube to lower dimensional spaces. This paper (1) explains the cube and roll-up operators, (2) shows how they fit in SQL, (3) explains how users can define new aggregate functions for cubes, and (4) discusses efficient techniques to compute the cube. Many of these features are being added to the SQL Standard.},
author = {Gray, Jim},
keywords = {aggregation,analysis,data cube,data mining,database,query,summarization},
title = {{Data Cube: A Relational Aggregation Operator}}
}
@misc{Indyk,
author = {Indyk, Piotr},
title = {{Tutorial on Compressed Sensing (or Compressive Sampling, or Linear Sketching)}}
}
@book{Aho1988,
abstract = {Originally developed by Alfred Aho, Brian Kernighan, and Peter Weinberger in 1977, AWK is a pattern-matching language for writing short programs to perform common data-manipulation tasks. In 1985, a new version of the language was developed, incorporating additional features such as multiple input files, dynamic regular expressions, and user-defined functions. This new version is available for both Unix and MS-DOS. This is the first book on AWK. It begins with a tutorial that shows how easy AWK is to use. The tutorial is followed by a comprehensive manual for the new version of AWK. Subsequent chapters illustrate the language by a range of useful applications, such as: *Retrieving, transforming, reducing, and validating data *Managing small, personal databases *Text processing *Little languages *Experimenting with algorithms The examples illustrate the book's three themes: showing how to use AWK well, demonstrating AWK's versatility, and explaining how common computing operations are done. In addition, the book contains two appendixes: a summary of the language, and answers to selected exercises.},
author = {Aho, Alfred V and Kernighan, Brian W and Weinberger, Peter J},
booktitle = {Addison-Wesley series in computer science and information processing},
isbn = {020107981X},
pages = {225},
publisher = {Addison-Wesley Publishing Company},
title = {{The AWK Programming Language}},
url = {https://books.google.com.uy/books?id=53ueQgAACAAJ},
year = {1988}
}
@misc{Inkscape2019,
pages = {1--12},
title = {{Inkscape Interface Tutorial}},
year = {2019}
}
@article{Fong2019,
abstract = {A supervised learning algorithm searches over a set of functions A → B parametrised by a space P to find the best approximation to some ideal function f:A → B. It does this by taking examples (a, f(a)) ∈ A × B, and updating the parameter according to some rule. We define a category where these update rules may be composed, and show that gradient descent, with respect to a fixed step size and an error function satisfying a certain property, defines a monoidal functor from a category of parametrised functions to this category of update rules. A key contribution is the notion of request function. This provides a structural perspective on backpropagation, giving a broad generalisation of neural networks and linking it with structures from bidirectional programming and open games.},
archivePrefix = {arXiv},
arxivId = {1711.10455},
author = {Fong, Brendan and Spivak, David and Tuy{\'{e}}ras, R{\'{e}}my},
doi = {10.1109/LICS.2019.8785665},
eprint = {1711.10455},
isbn = {9781728136080},
issn = {10436871},
journal = {Proceedings - Symposium on Logic in Computer Science},
pages = {1--17},
title = {{Backprop as Functor: A compositional perspective on supervised learning}},
volume = {2019-June},
year = {2019}
}
@misc{Wikipedia,
author = {Wikipedia},
booktitle = {Wikipedia},
title = {{Programming paradigms}}
}
@book{Beaver2013a,
abstract = {The best way to stay safe online is to stop hackers before they attack - first, by understanding their thinking and second, by ethically hacking your own site to measure the effectiveness of your security. This practical, top-selling guide will help you do both. Fully updated for Windows 8 and the latest version of Linux, Hacking For Dummies, 4th Edition explores the malicious hacker's mindset and helps you develop an ethical hacking plan (also known as penetration testing) using the newest tools and techniques. More timely than ever, this must-have book covers the very latest threats, including web app hacks, database hacks, VoIP hacks, and hacking of mobile devices. Guides you through the techniques and tools you need to stop hackers before they hack you. Completely updated to examine the latest hacks to Windows 8 and the newest version of Linux. Explores the malicious hacker's mindset so that you can counteract or avoid attacks completely. Suggests ways to report vulnerabilities to upper management, manage security changes, and put anti-hacking policies and procedures in place. If you're responsible for security or penetration testing in your organization, or want to beef up your current system through ethical hacking, make sure you get Hacking For Dummies, 4th Edition.},
author = {Beaver, Kevin},
isbn = {978-1-118-38093-2},
keywords = {1118380932,Computer Books: General,Computer Data Security,Computer fraud {\&} hacking,Computer networks,Computer security,Computers,Computers - Computer Security,Computers / Security / General,Computing: Professional {\&} Programming,Hackers,Hacking For Dummies,Kevin Beaver,Security - General,Security measures,Wiley John + Sons},
pages = {411},
publisher = {John Wiley {\&} Sons},
title = {{Hacking For Dummies}},
url = {https://www.wiley.com/en-us/Hacking+For+Dummies{\%}2C+4th+Edition-p-9781118380932},
year = {2013}
}
@article{KUHLMAN2013,
abstract = {This document is a self-learning document for a course in Python programming. This course contains (1) a part for beginners, (2) a discussion of several advanced topics that are of interest to Python programmers, and (3) a Python workbook with lots of exercises.},
author = {Kuhlman, Dave},
journal = {A Python Book},
keywords = {advanced python,beginning python,python book},
pages = {1--227},
title = {{A Python Book}},
year = {2013}
}
@article{Capel2000,
abstract = {The objective of this work is the super-resolution enhancement of image sequences. We consider in particular images of scenes for which the point-to-point image transformation is a plane projective transformation. We first describe the imaging model, and a maximum likelihood (ML) estimator of the super-resolution image. We demonstrate the extreme noise sensitivity of the unconstrained ML estimator. We show that the Irani and Peleg [9, 10] super-resolution algorithm does not suffer from this sensitivity, and explain that this stability is due to the error back-projection method which effectively constrains the solution. We then propose two estimators suitable for the enhancement of text images: a maximum a posteriori (MAP) estimator based on a Huber prior, and an estimator regularized using the Total Variation norm. We demonstrate the improved noise robustness of these approaches over the Irani and Peleg estimator. We also show the effects of a poorly estimated point spread function (PSF) on the super-resolution result and explain conditions necessary for this parameter to be included in the optimization. Results are evaluated on both real and synthetic sequences of text images. In the case of the real images, the projective transformations relating the images are estimated automatically from the image data, so that the entire algorithm is automatic. {\textcopyright}2000 IEEE.},
author = {Capel, David and Zisserman, Andrew},
doi = {10.1109/icpr.2000.905409},
issn = {10514651},
journal = {Proceedings - International Conference on Pattern Recognition},
number = {1},
pages = {600--605},
title = {{Super-resolution enhancement of text image sequences}},
volume = {15},
year = {2000}
}
@book{Eberly2005,
author = {Ericson, Christer},
isbn = {1558607323},
title = {{Real-Time Collision Detection}},
year = {2005}
}
@book{Hertzog2017a,
abstract = {Kali Linux has not only become the information security professional's platform of choice, but evolved into an industrial-grade, and world-class operating system distribution--mature, secure, and enterprise-ready. Through the decade-long development process, Muts and his team, along with countless volunteers from the hacker community, have taken on the burden of streamlining and organizing our work environment, freeing us from much of the drudgery. They provided a secure and reliable foundation, allowing us to concentrate on securing our digital world. An amazing community has built up around Kali Linux. Every month, more than 300,000 of us download a version of Kali. We come together in online and real-world training rooms and grind through the sprawling Offensive Security Penetration Testing Labs, pursuing the near-legendary Offensive Security certifications. We come together on the Kali forums, some 40,000 strong, and hundreds of us at a time can be found on the Kali IRC channel. We gather at conferences and attend Kali Dojos to learn from the developers themselves how to best leverage Kali. However, the Kali team has never released an official Kali Linux manual, until now. In this book, we'll focus on the Kali Linux platform itself, and help you understand and maximize Kali from the ground up. The developers will walk you through Kali Linux features and fundamentals, provide a crash course in basic Linux commands and concepts, and then walk you through the most common Kali Linux installation scenarios. You'll learn how to configure, troubleshoot and secure Kali Linux and then dive into the powerful Debian package manager. Throughout this expansive section, you'll learn how to install and configure packages, how to update and upgrade your Kali installation, and how to create your own custom packages. Then you'll learn how to deploy your custom installation across massive enterprise networks. Finally, you'll be guided through advanced topics such as kernel compilation, custom ISO creation, industrial-strength encryption, and even how to install crypto kill switches to safeguard your sensitive information. Whether you're a veteran or an absolute n00b, this is the best place to start with Kali Linux, the security professional's platform of choice.},
author = {Hertzog, Raphael and O'Gorman, Jim and Aharoni, Mati},
isbn = {9780997615609},
pages = {314},
title = {{Kali Linux Revealed: Mastering the Penetration Testing Distribution}},
year = {2017}
}
@article{Jiji2006,
abstract = {We propose a learning-based, single-image super-resolution reconstruction technique using the contourlet transform, which is capable of capturing the smoothness along contours, making use of directional decompositions. The contourlet coefficients at finer ...},
author = {Jiji, C V and Chaudhuri, Subhasis},
doi = {10.1155/ASP/2006/73767},
issn = {1687-6180},
journal = {EURASIP Journal on Advances in Signal Processing},
number = {1},
pages = {73767},
title = {{Single-Frame Image Super-resolution through Contourlet Learning}},
url = {https://asp-eurasipjournals.springeropen.com/articles/10.1155/ASP/2006/73767},
volume = {2006},
year = {2006}
}
@article{TranconyWidemann2014,
abstract = {The field of declarative stream programming (discrete time, clocked synchronous, modular, data-centric) is divided between the data-flow graph paradigm favored by domain experts, and the functional reactive paradigm favored by academics. In this paper, we describe the foundations of a framework for unifying functional and data-flow styles that differs from FRP proper in significant ways: It is based on set theory to match the expectations of domain experts, and the two paradigms are reduced symmetrically to a low-level middle ground, with strongly compositional semantics. The design of the framework is derived from mathematical first principles, in particular coalgebraic coinduction and a standard relational model of stateful computation. The abstract syntax and semantics introduced here constitute the full core of a novel stream programming language.},
author = {{Tranc{\'{o}}n y Widemann}, Baltasar and Lepper, Markus},
doi = {10.4204/eptcs.153.10},
journal = {Electronic Proceedings in Theoretical Computer Science},
keywords = {coinduction,data flow,stream programming,total functions},
number = {Msfp},
pages = {143--167},
title = {{Foundations of Total Functional Data-Flow Programming}},
volume = {153},
year = {2014}
}
@article{Moseley2006a,
abstract = {Complexity is the single major difficulty in the successful development of large-scale software systems. Following Brooks we distinguish accidental from essential difficulty, but disagree with his premise that most complexity remaining in contemporary systems is essential. We identify common causes of complexity and discuss general approaches which can be taken to eliminate them where they are accidental in nature. To make things more concrete we then give an outline for a potential complexity-minimizing approach based on functional programming and Codd's relational model of data.},
author = {Moseley, Ben and Marks, Peter},
journal = {Complexity},
pages = {1--66},
title = {{Out of the Tar Pit}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.93.8928{\&}rep=rep1{\&}type=pdf},
year = {2006}
}
@incollection{Hayashi1994,
author = {Hayashi, Susumu},
doi = {10.1007/3-540-58085-9_74},
booktitle = {Types for Proofs and Programs},
pages = {108--126},
publisher = {Springer, Berlin, Heidelberg},
title = {{Logic of refinement types}},
url = {http://link.springer.com/10.1007/3-540-58085-9{\_}74},
year = {1994}
}
@article{OpenSourceModelicaConsortium2018a,
author = {{Open Source Modelica Consortium}},
title = {{OpenModelica User's Guide}},
year = {2018}
}
@article{McDermott2004,
abstract = {MRI scanners enable fast, noninvasive, and high-resolution imaging of organs and soft tissue. The images are reconstructed from NMR signals generated by nuclear spins that precess in a static magnetic field B0 in the presence of magnetic field gradients. Most clinical MRI scanners operate at a magnetic field B0 = 1.5 T, corresponding to a proton resonance frequency of 64 MHz. Because these systems rely on large superconducting magnets, they are costly and demanding of infrastructure. On the other hand, low-field imagers have the potential to be less expensive, less confining, and more mobile. The major obstacle is the intrinsically low sensitivity of the low-field NMR experiment. Here, we show that prepolarization of the nuclear spins and detection with a superconducting quantum interference device (SQUID) yield a signal that is independent of B0, allowing acquisition of high-resolution MRIs in microtesla fields. Reduction of the strength of the measurement field eliminates inhomogeneous broadening of the NMR lines, resulting in enhanced signal-to-noise ratio and spatial resolution for a fixed strength of the magnetic field gradients used to encode the image. We present high-resolution images of phantoms and other samples and T1-weighted contrast images acquired in highly inhomogeneous magnetic fields of 132 microT; here, T1 is the spin-lattice relaxation time. These techniques could readily be adapted to existing multichannel SQUID systems used for magnetic source imaging of brain signals. Further potential applications include low-cost systems for tumor screening and imaging peripheral regions of the body.},
author = {McDermott, Robert and Lee, SeungKyun and ten Haken, Bennie and Trabesinger, Andreas H and Pines, Alexander and Clarke, John},
doi = {10.1073/pnas.0402382101},
issn = {0027-8424},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
month = {may},
number = {21},
pages = {7857--7861},
pmid = {15141077},
publisher = {National Academy of Sciences},
title = {{Microtesla MRI with a superconducting quantum interference device.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/15141077 http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC419521},
volume = {101},
year = {2004}
}
@article{Bosnjak2016,
abstract = {Given that in practice training data is scarce for all but a small set of problems, a core question is how to incorporate prior knowledge into a model. In this paper, we consider the case of prior procedural knowledge for neural networks, such as knowing how a program should traverse a sequence, but not what local actions should be performed at each step. To this end, we present an end-to-end differentiable interpreter for the programming language Forth which enables programmers to write program sketches with slots that can be filled with behaviour trained from program input-output data. We can optimise this behaviour directly through gradient descent techniques on user-specified objectives, and also integrate the program into any larger neural computation graph. We show empirically that our interpreter is able to effectively leverage different levels of prior program structure and learn complex behaviours such as sequence sorting and addition. When connected to outputs of an LSTM and trained jointly, our interpreter achieves state-of-the-art accuracy for end-to-end reasoning about quantities expressed in natural language stories.},
archivePrefix = {arXiv},
arxivId = {1605.06640},
author = {Bo{\v{s}}njak, Matko and Rockt{\"{a}}schel, Tim and Naradowsky, Jason and Riedel, Sebastian},
eprint = {1605.06640},
title = {{Programming with a Differentiable Forth Interpreter}},
url = {http://arxiv.org/abs/1605.06640},
year = {2016}
}
@book{Barendregt1994,
address = {Berlin, Heidelberg},
doi = {10.1007/3-540-58085-9},
editor = {Barendregt, Henk and Nipkow, Tobias},
isbn = {978-3-540-58085-0},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Computer Science},
title = {{Types for Proofs and Programs}},
url = {http://link.springer.com/10.1007/3-540-58085-9},
volume = {806},
year = {1994}
}
@article{Courtois2002,
abstract = {Several recently proposed ciphers are built with layers of small S-boxes, interconnected by linear key-dependent layers. Their security relies on the fact, that the classical methods of cryptanalysis (e.g. linear or differential attacks) are based on probabilistic characteristics, which makes their security grow exponentially with the number of rounds Nr. In this paper we study the security of such ciphers under an additional hypothesis: the S-box can be described by an overdefined system of algebraic equations (true with probability 1). We show that this hypothesis is true for both Serpent (due to a small size of S-boxes) and Rijndael (due to unexpected algebraic properties). We study general methods known for solving overdefined systems of equations, such as XL from Eurocrypt'00, and show their inefficiency. Then we introduce a new method called XSL that uses the sparsity of the equations and their specific structure. The XSL attack has a parameter P, and in theory we show that P should be a constant. The XSL attack would then be polynomial in Nr, with a huge constant that is double-exponential in the size of the S-box. We demonstrated by computer simulations that the XSL attack works well enough on a toy cipher. It seems however that P will rather increase very slowly with Nr. More simulations are needed for bigger ciphers. Our optimistic evaluation shows that the XSL attack might be able to break Rijndael 256 bits and Serpent for key lengths 192 and 256 bits. However if only P is increased by 2 (respectively 4) the XSL attack on Rijndael (respectively Serpent) would become slower than the exhaustive search. At any rate, it seems that the security of these ciphers does not grow exponentially with the number of rounds.},
author = {Courtois, Nicolas T and Pieprzyk, Josef},
doi = {10.1007/3-540-36178-2_17},
isbn = {3540001719},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
keywords = {AES,Block ciphers,Camellia,Gr{\"{o}}bner bases,MQ problem,Multivariate cryptanalysis,Multivariate quadratic equations,Overdefined systems of multivariate equations,Rijndael,Serpent,Sparse multivariate polynomials,Square,XL algorithm},
number = {256},
pages = {267--287},
title = {{Cryptanalysis of block ciphers with overdefined systems of equations}},
volume = {2501},
year = {2002}
}
@book{Bueche1999,
author = {Bueche, Frederick and Hecht, Eugene},
isbn = {0071367500},
keywords = {acceleration angle angular applied atoms axis body},
mendeley-tags = {acceleration angle angular applied atoms axis body},
pages = {138},
publisher = {McGraw Hill Professional},
title = {{Schaum's Easy Outline of College Physics}},
year = {1999}
}
@article{Berkhahn2019,
abstract = {We present a new flavor of Variational Autoencoder (VAE) that interpolates seamlessly between unsupervised, semi-supervised and fully supervised learning domains. We show that unlabeled datapoints not only boost unsupervised tasks, but also the classification performance. Vice versa, every label not only improves classification, but also unsupervised tasks. The proposed architecture is simple: A classification layer is connected to the topmost encoder layer, and then combined with the resampled latent layer for the decoder. The usual evidence lower bound (ELBO) loss is supplemented with a supervised loss target on this classification layer that is only applied for labeled datapoints. This simplicity allows for extending any existing VAE model to our proposed semi-supervised framework with minimal effort. In the context of classification, we found that this approach even outperforms a direct supervised setup.},
archivePrefix = {arXiv},
arxivId = {1908.03015},
author = {Berkhahn, Felix and Keys, Richard and Ouertani, Wajih and Shetty, Nikhil and Gei{\ss}ler, Dominik},
eprint = {1908.03015},
title = {{One Model To Rule Them All}},
url = {http://arxiv.org/abs/1908.03015},
year = {2019}
}
@book{Finch2011,
abstract = {This is a beginner's guide with clear step-by-step instructions, explanations, and advice. Each concept is illustrated with a complete example that you can use as a starting point for your own work. If you are an engineer, scientist, mathematician, or student, this book is for you. To get the most from Sage by using the Python programming language, we'll give you the basics of the language to get you started. For this, it will be helpful if you have some experience with basic programming concepts.},
author = {Finch, Craig},
isbn = {184951447X},
keywords = {{\_}{\_}init{\_}{\_} algebra algorithm armor{\_}values base class},
mendeley-tags = {{\_}{\_}init{\_}{\_} algebra algorithm armor{\_}values base class},
pages = {364},
publisher = {Packt Publishing Ltd},
title = {{Sage Beginner's Guide}},
year = {2011}
}
@book{Palach2014,
abstract = {Develop efficient parallel systems using the robust Python environment},
author = {Palach, Jan},
isbn = {9781783288397},
pages = {124},
publisher = {Packt Publishing},
title = {{Parallel Programming with Python}},
url = {www.it-ebooks.info},
year = {2014}
}
@article{Lamport2017,
author = {Lamport, Leslie},
title = {{A PlusCal User's Manual}},
url = {http://lamport.azurewebsites.net/tla/p-manual.pdf},
year = {2016}
}
@article{Sandfort2019,
abstract = {Labeled medical imaging data is scarce and expensive to generate. To achieve generalizable deep learning models large amounts of data are needed. Standard data augmentation is a method to increase generalizability and is routinely performed. Generative adversarial networks offer a novel method for data augmentation. We evaluate the use of CycleGAN for data augmentation in CT segmentation tasks. Using a large image database we trained a CycleGAN to transform contrast CT images into non-contrast images. We then used the trained CycleGAN to augment our training using these synthetic non-contrast images. We compared the segmentation performance of a U-Net trained on the original dataset compared to a U-Net trained on the combined dataset of original data and synthetic non-contrast images. We further evaluated the U-Net segmentation performance on two separate datasets: The original contrast CT dataset on which segmentations were created and a second dataset from a different hospital containing only non-contrast CTs. We refer to these 2 separate datasets as the in-distribution and out-of-distribution datasets, respectively. We show that in several CT segmentation tasks performance is improved significantly, especially in out-of-distribution (noncontrast CT) data. For example, when training the model with standard augmentation techniques, performance of segmentation of the kidneys on out-of-distribution non-contrast images was dramatically lower than for in-distribution data (Dice score of 0.09 vs. 0.94 for out-of-distribution vs. in-distribution data, respectively, p {\textless}0.001). When the kidney model was trained with CycleGAN augmentation techniques, the out-of-distribution (non-contrast) performance increased dramatically (from a Dice score of 0.09 to 0.66, p {\textless}0.001). Improvements for the liver and spleen were smaller, from 0.86 to 0.89 and 0.65 to 0.69, respectively. We believe this method will be valuable to medical imaging researchers to reduce manual segmentation effort and cost in CT imaging.},
author = {Sandfort, Veit and Yan, Ke and Pickhardt, Perry J and Summers, Ronald M},
doi = {10.1038/s41598-019-52737-x},
issn = {20452322},
journal = {Scientific Reports},
number = {1},
publisher = {Nature Research},
title = {{Data augmentation using generative adversarial networks (CycleGAN) to improve generalizability in CT segmentation tasks}},
volume = {9},
year = {2019}
}
@article{Howard2020,
abstract = {fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4–5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching.},
archivePrefix = {arXiv},
arxivId = {2002.04688},
author = {Howard, Jeremy and Gugger, Sylvain},
doi = {10.3390/info11020108},
eprint = {2002.04688},
issn = {2078-2489},
journal = {Information},
month = {feb},
title = {{Fastai: A Layered API for Deep Learning}},
url = {http://arxiv.org/abs/2002.04688 http://dx.doi.org/10.3390/info11020108 https://www.mdpi.com/2078-2489/11/2/108},
year = {2020}
}
@article{Surendra2017,
abstract = {Due to technological advancement, enormous micro data containing detailed individual information is being collected by both public and private organizations. The demand for releasing this data to the public for social and economic welfare is growing. Also, the organizations holding the data are under pressure to publish the data to prove their transparency. Since this micro data contains sensitive information about individuals, the raw data needs to be sanitized to preserve the privacy of the individuals before releasing it to the public. There are different types of data sanitization methods and many techniques are being proposed for Privacy Preserving Data Publishing (PPDP) of micro data. Synthetic Data Generation is an alternative to data masking techniques for preserving privacy. In this paper different fully and partially synthetic data generation techniques are reviewed and key research gaps are identified that need to be addressed in future research.},
author = {Surendra, H and Mohan, H S},
journal = {International Journal of Scientific {\&} Technology Research},
number = {3},
pages = {95--101},
title = {{A Review Of Synthetic Data Generation Methods For Privacy Preserving Data Publishing}},
volume = {6},
year = {2017}
}
@book{Siyavula,
editor = {Siyavula},
title = {{Mathematics Grade 11}}
}
@book{Vugt2009,
abstract = {This book is for anyone who wants to master Linux from the command line. When writing it, I had in mind system administrators, software developers, and enthusiastic users who want to get things going from the Linux command line. For beginning users, this may be a daunting task, as Linux commands often have many options documented only in pages that are not that easy to understand. This book is distribution agnostic. That is, while writing it, I've checked all items against Ubuntu, Red Hat, and SUSE. Since most distributions are quite similar to one of these three, this book should help you with other distributions as well. There is only one item in the book that is not distribution agnostic: the Appendix, which explains how to install OpenSUSE. I've chosen to cover installation of just one distribution, because if you don't have any Linux installed yet, you probably don't care what you install. If you do care what distribution to work with, you probably have it installed already.},
author = {van Vugt, Sander},
isbn = {1430218908},
keywords = {administrator ar+o archive arguments authenticatio},
mendeley-tags = {administrator ar+o archive arguments authenticatio},
pages = {392},
publisher = {Apress},
title = {{Beginning the Linux Command Line}},
year = {2009}
}
@article{Czajka2018,
abstract = {Hammers provide most powerful general purpose automation for proof assistants based on HOL and set theory today. Despite the gaining popularity of the more advanced versions of type theory, such as those based on the Calculus of Inductive Constructions, the construction of hammers for such foundations has been hindered so far by the lack of translation and reconstruction components. In this paper, we present an architecture of a full hammer for dependent type theory together with its implementation for the Coq proof assistant. A key component of the hammer is a proposed translation from the Calculus of Inductive Constructions, with certain extensions introduced by Coq, to untyped first-order logic. The translation is "sufficiently" sound and complete to be of practical use for automated theorem provers. We also introduce a proof reconstruction mechanism based on an eauto-type algorithm combined with limited rewriting, congruence closure and some forward reasoning. The algorithm is able to re-prove in the Coq logic most of the theorems established by the ATPs. Together with machine-learning based selection of relevant premises this constitutes a full hammer system. The performance of the whole procedure is evaluated in a bootstrapping scenario emulating the development of the Coq standard library. For each theorem in the library only the previous theorems and proofs can be used. We show that 40.8{\%} of the theorems can be proved in a push-button mode in about 40 seconds of real time on an 8-CPU system.},
author = {Czajka, {\L}ukasz and Kaliszyk, Cezary},
doi = {10.1007/s10817-018-9458-4},
issn = {15730670},
journal = {Journal of Automated Reasoning},
keywords = {Calculus of inductive constructions,Coq,Hammer,Proof automation},
number = {1-4},
pages = {423--453},
publisher = {Springer Netherlands},
title = {{Hammer for Coq: Automation for Dependent Type Theory}},
url = {https://doi.org/10.1007/s10817-018-9458-4},
volume = {61},
year = {2018}
}
@article{Lamport2012,
abstract = {A method of writing proofs is described that makes it harder to prove things that are not true. The method, based on hierarchical structuring, is simple and practical. The author's twenty years of experience writing such proofs is discussed.},
author = {Lamport, Leslie},
doi = {10.1007/s11784-012-0071-6},
issn = {16617738},
journal = {Journal of Fixed Point Theory and Applications},
keywords = {Structured proofs,teaching proofs},
number = {1},
pages = {43--63},
title = {{How to Write a 21st Century Proof}},
volume = {11},
year = {2012}
}
@article{Le2017,
abstract = {Program synthesis from incomplete specifications (e.g. input-output examples) has gained popularity and found real-world applications, primarily due to its ease-of-use. Since this technology is often used in an interactive setting, efficiency and correctness are often the key user expectations from a system based on such technologies. Ensuring efficiency is challenging since the highly combinatorial nature of program synthesis algorithms does not fit in a 1-2 second response expectation of a user-facing system. Meeting correctness expectations is also difficult, given that the specifications provided are incomplete, and that the users of such systems are typically non-programmers. In this paper, we describe how interactivity can be leveraged to develop efficient synthesis algorithms, as well as to decrease the cognitive burden that a user endures trying to ensure that the system produces the desired program. We build a formal model of user interaction along three dimensions: incremental algorithm, step-based problem formulation, and feedback-based intent refinement. We then illustrate the effectiveness of each of these forms of interactivity with respect to synthesis performance and correctness on a set of real-world case studies.},
archivePrefix = {arXiv},
arxivId = {1703.03539},
author = {Le, Vu and Perelman, Daniel and Polozov, Oleksandr and Raza, Mohammad and Udupa, Abhishek and Gulwani, Sumit},
eprint = {1703.03539},
pages = {1--13},
title = {{Interactive Program Synthesis}},
url = {http://arxiv.org/abs/1703.03539},
year = {2017}
}
@article{Chen2018,
abstract = {We introduce a new family of deep neural network models. Instead of specifying a discrete sequence of hidden layers, we parameterize the derivative of the hidden state using a neural network. The output of the network is computed using a black-box differential equation solver. These continuous-depth models have constant memory cost, adapt their evaluation strategy to each input, and can explicitly trade numerical precision for speed. We demonstrate these properties in continuous-depth residual networks and continuous-time latent variable models. We also construct continuous normalizing flows, a generative model that can train by maximum likelihood, without partitioning or ordering the data dimensions. For training, we show how to scalably backpropagate through any ODE solver, without access to its internal operations. This allows end-to-end training of ODEs within larger models.},
archivePrefix = {arXiv},
arxivId = {1806.07366},
author = {Chen, Ricky T Q and Rubanova, Yulia and Bettencourt, Jesse and Duvenaud, David},
eprint = {1806.07366},
number = {Nips},
pages = {1--18},
title = {{Neural Ordinary Differential Equations}},
url = {http://arxiv.org/abs/1806.07366},
year = {2018}
}
@book{Lamport2002,
author = {Lamport, Leslie},
isbn = {032114306X},
title = {{Specifying Systems: The TLA+ Language and Tools for Hardware and Software Engineers}},
year = {2002}
}
@book{Judson2016,
author = {Judson, Thomas W and Austin, Stephen F},
pages = {471},
title = {{Abstract Algebra: Theory and Applications}},
year = {2016}
}
@book{Morgan,
author = {Morgan, Carroll},
edition = {2nd},
isbn = {9780137262250},
publisher = {Prentice Hall},
series = {Prentice Hall International Series in Computing Science},
title = {{Programming from Specifications}},
url = {http://gen.lib.rus.ec/book/index.php?md5=5c41c258c8fbe68d025074957fc0e5b3 http://www.cs.ox.ac.uk/publications/books/PfS/}
}
@article{Ngo2017,
abstract = {In the paper, we present a method for decomposing a discrete noisy curve into arcs and segments which are the frequent primitives in digital images. This method is based on two tools: dominant point detection using adaptive tangential cover and tangent space representation of the polygon issued from detected dominant points. The experiments demonstrate the robustness of the method w.r.t. noise.},
author = {Ngo, Phuc and Nasser, Hayat and Debled-Rennesson, Isabelle},
doi = {10.1007/978-3-319-54427-4_36},
isbn = {9783319544267},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
keywords = {Adaptive tangential cover,Curve reconstruction,Dominant point detection,Tangent space,Vectorization},
pages = {493--505},
title = {{A discrete approach for decomposing noisy digital contours into arcs and segments}},
volume = {10117 LNCS},
year = {2017}
}
@book{MurrayR.Spiegel2009,
abstract = {Fully compatible with your classroom text, Schaum's highlights all the important facts you need to know. The book contains hundreds of examples, solved problems, and practice exercises to test your skills.},
author = {Spiegel, Murray R and Lipschutz, Seymour and Schiller, John J and Spellman, Dennis},
booktitle = {Schaum's Outline Series},
isbn = {9780071615709},
title = {{Schaum's Outline of Complex Variables}},
year = {2009}
}
@book{Kim2015,
abstract = {Just as a professional athlete doesn't show up without a solid game plan, ethical hackers, IT professionals, and security researchers should not be unprepared, either. The Hacker Playbook provides them their own game plans. Written by a longtime security professional and CEO of Secure Planet, LLC, this step-by-step guide to the “game” of penetration hacking features hands-on examples and helpful advice from the top of the field. Through a series of football-style “plays,” this straightforward guide gets to the root of many of the roadblocks people may face while penetration testing—including attacking different types of networks, pivoting through security controls, privilege escalation, and evading antivirus software. From “Pregame” research to “The Drive” and “The Lateral Pass,” the practical plays listed can be read in order or referenced as needed. Either way, the valuable advice within will put you in the mindset of a penetration tester of a Fortune 500 company, regardless of your career or level of experience. This second version of The Hacker Playbook takes all the best "plays" from the original book and incorporates the latest attacks, tools, and lessons learned. Double the content compared to its predecessor, this guide further outlines building a lab, walks through test cases for attacks, and provides more customized code. Whether you're downing energy drinks while desperately looking for an exploit, or preparing for an exciting new job in IT security, this guide is an essential part of any ethical hacker's library—so there's no reason not to get in the game.},
author = {Kim, Peter},
isbn = {9781512214567},
pages = {358},
title = {{The Hacker Playbook 2: Practical Guide To Penetration Testing}},
year = {2015}
}
@book{BarbaraHoffmanJohnSchorgeJosephSchafferLisaHalvorsonKarenBradshaw2012,
abstract = {The only gynecology resource that combines a full-color text and a procedural atlas—revised and updated Part medical text, part surgical atlas, Williams Gynecology is written by the renowned team of ob-gyn clinicians at Dallas' Parkland Hospital who are responsible for the landmark Williams Obstetrics. The new edition of Williams Gynecology maintains the consistent tone, leading-edge clinical insights, and quality illustrations of the successful first edition, while expanding and refreshing its content to keep pace with the most recent developments in this dynamic field. The many important topics covered in Williams Gynecology are evidence-based, yet the book is specifically designed as a practical quick-reference guide, aided throughout by helpful teaching points. Reflecting the latest clinical perspectives and research, the second edition features outstanding new coverage of minimally invasive procedures, robotics, and gynecologic anatomy. Features Two resources in one—full-color medical text and surgical atlas—conveniently surveys the entire spectrum of gynecologic disease, including general gynecology, reproductive endocrinology and infertility, urogynecology, and gynecologic oncology Atlas of gynecologic surgery contains 450 figures that illustrate operative techniques Unique consistent text design for an efficient approach to diagnosis and treatment Strong procedure orientation covers a vast array of surgical operations, which are illustrated in detail Evidence-based discussion of disease evaluation reinforces and supports the clinical relevance of the book's diagnostic and treatment methods Distinguished authorship from the same Parkland Hospital-based team which edited Williams Obstetrics—the leading reference in obstetrics for more than a century Newly illustrated gynecologic anatomy chapter created with the surgeon in mind to emphasize critical anatomy for successful surgery New coverage of minimally invasive procedures and robotics, the latest procedures in gynecologic oncology, and in-vitro fertilization Numerous illustrations, photographs, tables, and treatment algorithms},
author = {Hoffman, Barbara and Schorge, John and Schaffer, Joseph and Halvorson, Lisa and Bradshaw, Karen and Cunningham, F},
edition = {2},
isbn = {0071716726},
keywords = {abdominal abnormal abortion adenomyosis adnexal ad},
mendeley-tags = {abdominal abnormal abortion adenomyosis adnexal ad},
pages = {1401},
publisher = {McGraw Hill Professional},
title = {{Williams Gynecology}},
year = {2012}
}
@article{Trimeche2006,
abstract = {One critical aspect to achieve efficient implementations of image super-resolution is the need for accurate subpixel registration of the input images. The overall performance of super-resolution algorithms is particularly degraded in the presence of persistent outliers, for which registration has failed. To enhance the robustness of processing against this problem, we propose in this paper an integrated adaptive filtering method to reject the outlier image regions. In the process of combining the gradient images due to each low-resolution image, we use adaptive FIR filtering. The coefficients of the FIR filter are updated using the LMS algorithm, which automatically isolates the outlier image regions by decreasing the corresponding coefficients. The adaptation criterion of the LMS estimator is the error between the median of the samples from the LR images and the output of the FIR filter. Through simulated experiments on synthetic images and on real camera images, we show that the proposed technique performs well in the presence of motion outliers. This relatively simple and fast mechanism makes it possible to add robustness in practical implementations of image super-resolution, while still being effective against Gaussian noise in the image formation model.},
author = {Trimeche, Mejdi and Bilcu, Radu Ciprian and Yrj{\"{a}}n{\"{a}}inen, Jukka},
doi = {10.1155/ASP/2006/38052},
issn = {11108657},
journal = {Eurasip Journal on Applied Signal Processing},
pages = {1--12},
title = {{Adaptive outlier rejection in image super-resolution}},
volume = {2006},
year = {2006}
}
@article{Denes2014a,
author = {D{\'{e}}n{\`{e}}s, Maxime and Hritcu, Catalin and Lampropoulos, Leonidas and Paraskevopoulou, Zoe and Pierce, Benjamin C},
journal = {Coq Workshop},
pages = {1--2},
title = {{QuickChick: Property-Based Testing for Coq}},
url = {http://prosecco.gforge.inria.fr/personal/hritcu/talks/coq6{\_}submission{\_}4.pdf},
year = {2014}
}
@article{Steorts,
author = {Steorts, Rebecca C},
title = {{The Multi-Stage Gibbs Sampler: Data Augmentation (Dutch Example)}}
}
@article{Williams2013,
author = {Williams, Tim},
number = {December},
title = {{Haskell at Barclays: Exotic tools for exotic trades}},
year = {2013}
}
@article{Egi2018,
archivePrefix = {arXiv},
arxivId = {1809.03252},
author = {Egi, Satoshi},
eprint = {1809.03252},
title = {{Loop Patterns: Extension of Kleene Star Operator for More Expressive Pattern Matching against Arbitrary Data Structures}},
url = {https://arxiv.org/abs/1809.03252},
year = {2018}
}
@article{Wenzel2002,
author = {Wenzel, Markus},
title = {{Isabelle/Isar - a versatile environment for human-readable formal proof documents}},
url = {https://www.semanticscholar.org/paper/Isabelle{\%}2C-Isar-a-versatile-environment-for-human-Wenzel/4dbcafadae534f8d41b06fe87b7a9a9120459a0b{\#}paper-header},
year = {2002}
}
@article{Munn2019,
author = {Munn, Alan},
title = {{A one page, dictatorial guide to LaTeX packages}},
year = {2019}
}
@book{ScikitLearn2017,
title = {{scikit-learn user guide}},
year = {2017}
}
@article{Boyapati2013,
abstract = {We report on a 47-year-old man who was initially treated with finasteride for androgenetic alopecia. Despite continuous treatment, after year 4 his hair density was not as good as at year 2, and low-dose dutasteride at 0.5 mg/week was added to the finasteride therapy. This resulted in a dramatic increase in his hair density, demonstrating that combined therapy with finasteride and dutasteride can improve hair density in patients already taking finasteride. {\textcopyright}2012 The Authors. Australasian Journal of Dermatology {\textcopyright}2012 The Australasian College of Dermatologists.},
author = {Boyapati, Ann and Sinclair, Rodney},
doi = {10.1111/j.1440-0960.2012.00909.x},
issn = {00048380},
journal = {Australasian Journal of Dermatology},
keywords = {androgenetic alopecia,dutasteride,finasteride,male pattern hair loss},
number = {1},
pages = {49--51},
title = {{Combination therapy with finasteride and low-dose dutasteride in the treatment of androgenetic alopecia}},
volume = {54},
year = {2013}
}
@article{Iworiso2019,
abstract = {This paper applies a plethora of machine learning techniques to forecast the direction of the US equity premium. Our techniques include benchmark binary probit models, classification and regression trees, along with penalized binary probit models. Our empirical analysis reveals that the sophisticated machine learning techniques significantly outperformed the benchmark binary probit forecasting models, both statistically and economically. Overall, the discriminant analysis classifiers are ranked first among all the models tested. Specifically, the high-dimensional discriminant analysis classifier ranks first in terms of statistical performance, while the quadratic discriminant analysis classifier ranks first in economic performance. The penalized likelihood binary probit models (least absolute shrinkage and selection operator, ridge, elastic net) also outperformed the benchmark binary probit models, providing significant alternatives to portfolio managers.},
author = {Iworiso, Jonathan and Vrontos, Spyridon},
doi = {10.1002/for.2632},
issn = {1099131X},
journal = {Journal of Forecasting},
keywords = {CART,binary probit,directional predictability,forecasting,penalized binary probit,recursive window},