@article{chan,
  author = {Chan, Stanley H. and Khoshabeh, Ramsin and Gibson, Kristofor B. and Gill, Philip E. and Nguyen, Truong Q.},
  title = {An Augmented Lagrangian Method for Total Variation Video Restoration},
  journal = {IEEE Transactions on Image Processing},
  year = {2011},
  volume = {20},
  number = {11},
  pages = {3097--3111},
  doi = {10.1109/TIP.2011.2158229}
}
@article{admm,
  author = {Boyd, Stephen and Parikh, Neal and Chu, Eric and Peleato, Borja and Eckstein, Jonathan},
  title = {Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers},
  journal = {Foundations and Trends® in Machine Learning},
  year = {2011},
  volume = {3},
  number = {1},
  pages = {1--122},
  issn = {1935-8237},
  doi = {10.1561/2200000016},
  url = {http://dx.doi.org/10.1561/2200000016}
}
@article{boyd,
  author = {Parikh, Neal and Boyd, Stephen},
  title = {Proximal Algorithms},
  journal = {Foundations and Trends in Optimization},
  year = {2014},
  month = jan,
  issue_date = {January 2014},
  publisher = {Now Publishers Inc.},
  address = {Hanover, MA, USA},
  volume = {1},
  number = {3},
  pages = {127--239},
  numpages = {113},
  issn = {2167-3888},
  doi = {10.1561/2400000003},
  url = {https://doi.org/10.1561/2400000003},
  abstract = {This monograph is about a class of optimization algorithms called proximal algorithms. Much like Newton's method is a standard tool for solving unconstrained smooth optimization problems of modest size, proximal algorithms can be viewed as an analogous tool for nonsmooth, constrained, large-scale, or distributed versions of these problems. They are very generally applicable, but are especially well-suited to problems of substantial recent interest involving large or high-dimensional datasets. Proximal methods sit at a higher level of abstraction than classical algorithms like Newton's method: the base operation is evaluating the proximal operator of a function, which itself involves solving a small convex optimization problem. These subproblems, which generalize the problem of projecting a point onto a convex set, often admit closed-form solutions or can be solved very quickly with standard or simple specialized methods. Here, we discuss the many different interpretations of proximal operators and algorithms, describe their connections to many other topics in optimization and applied mathematics, survey some popular algorithms, and provide a large number of examples of proximal operators that commonly arise in practice.}
}
@article{chambolle,
  author = {Chambolle, Antonin and Pock, Thomas},
  title = {A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging},
  journal = {Journal of Mathematical Imaging and Vision},
  year = {2011},
  month = {May},
  day = {01},
  volume = {40},
  number = {1},
  pages = {120--145},
  issn = {1573-7683},
  doi = {10.1007/s10851-010-0251-1},
  url = {https://doi.org/10.1007/s10851-010-0251-1},
  abstract = {In this paper we study a first-order primal-dual algorithm for non-smooth convex optimization problems with known saddle-point structure. We prove convergence to a saddle-point with rate $O(1/N)$ in finite dimensions for the complete class of problems. We further show accelerations of the proposed algorithm to yield improved rates on problems with some degree of smoothness. In particular we show that we can achieve $O(1/N^2)$ convergence on problems where the primal or the dual objective is uniformly convex, and we can show linear convergence, i.e. $O(\omega^N)$ for some $\omega \in (0,1)$, on smooth problems. The wide applicability of the proposed algorithm is demonstrated on several imaging problems such as image denoising, image deconvolution, image inpainting, motion estimation and multi-label image segmentation.}
}
@article{clarkson,
  author = {Clarkson, James A. and Adams, C. Raymond},
  title = {On Definitions of Bounded Variation for Functions of Two Variables},
  journal = {Transactions of the American Mathematical Society},
  year = {1933},
  volume = {35},
  number = {4},
  pages = {824--854},
  publisher = {American Mathematical Society},
  issn = {0002-9947},
  url = {http://www.jstor.org/stable/1989593},
  urldate = {2023-04-04}
}
@article{qaisar,
  author = {Qaisar, Saad and Bilal, Rana Muhammad and Iqbal, Wafa and Naureen, Muqaddas and Lee, Sungyoung},
  title = {Compressive sensing: From theory to applications, a survey},
  journal = {Journal of Communications and Networks},
  year = {2013},
  volume = {15},
  number = {5},
  pages = {443--456},
  doi = {10.1109/JCN.2013.000083}
}
@misc{zhang,
  author = {Zhang, Jian and Liu, Shaohui and Zhao, Debin and Xiong, Ruiqin and Ma, Siwei},
  title = {Improved Total Variation based Image Compressive Sensing Recovery by Nonlocal Regularization},
  year = {2012},
  eprint = {1208.3716},
  archivePrefix = {arXiv},
  primaryClass = {cs.CV}
}
@article{lustig,
  author = {Lustig, Michael and Donoho, David and Pauly, John M.},
  title = {Sparse MRI: The application of compressed sensing for rapid MR imaging},
  journal = {Magnetic Resonance in Medicine},
  year = {2007},
  volume = {58},
  number = {6},
  pages = {1182--1195},
  keywords = {compressed sensing, compressive sampling, random sampling, rapid MRI, sparsity, sparse reconstruction, nonlinear reconstruction},
  doi = {10.1002/mrm.21391},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/mrm.21391},
  eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/mrm.21391},
  abstract = {The sparsity which is implicit in MR images is exploited to significantly undersample k-space. Some MR images such as angiograms are already sparse in the pixel representation; other, more complicated images have a sparse representation in some transform domain, for example in terms of spatial finite-differences or their wavelet coefficients. According to the recently developed mathematical theory of compressed-sensing, images with a sparse representation can be recovered from randomly undersampled k-space data, provided an appropriate nonlinear recovery scheme is used. Intuitively, artifacts due to random undersampling add as noise-like interference. In the sparse transform domain the significant coefficients stand out above the interference. A nonlinear thresholding scheme can recover the sparse coefficients, effectively recovering the image itself. In this article, practical incoherent undersampling schemes are developed and analyzed by means of their aliasing interference. Incoherence is introduced by pseudo-random variable-density undersampling of phase-encodes. The reconstruction is performed by minimizing the $\ell_1$ norm of a transformed image, subject to data fidelity constraints. Examples demonstrate improved spatial resolution and accelerated acquisition for multislice fast spin-echo brain imaging and 3D contrast enhanced angiography.}
}
@article{yousufi,
  author = {Yousufi, Musyyab and Amir, Muhammad and Javed, Umer and Tayyib, Muhammad and Abdullah, Suheel and Ullah, Hayat and Qureshi, Ijaz Mansoor and Alimgeer, Khurram Saleem and Akram, Muhammad Waseem and Khan, Khan Bahadar},
  title = {Application of Compressive Sensing to Ultrasound Images: A Review},
  journal = {BioMed Research International},
  year = {2019},
  month = {Nov},
  day = {15},
  publisher = {Hindawi},
  volume = {2019},
  pages = {7861651},
  issn = {2314-6133},
  doi = {10.1155/2019/7861651},
  url = {https://doi.org/10.1155/2019/7861651},
  abstract = {Compressive sensing (CS) offers compression of data below the Nyquist rate, making it an attractive solution in the field of medical imaging, and has been extensively used for ultrasound (US) compression and sparse recovery. In practice, CS offers a reduction in data sensing, transmission, and storage. Compressive sensing relies on the sparsity of data; i.e., data should be sparse in the original or in some transformed domain. A look at the literature reveals that a rich variety of algorithms has been suggested to recover data using compressive sensing from far fewer samples accurately, but with tradeoffs for efficiency. This paper reviews a number of significant CS algorithms used to recover US images from the undersampled data along with the discussion of CS in 3D US images. In this paper, sparse recovery algorithms applied to US are classified in five groups. Algorithms in each group are discussed and summarized based on their unique technique, compression ratio, sparsifying transform, 3D ultrasound, and deep learning. Research gaps and future directions are also discussed in the conclusion of this paper. This study is aimed to be beneficial for young researchers intending to work in the area of CS and its applications, specifically to US.}
}
@article{graff,
  author = {Graff, Christian G. and Sidky, Emil Y.},
  title = {Compressive sensing in medical imaging},
  journal = {Applied Optics},
  year = {2015},
  month = mar,
  volume = {54},
  number = {8},
  pages = {C23--44},
  address = {United States},
  language = {en},
  abstract = {The promise of compressive sensing, exploitation of compressibility to achieve high quality image reconstructions with less data, has attracted a great deal of attention in the medical imaging community. At the Compressed Sensing Incubator meeting held in April 2014 at OSA Headquarters in Washington, DC, presentations were given summarizing some of the research efforts ongoing in compressive sensing for x-ray computed tomography and magnetic resonance imaging systems. This article provides an expanded version of these presentations. Sparsity-exploiting reconstruction algorithms that have gained popularity in the medical imaging community are studied, and examples of clinical applications that could benefit from compressive sensing ideas are provided. The current and potential future impact of compressive sensing on the medical imaging field is discussed.}
}
@article{bookchambolle,
  author = {Chambolle, Antonin and Pock, Thomas},
  title = {An introduction to continuous optimization for imaging},
  journal = {Acta Numerica},
  year = {2016},
  volume = {25},
  pages = {161--319}
}
@misc{nikopj,
  author = {Janjušević, Nikola},
  title = {Introduction to Julia by TV Denoising},
  url = {https://nikopj.github.io/blog/julia_tvd/}
}
@article{rof,
  author = {Rudin, Leonid I. and Osher, Stanley and Fatemi, Emad},
  title = {Nonlinear total variation based noise removal algorithms},
  journal = {Physica D: Nonlinear Phenomena},
  year = {1992},
  volume = {60},
  number = {1},
  pages = {259--268},
  issn = {0167-2789},
  doi = {10.1016/0167-2789(92)90242-F},
  url = {https://www.sciencedirect.com/science/article/pii/016727899290242F},
  abstract = {A constrained optimization type of numerical algorithm for removing noise from images is presented. The total variation of the image is minimized subject to constraints involving the statistics of the noise. The constraints are imposed using Lagrange multipliers. The solution is obtained using the gradient-projection method. This amounts to solving a time dependent partial differential equation on a manifold determined by the constraints. As $t \to \infty$ the solution converges to a steady state which is the denoised image. The numerical algorithm is simple and relatively fast. The results appear to be state-of-the-art for very noisy images. The method is noninvasive, yielding sharp edges in the image. The technique could be interpreted as a first step of moving each level set of the image normal to itself with velocity equal to the curvature of the level set divided by the magnitude of the gradient of the image, and a second step which projects the image back onto the constraint set.}
}
@misc{ivan-admm,
  author = {Selesnick, Ivan},
  title = {Total Variation Filtering using ADMM},
  year = {2012},
  month = {Feb}
}
@article{condat,
  author = {Condat, Laurent},
  title = {A Direct Algorithm for 1-D Total Variation Denoising},
  journal = {IEEE Signal Processing Letters},
  year = {2013},
  volume = {20},
  number = {11},
  pages = {1054--1057},
  doi = {10.1109/LSP.2013.2278339}
}
@misc{mm,
  author = {Selesnick, Ivan},
  title = {Total Variation Denoising (an MM Algorithm)},
  howpublished = {Lecture notes, New York University},
  url = {https://eeweb.engineering.nyu.edu/iselesni/lecture_notes/TVDmm/TVDmm.pdf}
}
@misc{ic,
  author = {Selesnick, Ivan},
  title = {Total Variation Filtering},
  howpublished = {Lecture notes, New York University},
  url = {https://eeweb.engineering.nyu.edu/iselesni/lecture_notes/TV_filtering.pdf}
}
@book{dsp,
  author = {Vetterli, Martin and Kovačević, Jelena and Goyal, Vivek K.},
  title = {Foundations of Signal Processing},
  publisher = {Cambridge University Press},
  address = {Cambridge},
  year = {2014},
  doi = {10.1017/CBO9781139839099}
}