theory.bib

@comment{{This file has been generated by bib2bib 1.96}}
@comment{{Command line: bib2bib -ob theory.bib -s year -c select:"theory" Omiros_refs.bib}}
@article{stable,
  author = {Papaspiliopoulos, Omiros and Roberts, Gareth},
  title = {Stability of the {G}ibbs sampler for {B}ayesian hierarchical
              models},
  journal = {Ann. Statist.},
  fjournal = {The Annals of Statistics},
  volume = {36},
  year = {2008},
  number = {1},
  pages = {95--117},
  issn = {0090-5364},
  coden = {ASTSC7},
  mrclass = {62F15 (60J27 62J05)},
  mrnumber = {2387965 (2009b:62063)},
  mrreviewer = {Miguel A. Arcones},
  url = {https://dx.doi.org/10.1214/009053607000000749},
  select = {theory},
  abstract = {We characterize the convergence of the Gibbs sampler which samples from the joint posterior distribution of parameters and missing data in hierarchical linear models with arbitrary symmetric error distributions. We show that the convergence can be uniform, geometric or subgeometric depending on the relative tail behavior of the error distributions, and on the parametrization chosen. Our theory is applied to characterize the convergence of the Gibbs sampler on latent Gaussian process models. We indicate how the theoretical framework we introduce will be useful in analyzing more complex models.},
  keywords = {Geometric ergodicity; capacitance; collapsed Gibbs sampler; state-space models; parametrization; Bayesian robustness}
}
@article{dual,
  author = {Papaspiliopoulos, Omiros and Ruggiero, Matteo},
  title = {Optimal filtering and the dual process},
  year = {2013},
  journal = {Bernoulli},
  note = {to appear},
  select = {theory},
  abstract = {We link optimal filtering for hidden Markov models to the notion of
duality for Markov processes. We show that when the signal is dual
to a process that has two components, one deterministic and
one a pure death process, and with respect to functions that define
changes of measure conjugate to the emission density, the
filtering distributions evolve in the family of finite mixtures of such
measures and the filter can be computed at a cost that is polynomial
in the number of observations. Hence, for models in this framework, optimal
filtering reduces to a version of the Baum-Welch filter. Special cases
of our framework are the Kalman filter, but also models where the
signal is the Cox-Ingersoll-Ross process and the one-dimensional Wright-Fisher
process, which have been investigated before in the literature. The
duals of these two processes that we identify in this paper appear to be
new in the literature. We also discuss the extensions of these results
to an
infinite-dimensional signal modelled as a Fleming-Viot process, and
the connection of the duality framework we develop here and Kingman's
coalescent.},
  keywords = {Auxiliary variables; Bayesian conjugacy; Dirichlet process; Finite mixture models; Cox-Ingersoll-Ross process; Hidden Markov model; Kalman filters},
  url = {https://www.isi-web.org/images/bernoulli/BEJ1305-022.pdf}
}
@unpublished{inverse,
  author = {Agapiou, Sergios and Bardsley, Johnathan and Papaspiliopoulos, Omiros and Stuart, Andrew M.},
  title = {Analysis of the {G}ibbs sampler for hierarchical inverse problems},
  year = {2013},
  note = {submitted},
  select = {theory},
  abstract = {Many inverse problems arising in applications come from continuum models
where the unknown parameter is a field. In practice the unknown field
is discretized, resulting in a problem in $\mathbb{R}^N$, with the understanding
that refining the discretization, that is, increasing $N$, will often be desirable.
In the context of Bayesian inversion this situation suggests the importance
of two issues: (i) defining hyper-parameters
in such a way that they are interpretable in the continuum limit $N \to \infty$; (ii) understanding the efficiency of algorithms for probing
the posterior distribution, as a function of large $N$.
Here we address these two issues in the context of linear inverse problems
subject to additive Gaussian noise within a hierarchical modelling
framework based on a Gaussian prior for the unknown
field and inverse-gamma priors for two hyper-parameters, the amplitude of the prior variance and of
the observational noise variance. The structure of the model is such that the Gibbs sampler
can be easily implemented for probing the posterior distribution.  We
show that as the dimension $N$ grows, the behaviour of
the algorithm has two scales: an increasingly fast one for the
amplitude of the noise variance and an increasingly slow one for the amplitude of the prior
variance. In other words, as $N$ grows the Gibbs sampler convergence properties improve
for sampling the amplitude of the noise variance and deteriorate for sampling
the amplitude of the prior variance. We discuss a reparametrization of the
prior variance that is robust with respect to the increase in dimension, preventing the slowing down.},
  keywords = {Gaussian process priors; Markov chain Monte Carlo; inverse covariance operators; hierarchical models; diffusion limit},
  url = {https://arxiv.org/abs/1311.1138}
}

@comment{{This file was generated by bibtex2html 1.96}}