galpak3d.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib.metadata  # used below to look up optional package versions
import os
import sys
from copy import deepcopy
import configparser
from astropy.io.fits import Header
import astropy.io.ascii as asciitable
from astropy.table import Table, Column
import math
import numpy as np
np.random.seed(seed=1234)
# LOCAL IMPORTS
from .__version__ import __version__
from .math_utils import merge_where_nan, median_clip
from .instruments import Instrument, _read_instrument, MUSE, MUSEWFM, MUSENFM, ALMA, SINFOK250, SINFOK100, SINFOJ250, SINFOJ100, HARMONI, KMOS, OSIRIS, Generic
from .hyperspectral_cube import HyperspectralCube as HyperCube
from .string_stdout import StringStdOut
from .model_class import Model
from .model_sersic3d import ModelSersic
from .galaxy_parameters import GalaxyParameters, GalaxyParametersError
from .plot_utilities import Plots
from .mcmc import MCMC
from .galpak3d_utils import _save_to_file, _read_model
#will be removed
DiskModel = ModelSersic  #for backward compatibility
DefaultModel = ModelSersic
OII = {'wave': [3726.2, 3728.9], 'ratio': [0.8, 1.0]}
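# Note: OII holds the rest-frame [OII] doublet wavelengths (in Angstrom); the
# 'ratio' entry is presumably the allowed range assumed for the doublet line ratio.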
# LOGGING CONFIGURATION
import logging
logging.basicConfig(level=logging.INFO)
try:
    from distutils.version import LooseVersion as Version
except ImportError:
    # python3.13
    from packaging.version import Version
# OPTIONAL IMPORTS
try:
    import bottleneck as bn
except ImportError:
    logging.info(" bottleneck (optional) not installed, performance will be degraded")
    import numpy as bn
try:
    import pyfftw
except ImportError:
    logging.info(" PyFFTW (optional) not installed, performance will be degraded")
try:
    import mpdaf
    logging.info("Found MPDAF version %s" % (mpdaf.__version__))
    mpdaf_there = True
except ImportError:
    mpdaf_there = False
    logging.warning(" MPDAF (optional) not installed / not required")
try:
    import emcee
    emcee_there = True
    logging.info("Found EMCEE version %s" % (emcee.__version__))
    logging.warning("EMCEE tested for version > 3.0")
except ImportError:
    emcee_there = False
    logging.warning(" EMCEE (optional) not installed / not required. So option use_emcee is disabled")
try:
    import dynesty
    dynesty_there = True
    logging.info("Found Dynesty version %s \n EXPERIMENTAL and UNSUPPORTED" % (dynesty.__version__))
except ImportError:
    dynesty_there = False
    logging.warning(" Dynesty (optional) not installed / not required. ")
try:
    import pymultinest
    multinest_there = True
    logging.info("Found PyMultinest version %s" % (importlib.metadata.version("pymultinest")))
except ImportError:
    multinest_there = False
    logging.warning(" PyMultinest (optional) not installed / not required. ")
try:
    import corner
except ImportError:
    logging.info("corner (optional) not installed, corner plots will be disabled")
#Python3 compatibility
try:
    basestring
except NameError:
    basestring = str
from importlib import reload  # Python 3.4+
#reload(sys)
#sys.setdefaultencoding('utf-8')
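
# Illustrative usage sketch (commented out; the file name, seeing value and
# run_mcmc() settings below are hypothetical examples, not defaults):
#
#   from galpak import GalPaK3D, MUSEWFM   # assuming both are exported at package level
#   gk = GalPaK3D('my_galaxy_cube.fits', seeing=0.7, instrument=MUSEWFM())
#   parameters = gk.run_mcmc(mcmc_method='galpak', verbose=True)
#   # afterwards the fit products are available as attributes, e.g. gk.chain
#   # for the full Markov chain (see the class below)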
class GalPaK3D(Plots, MCMC):
    """
    GalPaK3D is a tool to extract Galaxy Parameters and Kinematics from
    3-Dimensional data, using reverse deconvolution with Bayesian analysis
    Markov Chain Monte Carlo (random walk).

    cube: HyperspectralCube|string
        The actual data on which we'll work; it should contain only one galaxy.
        Can be a HyperspectralCube object, a string filename to a FITS file, or

skipping to change at line 144

        You should update your cube's header.
    cunit1: float
        A value for the cube's header's CUNIT1 (&2) when it is missing.
        You should update your cube's header.
    force_header_update: bool
        Set to True to force the update of the above header cards,
        when their values are not missing.
        Note: These will not be saved into the FITS file. (if the cube is one)
    """
    logger = logging.getLogger('GalPaK')
    logger.info(' Running galpak ' + __version__)
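    # Illustrative sketch (commented out; the card values below are hypothetical):
    # when the cube header lacks spectral WCS cards they can be supplied at
    # construction time, e.g.
    #   GalPaK3D('cube.fits', crval3=6564.0, cdelt3=1.25, crpix3=1.0, cunit3='Angstrom',
    #            force_header_update=True)  # force_header_update overrides existing cards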

    def __init__(self, cube, variance=None, model=None,
                 seeing=None, instrument=None, quiet=False,
                 crval3=None, crpix3=None, cunit3=None, cdelt3=None, ctype3=None, cunit1=None,
                 force_header_update=False):
        # DEVS : If you change the signature above,
        #        remember to update the run() in api.py
skipping to change at line 202
        self.chi_at_p = None
        self.best_chisq = None
        self.stats = None
        self.BIC = None
        self.DIC = None
        self.mcmc_method = None
        self.mcmc_sampling = None
        #self.redshift = None
        # Set verbosity; the class-level logger is reused as self.logger
        self.version = __version__
        self.config = configparser.RawConfigParser()
        self.model = None
        if quiet:
            self._set_verbose(None)
            self.verbose = None
        else:
            self._set_verbose(True)
skipping to change at line 296
                cdelt3=cdelt3, ctype3=ctype3, cunit1=cunit1, force=force_header_update
            )
        except ValueError:
            raise ValueError("The cube already has one of the header cards "
                             "you're trying to provide. "
                             "Use force_header_update=True to override.")
        self.logger.debug('Header after patch : %s' % self.cube)
        #set xy_step z_step and z_central
        # 2. Set cube metadata from the instrument if header is incomplete
        self.cube.defaults_from_instrument(instrument=instrument)
        # 3. Initialize steps xy_steps z_steps z_cunit and z_central
        self.cube.initialize_self_cube()
        # 4. Calibrate the instrument with the cube
        self.instrument.use_pixelsize_from_cube(self.cube)
        self.logger.debug('z central : %4.e' % (self.cube.z_central))
skipping to change at line 352
        # Set up the model context
        if isinstance(model, basestring):
            model = _read_model(deepcopy(model))
        if model is not None:
            self._init_model(model)
            self.logger.info("Setting up the model : %s" % (self.model.__name__()))
            self.logger.info("Model setup :\n%s" % (self.model))

    def _init_model(self, model):
        # Set up the simulation model
        if model is not None:
            self.model = model
            self.logger.info("Init boundaries from model '%s'" % (self.model.__name__()))
            self.model_dict = self.model.__dict__
            #important:
            self.model.pixscale = self.cube.xy_step
        # Compute a flux estimation
skipping to change at line 398
                 last_chain_fraction=60,
                 percentile=95,
                 model=None,
                 chi_stat='gaussian',
                 mcmc_method='galpak',
                 mcmc_sampling=None,
                 min_boundaries=None,
                 max_boundaries=None,
                 known_parameters=None,
                 initial_parameters=None,
                 gprior_parameters=None,
                 random_scale=None,
                 min_acceptance_rate=10,
                 verbose=True,
                 emcee_nwalkers=30,
                 **kwargs):
        # DEVS : If you change the signature above,
        #        remember to update the run() in api.py
        """
        Main method_chain of GalPak, computes and returns the galaxy parameters
        as a :class:`GalaxyParameters <galpak.GalaxyParameters>` object
skipping to change at line 488
        max_boundaries: ndarray|GalaxyParameters
            The galaxy parameters will never be more than these values.
            Will override the default maximum boundaries for the parameters.
            If any of these values are NaN, they will be replaced by the default ones.
        known_parameters: ndarray|GalaxyParameters
            All set parameters in this array will be skipped in the MCMC,
            the algorithm will not try to guess them.
        gprior_parameters: ndarray | [2x GalaxyParameters]
            Gaussian prior on the parameters, given as a pair of parameter
            vectors (means, sigmas).
        initial_parameters: ndarray|ModelParameters
            The initial galaxy parameters of the MCMC chain.
            If None, will use the initial parameters provided by the model.
            The galaxy parameters not initialized by the model or by this
            parameter will be set to the mean of the boundaries.
        random_scale: float
            Scale the amplitude of the MCMC sampling by this value.
            This is an important parameter to adjust for a reasonable acceptance rate.
            The acceptance rate should be around 30-50%.
            If the acceptance rate is <20-30% (too low), decrease random_scale
skipping to change at line 622
        # Merge provided initial parameters (if any) with the defaults
        if initial_parameters is not None:
            #complete input with default values
            template = self.model.Parameters()
            merge_where_nan(template, initial_parameters)
            merge_where_nan(template, self.initial_parameters)
            self.initial_parameters = template
        self.logger.info("Initial parameters : %s", self.initial_parameters)
        if gprior_parameters is not None:
            if isinstance(gprior_parameters, np.ndarray) and gprior_parameters.shape[0] != 2:
                self.logger.error("gprior parameter should be an array of shape 2xNparam ")
            if isinstance(gprior_parameters, list) and len(gprior_parameters) != 2:
                self.logger.error("gprior parameter should be a list of 2 Parameters ")
            template_mu = self.model.Parameters()
            template_sig = self.model.Parameters()
            merge_where_nan(template_mu, gprior_parameters[0])
            merge_where_nan(template_sig, gprior_parameters[1])
            gprior_parameters = np.array([template_mu, template_sig])
        self.gprior_parameters = gprior_parameters
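        # Illustrative sketch (commented out; the parameter name and numbers are
        # hypothetical, and assume model.Parameters() returns a NaN-initialized
        # parameter vector with attribute access):
        #   mu, sig = self.model.Parameters(), self.model.Parameters()
        #   mu.inclination, sig.inclination = 45.0, 5.0
        #   run_mcmc(..., gprior_parameters=[mu, sig])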
        # By default, try to guess all parameters
        should_guess_flags = np.ones(dim_p)  # 0: we know it / 1: try to guess
        #if using input image
        # @fixme; this is unused
        if self.model_dict['flux_profile'] == 'user' and known_parameters is None:
            # was `raise self.logger.error(...)`, which would raise None; raise a real exception
            raise ValueError(
                "With an input image it is advised to freeze "
                "the `inclination`, using `known_parameters=`.")
skipping to change at line 730
                            'tune': True, \
                            'thin': 30
                            }
            #update default parameters
            kwargs_emcee.update(kwargs)
            #@fixme: need to accept parallelize
            pos0 = np.array([self.initial_parameters * (1 + 1e-3 * np.random.randn(dim_p)) for i in range(emcee_nwalkers)])
            self.logger.critical("Running EMCEE with %d walkers on %d iterations" % (emcee_nwalkers, self.max_iterations))
            if Version(emcee.__version__) < Version('3.0'):
                raise Exception("EMCEE version not supported ", emcee.__version__)
            # EMCEE version 3
            if mcmc_sampling == 'Cauchy':
                from .mcmc import CauchyMove
                myMove = CauchyMove(self.random_amplitude.as_vector()**2)
                self.logger.critical("Running EMCEE Walkers with Cauchy Sampling")
            elif mcmc_sampling == 'Normal':
                from emcee.moves import GaussianMove
                myMove = GaussianMove(self.random_amplitude.as_vector()**2)
skipping to change at line 842
                seed=-1, verbose=False, resume=True, context=0, write_output=True,
                log_zero=-1e+100, max_iter=0, init_MPI=True, dump_callback=None)
            """
            if 'outpath' not in kwargs.keys():
                outpath = './pymulti'
                if os.path.isdir(outpath) is False:
                    os.mkdir(outpath)
                output = outpath + '/out'
            else:
                outpath = kwargs['outpath']
                if os.path.isdir(outpath) is False:
                    os.mkdir(outpath)
                output = outpath + '/out'
                kwargs.pop('outpath')
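            # Illustrative note: a custom MultiNest output directory can be passed
            # through run_mcmc(mcmc_method='multinest', outpath='./my_multinest_run')
            # (the path here is a hypothetical example); otherwise './pymulti' is used.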
            #default parameters
            kwargs_multi = {'n_live_points': 200, \
                            'evidence_tolerance': 0.5, \
                            'n_iter_before_update': 200, \
                            'const_efficiency_mode': False, \
                            'sampling_efficiency': 0.8, \
                            'resume': False}
            kwargs_multi.update(kwargs)
skipping to change at line 903
            #bestfit_params = stats['modes'][0]['mean']
            #bestfit_params = stats['modes'][0]['maximum']
            #bestfit_params = stats['modes'][0]['maximum a posterior']
            #OR
            #bestfit_params = stats.get_best_fit()['parameters']
            self.sampler = dict(samples=samples, stats=stats, \
                                kwargs=kwargs_multi
                                )
            #clean
            os.system('rm -rf {}/'.format(outpath))
        elif mcmc_method == 'pymc3':
            raise NotImplementedError
        elif mcmc_method == 'pynuts':
            raise NotImplementedError
        else:
            raise Exception("method_mcmc %s not valid. Use one of %s" % (mcmc_method, self.MCMC_VALID))
        # Store chain
        self.logger.info("self.chain : full Markov chain")
skipping to change at line 1177
        good[snr < snr_min] = 0
        dim_good = good.sum()  # count only pixels with snr > snr_min and above
        Nd_good = (dim_good - self.dim_p_free - 1)  # degree of freedom
        # like = -0.5*np.nansum(self.variance_chi)-0.5*self.compute_chi(params)
        #BIC = -2 * like(theta)
        self.BIC = self.chi_at_p * self.Ndegree + self.dim_p_free * np.log(dim_data)
        #AIC = -2 * like + 2 dim_p
        self.AIC = self.chi_at_p * self.Ndegree + 2 * self.dim_p_free
        #DIC
        if self.method != 'chi_sorted':
            log_Lp = -0.5 * self.sub_chain['reduced_chi'] * self.Ndegree
            if np.isfinite(log_Lp).all():
                pD = 2 * np.var(log_Lp)
            else:
                pD = 2 * np.nanvar(log_Lp[np.isfinite(log_Lp)])
            # P = 2 * (logp_max-np.mean(self.lnp))
            #pD = 2 * -0.5 * (self.chi_at_p - np.mean(self.sub_chain['reduced_chi'])) * self.Ndegree
            #DIC = -2 * like + 2 pd
            self.DIC = self.chi_at_p * self.Ndegree + 2 * pD
        else:
            self.DIC = 0
            pD = 0
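        # Note on the criteria above: -2 log L is approximated by reduced_chi * Ndegree,
        # so BIC = -2 log L + k ln N and AIC = -2 log L + 2 k. pD is the effective number
        # of parameters, estimated as 2 * Var(log L) over the chain (a variance-based
        # estimator in the spirit of Gelman et al.), giving DIC = -2 log L + 2 pD.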
        self.stats = Table(np.array(['%.8f' % (self.best_chisq), '%.8f' % (self.chi_at_p),
                                     '%.2f' % (self.BIC), self.Ndegree,
                                     '%.2f' % (self.AIC), self.dim_p_free,
                                     '%.2f' % (pD),
                                     '%.2f' % (self.DIC),
                                     np.max(snr)]
                                    ), \
                           names=['best_chi2', 'chi2_at_p', 'BIC', 'Ndegree', \
                                  'AIC', 'k', \
                                  'pD', 'DIC', 'SNRmax'])
        if self.mcmc_method == 'multinest':
            evidence = self.sampler['stats']['global evidence']
            self.stats.add_column(evidence * -2, name='log Z')
        return self.stats

    def create_clean_cube(self, galaxy, shape, final=False):
        """
        Creates a cube containing a clean simulation of a galaxy according to
        the provided model.

skipping to change at line 1538

    def _chain_as_asciitable(self):
        """
        Exports the chain as an `asciitable`.
        See the public API `import_chain()` for the reverse operation of loading
        the chain from an `asciitable` file.
        """
        out = StringStdOut()
        asciitable.write(self.chain,
                         output=out,
                         format='fixed_width',
                         names=self.chain.dtype.names)
        return out.content
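        # Illustrative note: text produced this way can be read back with
        # astropy.io.ascii, e.g. asciitable.read(text, format='fixed_width');
        # import_chain() (mentioned above) is the intended public entry point.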

    def _get_min_chi_index(self):
        """
        Gets the index in the chain of the parameters with the minimal chi.
        """
        if self.chain is None:
            raise RuntimeError("No chain! Run `run_mcmc()` first.")