diff --git a/.travis.yml b/.travis.yml
index 5e18e4424..fac0049c0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,8 +2,7 @@ cache: apt
sudo: true
language: python
python:
- - "2.7"
- - "3.3"
+ - 2.7
before_install:
- wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; # grab miniconda
- bash miniconda.sh -b -p $HOME/miniconda # install miniconda
@@ -13,12 +12,21 @@ before_install:
- conda install pyyaml
- conda info -a # and print the info
-install:
- - conda env create --file environment.yml #set up the environment thats in the file
- - source activate test-environment # activate it
- - python setup.py install # install ptypy
+env:
+ - TEST_ENV_NAME=core_dependencies
+ - TEST_ENV_NAME=full_dependencies
+
+install: True
script:
+  - conda env create --file ${TEST_ENV_NAME}.yml # set up the environment that's in the file
+ - source activate ${TEST_ENV_NAME} # activate it
+ - conda install pytest # additional dependencies for the tests
+ - pip install pytest-cov
+ - pip install coveralls
+ - echo $PYTHONPATH
+ - conda list
+ - python setup.py install # install ptypy
- py.test ptypy/test -v --cov ptypy --cov-report term-missing # now run the tests
after_script:
diff --git a/benchmark/model_speed.py b/benchmark/model_speed.py
new file mode 100644
index 000000000..1983abc77
--- /dev/null
+++ b/benchmark/model_speed.py
@@ -0,0 +1,16 @@
+
+from ptypy import utils as u
+from ptypy.core import Ptycho, Vanilla
+
+p = u.Param()
+p.verbose_level = 3
+p.io = u.Param()
+p.io.home = "/tmp/ptypy/"
+p.scans = u.Param()
+p.scans.MF = u.Param()
+p.scans.MF.data = u.Param()
+p.scans.MF.name = 'Vanilla'
+p.scans.MF.data.name = 'QuickScan'
+p.scans.MF.data.num_frames = 50000
+p.scans.MF.data.shape = 32
+Ptycho(p, level=2)
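
The benchmark stops at `level=2`, which (as of this version) initializes the scan model and data but runs no reconstruction engine. A hedged sketch of timing that phase, with a smaller frame count and the parameter tree copied from the benchmark above:

    # Sketch: time the model/data phase only (level=2, no engine run).
    import time

    from ptypy import utils as u
    from ptypy.core import Ptycho, Vanilla

    p = u.Param()
    p.verbose_level = 1
    p.io = u.Param()
    p.io.home = "/tmp/ptypy/"
    p.scans = u.Param()
    p.scans.MF = u.Param()
    p.scans.MF.data = u.Param()
    p.scans.MF.name = 'Vanilla'
    p.scans.MF.data.name = 'QuickScan'
    p.scans.MF.data.num_frames = 1000   # fewer frames than the benchmark
    p.scans.MF.data.shape = 32

    t0 = time.time()
    Ptycho(p, level=2)
    print('Model setup took %.2f s for %d frames'
          % (time.time() - t0, p.scans.MF.data.num_frames))
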
diff --git a/core_dependencies.yml b/core_dependencies.yml
new file mode 100644
index 000000000..957f3c8b0
--- /dev/null
+++ b/core_dependencies.yml
@@ -0,0 +1,8 @@
+name: core_dependencies
+channels:
+ - conda-forge
+dependencies:
+ - python=2.7
+ - numpy
+ - scipy
+ - h5py
diff --git a/doc/Makefile b/doc/Makefile
index decf44118..9de9237e2 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -3,8 +3,13 @@
# You can set these variables from the command line.
SPHINXOPTS =
-SPHINXBUILD = ~/.local/bin/sphinx-build
+
+ifeq ($(SPHINXBUILD),"")
+ SPHINXBUILD = ~/.local/bin/sphinx-build
+endif
+
#SPHINXBUILD = /usr/bin/sphinx-build
+
PAPER =
BUILDDIR = build
diff --git a/doc/conf.py b/doc/conf.py
index 422944800..5a7ec654e 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -21,11 +21,11 @@
#sys.path.insert(0, os.path.abspath('.'))
# generate parameters.rst and other rst
-execfile('parameters2rst.py')
-execfile('tmp2rst.py')
-execfile('version.py')
import subprocess
-subprocess.call(['python', 'script2rst.py']) # We need this to have a clean sys.argv
+subprocess.check_call(['python', 'script2rst.py']) # We need this to have a clean sys.argv
+subprocess.check_call(['python','parameters2rst.py'])
+subprocess.check_call(['python','tmp2rst.py'])
+execfile('version.py')
# -- General configuration ------------------------------------------------
@@ -42,73 +42,63 @@
]
+def truncate_docstring(app, what, name, obj, options, lines):
+ """
+ Remove the parameter default entries (everything from the 'Defaults:' marker onward).
+ """
+ if not hasattr(obj, 'DEFAULT'):
+ return
+ if any(l.strip().startswith('Defaults:') for l in lines):
+ while True:
+ if lines.pop(-1).strip().startswith('Defaults:'):
+ break
+
+
def remove_mod_docstring(app, what, name, obj, options, lines):
from ptypy import utils as u
- import numpy as np
+ from ptypy import defaults_tree
u.verbose.report.headernewline='\n\n'
searchstr = ':py:data:'
- def get_refs(dct,pd,depth=2, indent = ''):
- if depth<0:
+ def get_refs(dct, pd, depth=2, indent=''):
+ if depth < 0:
return
for k, value in dct.iteritems():
ref = ', see :py:data:`~%s`' % pd.children[k].entry_point if pd.children.has_key(k) else ''
- if hasattr(value,'items'):
+ if hasattr(value, 'items'):
v = str(value.__class__.__name__)
- elif str(value)==value:
- v='"%s"' % value
+ elif str(value) == value:
+ v = '"%s"' % value
else:
- v=str(value)
+ v = str(value)
- lines.append(indent+'* *' +k+'* = ``'+v+'``' +ref)#+'\n')
+ lines.append(indent + '* *' + k + '* = ``' + v + '``' + ref)
- if hasattr(value,'items'):
- #lines.append('\n\n')
+ if hasattr(value, 'items'):
lines.append("")
- get_refs(value,pd.children[k],depth=depth-1, indent = indent+' ')
+ get_refs(value, pd.children[k], depth=depth-1, indent=indent+' ')
lines.append("")
- #lines.append('\n\n')
- #if name.find('DEFAULT')>=0:
if isinstance(obj, u.Param) or isinstance(obj, dict):
- keys = obj.keys()
pd = None
- """
- # auto_matching
- for entry,pdesc in u.validator.entry_points_Param.iteritems():
- chkeys = ':'.join([k.split('.')[-1] for k in pdesc.children.keys()])
- #print chkeys
- #print keys
- matches = [key in chkeys for key in keys]
- #print matches
- print np.mean(matches)
- if np.mean(matches)>0.8:
- print 'Param match'
- e=entry
- print e
- pd = pdesc
- break
- """
for l in lines:
start = l.find(searchstr)
if start > -1:
newstr = l[start:]
newstr = newstr.split('`')[1]
newstr = newstr.replace('~', '')
- #print newstr, what, name, options
- pd = u.validator.entry_points_dct.get(newstr,None)
+ pd = defaults_tree.get(newstr)
break
if pd is not None:
- #lines.append('Match with :py:data:`.%s` \n\n' %pd.entry_point)
get_refs(obj, pd, depth=2, indent='')
- #print lines
def setup(app):
app.connect('autodoc-process-docstring', remove_mod_docstring)
+ app.connect('autodoc-process-docstring', truncate_docstring)
napoleon_use_ivar = True
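
The truncate_docstring hook above pops lines from the end of the docstring until the 'Defaults:' marker itself has been removed. A standalone toy check of that loop (input data invented for illustration):

    # Toy illustration of the truncation loop in truncate_docstring:
    lines = ['PtyScan: A single ptychography scan.', '',
             'Defaults:', '', '[dfile]', 'default = None']
    if any(l.strip().startswith('Defaults:') for l in lines):
        while True:
            if lines.pop(-1).strip().startswith('Defaults:'):
                break
    print(lines)  # ['PtyScan: A single ptychography scan.', ''] -- Defaults block gone
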
diff --git a/doc/html_templates/ptypysphinx/download.html b/doc/html_templates/ptypysphinx/download.html
index c9a67a666..59a1f3960 100644
--- a/doc/html_templates/ptypysphinx/download.html
+++ b/doc/html_templates/ptypysphinx/download.html
@@ -32,5 +32,5 @@
diff --git a/doc/index.rst b/doc/index.rst
index a59d037ed..bc755bc01 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -44,7 +44,7 @@ Highlights
overcoming partial coherence or related phenomena.
* **On-the-fly** reconstructions (while data is being acquired) using the
- the :any:`PtyScan` class in the linking mode :ref:`linking mode`
+ :py:class:`PtyScan` class in the :ref:`linking mode`
Quicklinks
@@ -68,10 +68,10 @@ Quicklinks
.. rubric:: Footnotes
-.. [#Enders2016] B.Enders and P.Thibault, **Proc. R. Soc. A** 472, 20160640 (2016), `doi `_
+.. [#Enders2016] B.Enders and P.Thibault, **Proc. R. Soc. A** 472, 20160640 (2016), `doi `__
-.. [#Thi2013] P.Thibault and A.Menzel, **Nature** 494, 68 (2013), `doi `_
+.. [#Thi2013] P.Thibault and A.Menzel, **Nature** 494, 68 (2013), `doi `__
-.. [#ml] P.Thibault and M.Guizar-Sicairos, **New J. of Phys.** 14, 6 (2012), `doi `_
+.. [#ml] P.Thibault and M.Guizar-Sicairos, **New J. of Phys.** 14, 6 (2012), `doi `__
-.. [#dm] P.Thibault, M.Dierolf *et al.*, **New J. of Phys. 14**, 6 (2012), `doi `_
+.. [#dm] P.Thibault, M.Dierolf *et al.*, **New J. of Phys. 14**, 6 (2012), `doi `__
diff --git a/doc/parameters2rst.py b/doc/parameters2rst.py
index 419e3537f..37f93563a 100644
--- a/doc/parameters2rst.py
+++ b/doc/parameters2rst.py
@@ -1,5 +1,5 @@
-from ptypy import utils as u
+from ptypy import defaults_tree
prst = open('rst/parameters.rst','w')
@@ -9,32 +9,52 @@
Header+= '************************\n\n'
prst.write(Header)
-names = u.validator.parameter_descriptions
-for name,desc in u.validator.parameter_descriptions.iteritems():
- if name=='':
+for path, desc in defaults_tree.descendants:
+
+ types = desc.type
+ default = desc.default
+ lowlim, uplim = desc.limits
+ is_wildcard = (desc.name == '*')
+
+ if is_wildcard:
+ path = path.replace('*', desc.parent.name[:-1] + '_00')
+
+ if path == '':
continue
- if hasattr(desc,'children') and desc.parent is u.validator.pdroot:
- prst.write('\n'+name+'\n')
- prst.write('='*len(name)+'\n\n')
- if hasattr(desc,'children') and desc.parent.parent is u.validator.pdroot:
- prst.write('\n'+name+'\n')
- prst.write('-'*len(name)+'\n\n')
-
- prst.write('.. py:data:: '+name)
- prst.write('('+', '.join([t for t in desc.type])+')')
+ if desc.children and desc.parent is desc.root:
+ prst.write('\n'+path+'\n')
+ prst.write('='*len(path)+'\n\n')
+ if desc.children and desc.parent.parent is desc.root:
+ prst.write('\n'+path+'\n')
+ prst.write('-'*len(path)+'\n\n')
+
+ prst.write('.. py:data:: '+path)
+
+ if desc.is_symlink:
+ tp = 'Param'
+ else:
+ tp = ', '.join([str(t) for t in types])
+ prst.write(' ('+tp+')')
prst.write('\n\n')
- num = str(desc.ID) if hasattr(desc,'ID') else "None"
- prst.write(' *('+num+')* '+desc.shortdoc+'\n\n')
- prst.write(' '+desc.longdoc.replace('\n','\n ')+'\n\n')
- prst.write(' *default* = ``'+str(desc.default))
- if desc.lowlim is not None and desc.uplim is not None:
- prst.write(' (>'+str(desc.lowlim)+', <'+str(desc.uplim)+')``\n')
- elif desc.lowlim is not None and desc.uplim is None:
- prst.write(' (>'+str(desc.lowlim)+')``\n')
- elif desc.lowlim is None and desc.uplim is not None:
- prst.write(' (<'+str(desc.uplim)+')``\n')
+
+ if is_wildcard:
+ prst.write(' *Wildcard*: multiple entries with arbitrary names are accepted.\n\n')
+
+ prst.write(' '+desc.help+'\n\n')
+ prst.write(' '+desc.doc.replace('<newline>', '\n').replace('\n', '\n ')+'\n\n')
+
+ if desc.is_symlink:
+ prst.write(' *default* = '+':py:data:`'+desc.path+'`\n')
else:
- prst.write('``\n')
+ prst.write(' *default* = ``'+repr(default))
+ if lowlim is not None and uplim is not None:
+ prst.write(' (>'+str(lowlim)+', <'+str(uplim)+')``\n')
+ elif lowlim is not None and uplim is None:
+ prst.write(' (>'+str(lowlim)+')``\n')
+ elif lowlim is None and uplim is not None:
+ prst.write(' (<'+str(uplim)+')``\n')
+ else:
+ prst.write('``\n')
prst.write('\n')
prst.close()
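
The generator now walks flat `(path, desc)` pairs from `defaults_tree.descendants` instead of the old `u.validator` dictionaries. A hedged sketch of inspecting that iteration (output depends on what the installed ptypy registers at import time):

    # Sketch: list a few entries the generator above iterates over.
    from ptypy import defaults_tree

    for path, desc in defaults_tree.descendants:
        if path == '':
            continue  # skip the root, as the generator does
        print('%-40s %s' % (path, desc.help))
        if desc.name == '*':  # wildcard: arbitrary keys accepted
            print('    (wildcard, documented as %s_00)' % desc.parent.name[:-1])
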
diff --git a/doc/rst/ptypy.core.rst b/doc/rst/ptypy.core.rst
index b2fa0cba0..93837fdad 100644
--- a/doc/rst/ptypy.core.rst
+++ b/doc/rst/ptypy.core.rst
@@ -44,14 +44,6 @@ ptypy.core.manager module
:undoc-members:
:show-inheritance:
-ptypy.core.model module
------------------------
-
-.. automodule:: ptypy.core.model
- :members:
- :undoc-members:
- :show-inheritance:
-
ptypy.core.paths module
-----------------------
diff --git a/doc/rst/ptypy.debug.rst b/doc/rst/ptypy.debug.rst
new file mode 100644
index 000000000..edc921576
--- /dev/null
+++ b/doc/rst/ptypy.debug.rst
@@ -0,0 +1,29 @@
+ptypy.debug package
+===================
+
+Submodules
+----------
+
+ptypy.debug.embedded_shell module
+---------------------------------
+
+.. automodule:: ptypy.debug.embedded_shell
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ptypy.debug.ipython_kernel module
+---------------------------------
+
+.. automodule:: ptypy.debug.ipython_kernel
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: ptypy.debug
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/rst/ptypy.rst b/doc/rst/ptypy.rst
index d462b9cf7..46e7c1267 100644
--- a/doc/rst/ptypy.rst
+++ b/doc/rst/ptypy.rst
@@ -13,6 +13,7 @@ Subpackages
ptypy.resources
ptypy.simulations
ptypy.utils
+ ptypy.debug
Module contents
---------------
diff --git a/doc/rst/ptypy.utils.rst b/doc/rst/ptypy.utils.rst
index c4bda94ab..fb59aeea1 100644
--- a/doc/rst/ptypy.utils.rst
+++ b/doc/rst/ptypy.utils.rst
@@ -12,14 +12,6 @@ ptypy.utils.array_utils module
:undoc-members:
:show-inheritance:
-ptypy.utils.embedded_shell module
----------------------------------
-
-.. automodule:: ptypy.utils.embedded_shell
- :members:
- :undoc-members:
- :show-inheritance:
-
ptypy.utils.math_utils module
-----------------------------
@@ -76,10 +68,10 @@ ptypy.utils.scripts module
:undoc-members:
:show-inheritance:
-ptypy.utils.validator module
-----------------------------
+ptypy.utils.descriptor module
+-----------------------------
-.. automodule:: ptypy.utils.validator
+.. automodule:: ptypy.utils.descriptor
:members:
:undoc-members:
:show-inheritance:
diff --git a/doc/rst_templates/data_management.tmp b/doc/rst_templates/data_management.tmp
index fc15caaff..cb3fe7b4a 100644
--- a/doc/rst_templates/data_management.tmp
+++ b/doc/rst_templates/data_management.tmp
@@ -42,9 +42,9 @@ prior to a successful image reconstruction.
reconstruction software naturally **cannot** provide actual experimental
data. Nevertheless, the treatment of raw data is usually very similar for
every experiment. Consequently, |ptypy| provides an abstract base class,
-called :any:`PtyScan`, which aims to help with steps (B) and (C). In order
+called :py:class:`PtyScan`, which aims to help with steps (B) and (C). In order
to adapt |ptypy| for a specific experimental setup, we simply
-subclass :any:`PtyScan` and reimplement only that subset of its methods which are
+subclass :py:class:`PtyScan` and reimplement only that subset of its methods which are
affected by the specifics of the experimental setup
(see :ref:`subclassptyscan`).
@@ -53,7 +53,7 @@ affected by the specifics of the experiemental setup
The PtyScan class
=================
-:any:`PtyScan` is the abstract base class in |ptypy| that manages raw input
+:py:class:`PtyScan` is the abstract base class in |ptypy| that manages raw input
data.
A PtyScan instance is constructed from a set of generic parameters,
@@ -67,7 +67,7 @@ It provides the following features:
process only loads the data it will later use in the reconstruction.
Hence, the load on the network is not affected by the number of
processes.
- The parallel behavior of :any:`PtyScan`, is controlled by the parameter
+ The parallel behavior of :py:class:`PtyScan` is controlled by the parameter
:py:data:`.scan.data.load_parallel`. It uses the :py:class:`~ptypy.utils.parallel.LoadManager`.
**Preparation**
@@ -135,7 +135,7 @@ The PtyScan class of |ptypy| provides support for three use cases.
In this use case, the researcher has integrated |ptypy| into the beamline
end-station or experimental setup
- with the help of a custom subclass of :any:`PtyScan` that we call
+ with the help of a custom subclass of :py:class:`PtyScan` that we call
``UserScan``. This subclass has its own methods to extract many of the
generic parameters of :py:data:`.scan.data` and also defaults
for specific custom parameters, for instance file paths or file name
@@ -149,7 +149,7 @@ The PtyScan class of |ptypy| provides support for three use cases.
:figclass: highlights
:name: case_integrated
- Intergrated use case of :any:`PtyScan`.
+ Integrated use case of :py:class:`PtyScan`.
A custom subclass ``UserScan``
serves as a translator between |ptypy|'s generic parameters and
@@ -189,11 +189,11 @@ The PtyScan class of |ptypy| provides support for three use cases.
:figclass: highlights
:name: case_prepared
- Standard supported use case of :any:`PtyScan`.
+ Standard supported use case of :py:class:`PtyScan`.
If a structure-compatible (see :ref:`ptyd_file`) ``*.hdf5``-file is
available, |ptypy| can be used without customizing a subclass of
- :any:`PtyScan`. It will use the shipped subclass :any:`PtydScan`
+ :py:class:`PtyScan`. It will use the shipped subclass :py:class:`PtydScan`
to read in the (prepared) raw data.
**Preparation and reconstruction on-the-fly with data acquisition.**
@@ -218,7 +218,7 @@ The PtyScan class of |ptypy| provides support for three use cases.
:figclass: highlights
:name: case_flyscan
- On-the-fly or demon-like use case of :any:`PtyScan`.
+ On-the-fly or daemon-like use case of :py:class:`PtyScan`.
A separate process prepares the data *chunks* and saves them
in separate files which are
@@ -299,7 +299,7 @@ are general (optional) parameters.
expected in the dataset (see :py:data:`~.scan.data.num_frames`)
It is important to set this parameter when the data acquisition is not
finished but the reconstruction has already started. If the dataset
- is complete, the loading class :any:`PtydScan` retrieves the
+ is complete, the loading class :py:class:`PtydScan` retrieves the
total number of frames from the payload ``chunks/``.
The next set of optional parameters are
diff --git a/doc/rst_templates/getting_started.tmp b/doc/rst_templates/getting_started.tmp
index cdcfcdccd..0b5d7850e 100644
--- a/doc/rst_templates/getting_started.tmp
+++ b/doc/rst_templates/getting_started.tmp
@@ -378,4 +378,3 @@ branch :py:data:`.engine` for detailed parameter descriptions.
.. literalinclude:: ../../templates/minimal_load_and_run.py
:language: python
:linenos:
- :emphasize-lines: 25-56
diff --git a/doc/script2rst.py b/doc/script2rst.py
index 352679301..0a19c45ac 100644
--- a/doc/script2rst.py
+++ b/doc/script2rst.py
@@ -12,12 +12,15 @@
if len(sys.argv) == 1:
import pkg_resources
+ import subprocess
+
for script in scripts:
scr = pkg_resources.resource_filename('ptypy', tutorial_dir+script)
if not os.path.exists(scr):
print('Using backup tutorial for %s' % script)
scr = '../tutorial/'+script
- os.system('python '+sys.argv[0]+' '+scr)
+ #subprocess.call(['python',sys.argv[0]+' '+scr]) # doesn't work: passes script and argument as a single token
+ os.system('python ' + sys.argv[0]+' '+scr)
sys.exit()
indent_keys = ['for', 'if', 'with', 'def', 'class']
@@ -126,12 +129,17 @@ def check_for_fig(wline):
frst.write(' ' + line2)
continue
+ decorator = False
indent = False
for key in indent_keys:
if line.startswith(key):
indent = True
break
-
+
+ if line.startswith('@'):
+ indent = True
+ decorator = True
+
if indent:
frst.write('\n::\n\n >>> '+line)
func = line
@@ -139,9 +147,12 @@ def check_for_fig(wline):
while True:
line2 = fpy.readline()
if line2.strip() and not line2.startswith(' '):
- frst.write('\n')
- fpy.seek(pt)
- break
+ if decorator:
+ decorator = False
+ else:
+ frst.write('\n')
+ fpy.seek(pt)
+ break
func += line2
frst.write(' >>> '+line2)
pt = fpy.tell()
diff --git a/doc/version.py b/doc/version.py
index 6fa1f4553..e36958bf6 100644
--- a/doc/version.py
+++ b/doc/version.py
@@ -1,8 +1,8 @@
# THIS FILE IS GENERATED FROM ptypy/setup.py
-short_version='0.2.1'
-version='0.2.1'
-release=True
+short_version='0.3.0'
+version='0.3.0'
+release=False
if not release:
version += '.dev'
diff --git a/extra/ipynb/plotclient.ipynb b/extra/ipynb/plotclient.ipynb
index 88133f5ce..e9767d994 100644
--- a/extra/ipynb/plotclient.ipynb
+++ b/extra/ipynb/plotclient.ipynb
@@ -11,7 +11,6 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false,
"extensions": {
"jupyter_dashboards": {
"version": 1,
@@ -30,7 +29,8 @@
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import time\n",
- "from ptypy.utils import PlotClient, validator, ortho\n",
+ "from ptypy.utils import PlotClient, ortho\n",
+ "from ptypy.utils.descriptor import defaults_tree\n",
"from ptypy.utils.plot_utils import PtyAxis\n",
"from ptypy.io import h5read"
]
@@ -46,7 +46,6 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false,
"extensions": {
"jupyter_dashboards": {
"version": 1,
@@ -61,7 +60,8 @@
},
"outputs": [],
"source": [
- "pc = PlotClient(validator.make_sub_default('.io.interaction'),in_thread=False)\n",
+ "#pc = PlotClient(defaults_tree['io.interaction'].make_default(depth=5), in_thread=False)\n",
+ "pc = PlotClient(in_thread=False)\n",
"data = lambda: pc.get_data()\n",
"pc.start()"
]
@@ -76,9 +76,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"pc.stop()"
@@ -94,9 +92,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"base_path = '...'\n",
@@ -115,9 +111,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"ptya = PtyAxis(channel='a',cmap='viridis', vmin=None, vmax=None)\n",
@@ -136,9 +130,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"ptya = PtyAxis(channel='c',cmap='magma', vmax=None)\n",
@@ -157,9 +149,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"fig = plt.figure()\n",
@@ -216,7 +206,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
- "version": "2.7.10"
+ "version": "2.7.12"
}
},
"nbformat": 4,
diff --git a/environment.yml b/full_dependencies.yml
similarity index 80%
rename from environment.yml
rename to full_dependencies.yml
index 6c48b7ddd..6e7ab2da2 100644
--- a/environment.yml
+++ b/full_dependencies.yml
@@ -1,4 +1,4 @@
-name: test-environment
+name: full_dependencies
channels:
- conda-forge
dependencies:
@@ -6,14 +6,13 @@ dependencies:
- numpy
- scipy
- matplotlib
- - pytest
- h5py
- pyzmq
- pep8
- - pytest
- mpi4py
- pil
- pyfftw
- pip:
- pytest-cov
- coveralls
+ - fabio
diff --git a/ptypy/__init__.py b/ptypy/__init__.py
index 0c59f29fb..d6cee956f 100644
--- a/ptypy/__init__.py
+++ b/ptypy/__init__.py
@@ -1,73 +1,79 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
-from version import short_version, version, release
+from .version import short_version, version, release
+
+__doc__ = \
"""
PTYPY(v%(short)s): A ptychography reconstruction package.
To cite PTYPY in publications, use
@article{ ... }
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :copyright: Copyright 2018 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
:version: %(version)s
:status: %(devrel)s
-""" % {'version':version,'devrel':'release' if release else 'development','short':short_version}
+""" % {'version': version, 'devrel': 'release' if release else 'development', 'short': short_version}
del short_version, release
try:
import zmq
- __has_zmq__= True
+except ImportError as ie:
+ __has_zmq__ = False
+else:
+ __has_zmq__ = True
del zmq
-except ImportError('ZeroMQ not found.\nInteraction server & client disabled.\n\
-Install python-zmq via the package repositories or with `pip install --user pyzmq`'):
- __has_zmq__= False
try:
import mpi4py
- __has_mpi4py__= True
+except ImportError as ie:
+ __has_mpi4py__ = False
+else:
+ __has_mpi4py__ = True
del mpi4py
-except ImportError('Message Passaging for Python (mpi4py) not found.\n\
-CPU-parallelization disabled.\n\
-Install python-mpi4py via the package repositories or with `pip install --user mpi4py`'):
- __has_mpi4py__= False
try:
import matplotlib
- __has_matplotlib__= True
+except ImportError as ie:
+ __has_matplotlib__ = False
+else:
+ __has_matplotlib__ = True
del matplotlib
-except ImportError('Plotting for Python (matplotlib) not found.\n\
-Plotting disabled.\n\
-Install python-matplotlib via the package repositories or with `pip install --user matplotlib`'):
- __has_matplotlib__= False
# Initialize MPI (eventually GPU)
-from utils import parallel
+from .utils import parallel
# Logging
-from utils import verbose
-#verbose.set_level(2)
-
-import utils
-# Import core modules
-import io
-import experiment
-import core
+from .utils import verbose
+# Log dependency information immediately
+if not __has_zmq__:
+ __zmq_msg = 'ZeroMQ not found.\nInteraction server & client disabled.\n\
+ Install python-zmq via the package repositories or with `pip install --user pyzmq`'
+ verbose.logger.warn(__zmq_msg)
+if not __has_mpi4py__:
+ __mpi_msg = 'Message Passing for Python (mpi4py) not found.\n\
+ CPU-parallelization disabled.\n\
+ Install python-mpi4py via the package repositories or with `pip install --user mpi4py`'
+ verbose.logger.warn(__mpi_msg)
+if not __has_matplotlib__:
+ __mpl_msg = 'Plotting for Python (matplotlib) not found.\n\
+ Plotting disabled.\n\
+ Install python-matplotlib via the package repositories or with `pip install --user matplotlib`'
+ verbose.logger.warn(__mpl_msg)
-#from core import *
-import simulations
-import resources
+# Start a parameter tree
+from .utils.descriptor import EvalDescriptor
+defaults_tree = EvalDescriptor('root')
+del EvalDescriptor
-if __name__ == "__main__":
- # TODO: parse command line arguments for extra options
- import sys
- # Get parameters from command line argument
- param_filename = sys.argv[1]
- p = parameters.load(param_filename)
- # Initialize Ptycho object
- pt = Ptycho(p)
+# Import core modules
+from . import utils
+from . import io
+from . import experiment
+from . import core
+from . import simulations
+from . import resources
- # Start reconstruction
- pt.run()
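
The package now exposes a single `defaults_tree` root that submodules populate on import; `ptypy/core/data.py` below registers `PtyScan` on it via `parse_doc`. A hedged sketch of that registration pattern (class name and parameter invented for illustration):

    # Sketch: registering defaults on the new tree (hypothetical class).
    from ptypy import defaults_tree

    @defaults_tree.parse_doc('scandata.DemoScan')
    class DemoScan(object):
        """
        A demo scan class.

        Defaults:

        [energy]
        default = 9.7
        type = float
        help = Photon energy of the incident radiation in keV
        """

    # The parsed parameters are then reachable by path:
    desc = defaults_tree['scandata.DemoScan.energy']
    print(desc.default, desc.help)
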
diff --git a/ptypy/core/__init__.py b/ptypy/core/__init__.py
index ed7352e35..38d47f3d5 100644
--- a/ptypy/core/__init__.py
+++ b/ptypy/core/__init__.py
@@ -7,12 +7,13 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-from classes import *
-from ptycho import *
-from manager import *
-import xy
-import illumination
-import sample
-import geometry
-import save_load
-import data
+from .classes import *
+from .ptycho import *
+from .manager import *
+from . import xy
+from . import illumination
+from . import sample
+from . import geometry
+from . import geometry_bragg
+from . import save_load
+from . import data
diff --git a/ptypy/core/classes.py b/ptypy/core/classes.py
index 051ebfb5a..3e33fecdd 100644
--- a/ptypy/core/classes.py
+++ b/ptypy/core/classes.py
@@ -34,6 +34,8 @@
"""
import numpy as np
import weakref
+from collections import OrderedDict
+
try:
from pympler.asizeof import asizeof
use_asizeof = True
@@ -86,7 +88,10 @@ class Base(object):
_CHILD_PREFIX = 'ID'
_PREFIX = BASE_PREFIX
-
+
+ __slots__ = ['ID','numID','owner','_pool','_recs','_record']
+ _fields = [('ID', '<S64'), ('numID', '<i8')]
[...]
+ if idx >= l:
+ nl = l + 8192 if idx > 10000 else 2*l
+ recs = np.resize(recs,(nl,))
+ self._recs[prefix] = recs
+ rec = recs[idx]
+ obj._record = rec
+ rec['ID'] = nID
+
return
-
+
@staticmethod
def _num_to_id(num):
"""
@@ -179,12 +198,18 @@ def _num_to_id(num):
@classmethod
def _from_dict(cls, dct):
"""
- create new instance from dictionary dct
+ Create new instance from dictionary dct
should be compatible with _to_dict()
"""
inst = cls.__new__(cls)
- inst.__dict__.update(dct)
-
+ for k in inst.__slots__:
+ if k not in dct:
+ continue
+ else:
+ setattr(inst, k, dct[k])
+ if hasattr(inst,'__dict__'):
+ inst.__dict__.update(dct)
+
# Calling post dictionary import routine (empty in base)
inst._post_dict_import()
@@ -203,7 +228,12 @@ def _to_dict(self):
a dictionary. Overwrite in child for custom behavior.
Default. Returns shallow copy of internal dict as default
"""
- return self.__dict__.copy()
+ res = OrderedDict()
+ for k in self.__slots__:
+ res[k] = getattr(self, k)
+ if hasattr(self, '__dict__'):
+ res.update(self.__dict__.copy())
+ return res
def calc_mem_usage(self):
space = 64 # that is for the class itself
@@ -217,22 +247,24 @@ def calc_mem_usage(self):
space += asizeof(v, limit=0)
for kk, vv in v.iteritems():
pool_space += vv.calc_mem_usage()[0]
-
- for k, v in self.__dict__.iteritems():
- if issubclass(type(v), Base):
- continue
- elif str(k) == '_pool' or str(k) == 'pods':
- continue
- else:
- if use_asizeof:
- s = asizeof(v)
- space += s
- if type(v) is np.ndarray:
- npy_space += v.nbytes
+
+ if hasattr(self, '__dict__'):
+ for k, v in self.__dict__.iteritems():
+ if issubclass(type(v), Base):
+ continue
+ elif str(k) == '_pool' or str(k) == 'pods':
+ continue
+ else:
+ if use_asizeof:
+ s = asizeof(v)
+ space += s
+ if type(v) is np.ndarray:
+ npy_space += v.nbytes
return space + pool_space + npy_space, pool_space, npy_space
+
def get_class(ID):
"""
Determine ptypy class from unique `ID`
@@ -290,8 +322,8 @@ class Storage(Base):
_PREFIX = STORAGE_PREFIX
- def __init__(self, container, ID=None, data=None, shape=(1, 1, 1), fill=0.,
- psize=None, origin=None, layermap=None, padonly=False,
+ def __init__(self, container, ID=None, data=None, shape=DEFAULT_SHAPE,
+ fill=0., psize=1., origin=None, layermap=None, padonly=False,
**kwargs):
"""
Parameters
@@ -305,9 +337,11 @@ def __init__(self, container, ID=None, data=None, shape=(1, 1, 1), fill=0.,
data: ndarray or None
A numpy array to use as initial buffer.
+ *Deprecated* in v0.3, use fill method instead.
- shape : 3-tuple
- The shape of the buffer
+ shape : tuple, int, or None
+ The shape of the buffer. If None or int, the dimensionality
+ is found from the owning Container instance.
fill : float or complex
The default value to fill storage with, will be converted to
@@ -338,19 +372,28 @@ def __init__(self, container, ID=None, data=None, shape=(1, 1, 1), fill=0.,
self.fill_value = fill if fill is not None else 0.
# For documentation
- #: Three dimensional array as data buffer
+ #: Three/four or potentially N-dimensional array as data buffer
self.data = None
+ # dimensionality suggestion from container
+ ndim = container.ndim if container.ndim is not None else 2
+
if shape is None:
- shape = DEFAULT_SHAPE
+ shape = (1,) + (1,) * ndim
elif np.isscalar(shape):
- shape = (1, int(shape), int(shape))
- elif len(shape) == 2:
- shape = (1,) + tuple(shape)
- elif len(shape) != 3:
- raise ValueError(
- '`shape` must be None or scalar or 2-tuple or 3-tuple of int')
+ shape = (1,) + (int(shape),) * ndim
+ else:
+ shape = tuple(shape)
+ if len(shape) not in [3, 4]:
+ logger.warn('Storage view access dimension %d is outside the '
+ 'regular range (2, 3). Behavior is untested.' % len(shape[1:]))
+
+ self.shape = shape
+ self.data = np.empty(self.shape, self.dtype)
+ self.data.fill(self.fill_value)
+
+ """
# Set data buffer
if data is None:
# Create data buffer
@@ -360,15 +403,17 @@ def __init__(self, container, ID=None, data=None, shape=(1, 1, 1), fill=0.,
else:
# Set initial buffer. Casting the type makes a copy
data = np.asarray(data).astype(self.dtype)
- if data.ndim < 2 or data.ndim > 3:
+ if data.ndim < self.ndim or data.ndim > (self.ndim + 1):
raise ValueError(
- 'Initial buffer must be 2D or 3D, this one is %dD.'
- % data.ndim)
- elif data.ndim == 2:
+ 'For `data_dims` = %d, initial buffer must be'
+ ' %dD or %dD, this one is %dD'
+ % (self.ndim, self.ndim, self.ndim+1, data.ndim))
+ elif data.ndim == self.ndim:
self.data = data.reshape((1,) + data.shape)
else:
self.data = data
self.shape = self.data.shape
+ """
if layermap is None:
layermap = range(len(self.data))
@@ -379,7 +424,7 @@ def __init__(self, container, ID=None, data=None, shape=(1, 1, 1), fill=0.,
# Need to bootstrap the parameters.
# We set the initial center to the middle of the array
- self._center = u.expect2(self.shape[-2:]) // 2
+ self._center = u.expectN(self.shape[-self.ndim:], self.ndim) // 2
# Set pixel size (in physical units)
self.psize = psize if psize is not None else DEFAULT_PSIZE
@@ -406,40 +451,12 @@ def __init__(self, container, ID=None, data=None, shape=(1, 1, 1), fill=0.,
# solution required
# self._origin = None
- def _to_dict(self):
+ @property
+ def ndim(self):
"""
- We will have to recompute the datalist here
+ Number of dimensions for :any:`View` access
"""
- cp = self.__dict__.copy()
- # Delete datalist reference
- try:
- del cp['_datalist']
- except:
- pass
-
- return cp
- # self._make_datalist()
-
- def _make_datalist(self):
- pass
- """
- # BE does not give the same result on all nodes
- #self._datalist = [None] * (max(self.layermap)+1)
- #u.parallel.barrier()
- #print u.parallel.rank
- self._datalist = [None] * max(self.nlayers,max(self.layermap)+1)
- for k,i in enumerate(self.layermap):
- self._datalist[i] = self.data[k]
- """
- """
- @property
- def datalist(self):
-
- if not hasattr(self,'datalist'):
- self._make_datalist()
-
- return self._datalist
- """
+ return len(self.shape[1:])
@property
def dtype(self):
@@ -459,24 +476,28 @@ def copy(self, owner=None, ID=None, fill=None):
If float, set the content to this value. If None, copy the
current content.
"""
- if fill is None:
- # Return a new Storage or sub-class object with a copy of the data.
- return type(self)(owner,
- ID,
- data=self.data.copy(),
- psize=self.psize,
- origin=self.origin,
- layermap=self.layermap)
- else:
- # Return a new Storage or sub-class object with an empty buffer
- new_storage = type(self)(owner,
- ID,
- shape=self.shape,
- psize=self.psize,
- origin=self.origin,
- layermap=self.layermap)
+
+ # if fill is None:
+ # # Return a new Storage or sub-class object with a copy of the data.
+ # return type(self)(owner,
+ # ID,
+ # data=self.data.copy(),
+ # psize=self.psize,
+ # origin=self.origin,
+ # layermap=self.layermap)
+ # else:
+ # # Return a new Storage or sub-class object with an empty buffer
+ new_storage = type(self)(owner,
+ ID,
+ shape=self.shape,
+ psize=self.psize,
+ origin=self.origin,
+ layermap=self.layermap)
+ if fill is not None:
new_storage.fill(fill)
- return new_storage
+ else:
+ new_storage.fill(self.data.copy())
+ return new_storage
def fill(self, fill=None):
"""
@@ -501,12 +522,13 @@ def fill(self, fill=None):
self.data.fill(fill)
elif type(fill) is np.ndarray:
# Replace the buffer
- if fill.ndim < 2 or fill.ndim > 3:
- raise ValueError('Numpy ndarray fill must be 2D or 3D.')
- elif fill.ndim == 2:
- fill = np.resize(fill, (self.shape[0],
- fill.shape[0],
- fill.shape[1]))
+ if fill.ndim < self.ndim or fill.ndim > (self.ndim + 1):
+ raise ValueError(
+ 'For `data_dims` = %d, initial buffer must be'
+ ' %dD or %dD, this one is %dD'
+ % (self.ndim, self.ndim, self.ndim+1, fill.ndim))
+ elif fill.ndim == self.ndim:
+ fill = np.resize(fill, (self.shape[0],) + fill.shape)
self.data = fill.astype(self.dtype)
self.shape = self.data.shape
@@ -535,33 +557,32 @@ def update_views(self, v=None):
self.update_views(v)
return
+ if not self.ndim == v.ndim:
+ raise ValueError(
+ 'Storage %s(ndim=%d) and View %s(ndim=%d) have conflicting '
+ 'data dimensions' % (self.ID, self.ndim, v.ID, v.ndim))
+
# Synchronize pixel size
- v.psize = self.psize.copy()
+ v.psize = self.psize #.copy()
- # v.shape can be None upon initialization - this means "full frame"
- if v.shape is None:
- v.shape = u.expect2(self.shape[-2:])
- v.pcoord = v.shape / 2.
- v.coord = self._to_phys(v.pcoord)
- else:
- # Convert the physical coordinates of the view to pixel coordinates
- v.pcoord = self._to_pix(v.coord)
+ # Convert the physical coordinates of the view to pixel coordinates
+ pcoord = self._to_pix(v.coord)
# Integer part (note that np.round is not stable for odd arrays)
- v.dcoord = np.round(v.pcoord + 0.00001).astype(int)
+ v.dcoord = np.round(pcoord + 0.00001).astype(int)
# These are the important attributes used when accessing the data
- v.dlow = v.dcoord - v.shape/2
- v.dhigh = v.dcoord + (v.shape + 1)/2
+ v.dlow = v.dcoord - v.shape / 2
+ v.dhigh = v.dcoord + (v.shape + 1) / 2
# v.roi = np.array([pix - v.shape/2, pix + (v.shape + 1)/2])
- v.sp = v.pcoord - v.dcoord
+ v.sp = pcoord - v.dcoord
# if self.layermap is None:
# v.slayer = 0
# else:
# v.slayer = self.layermap.index(v.layer)
- def reformat(self, newID=None):
+ def reformat(self, newID=None, update=True):
"""
Crop or pad if required.
@@ -570,6 +591,11 @@ def reformat(self, newID=None):
newID : str or int
If None (default) act on self. Otherwise create a copy
of self before doing the cropping and padding.
+
+ update : bool
+ If True, update all Views before reformatting. This is not necessarily
+ needed if Views have been recently instantiated, but roughly doubles
+ execution time.
Returns
-------
@@ -580,10 +606,11 @@ def reformat(self, newID=None):
# If a new storage is requested, make a copy.
if newID is not None:
s = self.copy(newID)
- s.reformat(newID=None)
+ s.reformat()
return s
# Make sure all views are up to date
+ # This call takes roughly half the time of .reformat()
self.update()
# List of views on this storage
@@ -594,34 +621,39 @@ def reformat(self, newID=None):
logger.debug('%s[%s] :: %d views for this storage'
% (self.owner.ID, self.ID, len(views)))
+ sh = self.data.shape
+
# Loop through all active views to get individual boundaries
- rows = []
- cols = []
+ #axes = [[]] * self.ndim
+
+ mn = [np.inf] * self.ndim
+ mx = [-np.inf] * self.ndim
layers = []
+ dims = range(self.ndim)
for v in views:
if not v.active:
continue
# Accumulate the regions of interest to
# compute the full field of view
- # rows += [v.roi[0, 0], v.roi[1, 0]]
- # cols += [v.roi[0, 1], v.roi[1, 1]]
- rows += [v.dlow[0], v.dhigh[0]]
- cols += [v.dlow[1], v.dhigh[1]]
-
+ for d in dims:
+ #axes[d] += [v.dlow[d], v.dhigh[d]]
+ mn[d] = min(mn[d],v.dlow[d])
+ mx[d] = max(mx[d],v.dhigh[d])
+
# Gather a (unique) list of layers
if v.layer not in layers:
layers.append(v.layer)
sh = self.data.shape
- # Compute 2d misfit (distance between the buffer boundaries and the
+ # Compute Nd misfit (distance between the buffer boundaries and the
# region required to fit all the views)
- misfit = np.array([[-np.min(rows), np.max(rows) - sh[-2]],
- [-np.min(cols), np.max(cols) - sh[-1]]])
-
- logger.debug('%s[%s] :: misfit = [%s, %s]'
- % (self.owner.ID, self.ID, misfit[0], misfit[1]))
+ misfit = np.array([[-mn[d], mx[d] - sh[d+1]] for d in dims])
+
+ _misfit_str = ', '.join(['%s' % m for m in misfit])
+ logger.debug('%s[%s] :: misfit = [%s]'
+ % (self.owner.ID, self.ID, _misfit_str))
posmisfit = (misfit > 0)
negmisfit = (misfit < 0)
@@ -631,19 +663,20 @@ def reformat(self, newID=None):
if posmisfit.any() or negmisfit.any():
logger.debug(
- 'Storage %s of container %s has a misfit of [%s, %s] between '
+ 'Storage %s of container %s has a misfit of [%s] between '
'its data and its views'
- % (str(self.ID), str(self.owner.ID), misfit[0], misfit[1]))
-
+ % (str(self.ID), str(self.owner.ID), _misfit_str))
+
if needtocrop_or_pad:
if self.padonly:
misfit[negmisfit] = 0
# Recompute center and shape
new_center = self.center + misfit[:, 0]
- new_shape = (sh[0],
- sh[1] + misfit[0].sum(),
- sh[2] + misfit[1].sum())
+ new_shape = (sh[0],)
+ for d in dims:
+ new_shape += (sh[d+1] + misfit[d].sum(),)
+
logger.debug('%s[%s] :: center: %s -> %s'
% (self.owner.ID, self.ID, str(self.center),
str(new_center)))
@@ -655,7 +688,7 @@ def reformat(self, newID=None):
raise RuntimeError('Arrays larger than 50M not supported. You '
'requested %.2fM pixels.' % megapixels)
- # Apply 2d misfit
+ # Apply Nd misfit
if self.data is not None:
new_data = u.crop_pad(
self.data,
@@ -669,9 +702,10 @@ def reformat(self, newID=None):
new_data = self.data
new_shape = sh
new_center = self.center
-
+
# Deal with layermap
new_layermap = sorted(layers)
+
if self.layermap != new_layermap:
relaid_data = []
for i in new_layermap:
@@ -680,7 +714,7 @@ def reformat(self, newID=None):
d = new_data[self.layermap.index(i)]
else:
# A new layer
- d = np.empty(new_shape[-2:], self.dtype)
+ d = np.empty(new_shape[-self.ndim:], self.dtype)
d.fill(self.fill_value)
relaid_data.append(d)
new_data = np.array(relaid_data)
@@ -688,9 +722,8 @@ def reformat(self, newID=None):
self.layermap = new_layermap
self.nlayers = len(new_layermap)
-
- # BE: set a layer index in the view the datalist access has proven to
- # be too slow.
+
+ # set layer index in the view
for v in views:
v.dlayer = self.layermap.index(v.layer)
@@ -705,9 +738,6 @@ def reformat(self, newID=None):
# A storage is "distributed" if and only if layer maps are different across nodes.
self._update_distributed()
- # make datalist
- #self._make_datalist()
-
def _update_distributed(self):
self.distributed = False
if u.parallel.MPIenabled:
@@ -742,7 +772,7 @@ def _to_phys(self, pix):
@property
def psize(self):
"""
- :returns: The pixel size.
+ The pixel size.
"""
return self._psize
@@ -751,7 +781,7 @@ def psize(self, v):
"""
Set the pixel size, and update all the internal variables.
"""
- self._psize = u.expect2(v)
+ self._psize = u.expectN(v, self.ndim)
self._origin = - self._center * self._psize
self.update()
@@ -767,7 +797,7 @@ def origin(self, v):
"""
Set the origin and update all the internal variables.
"""
- self._origin = u.expect2(v)
+ self._origin = u.expectN(v, self.ndim)
self._center = - self._origin / self._psize
self.update()
@@ -784,7 +814,7 @@ def center(self, v):
"""
Set the center and update all the internal variables.
"""
- self._center = u.expect2(v)
+ self._center = u.expectN(v, self.ndim)
self._origin = - self._center * self._psize
self.update()
@@ -826,8 +856,8 @@ def zoom_to_psize(self, new_psize, **kwargs):
new_psize : scalar or array_like
new pixel size
"""
- new_psize = u.expect2(new_psize)
- sh = np.asarray(self.shape[-2:])
+ new_psize = u.expectN(new_psize, self.ndim)
+ sh = np.asarray(self.shape[1:])
# psize is quantized
new_sh = np.round(self.psize / new_psize * sh)
new_psize = self.psize / new_sh * sh
@@ -839,7 +869,7 @@ def zoom_to_psize(self, new_psize, **kwargs):
# Zoom data buffer.
# Could be that it is faster and cleaner to loop over first axis
zoom = new_sh / sh
- self.fill(u.zoom(self.data, [1.0, zoom[0], zoom[1]], **kwargs))
+ self.fill(u.zoom(self.data, [1.0] + [z for z in zoom], **kwargs))
self._psize = new_psize
self.zoom_cycle += 1 # !!! BUG: Unresolved attribute reference
@@ -855,15 +885,15 @@ def grids(self):
"""
Returns
-------
- x, y: ndarray
+ x, y : ndarray
grids in the shape of internal buffer
"""
sh = self.data.shape
- nm = np.indices(sh)[-2:]
- flat = nm.reshape((2, self.data.size))
+ nm = np.indices(sh)[1:]
+ flat = nm.reshape((self.ndim, self.data.size))
c = self._to_phys(flat.T).T
- c = c.reshape((2,) + sh)
- return c[0], c[1]
+ c = c.reshape((self.ndim,) + sh)
+ return tuple(c)
def get_view_coverage(self):
"""
@@ -949,16 +979,18 @@ def formatted_report(self, table_format=None, offset=8, align='right',
for key, column in fr.table:
if str(key) == 'shape':
dct[key] = tuple(self.data.shape)
- info = '%d * %d * %d' % dct[key]
+ info = ('%d' + ' * %d' * self.ndim) % dct[key]
elif str(key) == 'psize':
+ dec = np.floor(np.log10(self.psize).min())
dct[key] = tuple(self.psize)
- info = '%.2e * %.2e' % tuple(dct[key])
- info = info.split('e', 1)[0] + info.split('e', 1)[1][3:]
+ info = '*'.join(['%.1f' % p for p in self.psize * 10**(-dec)])
+ info += 'e%d' % dec
elif str(key) == 'dimension':
- dct[key] = (self.psize[0] * self.data.shape[-2],
- self.psize[1] * self.data.shape[-1])
- info = '%.2e*%.2e' % tuple(dct[key])
- info = info.split('e', 1)[0] + info.split('e', 1)[1][3:]
+ r = self.psize * self.data.shape[1:]
+ dec = np.floor(np.log10(r).min())
+ dct[key] = tuple(r)
+ info = '*'.join(['%.1f' % p for p in r * 10**(-dec)])
+ info += 'e%d' % dec
elif str(key) == 'memory':
dct[key] = float(self.data.nbytes) / 1e6
info = '%.1f' % dct[key]
@@ -1001,9 +1033,14 @@ def __getitem__(self, v):
# return shift(self.data[v.slayer, v.roi[0, 0]:v.roi[1, 0],
# v.roi[0, 1]:v.roi[1, 1]], v.sp)
if isinstance(v, View):
- return shift(self.data[
- v.dlayer, v.dlow[0]:v.dhigh[0], v.dlow[1]:v.dhigh[1]],
- v.sp)
+ if self.ndim == 2:
+ return shift(self.data[
+ v.dlayer, v.dlow[0]:v.dhigh[0], v.dlow[1]:v.dhigh[1]],
+ v.sp)
+ elif self.ndim == 3:
+ return shift(self.data[
+ v.dlayer, v.dlow[0]:v.dhigh[0], v.dlow[1]:v.dhigh[1],
+ v.dlow[2]:v.dhigh[2]], v.sp)
elif v in self.layermap:
return self.data[self.layermap.index(v)]
else:
@@ -1032,8 +1069,30 @@ def __setitem__(self, v, newdata):
# self.data[v.slayer, v.roi[0, 0]:v.roi[1, 0],
# v.roi[0, 1]:v.roi[1, 1]] = shift(newdata, -v.sp)
if isinstance(v, View):
- self.data[v.dlayer, v.dlow[0]:v.dhigh[0], v.dlow[1]:v.dhigh[1]] = (
- shift(newdata, -v.sp))
+ # there must be a nicer way to do this, numpy.take is nearly
+ # right, but returns copies and not views.
+ if self.ndim == 2:
+ self.data[v.dlayer,
+ v.dlow[0]:v.dhigh[0],
+ v.dlow[1]:v.dhigh[1]] = (shift(newdata, -v.sp))
+ elif self.ndim == 3:
+ self.data[v.dlayer,
+ v.dlow[0]:v.dhigh[0],
+ v.dlow[1]:v.dhigh[1],
+ v.dlow[2]:v.dhigh[2]] = (shift(newdata, -v.sp))
+ elif self.ndim == 4:
+ self.data[v.dlayer,
+ v.dlow[0]:v.dhigh[0],
+ v.dlow[1]:v.dhigh[1],
+ v.dlow[2]:v.dhigh[2],
+ v.dlow[3]:v.dhigh[3]] = (shift(newdata, -v.sp))
+ elif self.ndim == 5:
+ self.data[v.dlayer,
+ v.dlow[0]:v.dhigh[0],
+ v.dlow[1]:v.dhigh[1],
+ v.dlow[2]:v.dhigh[2],
+ v.dlow[3]:v.dhigh[3],
+ v.dlow[4]:v.dhigh[4]] = (shift(newdata, -v.sp))
elif v in self.layermap:
self.data[self.layermap.index(v)] = newdata
else:
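
The per-dimension branches in `__setitem__` above (and the "numpy.take is nearly right" comment) hint at a generic alternative: numpy accepts a mixed tuple of an integer and slices as an index. A standalone sketch of that generalization, not what the patch commits, and untested against ptypy's subpixel shift handling:

    # Sketch: one tuple-of-slices assignment instead of per-ndim branches.
    import numpy as np

    data = np.zeros((2, 8, 8, 8))                # (layer, z, y, x) buffer
    dlayer, dlow, dhigh = 0, (1, 2, 3), (5, 6, 7)
    sl = (dlayer,) + tuple(slice(lo, hi) for lo, hi in zip(dlow, dhigh))
    data[sl] = 1.0                               # same as data[0, 1:5, 2:6, 3:7]
    assert data[0, 1:5, 2:6, 3:7].sum() == 4 * 4 * 4
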
@@ -1058,7 +1117,7 @@ def shift(v, sp):
class View(Base):
"""
- A "window" on a Container.
+ A 'window' on a Container.
A view stores all the slicing information to extract a 2D piece
of Container.
@@ -1067,16 +1126,29 @@ class View(Base):
----
The final structure of this class is yet up to debate
and the constructor signature may change. Especially since
- "DEFAULT_ACCESSRULE" is yet so small, its contents could be
+ 'DEFAULT_ACCESSRULE' is yet so small, its contents could be
incorporated in the constructor call.
"""
+ _fields = Base._fields + \
+ [('active', 'b1'),
+ ('dlayer', '<i8'),
+ ('layer', '<i8'),
+ ('shape', '<i8', (5,)),
+ ('dlow', '<i8', (5,)),
+ ('dhigh', '<i8', (5,)),
+ ('dcoord', '<i8', (5,)),
+ ('psize', '<f8', (5,)),
+ ('coord', '<f8', (5,)),
+ ('sp', '<f8', (5,))]
[...]
first = ('%s -> %s[%s] : shape = %s layer = %s coord = %s'
% (self.owner.ID, self.storage.ID, self.ID,
@@ -1207,6 +1276,35 @@ def __str__(self):
else:
return first + '\n ACTIVE : slice = %s' % str(self.slice)
+ @property
+ def active(self):
+ return self._record['active']
+
+ @active.setter
+ def active(self,v):
+ self._record['active'] = v
+
+ @property
+ def dlayer(self):
+ return self._record['dlayer']
+
+ @dlayer.setter
+ def dlayer(self,v):
+ self._record['dlayer'] = v
+
+
+ @property
+ def layer(self):
+ return self._record['layer']
+
+ @layer.setter
+ def layer(self, v):
+ self._record['layer'] = v
+
+ @property
+ def ndim(self):
+ return self._ndim
+
@property
def slice(self):
"""
@@ -1219,9 +1317,10 @@ def slice(self):
# else:
# slayer = self.storage.layermap.index(self.layer)
- return (self.dlayer,
- slice(self.dlow[0], self.dhigh[0]),
- slice(self.dlow[1], self.dhigh[1]))
+ res = (self.dlayer,)
+ for d in range(self.ndim):
+ res += (slice(self.dlow[d], self.dhigh[d]),)
+ return res
@property
def pod(self):
@@ -1230,8 +1329,22 @@ def pod(self):
This is a common call in the code and has therefore found
its way here. May return ``None`` if there is no pod connected.
"""
- return self._pod() # weak reference
+ if isinstance(self._pod, weakref.ref):
+ return self._pod() # weak reference
+ else:
+ return self._pod
# return self.pods.values()[0]
+
+ @property
+ def pods(self):
+ """
+ Returns all :any:`POD`s still connected to this view as a dict.
+ """
+ if self._pods is not None:
+ return self._pods
+ else:
+ pod = self.pod
+ return {} if pod is None else {pod.ID: pod}
@property
def data(self):
@@ -1252,7 +1365,9 @@ def shape(self):
"""
Two dimensional shape of View.
"""
- return self._arint[0] if (self._arint[0] > 0).all() else None
+
+ sh = self._record['shape']
+ return None if (sh==0).all() else sh[:self._ndim]
@shape.setter
def shape(self, v):
@@ -1260,75 +1375,81 @@ def shape(self, v):
Set two dimensional shape of View.
"""
if v is None:
- self._arint[0] = u.expect2(0)
+ self._record['shape'][:] = 0
+ elif np.isscalar(v):
+ sh = (int(v),) * self.owner.ndim
+ self._ndim = len(sh)
+ self._record['shape'][:len(sh)] = sh
else:
- self._arint[0] = u.expect2(v)
+ self._ndim = len(v)
+ self._record['shape'][:len(v)] = v
@property
def dlow(self):
"""
Low side of the View's data range.
"""
- return self._arint[1]
+ return self._record['dlow'][:self._ndim]
@dlow.setter
def dlow(self, v):
"""
Set low side of the View's data range.
"""
- self._arint[1] = v
+ self._record['dlow'][:self._ndim] = v
@property
def dhigh(self):
"""
High side of the View's data range.
"""
- return self._arint[2]
+ return self._record['dhigh'][:self._ndim]
@dhigh.setter
def dhigh(self, v):
"""
Set high side of the View's data range.
"""
- self._arint[2] = v
+ self._record['dhigh'][:self._ndim] = v
@property
def dcoord(self):
"""
Center coordinate (index) in data buffer.
"""
- return self._arint[3]
+ return self._record['dcoord'][:self._ndim]
@dcoord.setter
def dcoord(self, v):
"""
Set high side of the View's data range.
"""
- self._arint[3] = v
+ self._record['dcoord'][:self._ndim] = v
@property
def psize(self):
"""
Pixel size of the View.
"""
- return self._arfloat[0] if (self._arfloat[0] > 0.).all() else None
+ ps = self._record['psize'][:self._ndim]
+ return ps if (ps > 0.).all() else None
@psize.setter
def psize(self, v):
"""
- Set pixel size
+ Set pixel size.
"""
if v is None:
- self._arfloat[0] = u.expect2(0.)
+ self._record['psize'][:] = 0.
else:
- self._arfloat[0] = u.expect2(v)
+ self._record['psize'][:self._ndim] = u.expectN(v, self._ndim)
@property
def coord(self):
"""
The View's physical coordinate (meters)
"""
- return self._arfloat[1]
+ return self._record['coord'][:self._ndim]
@coord.setter
def coord(self, v):
@@ -1336,11 +1457,11 @@ def coord(self, v):
Set the View's physical coordinate (meters)
"""
if v is None:
- self._arfloat[1] = u.expect2(0.)
+ self._record['coord'][:] = 0.
elif type(v) is not np.ndarray:
- self._arfloat[1] = u.expect2(v)
+ self._record['coord'][:self._ndim] = u.expectN(v, self._ndim)
else:
- self._arfloat[1] = v
+ self._record['coord'][:self._ndim] = v
@property
def sp(self):
@@ -1348,7 +1469,7 @@ def sp(self):
The subpixel difference (meters) between physical coordinate
and data coordinate.
"""
- return self._arfloat[2]
+ return self._record['sp'][:self._ndim]
@sp.setter
def sp(self, v):
@@ -1357,11 +1478,18 @@ def sp(self, v):
and data coordinate.
"""
if v is None:
- self._arfloat[2] = u.expect2(0.)
+ self._record['sp'][:] = 0.
elif type(v) is not np.ndarray:
- self._arfloat[2] = u.expect2(v)
+ self._record['sp'][:self._ndim] = u.expectN(v, self._ndim)
else:
- self._arfloat[2] = v
+ self._record['sp'][:self._ndim] = v
+
+ @property
+ def pcoord(self):
+ """
+ The View's physical coordinate expressed in pixel space (dcoord + sp).
+ """
+ return self.dcoord + self.sp
class Container(Base):
@@ -1402,32 +1530,31 @@ class Container(Base):
"""
_PREFIX = CONTAINER_PREFIX
- def __init__(self, ptycho=None, ID=None, data_type='complex', **kwargs):
+ def __init__(self, owner=None, ID=None, data_type='complex', data_dims=2):
"""
Parameters
----------
ID : str or int
- A unique ID, managed by the parent
+ A unique ID, managed by the parent.
- ptycho : Ptycho
- The instance of Ptycho associated with this pod.
+ owner : Base
+ A possible subclass of :any:`Base` that holds this Container.
data_type : str or numpy.dtype
data type - either a numpy.dtype object or 'complex' or
'real' (precision is taken from ptycho.FType or ptycho.CType)
"""
- # if ptycho is None:
- # ptycho = ptypy.currentPtycho
-
- super(Container, self).__init__(ptycho, ID)
- # if len(kwargs) > 0:
- # self._initialize(**kwargs)
- # def _initialize(self, original=None, data_type='complex'):
+ super(Container, self).__init__(owner, ID)
self.data_type = data_type
+ #: Default data dimensionality for Views and Storages.
+ self.ndim = data_dims
+ if self.ndim not in (2, 3):
+ logger.warn('Container untested for `data_dims` other than 2 or 3')
+
# Prepare for copy
# self.original = original if original is not None else self
self.original = self
@@ -1543,7 +1670,7 @@ def nbytes(self):
sz += s.data.nbytes
return sz
- def views_in_storage(self, s, active=True):
+ def views_in_storage(self, s, active_only=True):
"""
Return a list of views on :any:`Storage` `s`.
@@ -1551,10 +1678,10 @@ def views_in_storage(self, s, active=True):
----------
s : Storage
The storage to look for.
- active : True or False
+ active_only : True or False
If True (default), return only active views.
"""
- if active:
+ if active_only:
return [v for v in self.original.V.values()
if v.active and (v.storageID == s.ID)]
else:
@@ -1582,7 +1709,7 @@ def copy(self, ID=None, fill=None, dtype=None):
# Create new container
data_type = self.data_type if dtype is None else dtype
- new_cont = type(self)(ptycho=self.owner,
+ new_cont = type(self)(self.owner,
ID=ID,
data_type=data_type)
new_cont.original = self
@@ -1607,7 +1734,6 @@ def fill(self, fill=0.0):
self += fill
for s in self.storages.itervalues():
s.fill(fill)
- s._make_datalist()
def allreduce(self, op=None):
"""
@@ -1907,14 +2033,18 @@ class POD(Base):
_PREFIX = POD_PREFIX
- def __init__(self, ptycho=None, ID=None, views=None, geometry=None,
- **kwargs):
+ def __init__(self, ptycho=None, model=None, ID=None, views=None,
+ geometry=None, **kwargs):
"""
Parameters
----------
ptycho : Ptycho
The instance of Ptycho associated with this pod.
+ model : ~ptypy.core.manager.ScanModel
+ The instance of ScanModel (or one of its subclasses) which describes
+ this pod.
+
ID : str or int
The pod ID, If None it is managed by the ptycho.
@@ -1926,10 +2056,7 @@ def __init__(self, ptycho=None, ID=None, views=None, geometry=None,
"""
super(POD, self).__init__(ptycho, ID, False)
- # if len(kwargs) > 0:
- # self._initialize(**kwargs)
-
- # def _initialize(self, views=None, geometry=None): #,meta=None):
+ self.model = model
# other defaults:
self.is_empty = False
@@ -1943,8 +2070,16 @@ def __init__(self, ptycho=None, ID=None, views=None, geometry=None,
for v in self.V.values():
if v is None:
continue
- v.pods[self.ID] = self
- v._pod = weakref.ref(self)
+ if v._pod is None:
+ # you are first
+ v._pod = weakref.ref(self)
+ else:
+ # View has at least one POD connected
+ if v._pods is None:
+ v._pods = weakref.WeakValueDictionary()
+ # register the older view
+ v._pods[v.pod.ID] = v.pod
+ v._pods[self.ID] = self
#: :any:`Geo` instance with propagators
self.geometry = geometry
@@ -2008,11 +2143,13 @@ def object(self):
if not self.is_empty:
return self.ob_view.data
else:
+ # Empty pod means no object (perfect transmission)
return np.ones(self.geometry.shape, dtype=self.owner.CType)
@object.setter
def object(self, v):
- self.ob_view.data = v
+ if not self.is_empty:
+ self.ob_view.data = v
@property
def probe(self):
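
A recurring theme in this classes.py rework is moving per-object attributes out of instance dictionaries into shared numpy record arrays: `__slots__` removes the `__dict__`, `_fields` declares the record dtype, and properties such as `View.active` read and write `self._record`. A standalone sketch of the pattern (field names follow the properties above; the exact dtypes and array lengths in ptypy may differ):

    # Sketch of the record-array storage used by Base/View above.
    import numpy as np

    fields = [('ID', 'S64'), ('active', 'b1'),
              ('dlayer', '<i8'), ('layer', '<i8'),
              ('dlow', '<i8', (3,)), ('dhigh', '<i8', (3,))]
    recs = np.zeros((8,), dtype=fields)   # one row per registered object

    rec = recs[0]                         # a view into the shared buffer
    rec['active'] = True
    rec['dlow'][:2] = (10, 20)            # field access returns views, not copies

    print(recs['active'][0], recs['dlow'][0])   # True [10 20  0]
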
diff --git a/ptypy/core/data.py b/ptypy/core/data.py
index c593d6e12..ecaf476d5 100644
--- a/ptypy/core/data.py
+++ b/ptypy/core/data.py
@@ -17,23 +17,17 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-from ..utils import parallel
-from ptypy import resources
-from ptypy.core import xy
import numpy as np
import os
import h5py
-if __name__ == "__main__":
- from ptypy import utils as u
- from ptypy import io
- from ptypy.core import geometry
- from ptypy.utils.verbose import logger, log, headerline
-else:
- from .. import utils as u
- from .. import io
- from .. import resources
- from ..utils.verbose import logger, log, headerline
- import geometry
+from . import geometry
+from . import xy
+from .. import utils as u
+from .. import io
+from .. import resources
+from ..utils import parallel
+from ..utils.verbose import logger, log, headerline
+from .. import defaults_tree
PTYD = dict(
# frames, positions
@@ -45,60 +39,17 @@
)
""" Basic Structure of a .ptyd datafile """
-META = dict(
- # Label will be set internally
- label=None,
- # A unique label of user choice
- experimentID=None,
- version='0.1',
- shape=None,
- psize=None,
- # lam=None,
- energy=None,
- center=None,
- distance=None,
-)
-
-GENERIC = u.Param(
- # Filename (e.g. 'foo.ptyd')
- dfile=None,
- # Format for chunk file appendix.
- chunk_format='.chunk%02d',
- # 2-tuple or int for the desired fina frame size
- # roi=None,
- # Saving option: None, 'merge', 'append', 'extlink'
- save=None,
- # Auto center: if False, no automatic center, None only
- # if center is None, True it will be enforced
- auto_center=None,
- # Parallel loading: None, 'data', 'common', 'all'
- load_parallel='data',
- # Rebin diffraction data
- rebin=None,
- # Switching orientation : None, int or 3-tuple switch
- # Actions are (transpose, invert rows, invert cols)
- orientation=None,
- # Minimum number of frames of one chunk if not at end of scan
- min_frames=1,
- # Theoretical position list (This input parameter may get deprecated)
- positions_theory=None,
- # Total number of frames to be prepared
- num_frames=None,
- recipe={},
-)
-""" Default data parameters. See :py:data:`.scan.data`
- and a short listing below """
-
-GENERIC.update(META)
WAIT = 'msg1'
EOS = 'msgEOS'
CODES = {WAIT: 'Scan unfinished. More frames available after a pause',
EOS: 'End of scan reached'}
-__all__ = ['GENERIC', 'PtyScan', 'PTYD', 'PtydScan', 'MoonFlowerScan']
+__all__ = ['PtyScan', 'PTYD', 'PtydScan',
+ 'MoonFlowerScan']
+@defaults_tree.parse_doc('scandata.PtyScan')
class PtyScan(object):
"""
PtyScan: A single ptychography scan, created on the fly or read from file.
@@ -112,44 +63,208 @@ class PtyScan(object):
- On-the-fly support in form of chunked data.
- mpi capable, child classes should not worry about mpi
+ Default data parameters. See :py:data:`.scan.data`
+
+ Defaults:
+
+ [name]
+ default = PtyScan
+ type = str
+ help =
+
+ [dfile]
+ type = file
+ default = None
+ help = Prepared data file path
+ doc = If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+ well as saving is deactivated. If source is the name of an experiment recipe or path to a
+ file, data will be saved to this file
+ userlevel = 0
+
+ [chunk_format]
+ type = str
+ default = .chunk%02d
+ help = Appendix to saved files if save == 'link'
+ doc =
+ userlevel = 2
+
+ [save]
+ type = str
+ default = None
+ help = Saving mode
+ doc = Mode to use to save data to file.
+
+ - ``None``: No saving
+ - ``'merge'``: attempts to merge data in a single chunk **[not implemented]**
+ - ``'append'``: appends each chunk in master \*.ptyd file
+ - ``'link'``: appends external links in master \*.ptyd file and stores chunks separately
+
+ in the path given by the link. Links file paths are relative to master file.
+ userlevel = 1
+
+ [auto_center]
+ type = bool
+ default = None
+ help = Determine if center in data is calculated automatically
+ doc =
+ - ``False``, no automatic centering
+ - ``None``, only if :py:data:`center` is ``None``
+ - ``True``, it will be enforced
+ userlevel = 0
+
+ [load_parallel]
+ type = str
+ default = data
+ help = Determines what will be loaded in parallel
+ doc = Choose from ``None``, ``'data'``, ``'common'``, ``'all'``
+
+ [rebin]
+ type = int
+ default = None
+ help = Rebinning factor
+ doc = Rebinning factor for the raw data frames. ``None`` or ``1`` both mean *no binning*
+ userlevel = 1
+ lowlim = 1
+ uplim = 8
+
+ [orientation]
+ type = int, tuple, list
+ default = None
+ help = Data frame orientation
+ doc = Choose
+
+ - ``None`` or ``0``: correct orientation
+ - ``1``: invert columns (numpy.fliplr)
+ - ``2``: invert rows (numpy.flipud)
+ - ``3``: invert columns, invert rows
+ - ``4``: transpose (numpy.transpose)
+ - ``4+i``: transpose + other operations from above
+
+ Alternatively, a 3-tuple of booleans may be provided ``(do_transpose,
+ do_flipud, do_fliplr)``
+ userlevel = 1
+
+ [min_frames]
+ type = int
+ default = 1
+ help = Minimum number of frames loaded by each node
+ doc =
+ userlevel = 2
+
+ [positions_theory]
+ type = ndarray
+ default = None
+ help = Theoretical positions for this scan
+ doc = If provided, experimental positions from :py:class:`PtyScan` subclass will be ignored. If data
+ preparation is called from Ptycho instance, the calculated positions from the
+ :py:func:`ptypy.core.xy.from_pars` dict will be inserted here
+ userlevel = 2
+
+ [num_frames]
+ type = int
+ default = None
+ help = Maximum number of frames to be prepared
+ doc = If `positions_theory` are provided, num_frames will be overridden with the number of
+ positions available
+ userlevel = 1
+
+ [label]
+ type = str
+ default = None
+ help = The scan label
+ doc = Unique string identifying the scan
+ userlevel = 1
+
+ [experimentID]
+ type = str
+ default = None
+ help = Name of the experiment
+ doc = If None, a default value will be provided by the recipe. **unused**
+ userlevel = 2
+
+ [version]
+ type = float
+ default = 0.1
+ help = TODO: Explain this and decide if it is a user parameter.
+ doc =
+ userlevel = 2
+
+ [shape]
+ type = int, tuple
+ default = 256
+ help = Shape of the region of interest cropped from the raw data.
+ doc = Cropping dimension of the diffraction frame.
+ Can be None, (dimx, dimy), or dim. In the latter case shape will be (dim, dim).
+ userlevel = 1
+
+ [center]
+ type = tuple, str
+ default = 'fftshift'
+ help = Center (pixel) of the optical axes in raw data
+ doc = If ``None``, this parameter will be set by :py:data:`~.scan.data.auto_center` or elsewhere
+ userlevel = 1
+
+ [psize]
+ type = float, tuple
+ default = 0.000172
+ help = Detector pixel size
+ doc = Dimensions of the detector pixels (in meters)
+ userlevel = 0
+ lowlim = 0
+
+ [distance]
+ type = float
+ default = 7.19
+ help = Sample to detector distance
+ doc = In meters.
+ userlevel = 0
+ lowlim = 0
+
+ [energy]
+ type = float
+ default = 7.2
+ help = Photon energy of the incident radiation in keV
+ doc =
+ userlevel = 0
+ lowlim = 0
+
+ [add_poisson_noise]
+ default = True
+ type = bool
+ help = Decides whether the scan should have Poisson noise or not
"""
- DEFAULT = GENERIC.copy()
WAIT = WAIT
EOS = EOS
CODES = CODES
+ METAKEYS = ['version', 'num_frames', 'label', 'shape', 'psize', 'energy', 'center', 'distance']
+ """ Keys to store in meta param """
+
def __init__(self, pars=None, **kwargs):
# filename='./foo.ptyd', shape=None, save=True):
"""
- Class creation with minimum set of parameters, see :py:data:`GENERIC`
+ Class creation with minimum set of parameters, see :py:data:`PtyScan.DEFAULT`
Please note that class creation is not meant to load data.
Call :py:data:`initialize` to begin loading and data file creation.
"""
# Load default parameter structure
- info = u.Param(self.DEFAULT.copy())
-
- # FIXME this overwrites the child's recipe defaults
- info.update(pars, in_place_depth=1)
- info.update(kwargs)
-
- # validate(pars, '.scan.preparation')
-
- # Prepare meta data
- self.meta = u.Param(META.copy())
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+ p.update(kwargs)
# Attempt to get number of frames.
- self.num_frames = info.num_frames
+ self.num_frames = p.num_frames
""" Total number of frames to prepare / load.
Set by :py:data:`~.scan.data.num_frames` """
- self.min_frames = info.min_frames * parallel.size
+ self.min_frames = p.min_frames * parallel.size
""" Minimum number of frames to prepare / load
with call of :py:meth:`auto` """
- if info.positions_theory is not None:
- num = len(info.positions_theory)
+ if p.positions_theory is not None:
+ num = len(p.positions_theory)
logger.info('Theoretical positions are available. '
'There will be %d frames.' % num)
logger.info(
@@ -158,33 +273,17 @@ def __init__(self, pars=None, **kwargs):
'Former input value of frame number `num_frames` %s is '
'overridden to %d.' % (str(self.num_frames), num))
self.num_frames = num
- """
- # check if we got information on geometry from ptycho
- if info.geometry is not None:
- for k, v in info.geometry.items():
- # FIXME: This is a bit ugly -
- # some parameters are added to info without documentation.
- info[k] = v if info.get(k) is None else None
- # FIXME: This should probably be done more transparently:
- # it is not clear for the user that info.roi
- # has precedence over geometry.N
- if info.roi is None:
- info.roi = u.expect2(info.geometry.N)
- """
+
# None for rebin should be allowed, as in "don't rebin".
- if info.rebin is None:
- info.rebin = 1
+ if p.rebin is None:
+ p.rebin = 1
- self.info = info
- """:any:`Param` container that stores all input parameters."""
+ self.info = p
+ """:py:class:`Param` container that stores all input parameters."""
# Print a report
log(4, 'Ptypy Scan instance got the following parameters:')
- log(4, u.verbose.report(info))
-
- # Dump all input parameters as class attributes.
- # FIXME: This duplication of parameters can lead to much confusion...
- # self.__dict__.update(info)
+ log(4, u.verbose.report(p))
# Check MPI settings
lp = str(self.info.load_parallel)
@@ -205,12 +304,9 @@ def __init__(self, pars=None, **kwargs):
self.dfile = None
self.save = self.info.save
- # Copy all values for meta
- for k in self.meta.keys():
- self.meta[k] = self.info[k]
- # self.center = None # Center will be set later
- # self.roi = self.info.roi #None # ROI will be set later
- # self.shape = None
+ # Construct meta
+ self.meta = u.Param({k: self.info[k] for k in self.METAKEYS})
+
self.orientation = self.info.orientation
self.rebin = self.info.rebin
@@ -218,9 +314,6 @@ def __init__(self, pars=None, **kwargs):
self._flags = np.array([0, 0, 0], dtype=int)
self.is_initialized = False
- # post init method call
- self.post_init()
-
def initialize(self):
"""
Begins the data preparation and is intended as the first method
@@ -234,7 +327,8 @@ def initialize(self):
* Sets :py:attr:`num_frames` if needed
* Calls :py:meth:`post_initialize`
"""
- logger.info(headerline('Enter PtyScan.initialize()', 'l'))
+ logger.info(headerline('Enter %s.initialize()'
+ % self.__class__.__name__, 'l'))
# Prepare writing to file
if self.info.save is not None:
@@ -282,6 +376,9 @@ def initialize(self):
if self.has_weight2d:
logger.info('shape = '.rjust(29) + str(self.weight2d.shape))
+ # FIXME: Saving weight to info. This is not ideal, info is optional
+ self.info.weight2d = self.has_weight2d
+
logger.info('All experimental positions : ' + str(self.has_positions))
if self.has_positions:
logger.info('shape = '.rjust(29) + str(positions.shape))
@@ -479,13 +576,6 @@ def post_initialize(self):
"""
pass
- def post_init(self):
- """
- Placeholder. Called at the end of construction by all
- processes.
- """
- pass
-
def _mpi_check(self, chunksize, start=None):
"""
Executes the check() function on master node and communicates
@@ -567,240 +657,228 @@ def get_data_chunk(self, chunksize, start=None):
if msg in [EOS, WAIT]:
logger.info(CODES[msg])
return msg
- else:
- start, step = msg
- # Get scan point index distribution
- indices = self._mpi_indices(start, step)
+ start, step = msg
- # What did we get?
- data, positions, weights = self._mpi_pipeline_with_dictionaries(
- indices)
- # All these dictionaries could be empty
- # Fill weights dictionary with references to the weights in common
+ # Get scan point index distribution
+ indices = self._mpi_indices(start, step)
- has_data = (len(data) > 0)
- has_weights = (len(weights) > 0) and len(weights.values()[0]) > 0
+ # What did we get?
+ data, positions, weights = self._mpi_pipeline_with_dictionaries(
+ indices)
+ # All these dictionaries could be empty
+ # Fill weights dictionary with references to the weights in common
- if has_data:
- dsh = np.array(data.values()[0].shape[-2:])
- else:
- dsh = np.array([0, 0])
+ has_data = (len(data) > 0)
+ has_weights = (len(weights) > 0) and len(weights.values()[0]) > 0
- # Communicate result
- dsh[0] = parallel.MPImax([dsh[0]])
- dsh[1] = parallel.MPImax([dsh[1]])
+ if has_data:
+ dsh = np.array(data.values()[0].shape[-2:])
+ else:
+ dsh = np.array([0, 0])
- if not has_weights:
- # Peak at first item
- if self.has_weight2d:
- altweight = self.weight2d
- else:
- try:
- altweight = self.meta.weight2d
- except:
- altweight = np.ones(dsh)
- weights = dict.fromkeys(data.keys(), altweight)
-
- assert len(weights) == len(data), (
- 'Data and Weight frames unbalanced %d vs %d'
- % (len(data), len(weights)))
-
- sh = self.info.shape
- # Adapt roi if not set
- if sh is None:
- logger.info('ROI not set. Using full frame shape of (%d, %d).'
- % tuple(dsh))
- sh = dsh
- else:
- sh = u.expect2(sh)
-
- # Only allow square slices in data
- if sh[0] != sh[1]:
- roi = u.expect2(sh.min())
- logger.warning('Asymmetric data ROI not allowed. Setting ROI '
- 'from (%d, %d) to (%d, %d).'
- % (sh[0], sh[1], roi[0], roi[1]))
- sh = roi
-
- self.info.shape = sh
-
- cen = self.info.center
- if str(cen) == cen:
- cen = geometry.translate_to_pix(sh, cen)
-
- auto = self.info.auto_center
- # Get center in diffraction image
- if auto is None or auto is True:
- auto_cen = self._mpi_autocenter(data, weights)
- else:
- auto_cen = None
+ # Communicate result
+ dsh[0] = parallel.MPImax([dsh[0]])
+ dsh[1] = parallel.MPImax([dsh[1]])
- if cen is None and auto_cen is not None:
- logger.info('Setting center for ROI from %s to %s.'
- % (str(cen), str(auto_cen)))
- cen = auto_cen
- elif cen is None and auto is False:
- cen = dsh // 2
+ if not has_weights:
+ # Peek at first item
+ if self.has_weight2d:
+ altweight = self.weight2d
else:
- # Center is number or tuple
- cen = u.expect2(cen[-2:])
- if auto_cen is not None:
- logger.info('ROI center is %s, automatic guess is %s.'
- % (str(cen), str(auto_cen)))
-
- # It is important to set center again in order to NOT have
- # different centers for each chunk, the downside is that the center
- # may be off if only a few diffraction patterns are used for the
- # analysis. In such case it is beneficial to set the center in the
- # parameters
- self.info.center = cen
-
- # Make sure center is in the image frame
- assert (cen > 0).all() and (dsh - cen > 0).all(), (
- 'Optical axes (center = (%.1f, %.1f) outside diffraction image '
- 'frame (%d, %d).' % tuple(cen) + tuple(dsh))
-
- # Determine if the arrays require further processing
- do_flip = (self.orientation is not None
- and np.array(self.orientation).any())
- do_crop = (np.abs(sh - dsh) > 0.5).any()
- do_rebin = self.rebin is not None and (self.rebin != 1)
-
- if do_flip or do_crop or do_rebin:
- logger.info(
- 'Enter preprocessing '
- '(crop/pad %s, rebin %s, flip/rotate %s) ... \n'
- % (str(do_crop), str(do_rebin), str(do_flip)))
-
- # We proceed with numpy arrays.That is probably now more memory
- # intensive but shorter in writing
- if has_data:
- d = np.array([data[ind] for ind in indices.node])
- w = np.array([weights[ind] for ind in indices.node])
- else:
- d = np.ones((1,) + tuple(dsh))
- w = np.ones((1,) + tuple(dsh))
-
- # Crop data
- d, tmp = u.crop_pad_symmetric_2d(d, sh, cen)
-
- # Check if provided mask has the same shape as data, if not,
- # use the mask's center for cropping the mask. The latter is
- # also needed if no mask is provided, as weights is then
- # created using the requested cropping shape and thus might
- # have a different center than the raw data.
- # NOTE: Maybe distinguish between no mask provided and mask
- # with wrong size in warning
- if (dsh == np.array(w[0].shape)).all():
- w, cen = u.crop_pad_symmetric_2d(w, sh, cen)
- else:
- logger.warning('Mask does not have the same shape as data. '
- 'Will use mask center for cropping mask.')
- cen = np.array(w[0].shape) // 2
- w, cen = u.crop_pad_symmetric_2d(w, sh, cen)
-
- # Flip, rotate etc.
- d, tmp = u.switch_orientation(d, self.orientation, cen)
- w, cen = u.switch_orientation(w, self.orientation, cen)
-
- # Rebin, check if rebinning is neither to strong nor impossible
- rebin = self.rebin
- if rebin <= 1:
- pass
- elif (rebin in range(2, 6)
- and (((sh / float(rebin)) % 1) == 0.0).all()):
- mask = w > 0
- d = u.rebin_2d(d, rebin)
- w = u.rebin_2d(w, rebin)
- mask = u.rebin_2d(mask, rebin)
- # We keep only the pixels that do not include a masked pixel
- # w[mask < mask.max()] = 0
- # TODO: apply this operation when weights actually are weights
- w = (mask == mask.max())
- else:
- raise RuntimeError(
- 'Binning (%d) is to large or incompatible with array '
- 'shape (%s).' % (rebin, str(tuple(sh))))
+ try:
+ altweight = self.info.weight2d
+ except:
+ altweight = np.ones(dsh)
+ weights = dict.fromkeys(data.keys(), altweight)
+
+ assert len(weights) == len(data), (
+ 'Data and Weight frames unbalanced %d vs %d'
+ % (len(data), len(weights)))
+
+ sh = self.info.shape
+ # Adapt roi if not set
+ if sh is None:
+ logger.info('ROI not set. Using full frame shape of (%d, %d).'
+ % tuple(dsh))
+ sh = dsh
+ else:
+ sh = u.expect2(sh)
+
+ # Only allow square slices in data
+ if sh[0] != sh[1]:
+ roi = u.expect2(sh.min())
+ logger.warning('Asymmetric data ROI not allowed. Setting ROI '
+ 'from (%d, %d) to (%d, %d).'
+ % (sh[0], sh[1], roi[0], roi[1]))
+ sh = roi
+
+ self.info.shape = sh
+
+ cen = self.info.center
+ if str(cen) == cen:
+ cen = geometry.translate_to_pix(dsh, cen)
+
+ auto = self.info.auto_center
+ # Get center in diffraction image
+ if auto is None or auto is True:
+ auto_cen = self._mpi_autocenter(data, weights)
+ else:
+ auto_cen = None
+
+ if cen is None and auto_cen is not None:
+ logger.info('Setting center for ROI from %s to %s.'
+ % (str(cen), str(auto_cen)))
+ cen = auto_cen
+ elif cen is None and auto is False:
+ cen = dsh // 2
+ else:
+ # Center is number or tuple
+ cen = u.expect2(cen[-2:])
+ if auto_cen is not None:
+ logger.info('ROI center is %s, automatic guess is %s.'
+ % (str(cen), str(auto_cen)))
- # restore contiguity of the cropped/padded/rotated/flipped array
- d = np.ascontiguousarray(d)
+ # It is important to set center again in order to NOT have
+ # different centers for each chunk, the downside is that the center
+ # may be off if only a few diffraction patterns are used for the
+ # analysis. In such case it is beneficial to set the center in the
+ # parameters
+ self.info.center = cen
+
+ # Make sure center is in the image frame
+ assert (cen > 0).all() and (dsh - cen > 0).all(), (
+ 'Optical axes (center = (%.1f, %.1f) outside diffraction image '
+ 'frame (%d, %d).' % (tuple(cen) + tuple(dsh)))
+
+ # Determine if the arrays require further processing
+ do_flip = (self.orientation is not None
+ and np.array(self.orientation).any())
+ do_crop = (np.abs(sh - dsh) > 0.5).any()
+ do_rebin = self.rebin is not None and (self.rebin != 1)
+
+ if do_flip or do_crop or do_rebin:
+ logger.info(
+ 'Enter preprocessing '
+ '(crop/pad %s, rebin %s, flip/rotate %s) ... \n'
+ % (str(do_crop), str(do_rebin), str(do_flip)))
- if has_data:
- # Translate back to dictionaries
- data = dict(zip(indices.node, d))
- weights = dict(zip(indices.node, w))
+ # We proceed with numpy arrays. That is probably more memory
+ # intensive but shorter in writing
+ if has_data:
+ d = np.array([data[ind] for ind in indices.node])
+ w = np.array([weights[ind] for ind in indices.node])
+ else:
+ d = np.ones((1,) + tuple(dsh))
+ w = np.ones((1,) + tuple(dsh))
+
+ # Crop data
+ d, tmp = u.crop_pad_symmetric_2d(d, sh, cen)
+
+ # Check if provided mask has the same shape as data, if not,
+ # use the mask's center for cropping the mask. The latter is
+ # also needed if no mask is provided, as weights is then
+ # created using the requested cropping shape and thus might
+ # have a different center than the raw data.
+ # NOTE: Maybe distinguish between no mask provided and mask
+ # with wrong size in warning
+
+ if (dsh == np.array(w[0].shape)).all():
+ w, cen = u.crop_pad_symmetric_2d(w, sh, cen)
+ else:
+ logger.warning('Mask does not have the same shape as data. '
+ 'Will use mask center for cropping mask.')
+ cen = np.array(w[0].shape) // 2
+ w, cen = u.crop_pad_symmetric_2d(w, sh, cen)
+
+ # Flip, rotate etc.
+ d, tmp = u.switch_orientation(d, self.orientation, cen)
+ w, cen = u.switch_orientation(w, self.orientation, cen)
+
+ # Rebin, check if rebinning is neither too strong nor impossible
+ rebin = self.rebin
+ if rebin <= 1:
+ pass
+ elif (rebin in range(2, 6)
+ and (((sh / float(rebin)) % 1) == 0.0).all()):
+ mask = w > 0
+ d = u.rebin_2d(d, rebin)
+ w = u.rebin_2d(w, rebin)
+ mask = u.rebin_2d(mask, rebin)
+ # We keep only the pixels that do not include a masked pixel
+ # w[mask < mask.max()] = 0
+ # TODO: apply this operation when weights actually are weights
+ w = (mask == mask.max())
+ else:
+ raise RuntimeError(
+ 'Binning (%d) is too large or incompatible with array '
+ 'shape (%s).' % (rebin, str(tuple(sh))))
- # Adapt meta info
- self.meta.center = cen / float(self.rebin)
- self.meta.shape = u.expect2(sh) / self.rebin
+ # restore contiguity of the cropped/padded/rotated/flipped array
+ d = np.ascontiguousarray(d)
+ w = np.ascontiguousarray(w)
- if self.info.psize is not None:
- self.meta.psize = u.expect2(self.info.psize) * self.rebin
- else:
- self.meta.psize = None
-
- # Prepare chunk of data
- chunk = u.Param()
- chunk.indices = indices.chunk
- chunk.indices_node = indices.node
- chunk.num = self.chunknum
- chunk.data = data
-
- # If there are weights we add them to chunk,
- # otherwise we push it into meta
- if has_weights:
- chunk.weights = weights
- elif has_data:
- chunk.weights = {}
- self.meta.weight2d = weights.values()[0]
-
- # Slice positions from common if they are empty too
- if positions is None or len(positions) == 0:
- pt = self.info.positions_theory
- if pt is not None:
- chunk.positions = pt[indices.chunk]
- else:
- try:
- chunk.positions = (
- self.info.positions_scan[indices.chunk])
- except:
- logger.info('Unable to slice position information from '
- 'experimental or theoretical resource.')
- chunk.positions = [None] * len(indices.chunk)
+ if has_data:
+ # Translate back to dictionaries
+ data = dict(zip(indices.node, d))
+ weights = dict(zip(indices.node, w))
+
+ # Adapt geometric info
+ self.meta.center = cen / float(self.rebin)
+ self.meta.shape = u.expect2(sh) / self.rebin
+
+ if self.info.psize is not None:
+ self.meta.psize = u.expect2(self.info.psize) * self.rebin
+
+ # Prepare chunk of data
+ chunk = u.Param()
+ chunk.indices = indices.chunk
+ chunk.indices_node = indices.node
+ chunk.num = self.chunknum
+ chunk.data = data
+
+ # If there are weights we add them to the chunk,
+ # otherwise we keep a 2d fallback in self.weight2d
+ if has_weights:
+ chunk.weights = weights
+ elif has_data:
+ chunk.weights = {}
+ self.weight2d = weights.values()[0]
+
+ # Slice positions from common if they are empty too
+ if positions is None or len(positions) == 0:
+ pt = self.info.positions_theory
+ if pt is not None:
+ chunk.positions = pt[indices.chunk]
else:
- # A dict : sort positions to indices.chunk
- # This may fail if there are less positions than scan points
- # (unlikely)
- chunk.positions = np.asarray(
- [positions[k] for k in indices.chunk])
- # Positions complete
-
- # With first chunk we update meta
- if self.chunknum < 1:
- """
- for k, v in self.meta.items():
- # FIXME: I would like to avoid this "blind copying"
- # BE: This is not a blind copy as only keys
- # in META above are used
- if v is None:
- self.meta[k] = self.__dict__.get(k, self.info.get(k))
- else:
- self.meta[k] = v
- self.meta['center'] = cen
- """
-
- if self.info.save is not None and parallel.master:
- io.h5append(self.dfile, meta=dict(self.meta))
+ try:
+ chunk.positions = (
+ self.info.positions_scan[indices.chunk])
+ except:
+ logger.info('Unable to slice position information from '
+ 'experimental or theoretical resource.')
+ chunk.positions = [None] * len(indices.chunk)
+ else:
+ # A dict : sort positions to indices.chunk
+ # This may fail if there are fewer positions than scan points
+ # (unlikely)
+ chunk.positions = np.asarray(
+ [positions[k] for k in indices.chunk])
+ # Positions complete
+
+ # With first chunk we update info
+ if self.chunknum < 1:
+ if self.info.save is not None and parallel.master:
+ io.h5append(self.dfile, meta=dict(self.meta))
- parallel.barrier()
+ parallel.barrier()
- self.chunk = chunk
- self.chunknum += 1
+ self.chunk = chunk
+ self.chunknum += 1
- return chunk
+ return chunk
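A toy sketch of the preprocessing order applied above (crop/pad, then orientation switch, then rebin), assuming the ``ptypy.utils`` helpers referenced in this method; shapes and values are illustrative:

    import numpy as np
    from ptypy import utils as u

    d = np.random.rand(1, 300, 300)               # one raw frame
    w = np.ones_like(d)                           # trivial weight/mask
    sh = u.expect2(256)                           # requested square ROI
    cen = np.array([150, 150])                    # optical axis in the raw frame

    d, _ = u.crop_pad_symmetric_2d(d, sh, cen)    # crop/pad data to the ROI
    w, cen = u.crop_pad_symmetric_2d(w, sh, cen)  # same for the mask
    d, _ = u.switch_orientation(d, (False, True, False), cen)  # flip rows
    w, cen = u.switch_orientation(w, (False, True, False), cen)
    d = u.rebin_2d(d, 2)                          # 256 -> 128 pixels per side
    w = u.rebin_2d(w, 2)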
- def auto(self, frames, chunk_form='dp'):
+ def auto(self, frames):
"""
Repeated calls to this function will process the data.
@@ -809,16 +887,13 @@ def auto(self, frames, chunk_form='dp'):
frames : int
Number of frames to process.
- chunk_form : str
- Currently only type data package 'dp' implemented
-
Returns
-------
variable
one of the following
- - None, if scan's end is not reached,
+ - WAIT, if scan's end is not reached,
but no data could be prepared yet
- - False, if scan's end is reached
+ - EOS, if scan's end is reached
- a data package otherwise
"""
# attempt to get data:
@@ -832,7 +907,7 @@ def auto(self, frames, chunk_form='dp'):
# del self.chunk
return msg
else:
- out = self.return_chunk_as(msg, chunk_form)
+ out = self._make_data_package(msg)
# save chunk
if self.info.save is not None:
self._mpi_save_chunk(self.info.save, msg)
@@ -840,15 +915,10 @@ def auto(self, frames, chunk_form='dp'):
del self.chunk
return out
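A sketch of the intended driver loop around :py:meth:`auto`, assuming a concrete subclass such as the ``MoonFlowerScan`` defined below; frame counts are illustrative:

    scan = MoonFlowerScan(num_frames=100)
    scan.initialize()
    while True:
        msg = scan.auto(20)            # request up to 20 frames
        if msg == scan.EOS:            # end of scan reached
            break
        elif msg == scan.WAIT:         # nothing ready yet
            continue
        for frame in msg['iterable']:  # a data package otherwise
            pass  # frame['data'], frame['mask'], frame['position'], frame['index']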
- def return_chunk_as(self, chunk, kind='dp'):
+ def _make_data_package(self, chunk):
"""
- Returns the loaded data chunk `chunk` in the format `kind`.
-
- For now only kind=='dp' (data package) is valid.
+ Returns the loaded data chunk `chunk` as a data package.
"""
- # This is a bit ugly now
- if kind != 'dp':
- raise RuntimeError('Unknown kind of chunck format: %s' % str(kind))
# The "common" part
out = {'common': self.meta}
@@ -867,9 +937,11 @@ def return_chunk_as(self, chunk, kind='dp'):
# First look in chunk for a weight to this index, then
# look for a 2d-weight on the scan instance, then arbitrarily set
# weight to ones.
- w = chunk.weights.get(
- index, self.meta.get('weight2d',
- np.ones_like(frame['data'])))
+ try:
+ fallback = self.weight2d
+ except AttributeError:
+ fallback = np.ones_like(frame['data'])
+ w = chunk.weights.get(index, fallback)
frame['mask'] = (w > 0)
iterables.append(frame)
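Schematically, the returned package is a plain dict (a sketch; field values are hypothetical):

    # dp = scan.auto(n), with n frames ready, returns roughly:
    # {'common': <Param with meta: shape, psize, energy, center, distance, ...>,
    #  'iterable': [{'index': 0, 'data': <2d array>,
    #                'position': <xy coordinates>, 'mask': <2d bool array>}, ...]}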
@@ -926,7 +998,7 @@ def check(self, frames=None, start=None):
for preparation and should determine if data acquisition for this
scan is finished. Its main purpose is to allow for a data
acquisition scheme, where the number of frames is not known
- when :any:`PtyScan` is constructed, i.e. a data stream or an
+ when :py:class:`PtyScan` is constructed, e.g. a data stream or an
on-the-fly reconstruction.
Note
@@ -1004,7 +1076,7 @@ def load(self, indices):
Note
----
This is the *most* important method to change when subclassing
- :any:`PtyScan`. Most often it suffices to override the constructor
+ :py:class:`PtyScan`. Most often it suffices to override the constructor
and this method to create a subclass suited for a specific
experiment.
"""
@@ -1050,17 +1122,14 @@ def _mpi_autocenter(self, data, weights):
cen[k] = u.mass_center(d * (weights[k] > 0))
# For some nodes, cen may still be empty.
- # Therefore we use gather_dict to be save
+ # Therefore we use gather_dict to be safe
cen = parallel.gather_dict(cen)
parallel.barrier()
# Now master possesses all calculated centers
if parallel.master:
cen = np.array(cen.values()).mean(0)
- else:
- cen = np.array([0., 0.])
-
- parallel.allreduce(cen)
+ cen = parallel.bcast(cen)
return cen
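The automatic center is just the intensity mass center of the masked frames, averaged on the master node and broadcast to the others. A single-process toy sketch, assuming ``u.mass_center``:

    import numpy as np
    from ptypy import utils as u

    frame = np.zeros((64, 64))
    frame[40, 24] = 1.0                # single bright pixel
    print(u.mass_center(frame))        # approximately [40., 24.]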
@@ -1145,27 +1214,36 @@ def _mpi_save_chunk(self, kind='link', chunk=None):
parallel.barrier()
+@defaults_tree.parse_doc('scandata.PtydScan')
class PtydScan(PtyScan):
"""
PtyScan provided by native "ptyd" file format.
+
+ Defaults:
+
+ [name]
+ default = PtydScan
+ type = str
+ help =
+ doc =
+
+ [source]
+ default = 'file'
+ type = str, None
+ help = Alternate source file path if data is meant to be reprocessed.
+ doc = Passing `None` as input shall be deprecated in the future
+
"""
- DEFAULT = GENERIC.copy()
- def __init__(self, pars=None, source=None, **kwargs):
+ def __init__(self, pars=None, **kwargs):
"""
PtyScan provided by native "ptyd" file format.
-
- :param source: Explicit source file. If not None or 'file',
- the data may get processed depending on user input
-
- :param pars: Input like PtyScan
"""
# Create parameter set
- p = u.Param(self.DEFAULT.copy())
-
- # Copy the label
- # if pars is not None:
- # p.label = pars.get('label')
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+ p.update(kwargs)
+ source = p.source
if source is None or str(source) == 'file':
# This is the case of absolutely no additional work
@@ -1183,7 +1261,7 @@ def __init__(self, pars=None, source=None, **kwargs):
dfile = pars['dfile']
# Check for conflict
- if str(u.unique_path(source)) == str(u.unique_path(dfile)):
+ if dfile and (str(u.unique_path(source)) == str(u.unique_path(dfile))):
logger.info('Source and Sink files are the same.')
dfile = os.path.splitext(dfile)
dfile = dfile[0] + '_n.' + dfile[1]
@@ -1203,18 +1281,20 @@ def __init__(self, pars=None, source=None, **kwargs):
# At least ONE chunk must exist to ensure everything works
with h5py.File(source, 'r') as f:
check = f.get('chunks/0')
+ f.close()
# Get number of frames supposedly in the file
- # FIXME: try/except clause only for backward compatibilty
+ # FIXME: try/except clause only for backward compatibility
# for .ptyd files created prior to commit 2e626ff
#try:
# source_frames = f.get('info/num_frames_actual')[...].item()
#except TypeError:
# source_frames = len(f.get('info/positions_scan')[...])
- f.close()
+ #f.close()
if check is None:
raise IOError('Ptyd source %s contains no data. Load aborted'
% source)
+
"""
if source_frames is None:
logger.warning('Ptyd source is not aware of the total'
@@ -1224,22 +1304,32 @@ def __init__(self, pars=None, source=None, **kwargs):
# Get meta information
meta = u.Param(io.h5read(self.source, 'meta')['meta'])
+ if meta.get('num_frames') is None:
+ logger.warning('Ptyd source is not aware of the total '
+ 'number of diffraction frames expected')
+
if len(meta) == 0:
logger.warning('There should be meta information in '
'%s. Something is odd here.' % source)
# Update given parameters when they are None
if not manipulate:
- super(PtydScan, self).__init__(meta, **kwargs)
+ p.update(meta)
else:
- # Overwrite only those set to None
+ # Replace only None entries in p
+ # FIXME:
+ # BE: This was the former right way when the defaults
+ # were mostly None; now this no longer applies, unless
+ # defaults are overwritten to None. I guess it would be
+ # canonical now to overwrite the defaults in the
+ # docstring. But since reprocessing is rare, we keep
+ # this behaviour for now.
for k, v in meta.items():
- if p.get(k) is None: # should be replace by 'unset'
+ if p.get(k) is None:
p[k] = v
- # Initialize parent class and fill self
- super(PtydScan, self).__init__(p, **kwargs)
- """
+ super(PtydScan, self).__init__(p)
+
+ """
if source_frames is not None:
if self.num_frames is None:
self.num_frames = source_frames
@@ -1350,29 +1440,65 @@ def load(self, indices):
return (out.get(key, {}) for key in ['data', 'positions', 'weights'])
-
+@defaults_tree.parse_doc('scandata.MoonFlowerScan')
class MoonFlowerScan(PtyScan):
"""
Test PtyScan class producing a romantic ptychographic data set of a moon
illuminating flowers.
- """
- DEFAULT = GENERIC.copy()
- DEFAULT.update(geometry.DEFAULT.copy())
- RECIPE = u.Param(
- # Position distance in fraction of illumination frame
- density=0.2,
- photons=1e8,
- psf=0.
- )
+ Overrides parent class defaults:
+
+ Defaults:
+
+ [name]
+ default = MoonFlowerScan
+ type = str
+ help =
+ doc =
+
+ [num_frames]
+ default = 100
+ type = int
+ help = Number of frames to simulate
+ doc =
+
+ [shape]
+ type = int, tuple
+ default = 128
+ help = Shape of the region of interest cropped from the raw data.
+ doc = Cropping dimension of the diffraction frame.
+ Can be None, (dimx, dimy), or dim. In the latter case shape will be (dim, dim).
+ userlevel = 1
+
+ [density]
+ default = 0.2
+ type = float
+ help = Position distance in fraction of illumination frame
+
+ [model]
+ default = 'round'
+ type = str
+ help = The scan pattern
+
+ [photons]
+ default = 1e8
+ type = float
+ help = Total number of photons for Poisson noise
+
+ [psf]
+ default = 0.
+ type = float
+ help = Point spread function of the detector
+
+ """
def __init__(self, pars=None, **kwargs):
"""
Parent pars are for the :py:class:`PtyScan` base class.
"""
- p = geometry.DEFAULT.copy()
- if pars is not None:
- p.update(pars)
+
+ p = self.DEFAULT.copy(depth=99)
+ p.update(pars)
# Initialize parent class
super(MoonFlowerScan, self).__init__(p, **kwargs)
@@ -1380,18 +1506,24 @@ def __init__(self, pars=None, **kwargs):
# Derive geometry from input
geo = geometry.Geo(pars=self.meta)
- # Recipe specific things
- r = self.RECIPE.copy()
- r.update(self.info.recipe)
-
# Derive scan pattern
- pos = u.Param()
- pos.spacing = geo.resolution * geo.shape * r.density
- pos.steps = np.int(np.round(np.sqrt(self.num_frames))) + 1
- pos.extent = pos.steps * pos.spacing
- pos.model = 'round'
- pos.count = self.num_frames
- self.pos = xy.from_pars(pos)
+ if p.model == 'raster':
+ pos = u.Param()
+ pos.spacing = geo.resolution * geo.shape * p.density
+ pos.steps = np.int(np.round(np.sqrt(self.num_frames))) + 1
+ pos.extent = pos.steps * pos.spacing
+ pos.model = p.model
+ self.num_frames = pos.steps**2
+ pos.count = self.num_frames
+ self.pos = xy.raster_scan(pos.spacing[0], pos.spacing[1], pos.steps, pos.steps)
+ else:
+ pos = u.Param()
+ pos.spacing = geo.resolution * geo.shape * p.density
+ pos.steps = np.int(np.round(np.sqrt(self.num_frames))) + 1
+ pos.extent = pos.steps * pos.spacing
+ pos.model = p.model
+ pos.count = self.num_frames
+ self.pos = xy.from_pars(pos)
# Calculate pixel positions
pixel = self.pos / geo.resolution
@@ -1399,14 +1531,25 @@ def __init__(self, pars=None, **kwargs):
self.pixel = np.round(pixel).astype(int) + 10
frame = self.pixel.max(0) + 10 + geo.shape
self.geo = geo
- self.obj = resources.flower_obj(frame)
-
+
+ try:
+ self.obj = resources.flower_obj(frame)
+ except:
+ # matplotlib failsafe
+ self.obj = u.gf_2d(u.parallel.MPInoise2d(frame), 1.)
+
# Get probe
- moon = resources.moon_pr(self.geo.shape)
- moon /= np.sqrt(u.abs2(moon).sum() / r.photons)
+ try:
+ moon = resources.moon_pr(self.geo.shape)
+ except:
+ # matplotlib failsafe
+ moon = u.ellipsis(u.grids(self.geo.shape)).astype(complex)
+
+ moon /= np.sqrt(u.abs2(moon).sum() / p.photons)
self.pr = moon
self.load_common_in_parallel = True
- self.r = r
+
+ self.p = p
def load_positions(self):
return self.pos
@@ -1419,149 +1562,103 @@ def load(self, indices):
s = self.geo.shape
raw = {}
+ if self.p.add_poisson_noise:
+ logger.info("Generating data with Poisson noise.")
+ else:
+ logger.info("Generating data without Poisson noise.")
+
for k in indices:
intensity_j = u.abs2(self.geo.propagator.fw(
self.pr * self.obj[p[k][0]:p[k][0] + s[0],
p[k][1]:p[k][1] + s[1]]))
- if self.r.psf > 0.:
- intensity_j = u.gf(intensity_j, self.r.psf)
+ if self.p.psf > 0.:
+ intensity_j = u.gf(intensity_j, self.p.psf)
- raw[k] = np.random.poisson(intensity_j).astype(np.int32)
+ if self.p.add_poisson_noise:
+ raw[k] = np.random.poisson(intensity_j).astype(np.int32)
+ else:
+ raw[k] = intensity_j.astype(np.int32)
return raw, {}, {}
-
-class DataSource(object):
+@defaults_tree.parse_doc('scandata.QuickScan')
+class QuickScan(PtyScan):
"""
- A source of data for ptychographic reconstructions.
-
- The important method is "feed_data", which returns packets of diffraction
- patterns with their meta-data.
+ Test PtyScan to benchmark graph creation further down the line.
+
+ Overrides parent class defaults:
+
+ Defaults:
+
+ [name]
+ default = QuickScan
+ type = str
+ help =
+ doc =
+
+ [num_frames]
+ default = 100
+ type = int
+ help = Number of frames to simulate
+ doc =
+
+ [shape]
+ type = int, tuple
+ default = 64
+ help = Shape of the region of interest cropped from the raw data.
+ doc = Cropping dimension of the diffraction frame.
+ Can be None, (dimx, dimy), or dim. In the latter case shape will be (dim, dim).
+ userlevel = 1
+
+ [density]
+ default = 0.05
+ type = float
+ help = Position distance in fraction of illumination frame
"""
- def __init__(self, scans, frames_per_call=10000000, feed_format='dp'):
- """
- DataSource initialization.
-
- Parameters
- ----------
- scans :
- a dictionary of scan structures.
-
- frames_per_call : (optional)
- number of frames to load in one call.
- By default, load as many as possible.
- feed_format :
- the format in with the data is packaged.
- For now only 'dp' is implemented.
- """
- from ..experiment import PtyScanTypes
- # FIXME: SC: when moved to top, import fails
-
- self.frames_per_call = frames_per_call
- self.feed_format = feed_format
- self.scans = scans
-
- # Sort after given keys
- self.labels = sorted(scans.keys())
-
- # Empty list for the scans
- self.pty_scans = []
-
- for label in self.labels:
- # We are making a copy of the root as we want to fill it
- scan = scans[label]
- s = scan['pars']
-
- # Copy other relevant information
- prep = s.data.copy()
-
- # Assign label
- prep.label = label
-
- # Assign source, recipe, and positions_theory
- source = prep.source
- recipe = prep.get('recipe', {})
- if prep.get('positions_theory') is None:
- prep.positions_theory = scan['pos_theory']
-
- # prep.dfile = s.data_file
- # prep.geometry = s.geometry.copy()
- # prep.xy = s.xy.copy()
-
- # if source is not None:
- # source = source.lower()
- if source is None or source.lower() == 'empty':
- prep.recipe = None
- logger.warning('Generating dummy PtyScan for scan `%s` - This '
- 'label will source only zeros as data' % label)
- self.pty_scans.append(PtyScan(prep))
- elif source.lower() in PtyScanTypes:
- pst = PtyScanTypes[source.lower()]
- logger.info('Scan %s will be prepared with the recipe "%s"'
- % (label, source))
- self.pty_scans.append(pst(prep, recipe=recipe))
- elif (source.endswith('.ptyd') or source.endswith('.pty') or
- str(source) == 'file'):
- self.pty_scans.append(PtydScan(prep, source=source))
- elif source.lower() == 'test':
- self.pty_scans.append(MoonFlowerScan(prep))
- elif source.lower() == 'sim':
- from ..simulations import SimScan
- logger.info('Scan %s will simulated' % label)
- self.pty_scans.append(SimScan(prep, s.copy()))
- else:
- raise RuntimeError('Could not manage source "%s" for scan `%s`.'
- % (str(source), label))
-
- # Initialize flags
- self.scan_current = -1
- self.data_available = True
- self.scan_total = len(self.pty_scans)
-
- @property
- def scan_available(self):
- return self.scan_current < (self.scan_total - 1)
-
- def feed_data(self):
+ def __init__(self, pars=None, **kwargs):
"""
- Yield data packages.
+ Parent pars are for the :py:class:`PtyScan` base class.
"""
- # Get PtyScan instance to scan_number
- cur_scan = self.pty_scans[self.scan_current]
- label = self.labels[self.scan_current]
-
- # Initialize if that has not been done yet
- if not cur_scan.is_initialized:
- cur_scan.initialize()
- msg = cur_scan.auto(self.frames_per_call, self.feed_format)
+ p = self.DEFAULT.copy(depth=99)
+ p.update(pars)
- # If we catch a scan that has ended, look for an unfinished scan
- while msg == EOS and self.scan_available:
- self.scan_current += 1
- cur_scan = self.pty_scans[self.scan_current]
- label = self.labels[self.scan_current]
-
- if not cur_scan.is_initialized:
- cur_scan.initialize()
+ # Initialize parent class
+ super(QuickScan, self).__init__(p, **kwargs)
- msg = cur_scan.auto(self.frames_per_call, self.feed_format)
+ # Derive geometry from input
+ geo = geometry.Geo(pars=self.meta)
- self.data_available = (msg != EOS or self.scan_available)
+ # Derive scan pattern
+ pos = u.Param()
+ pos.spacing = geo.resolution * geo.shape * p.density
+ pos.steps = np.int(np.round(np.sqrt(self.num_frames))) + 1
+ pos.extent = pos.steps * pos.spacing
+ pos.model = 'round'
+ pos.count = self.num_frames
+ self.pos = xy.from_pars(pos)
+ self.geo = geo
+ # dummy diffraction pattern
+ self.diff = np.zeros(geo.shape)
+ self.diff[0,0] = 1e7
+ self.diff = np.fft.fftshift(self.diff)
+
+ self.p = p
- logger.debug(u.verbose.report(msg))
+ def load_positions(self):
+ return self.pos
- if msg != WAIT and msg != EOS:
- # Ok that would be a data package
- # Attach inner label
- msg['common']['ptylabel'] = label
- logger.info('Feeding data chunk')
- return msg
- else:
- return None
+ def load_weight(self):
+ return np.ones(self.diff.shape)
+ def load(self, indices):
+ raw = {}
+ for k in indices:
+ raw[k] = self.diff.copy().astype(np.int32)
+ return raw, {}, {}
+
if __name__ == "__main__":
u.verbose.set_level(3)
MS = MoonFlowerScan(num_frames=100)
diff --git a/ptypy/core/data_old.py b/ptypy/core/data_old.py
deleted file mode 100644
index 630a4a3c6..000000000
--- a/ptypy/core/data_old.py
+++ /dev/null
@@ -1,1218 +0,0 @@
-"""
-data - Diffraction data access
-
-This module defines a PtyScan, a container to hold the experimental
-data of a ptychography scan. Instrument-specific reduction routines should
-inherit PtyScan to prepare data for the Ptycho Instance in a uniform format.
-
-The experiment specific child class only needs to overwrite 2 functions
-of the base class:
-
-For the moment the module contains two main objects:
-PtyScan, which holds a single ptychography scan, and DataSource, which
-holds a collection of datascans and feeds the data as required.
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-import numpy as np
-import os
-import h5py
-if __name__ == "__main__":
- from ptypy import utils as u
- from ptypy import io
- from ptypy import resources
- from ptypy.core import geometry
- from ptypy.utils.verbose import logger, log, headerline
-else:
- from .. import utils as u
- from .. import io
- from .. import resources
- from ..utils.verbose import logger, log, headerline
- import geometry
-
-parallel = u.parallel
-
-
-PTYD = dict(
- chunks={}, # frames, positions
- meta={}, # important to understand data. loaded by every process
- info={}, # this dictionary is not loaded from a ptyd. Mainly for documentation
- common={},
-)
-""" Basic Structure of a .ptyd datafile """
-
-META = dict(
- label=None, # label will be set internally
- experimentID=None, # a unique label of user choice
- version='0.1',
- shape=None,
- psize=None,
- #lam=None,
- energy=None,
- center=None,
- distance = None,
-)
-GENERIC = dict(
- dfile = None, # filename (e.g. 'foo.ptyd')
- chunk_format='.chunk%02d', # Format for chunk file appendix.
- #roi = None, # 2-tuple or int for the desired fina frame size
- save = None, # None, 'merge', 'append', 'extlink'
- auto_center = None, # False: no automatic center,None only if center is None, True it will be enforced
- load_parallel = 'data', # None, 'data', 'common', 'all'
- rebin = None, # rebin diffraction data
- orientation = None, # None,int or 3-tuple switch, actions are (transpose, invert rows, invert cols)
- min_frames = 1, # minimum number of frames of one chunk if not at end of scan
- positions_theory = None, # Theoretical position list (This input parameter may get deprecated)
- num_frames = None, # Total number of frames to be prepared
- recipe = {},
-)
-"""Default data parameters. See :py:data:`.scan.data` and a short listing below"""
-GENERIC.update(META)
-
-WAIT = 'msg1'
-EOS = 'msgEOS'
-CODES = {WAIT: 'Scan unfinished. More frames available after a pause',
- EOS: 'End of scan reached'}
-
-__all__ = ['GENERIC','PtyScan','PTYD','PtydScan','MoonFlowerScan']
-
-import warnings
-warnings.simplefilter('always', DeprecationWarning)
-warnings.warn('This module is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
-class PtyScan(object):
- """\
- PtyScan: A single ptychography scan, created on the fly or read from file.
- BASECLASS
-
- Objectives:
- - Stand alone functionality
- - Can produce .ptyd data formats
- - Child instances should be able to prepare from raw data
- - On-the-fly support in form of chunked data.
- - mpi capable, child classes should not worry about mpi
-
- """
-
- DEFAULTS = GENERIC
-
- def __init__(self, pars=None, **kwargs): # filename='./foo.ptyd',shape=None, save=True):
- """
- Class creation with minimum set of parameters, see DEFAULT dict in core/data.py
- Please note the the class creation does not necessarily load data.
- Call .initialize() to begin
- """
-
- # Load default parameter structure
- info = u.Param(self.DEFAULTS.copy())
- info.update(pars)
- info.update(kwargs)
-
- # validate(pars, '.scan.preparation')
-
- # Prepare meta data
- self.meta = u.Param(META.copy())
-
- # Attempt to get number of frames.
- self.num_frames = info.num_frames
- self.min_frames = info.min_frames * parallel.size
- #logger.info('Looking for position information input parameter structure ....\n')
- #if (info.positions_theory is None) and (info.xy is not None):
- # from ptypy.core import xy
- # info.positions_theory = xy.from_pars(info.xy)
-
- if info.positions_theory is not None:
- num = len(info.positions_theory )
- logger.info('Theoretical positions are available. There will be %d frames.' % num)
- logger.info('Any experimental position information will be ignored.')
- logger.info('Former input value of frame number `num_frames` %s is overriden to %d' %(str(self.num_frames),num))
- self.num_frames = num
- """
- # check if we got information on geometry from ptycho
- if info.geometry is not None:
- for k, v in info.geometry.items():
- # FIXME: This is a bit ugly - some parameters are added to info without documentation.
- info[k] = v if info.get(k) is None else None
- # FIXME: This should probably be done more transparently: it is not clear for the user that info.roi has precedence over geometry.N
- if info.roi is None:
- info.roi = u.expect2(info.geometry.N)
- """
- # None for rebin should be allowed, as in "don't rebin".
- if info.rebin is None:
- info.rebin = 1
-
- self.info = info
-
- # Print a report
- log(4,'Ptypy Scan instance got the following parameters:')
- log(4,u.verbose.report(info))
-
- # Dump all input parameters as class attributes.
- # FIXME: This duplication of parameters can lead to much confusion...
- # self.__dict__.update(info)
-
- # Check MPI settings
- lp = str(self.info.load_parallel)
- self.load_common_in_parallel = (lp == 'all' or lp == 'common')
- self.load_in_parallel = (lp == 'all' or lp == 'data')
-
- # set data chunk and frame counters to zero
- self.framestart = 0
- self.chunknum = 0
- self.start = 0
- self.chunk = None
-
- # Initialize other instance attributes
- self.common = {}
- self.has_weight2d = None
- self.has_positions = None
- self.dfile = None
- self.save = self.info.save
-
- # copy all values for meta
- for k in self.meta.keys():
- self.meta[k] = self.info[k]
- #self.center = None # Center will be set later
- #self.roi = self.info.roi #None # ROI will be set later
- #self.shape = None
- self.orientation = self.info.orientation
- self.rebin = self.info.rebin
-
- # initialize flags
- self._flags = np.array([0, 0, 0], dtype=int)
- self.is_initialized = False
-
- def initialize(self):
- """
- Time for some read /write access
- """
-
- # Prepare writing to file
- if self.info.save is not None:
- # We will create a .ptyd
- self.dfile = self.info.dfile
- if parallel.master:
- if os.path.exists(self.dfile):
- backup = self.dfile + '.old'
- logger.warning('File %s already exist. Renamed to %s' % (self.dfile, backup))
- os.rename(self.dfile, backup)
- # Prepare an empty file with the appropriate structure
- io.h5write(self.dfile, PTYD.copy())
- # Wait for master
- parallel.barrier()
-
- if parallel.master or self.load_common_in_parallel:
- self.common = self.load_common()
- # FIXME
- # Replace Nones, because we cannot broadcast them later
- # I don't get it, why didn't we allow for a missing key?
- # Also I disagree that we should enforce a 'weight2d' or
- # positions. The user can very well add them later.
- self.common = dict([(k,v) if v is not None else (k,np.array([])) for k,v in self.common.items() ])
-
- # broadcast
- if not self.load_common_in_parallel:
- parallel.bcast_dict(self.common)
-
- self.common = u.Param(self.common)
- assert 'weight2d' in self.common and 'positions_scan' in self.common
-
- logger.info('\n'+headerline('Analysis of the "common" arrays','l'))
- # Check if weights (or mask) have been loaded by load_common.
- weight2d = self.common.weight2d
- self.has_weight2d = weight2d is not None and len(weight2d)>0
- logger.info('Check for weight or mask, "weight2d" .... %s : shape = %s' % (str(self.has_weight2d),str(weight2d.shape)))
-
-
- # Check if positions have been loaded by load_common
- positions = self.common.positions_scan
- self.has_positions = positions is not None and len(positions)>0
- logger.info('Check for positions, "positions_scan" .... %s : shape = %s' % (str(self.has_positions),str(positions.shape)))
-
-
- if self.info.positions_theory is not None:
- logger.info('Skipping experimental positions `positions_scan`')
- elif self.has_positions:
- # Store positions in the info dictionary
- self.info.positions_scan = positions
- num_pos = len(positions)
- if self.num_frames is None:
- # Frame number was not known. We just set it now.
- logger.info('Scanning positions found. There will be %d frames' % num_pos)
- self.num_frames = num_pos
- else:
- # Frame number was already specified. Maybe we didn't want to use everything?
- if num_pos > self.num_frames:
- #logger.info('Scanning positions have the same number of points as the theoretical ones (%d).' % num_pos)
- logger.info('Scanning positions (%d) exceed the desired number of scan points (%d).' % (num_pos,self.num_frames))
- logger.info('Set `num_frames` to None or to a larger value for more scan points')
- elif num_pos < self.num_frames:
- logger.info('Scanning positions (%d) are fewer than the desired number of scan points (%d).' % (num_pos,self.num_frames))
- logger.info('Resetting `num_frames` to lower value')
- self.num_frames = num_pos
- #raise RuntimeError('Scanning positions have a number of points (%d) inconsistent with what was previously deduced (%d).'
- # % (num_pos, self.info.num_frames))
- else:
- logger.info('No scanning position have been provided at this stage.')
-
- if self.num_frames is None:
- logger.warning('Number of frames `num_frames` not specified at this stage\n.')
-
- parallel.barrier()
-
- #logger.info('####### MPI Report: ########\n')
- log(4,u.verbose.report(self.common),True)
- parallel.barrier()
- logger.info(headerline('Analysis done','l')+'\n')
-
- if self.info.save is not None and parallel.master:
- logger.info('Appending common dict to file %s\n' % self.info.dfile)
- io.h5append(self.info.dfile, common=dict(self.common), info=dict(self.info))
- # wait for master
- parallel.barrier()
-
- self.is_initialized = True
-
- def _finalize(self):
- """
- Last actions when Eon-of-Scan is reached
- """
- # maybe do this at end of everything
- if self.info.save is not None and parallel.master:
- io.h5append(self.info.dfile, common=dict(self.common), info=dict(self.info))
-
- def load_common(self):
- """
- **Overwrite in child class**
-
- Loads arrays that are common and needed for preparation of all
- other data frames coming in. Any array loaded here and returned
- in a dict will be distributed afterwards through a broadcoast.
- It is not intended to load diffraction data in this step.
-
- The main purpose is that there may be common data to all processes, that
- is slow to retrieve or large files, such that if all processes
- attempt to get the data, perfomance will decline or RAM usage
- may be too large.
-
- It is a good idea to load dark field, flat field, etc
- Also positions may be handy to load here
-
- If `load_parallel` is set to `all` or common`, this function is
- executed by all nodes, otherwise the master node executes this
- function and braodcasts the results to other nodes.
-
- Returns
- -------
- common : dict of numpy arrays
- At least two keys, `weight2d` and `positions_scan` must be
- given (they can be None). The return dictionary is available
- throught
-
- Note
- ----
- The return signature of this function is not yet fixed and may
- get altered in near future. One Option would be to include
- `weight2d` and `positions_scan` as part of the return signature
- and thus fixing them in the Base class.
- """
- weight2d = None if self.info.shape is None else np.ones(u.expect2(self.info.shape), dtype='bool')
- positions_scan = None if self.num_frames is None else np.indices((self.num_frames, 2)).sum(0)
- return {'weight2d': weight2d,
- 'positions_scan': positions_scan}
-
- def _mpi_check(self, chunksize, start=None):
- """
- Executes the check() function on master node and communicates
- the result with the other nodes.
- This function determines if the end of the scan is reached
- or if there is more data after a pause.
-
- returns:
- - codes WAIT or EOS
- - or (start, frames) if data can be loaded
- """
- # take internal counter if not specified
- s = self.framestart if start is None else int(start)
-
- # check may contain data system access, so we keep it to one process
- if parallel.master:
- self.frames_accessible, eos = self.check(chunksize, start=s)
- if self.num_frames is None and eos is None:
- logger.warning('Number of frames not specified and .check() cannot determine end-of-scan. Aborting..')
- self.abort = True
- self.end_of_scan = (s + self.frames_accessible >= self.num_frames) if eos is None else eos
- # wait for master
- parallel.barrier()
- # communicate result
- self._flags = parallel.bcast(self._flags)
-
- # abort here if the flag was set
- if self.abort:
- raise RuntimeError('Load routine incapable to determine end-of-scan.')
-
- N = self.frames_accessible
- # wait if too few frames are available and we are not at the end
- if N < self.min_frames and not self.end_of_scan:
- return WAIT
- elif self.end_of_scan and N <= 0:
- return EOS
- else:
- # move forward,set new starting point
- self.framestart += N
- return s, N
-
- def _mpi_indices(self, start, step):
- """
- Funtion to generate the diffraction data index lists that
- determine which node contains which data.
- """
- indices = u.Param()
-
- # all indices in this chunk of data
- indices.chunk = range(start, start + step)
-
- # let parallel.loadmanager take care of assigning these indices to nodes
- indices.lm = parallel.loadmanager.assign(indices.chunk)
- # this one contains now a list of indices listed after ranke
-
- # index list (node specific)
- indices.node = [indices.chunk[i] for i in indices.lm[parallel.rank]]
-
- # store internally
- self.indices = indices
- return indices
-
- def get_data_chunk(self, chunksize, start=None):
- """
- This function prepares a container that is compatible to data package
- This function is called from the auto() function.
- """
- msg = self._mpi_check(chunksize, start)
- if msg in [EOS, WAIT]:
- logger.info(CODES[msg])
- return msg
- else:
- start, step = msg
-
- # get scan point index distribution
- indices = self._mpi_indices(start, step)
-
- # what did we get?
- data, positions, weights = self._mpi_pipeline_with_dictionaries(indices)
- # all these dictionaries could be empty
- # fill weights dictionary with references to the weights in common
-
- has_data = (len(data) > 0)
- has_weights = (len(weights) > 0) and len(weights.values()[0])>0
-
- if has_data:
- dsh = np.array(data.values()[0].shape[-2:])
- else:
- dsh = np.zeros([0, 0])
-
- # communicate result
- parallel.MPImax(dsh)
-
- if not has_weights:
- # peak at first item
- if self.has_weight2d:
- altweight = self.common.weight2d
- else:
- try:
- altweight = self.meta.weight2d
- except:
- altweight = np.ones(dsh)
- weights = dict.fromkeys(data.keys(), altweight)
-
- assert len(weights) == len(data), 'Data and Weight frames unbalanced %d vs %d' % (len(data), len(weights))
-
- sh = self.info.shape
- # adapt roi if not set
- if sh is None:
- logger.info('ROI not set. Using full frame shape of (%d,%d).' % tuple(dsh))
- sh = dsh
- else:
- sh = u.expect2(sh)
-
- # only allow square slices in data
- if sh[0] != sh[1]:
- roi = u.expect2(sh.min())
- logger.warning('Asymmetric data ROI not allowed. Setting ROI from (%d,%d) to (%d,%d)' % (sh[0],sh[1],roi[0],roi[1]))
- sh = roi
-
- self.info.shape = sh
-
- cen = self.info.center
- if str(cen)==cen:
- cen=geometry.translate_to_pix(sh,cen)
-
- auto = self.info.auto_center
- # get center in diffraction image
- if auto is None or auto is True:
- auto_cen = self._mpi_autocenter(data, weights)
- else:
- auto_cen = None
-
- if cen is None and auto_cen is not None:
- logger.info('Setting center for ROI from %s to %s.' %(str(cen),str(auto_cen)))
- cen = auto_cen
- elif cen is None and auto is False:
- cen = dsh // 2
- else:
- # center is number or tuple
- cen = u.expect2(cen[-2:])
- if auto_cen is not None:
- logger.info('ROI center is %s, automatic guess is %s.' %(str(cen),str(auto_cen)))
-
- # It is important to set center again in order to NOT have different centers for each chunk
- # the downside is that the center may be off if only a few diffraction patterns were
- # used for the analysis. In such case it is beneficial to set the center in the parameters
- self.info.center = cen
-
- # make sure center is in the image frame
- assert (cen > 0).all() and (
- dsh - cen > 0).all(), 'Optical axes (center = (%.1f,%.1f) outside diffraction image frame (%d,%d)' % tuple(cen)+tuple(dsh)
-
- # determine if the arrays require further processing
- do_flip = self.orientation is not None and np.array(self.orientation).any()
- do_crop = (np.abs(sh - dsh) > 0.5).any()
- do_rebin = self.rebin is not None and (self.rebin != 1)
-
- if do_flip or do_crop or do_rebin:
- logger.info('Enter preprocessing (crop/pad %s, rebin %s, flip/rotate %s) ... \n' %
- (str(do_crop), str(do_flip), str(do_rebin)))
- # we proceed with numpy arrays. That is probably now
- # more memory intensive but shorter in writing
- if has_data:
- d = np.array([data[ind] for ind in indices.node])
- w = np.array([weights[ind] for ind in indices.node])
- else:
- d = np.ones((1,)+tuple(dsh))
- w = np.ones((1,)+tuple(dsh))
-
- # flip, rotate etc.
- d, tmp = u.switch_orientation(d, self.orientation, cen)
- w, cen = u.switch_orientation(w, self.orientation, cen)
-
- # crop
- d, tmp = u.crop_pad_symmetric_2d(d, sh, cen)
- w, cen = u.crop_pad_symmetric_2d(w, sh, cen)
-
- # rebin, check if rebinning is neither to strong nor impossible
- rebin = self.rebin
- if rebin<=1:
- pass
- elif rebin in range(2, 6) and (((self.roi / float(rebin)) % 1) == 0.0).all():
- mask = w > 0
- d = u.rebin_2d(d, rebin)
- w = u.rebin_2d(w, rebin)
- mask = u.rebin_2d(mask, rebin)
- w[mask < 1] = 0
- else:
- raise RuntimeError('Binning (%d) is to large or incompatible with array shape (%s)' % (rebin,str(tuple(sh))))
-
- if has_data:
- # translate back to dictionaries
- data = dict(zip(indices.node, d))
- weights = dict(zip(indices.node, w))
-
- # adapt meta info
- self.meta.center = cen / float(self.rebin)
- self.meta.shape = u.expect2(sh) / self.rebin
- self.meta.psize = u.expect2(self.info.psize) * self.rebin if self.info.psize is not None else None
-
- # prepare chunk of data
- chunk = u.Param()
- chunk.indices = indices.chunk
- chunk.indices_node = indices.node
- chunk.num = self.chunknum
- chunk.data = data
-
- # if there were weights we add them to chunk,
- # otherwise we push it into meta
- if has_weights:
- chunk.weights = weights
- elif has_data:
- chunk.weights = {}
- self.meta.weight2d = weights.values()[0]
-
- # slice positions from common if they are empty too
- if positions is None or len(positions) == 0:
- pt = self.info.positions_theory
- if pt is not None:
- chunk.positions = pt[indices.chunk]
- else:
- try:
- chunk.positions = self.info.positions_scan[indices.chunk]
- except:
- logger.info('Unable to slice position information from experimental or theoretical ressource')
- chunk.positions = [None]*len(indices.chunk)
- else:
- # a dict : sort positions to indices.chunk
-            # this may fail if there are fewer positions than scan points (unlikely)
- chunk.positions = np.asarray([positions[i] for i in indices.chunk])
- # positions complete
-
- # with first chunk we update meta
- if self.chunknum < 1:
- """
- for k, v in self.meta.items():
- # FIXME: I would like to avoid this "blind copying"
- # BE: This is not a blind copy as only keys in META above are used
- self.meta[k] = self.__dict__.get(k, self.info.get(k)) if v is None else v
- self.meta['center'] = cen
- """
-
- if self.info.save is not None and parallel.master:
- io.h5append(self.dfile, meta=dict(self.meta))
- parallel.barrier()
-
- self.chunk = chunk
- self.chunknum += 1
-
- return chunk
-
- def auto(self, frames, chunk_form='dp'):
- """
- Repeated calls to this function will process the data
-
- Parameters
- ----------
- frames : int
- Number of frames to process.
-
- Returns
- -------
- variable
- one of the following
- - None, if scan's end is not reached, but no data could be prepared yet
- - False, if scan's end is reached
- - a data package otherwise
- """
- # attempt to get data:
- msg = self.get_data_chunk(frames)
- if msg == WAIT:
- return msg
- elif msg == EOS:
- # cleanup maybe?
- self._finalize()
- # del self.common
- # del self.chunk
- return msg
- else:
- out = self.return_chunk_as(msg, chunk_form)
- # save chunk
- if self.info.save is not None:
- self._mpi_save_chunk(self.info.save,msg)
- # delete chunk
- del self.chunk
- return out
-
- def return_chunk_as(self, chunk, kind='dp'):
- """
- Returns the loaded data chunk `chunk` in the format `kind`
- For now only kind=='dp' (data package) is valid.
- """
- # this is a bit ugly now
- if kind != 'dp':
-            raise RuntimeError('Unknown kind of chunk format: %s' % str(kind))
-
- out = {}
-
- # The "common" part
- out['common'] = self.meta
-
- # The "iterable" part
- iterables = []
- for pos, index in zip(chunk.positions, chunk.indices):
- frame = {'index': index,
- 'data': chunk.data.get(index),
- 'position': pos}
- if frame['data'] is None:
- frame['mask'] = None
- else:
-                # ok, we now know that we need a mask since data is not None:
-                # first look in the chunk for a weight for this index, then
-                # look for a 2d-weight in meta, otherwise arbitrarily set the
-                # weight to ones.
- w = chunk.weights.get(index, self.meta.get('weight2d', np.ones_like(frame['data'])))
- frame['mask'] = (w > 0)
- iterables.append(frame)
- out['iterable'] = iterables
- return out
-
- def _mpi_pipeline_with_dictionaries(self, indices):
- """
-        Example processing pipeline using dictionaries.
-
-        return :
-            data, positions, weights
-            -- Dictionaries. Keys are the respective scan point indices.
-            `positions` and `weights` may be empty. If so, the information
-            is taken from the self.common dictionary
-
- """
- if self.load_in_parallel:
- # all nodes load raw_data and slice according to indices
- raw, pos, weights = self.load(indices=indices.node)
-
-            # gather position information as every node needs it later
- pos = parallel.gather_dict(pos)
- else:
- if parallel.master:
- raw, pos, weights = self.load(indices=indices.chunk)
- else:
- raw = {}
- pos = {}
- weights = {}
- # distribute raw data across nodes according to indices
- raw = parallel.bcast_dict(raw, indices.node)
- weights = parallel.bcast_dict(weights, indices.node)
-
- # (re)distribute position information - every node should now be
- # aware of all positions
- parallel.bcast_dict(pos)
-
- # prepare data across nodes
- data, weights = self.correct(raw, weights, self.common)
-
- return data, pos, weights
-
- def check(self, frames=None, start=None):
- """
- **Overwrite in child class**
-
-        This method checks how many frames the preparation routine may
-        process, starting from frame `start`, when `frames` frames are
-        requested.
-
-        This method is supposed to return the number of accessible frames
-        for preparation and should determine if data acquisition for this
-        scan is finished.
-
- Parameters
- ----------
- frames : int or None
- Number of frames requested.
- start : int or None
- Scanpoint index to start checking from.
-
- Returns
- -------
- frames_accessible : int
- Number of frames readable.
- end_of_scan : int or None
- is one of the following:
- - 0, end of the scan is not reached
-            - 1, end of scan is reached or will be with this call
- - None, can't say
-
- """
- if start is None:
- start = self.framestart
- if frames is None:
- frames = self.min_frames
-
- frames_accessible = min((frames, self.num_frames - start))
-
- return frames_accessible, None
-
- @property
- def end_of_scan(self):
- return not (self._flags[1] == 0)
-
- @end_of_scan.setter
- def end_of_scan(self, eos):
- self._flags[1] = int(eos)
-
- @property
- def frames_accessible(self):
- return self._flags[0]
-
- @frames_accessible.setter
- def frames_accessible(self, frames):
- self._flags[0] = frames
-
- @property
- def abort(self):
- return not (self._flags[2] == 0)
-
- @abort.setter
- def abort(self, abort):
- self._flags[2] = int(abort)
-
- def load(self, indices):
- """
- **Overwrite in child class**
-
-        Loads data according to node-specific scanpoint indices that have
-        been determined by the loadmanager from utils.parallel or otherwise.
-
- Returns
- -------
- raw, positions, weight : dict
- Dictionaries whose keys are the given scan point `indices`
- and whose values are the respective frame / position according
- to the scan point index. `weight` and `positions` may be empty
- """
- # dummy fill
- raw = dict((i, i * np.ones(u.expect2(self.info.shape))) for i in indices)
- return raw, {}, {}
-
- def correct(self, raw, weights, common):
- """
- **Overwrite in child class**
-
-        Placeholder for dark- and flatfield correction. If :any:`load`
-        already provides data in the form of photon counts, and no frame-
-        specific weight is needed, this method may be left as is.
-
- May get *merged* with :any:`load` in future.
-
- Returns
- -------
- data,weights : dict
-            Flat- and dark-corrected data dictionaries. These dictionaries
-            must have the same keys as the input `raw` and contain
-            corrected frames (`data`) and statistical weights (`weights`)
-            which are zero for invalid or masked pixels, otherwise the
-            number of detector counts that corresponds to one photon count
- """
- # c=dict(indices=None,data=None,weight=None)
- data = raw
- return data, weights
-
- def _mpi_autocenter(self, data, weights):
- """
- Calculates the frame center across all nodes.
-        Data and weights are dicts of the same length that differ on each node
- """
- cen = dict([(k, u.mass_center(d * (weights[k] > 0))) for k, d in data.iteritems()])
-        # for some nodes, cen may still be empty. Therefore we use gather_dict to be safe
- cen = parallel.gather_dict(cen)
- parallel.barrier()
- # now master possesses all calculated centers
- if parallel.master:
- cen = np.array(cen.values()).mean(0)
- else:
- cen = np.array([0., 0.])
- # print cen
- parallel.allreduce(cen)
- return cen
-
- def report(self, what=None, shout=True):
- """
- Make a report on internal structure
- """
- what = what if what is not None else self.__dict__
- msg = u.verbose.report(what)
- if shout:
- logger.info(msg, extra={'allprocesses': True})
- else:
- return msg
-
- def _mpi_save_chunk(self, kind='link', chunk=None):
- """
- Saves data chunk to hdf5 file specified with `dfile`
- It works by gathering weights and data to the master node.
- Master node then writes to disk.
-
- In case you support parallel hdf5 writing, please modify this
- function to suit your installation.
-
- 2 out of 3 modes currently supported
-
- kind : 'merge','append','link'
-
- 'append' : appends chunks of data in same file
-        'link' : saves chunks in separate files and adds ExternalLinks
-
-        TODO:
-            * For the 'link' case, saving still requires access to the
-              main file `dfile`, even if only to add the link.
-              This may result in conflicts if the main file is polled
-              often by a separate read process.
-              A workaround would be to write the links on startup in
-              initialise()
-
- """
- # gather all distributed dictionary data.
- c = chunk if chunk is not None else self.chunk
-
- # shallow copy
- todisk = dict(c)
- num = todisk.pop('num')
- ind = todisk.pop('indices_node')
- for k in ['data', 'weights']:
- if k in c.keys():
- if hasattr(c[k], 'iteritems'):
- v = c[k]
- else:
- v = dict(zip(ind, np.asarray(c[k])))
- parallel.barrier()
- # gather the content
- newv = parallel.gather_dict(v)
- todisk[k] = np.asarray([newv[i] for i in sorted(newv.keys())])
-
- parallel.barrier()
- # all information is at master node.
- if parallel.master:
-
- # form a dictionary
- if str(kind) == 'append':
- h5address = 'chunks/%d' % num
- io.h5append(self.dfile, {h5address: todisk})
-
- elif str(kind) == 'link':
- import h5py
- h5address = 'chunks/%d' % num
- hddaddress = self.dfile + '.part%03d' % num
- with h5py.File(self.dfile) as f:
- f[h5address] = h5py.ExternalLink(hddaddress, '/')
- io.h5write(hddaddress, todisk)
- elif str(kind) == 'merge':
- raise NotImplementedError('Merging all data into single chunk is not yet implemented')
- parallel.barrier()
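
# --- Editor's illustration (not part of the diff) ---------------------------
# A minimal sketch of the 'link' mode implemented above: per-chunk data goes
# into separate files, and the main file only holds ExternalLinks to them.
# File names here are hypothetical; only h5py and numpy are assumed.
import h5py
import numpy as np

with h5py.File('chunk_000.h5', 'w') as part:       # one file per chunk
    part['data'] = np.zeros((2, 4, 4))
with h5py.File('main.ptyd', 'w') as f:             # main file holds a link only
    f['chunks/0'] = h5py.ExternalLink('chunk_000.h5', '/')
with h5py.File('main.ptyd', 'r') as f:             # reading resolves the link
    print(f['chunks/0/data'].shape)                # -> (2, 4, 4)
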
-
-class PtydScan(PtyScan):
- """
- PtyScan provided by native "ptyd" file format.
- """
- DEFAULT = GENERIC.copy()
-
- def __init__(self, pars=None, source=None,**kwargs):
- """
- PtyScan provided by native "ptyd" file format.
-
- :param source: Explicit source file. If not None or 'file',
- the data may get processed depending on user input
-
- :param pars: Input like PtyScan
- """
- # create parameter set
- p = u.Param(self.DEFAULT.copy())
-
- # copy the label
- #if pars is not None:
- # p.label = pars.get('label')
-
- if source is None or str(source)=='file':
- # this is the case of absolutely no additional work
-            logger.info('No explicit source file was given. Will continue read-only')
- source = pars['dfile']
- manipulate = False
- elif pars is None or len(pars)==0:
- logger.info('No parameters provided. Saving / modification disabled')
- manipulate = False
- else:
- logger.info('Explicit source file given. Modification is possible\n')
- dfile = pars['dfile']
- # check for conflict
- if str(u.unique_path(source))==str(u.unique_path(dfile)):
- logger.info('Source and Sink files are the same.')
-            # os.path.splitext keeps the dot in the extension
-            dfile = os.path.splitext(dfile)
-            dfile = dfile[0] + '_n' + dfile[1]
- logger.info('Will instead save to %s if necessary.' % os.path.split(dfile)[1])
-
- pars['dfile']= dfile
- manipulate = True
- p.update(pars)
-
-
- # make sure the source exists.
- assert os.path.exists(u.unique_path(source)), 'Source File (%s) not found' % source
-
- self.source = source
-
- # get meta information
- meta = u.Param(io.h5read(self.source, 'meta')['meta'])
-
- # update given parameters when they are None
- if not manipulate:
- super(PtydScan, self).__init__(meta, **kwargs)
- else:
- # overwrite only those set to None
- for k, v in meta.items():
- if p.get(k) is None:
- p[k] = v
- # Initialize parent class and fill self
- super(PtydScan, self).__init__(p, **kwargs)
-
-        # Other instance attributes
- self._checked = {}
- self._ch_frame_ind = None
-
-
- def check(self, frames=None, start=None):
- """
- Implementation of the check routine for a .ptyd file format
-
- See also
- --------
-        PtyScan.check
- """
-
- if start is None:
- start = self.framestart
- if frames is None:
- frames = self.min_frames
-
- # Get info about size of currently available chunks.
- # Dead external links will produce None and are excluded
- with h5py.File(self.source, 'r') as f:
- d = {}
- chitems = sorted([(int(k), v) for k, v in f['chunks'].iteritems() if v is not None], key=lambda t: t[0])
- for chkey in chitems[0][1].keys():
- d[chkey] = np.array([((int(k),) + v[chkey].shape) for k, v in chitems if v is not None])
-
- self._checked = d
- allframes = int(sum([ch[1] for ch in d['data']]))
- self._ch_frame_ind = np.array([(dd[0], frame) for dd in d['data'] for frame in range(dd[1])])
-
- return min((frames, allframes - start)), allframes < start+frames
-
- def _coord_to_h5_calls(self, key, coord):
- return 'chunks/%d/%s' % (coord[0], key), slice(coord[1], coord[1] + 1)
-
- def load_common(self):
- """
- In ptyd, 'common' must exist
- """
-        # FIXME: this is totally buggy right now
- return {'weight2d' : self.info.weight2d, 'positions_scan' : None}
-
- def load(self, indices):
- """
- Load from ptyd. Due to possible chunked data, slicing frames is
- non-trivial
- """
-        # ok, we need to communicate some internal info
- parallel.barrier()
- self._ch_frame_ind=parallel.bcast(self._ch_frame_ind)
- parallel.barrier()
- parallel.bcast_dict(self._checked)
-
- # get the coordinates in the chunks
- coords = self._ch_frame_ind[indices]
- calls = {}
- for key in self._checked.keys():
- calls[key] = [self._coord_to_h5_calls(key, c) for c in coords]
-
- # get our data from the ptyd file
- out = {}
- with h5py.File(self.source, 'r') as f:
- for array, call in calls.iteritems():
- out[array] = [np.squeeze(f[path][slce]) for path, slce in call]
-
- # if the chunk provided indices, we use those instead of our own
- # Dangerous and not yet implemented
- # indices = out.get('indices',indices)
-
- # wrap in a dict
- for k, v in out.iteritems():
- out[k] = dict(zip(indices, v))
-
- return (out.get(key, {}) for key in ['data', 'positions', 'weights'])
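
# --- Editor's illustration (not part of the diff) ---------------------------
# The flat-index -> (chunk, frame) bookkeeping that check() and load() above
# rely on, with made-up chunk sizes. A global frame index maps to an h5 path
# and a slice within that chunk, as in _coord_to_h5_calls().
import numpy as np

chunk_sizes = [('0', 3), ('1', 2)]                 # chunks/0: 3 frames, chunks/1: 2
ch_frame_ind = np.array([(int(num), frame)
                         for num, size in chunk_sizes
                         for frame in range(size)])
num, frame = ch_frame_ind[4]                       # global frame 4
print('chunks/%d/data' % num, slice(frame, frame + 1))  # -> chunks/1, slice(1, 2)
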
-
-
-class MoonFlowerScan(PtyScan):
- """
- Test Ptyscan class producing a romantic ptychographic dataset of a moon
- illuminating flowers.
- """
-
-    DEFAULT = GENERIC.copy()
-    DEFAULT.update(geometry.DEFAULT.copy())  # update() works in place and returns None
-
- def __init__(self, pars = None, **kwargs):
- """
-        Parent `pars` are interpreted as geometry parameters.
- """
- p = geometry.DEFAULT.copy()
- if pars is not None:
- p.update(pars)
- # Initialize parent class
- super(MoonFlowerScan, self).__init__(p, **kwargs)
-
- from ptypy import resources
- from ptypy.core import xy
-
- # derive geometry from input
- G = geometry.Geo(pars = self.meta)
- #G._initialize(self.meta)
-
- # derive scan pattern
- pos = u.Param()
- pos.spacing = G.resolution * G.shape / 5.
- pos.layers = np.int(np.round(np.sqrt(self.num_frames)))+1
- pos.extent = pos.layers * pos.spacing
- pos.model = 'round'
- pos.count = self.num_frames
- self.pos = xy.from_pars(pos)
-
- # calculate pixel positions
- pixel = self.pos / G.resolution
- pixel -= pixel.min(0)
- self.pixel = np.round(pixel).astype(int) + 10
- frame = self.pixel.max(0) + 10 + G.shape
- self.G = G
- # get object
- self.obj = resources.flower_obj(frame)
-
- # get probe
- moon = resources.moon_pr(self.G.shape)
- moon /= np.sqrt(u.abs2(moon).sum() / 1e8)
- self.pr = moon
- self.load_common_in_parallel = True
-
- def load_common(self):
- """
- Transmit positions
- """
- return {'weight2d': np.ones(self.pr.shape),
- 'positions_scan': self.pos}
-
- def load(self, indices):
- """
- Forward propagation
- """
- # dummy fill
- p=self.pixel
- s=self.G.shape
- raw = {}
-        for i in indices:
-            raw[i] = np.random.poisson(u.abs2(self.G.propagator.fw(
-                self.pr * self.obj[p[i][0]:p[i][0] + s[0],
-                                   p[i][1]:p[i][1] + s[1]]))).astype(np.int32)
- return raw, {}, {}
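
# --- Editor's illustration (not part of the diff) ---------------------------
# The forward model used by MoonFlowerScan.load() above, with random
# stand-ins for probe and object and a plain FFT as the propagator.
import numpy as np

shape = (32, 32)
probe = np.ones(shape, dtype=complex)              # flat probe
obj = np.exp(1j * np.random.rand(64, 64))          # random phase object
r, c = 10, 10                                      # pixel position of this frame
patch = obj[r:r + shape[0], c:c + shape[1]]        # cut the illuminated patch
farfield = np.fft.fft2(probe * patch)              # propagate to the detector
frame = np.random.poisson(np.abs(farfield) ** 2).astype(np.int32)
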
-
-
-
-class DataSource(object):
- """
- A source of data for ptychographic reconstructions. The important method is "feed_data", which returns
- packets of diffraction patterns with their meta-data.
- """
- def __init__(self, scans, frames_per_call=10000000, feed_format='dp'):
- """
- DataSource initialization.
-
-        scans: a dictionary of scan structures.
-        frames_per_call: (optional) number of frames to load in one call. By default, load as many as possible.
-        feed_format: the format in which the data is packaged. For now only 'dp' is implemented.
- """
-
- from ..experiment import PtyScanTypes
-
- self.frames_per_call = frames_per_call
- self.feed_format = feed_format
- self.scans = scans
-
-        # sort by the given keys
- self.labels = sorted(scans.keys())
-
- # empty list for the scans
- self.PS = []
-
- for label in self.labels:
- # we are making a copy of the root as we want to fill it
- scan = scans[label]
- s = scan['pars']
-
- # Copy other relevant information
- prep = s.data.copy()
-
- # Assign label
- prep.label = label
-
- source = prep.source
- recipe = prep.get('recipe',{})
- if prep.get('positions_theory') is None:
- prep.positions_theory = scan['pos_theory']
-
- prep.dfile = s.data_file
- #prep.geometry = s.geometry.copy()
- #prep.xy = s.xy.copy()
-
- if source is not None:
- source = source.lower()
-
- if source in PtyScanTypes:
- PS = PtyScanTypes[source]
- logger.info('Scan %s will be prepared with the recipe "%s"' % (label, source))
- self.PS.append(PS(prep, recipe= recipe))
- elif source.endswith('.ptyd') or source.endswith('.pty') or str(source)=='file':
- self.PS.append(PtydScan(prep, source=source))
- elif source=='test':
- self.PS.append(MoonFlowerScan(prep))
- elif source=='sim':
- from ..simulations import SimScan
-                logger.info('Scan %s will be simulated' % (label))
- self.PS.append(SimScan(prep,s.copy()))
- elif source=='empty' or source is None:
- prep.recipe = None
- logger.warning('Generating dummy PtyScan for scan `%s` - This label will source only zeros as data' % label)
- self.PS.append(PtyScan(prep))
- else:
- raise RuntimeError('Could not manage source "%s" for scan `%s`' % (str(source),label))
-
- # Initialize flags
- self.scan_current = -1
- self.data_available = True
- self.scan_total = len(self.PS)
-
- @property
- def scan_available(self):
- return self.scan_current < (self.scan_total - 1)
-
- def feed_data(self):
- """
- Yield data packages.
- """
- # get PtyScan instance to scan_number
- PS = self.PS[self.scan_current]
- label = self.labels[self.scan_current]
-
- # initialize if that has not been done yet
- if not PS.is_initialized:
- PS.initialize()
-
- msg = PS.auto(self.frames_per_call, self.feed_format)
- # if we catch a scan that has ended look for an unfinished scan
- while msg == EOS and self.scan_available:
- self.scan_current += 1
- PS = self.PS[self.scan_current]
- label = self.labels[self.scan_current]
- if not PS.is_initialized:
- PS.initialize()
- msg = PS.auto(self.frames_per_call, self.feed_format)
-
- self.data_available = (msg != EOS or self.scan_available)
-
- logger.debug(u.verbose.report(msg))
- if msg != WAIT and msg != EOS:
- # ok that would be a data package
- # attach inner label
- msg['common']['ptylabel'] = label
- #print u.verbose.report(msg)
- logger.info('Feeding data chunk')
- return msg
- else:
- return None
-
-if __name__ == "__main__":
- u.verbose.set_level(3)
- MS = MoonFlowerScan(num_frames=100)
- MS.initialize()
- for i in range(50):
- msg = MS.auto(10)
- logger.info(u.verbose.report(msg), extra={'allprocesses': True})
- parallel.barrier()
-
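
# --- Editor's illustration (not part of the diff) ---------------------------
# The consumer side of the WAIT/EOS protocol above: feed_data() returns None
# while a scan is waiting or has just ended, and a 'dp' dict otherwise; the
# data_available flag goes False once every scan has reached EOS. `source`
# stands in for a DataSource instance.
def drain(source):
    packages = []
    while source.data_available:
        msg = source.feed_data()
        if msg is not None:
            packages.append(msg)
    return packages
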
diff --git a/ptypy/core/geometry.py b/ptypy/core/geometry.py
index f751dffca..4c0ade50b 100644
--- a/ptypy/core/geometry.py
+++ b/ptypy/core/geometry.py
@@ -6,51 +6,23 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-# for solo use ##########
-if __name__ == "__main__":
- from ptypy import utils as u
- from ptypy.utils.verbose import logger
- from ptypy.core import Base
- GEO_PREFIX = 'G'
-# for in package use #####
-else:
- from .. import utils as u
- from ..utils.verbose import logger
- from classes import Base,GEO_PREFIX
-
import numpy as np
from scipy import fftpack
+
+from .. import utils as u
+from ..utils.verbose import logger
+from .classes import Base, GEO_PREFIX
+from ..utils.descriptor import EvalDescriptor
+
try:
import pyfftw
import pyfftw.interfaces.numpy_fft as fftw_np
except ImportError:
- print "Warning: unable to import pyFFTW! Will use a slower FFT method."
+ pass
+ #logger.warning("Unable to import pyFFTW! Will use a slower FFT method.")
-__all__ = ['DEFAULT', 'Geo', 'BasicNearfieldPropagator',
- 'BasicFarfieldPropagator']
+__all__ = ['Geo', 'BasicNearfieldPropagator', 'BasicFarfieldPropagator']
-DEFAULT = u.Param(
- # Incident photon energy (in keV)
- energy=6.2,
- # Wavelength (in meters)
- lam=None,
- # Distance from object to screen
- distance=7.0,
- # Pixel size (in meters) at detector plane
- psize=172e-6,
- # Pixel size (in meters) at sample plane
- resolution=None,
- # Number of detector pixels
- shape=256,
- # Propagation type
- propagation='farfield',
- misfit=0,
- center='fftshift',
- origin='fftshift',
- precedence=None,
-)
-""" Default geometry parameters. See also :py:data:`.scan.geometry`
- and an unflattened representation below """
_old2new = u.Param(
# Distance from object to screen
@@ -67,6 +39,8 @@
)
+local_tree = EvalDescriptor('')
+@local_tree.parse_doc()
class Geo(Base):
"""
Hold and keep consistent the information about experimental parameters.
@@ -80,11 +54,89 @@ class Geo(Base):
interact : bool (True)
If set to True, changes to properties like :py:meth:`energy`,
:py:meth:`lam`, :py:meth:`shape` or :py:meth:`psize` will cause
- a call to :py:meth:`update`
-
+ a call to :py:meth:`update`.
+
+
+ Default geometry parameters. See also :py:data:`.scan.geometry`
+
+ Defaults:
+
+ [energy]
+ type = float
+ default = 6.2
+ help = Energy (in keV)
+ doc = If ``None``, uses `lam` instead.
+ userlevel = 0
+ lowlim = 0
+
+ [lam]
+ type = float
+ default = None
+ help = Wavelength (in meters)
+ doc = Used only if `energy` is ``None``
+ userlevel = 0
+ lowlim = 0
+
+ [distance]
+ type = float
+ default = 7.19
+ help = Distance from object to detector (in meters)
+ doc =
+ userlevel = 0
+ lowlim = 0
+
+ [psize]
+ type = float
+ default = 0.000172
+ help = Pixel size in the detector plane (in meters)
+ doc =
+ userlevel = 1
+ lowlim = 0
+
+ [resolution]
+ type = float
+ default = None
+    help = Pixel size in the sample plane (in meters)
+ doc = This parameter is used only for simulations
+ userlevel = 2
+ lowlim = 0
+
+ [propagation]
+ type = str
+ default = farfield
+ help = Propagation type
+ doc = Either "farfield" or "nearfield"
+ userlevel = 1
+
+ [shape]
+ type = int, tuple
+ default = 256
+ help = Number of pixels in detector frame
+ doc = Can be a 2-tuple of int (Nx, Ny) or an int N, in which case it is interpreted as (N, N).
+ userlevel = 1
+
+ [misfit]
+ type = float, tuple
+ default = 0.
+ help = TODO: WHAT IS MISFIT?
+ doc =
+ userlevel = 2
+
+ [center]
+ type = int, tuple, str
+ default = 'fftshift'
+ help = TODO: document this parameter.
+ doc =
+ userlevel = 2
+
+ [origin]
+ type = int, tuple, str
+ default = 'fftshift'
+ help = TODO: document this parameter.
+ doc =
+ userlevel = 2
"""
- DEFAULT = DEFAULT
_keV2m = 1.23984193e-09
_PREFIX = GEO_PREFIX
@@ -105,13 +157,9 @@ def __init__(self, owner=None, ID=None, pars=None, **kwargs):
if the key exists in `Geo.DEFAULT`.
"""
super(Geo, self).__init__(owner, ID)
- # if len(kwargs)>0:
- # self._initialize(**kwargs)
-
- # def _initialize(self, pars=None, **kwargs):
# Starting parameters
- p = u.Param(DEFAULT)
+ p = self.DEFAULT.copy(99)
if pars is not None:
p.update(pars)
for k, v in p.items():
@@ -122,6 +170,13 @@ def __init__(self, owner=None, ID=None, pars=None, **kwargs):
p[k] = v
self.p = p
+ self._initialize(p)
+
+ def _initialize(self, p):
+ """
+ Parse input parameters, fill missing parameters and set up a
+ propagator.
+ """
self.interact = False
# Set distance
@@ -410,8 +465,8 @@ def assign_fft(self):
elif str(self.ffttype) == 'numpy':
self._numpy_fft()
elif isinstance(self.ffttype, tuple):
- self.fft = ffttype[0]
- self.ifft = ffttype[1]
+ self.fft = self.ffttype[0]
+ self.ifft = self.ffttype[1]
return (self.fft, self.ifft)
@@ -425,7 +480,6 @@ class BasicFarfieldPropagator(object):
Be aware though, that if the origin is not in the center of the frame,
coordinates are rolled periodically, just like in the conventional fft case.
"""
- DEFAULT = DEFAULT
def __init__(self, geo_pars=None, ffttype='fftw', **kwargs):
"""
@@ -456,7 +510,7 @@ def __init__(self, geo_pars=None, ffttype='fftw', **kwargs):
self.post_ifft = None
# Get default parameters and update
- self.p = u.Param(DEFAULT)
+ self.p = u.Param(Geo.DEFAULT)
if 'dtype' in kwargs:
self.dtype = kwargs['dtype']
else:
@@ -495,12 +549,12 @@ def update(self, geo_pars=None, **kwargs):
lz /= (self.sh[0] + mis[0] - self.crop_pad[0]) / self.sh[0]
# Calculate the grids
- if str(p.origin) == p.origin:
+ if u.isstr(p.origin):
c_sam = p.origin
else:
c_sam = p.origin + self.crop_pad / 2.
- if str(p.center) == p.center:
+ if u.isstr(p.center):
c_det = p.center
else:
c_det = p.center + self.crop_pad / 2.
@@ -604,7 +658,6 @@ class BasicNearfieldPropagator(object):
"""
Basic two step (i.e. two ffts) Nearfield Propagator.
"""
- DEFAULT = DEFAULT
def __init__(self, geo_pars=None, ffttype='fftw', **kwargs):
"""
@@ -630,7 +683,7 @@ def __init__(self, geo_pars=None, ffttype='fftw', **kwargs):
self.ikernel = None
# Get default parameters and update
- self.p = u.Param(DEFAULT)
+ self.p = u.Param(Geo.DEFAULT)
self.dtype = kwargs['dtype'] if 'dtype' in kwargs else np.complex128
self.update(geo_pars, **kwargs)
self.FFTch = FFTchooser(ffttype)
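
# --- Editor's illustration (not part of the diff) ---------------------------
# A plain-number sketch of the standard far-field sampling relation that
# underlies the Geo defaults above (energy 6.2 keV, distance 7.19 m,
# psize 172e-6 m, shape 256); no ptypy import required.
keV2m = 1.23984193e-09                     # same constant as Geo._keV2m
energy, distance, psize, N = 6.2, 7.19, 172e-6, 256
lam = keV2m / energy                       # wavelength in meters
resolution = lam * distance / (N * psize)  # sample-plane pixel size
print('%.3e m' % resolution)               # ~3.3e-08 m
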
diff --git a/ptypy/core/geometry_bragg.py b/ptypy/core/geometry_bragg.py
new file mode 100644
index 000000000..55f8c8982
--- /dev/null
+++ b/ptypy/core/geometry_bragg.py
@@ -0,0 +1,549 @@
+"""
+Geometry management and propagation for Bragg geometry.
+"""
+
+from .. import utils as u
+from ..utils.verbose import logger
+from .geometry import Geo as _Geo
+from ..utils.descriptor import EvalDescriptor
+from .classes import Container, Storage, View
+import numpy as np
+from scipy.ndimage.interpolation import map_coordinates
+
+__all__ = ['Geo_Bragg']
+
+
+local_tree = EvalDescriptor('')
+@local_tree.parse_doc()
+class Geo_Bragg(_Geo):
+ """
+ Class which presents a Geo analog valid for the 3d Bragg case.
+
+ This class follows the naming convention of:
+ Berenguer et al., Phys. Rev. B 88 (2013) 144101.
+
+ Indexing into all q-space arrays and storages follows (q3, q1, q2),
+ which corresponds to (r3, r1, r2) in the so-called natural real
+ space coordinate system. These coordinates are transformed to
+ (x, z, y) as described below.
+
+ Defaults:
+
+ [psize]
+ type = tuple
+ default = (.065, 172e-6, 172e-6)
+ help = Rocking curve step (in degrees) and pixel sizes (in meters)
+ doc = First element is the rocking curve step.
+
+ [propagation]
+ doc = Only "farfield" is valid for Bragg
+
+ [shape]
+ type = tuple
+ default = (31, 128, 128)
+ help = Number of rocking curve positions and detector pixels
+ doc = First element is the number of rocking curve positions.
+
+ [theta_bragg]
+ type = float
+ default = 6.89
+ help = Diffraction angle (theta, not two theta) in degrees
+
+ [resolution]
+ type = tuple
+ default = None
+ help = 3D sample pixel size (in meters)
+ doc = Refers to the conjugate (natural) coordinate system as (r3, r1, r2).
+ """
+
+ def _initialize(self, p):
+ """
+ Parse input parameters, fill missing parameters and set up a
+ propagator.
+ """
+ self.interact = False
+
+ # Set distance
+ if self.p.distance is None or self.p.distance == 0:
+ raise ValueError(
+ 'Distance (geometry.distance) must not be None or 0')
+
+ # Set frame shape
+ if self.p.shape is None or (np.array(self.p.shape) == 0).any():
+ raise ValueError(
+ 'Frame size (geometry.shape) must not be None or 0')
+ else:
+ self.p.shape = u.expect3(p.shape)
+
+ # Set energy and wavelength
+ if p.energy is None:
+ if p.lam is None:
+ raise ValueError(
+ 'Wavelength (geometry.lam) and energy (geometry.energy)\n'
+ 'must not both be None')
+ else:
+ self.lam = p.lam # also sets energy through a property
+ else:
+ if p.lam is not None:
+ logger.debug('Energy and wavelength both specified. '
+ 'Energy takes precedence over wavelength')
+
+ self.energy = p.energy # also sets lam through a property
+
+ # Pixel size
+ self.p.psize_is_fix = p.psize is not None
+ self.p.resolution_is_fix = p.resolution is not None
+
+ if not self.p.psize_is_fix and not self.p.resolution_is_fix:
+ raise ValueError(
+ 'Pixel size in sample plane (geometry.resolution) and '
+ 'detector plane \n(geometry.psize) must not both be None')
+
+ # Fill pixel sizes
+ if self.p.resolution_is_fix:
+ self.p.resolution = u.expect3(p.resolution)
+ else:
+ self.p.resolution = u.expect3(1.0)
+
+ if self.p.psize_is_fix:
+ self.p.psize = u.expect3(p.psize)
+ else:
+ self.p.psize = u.expect3(1.0)
+
+ # Update other values
+ self.update(False)
+
+ # Attach propagator
+ self._propagator = self._get_propagator()
+ self.interact = True
+
+ def update(self, update_propagator=True):
+ """
+ Update things which need a little computation
+ """
+
+ # Update the internal pixel sizes: 4 cases
+ if not self.p.resolution_is_fix and not self.p.psize_is_fix:
+ raise ValueError(
+ 'Neither pixel size nor sample resolution specified.')
+ elif not self.p.resolution_is_fix and self.p.psize_is_fix:
+ dq1 = self.psize[1] * 2 * np.pi / self.distance / self.lam
+ dq2 = self.psize[2] * 2 * np.pi / self.distance / self.lam
+ dq3 = np.deg2rad(self.psize[0]) * 4 * np.pi / self.lam * self.sintheta
+ self.p.resolution[1] = 2 * np.pi / \
+ (self.shape[1] * dq1 * self.costheta)
+ self.p.resolution[2] = 2 * np.pi / (self.shape[2] * dq2)
+ self.p.resolution[0] = 2 * np.pi / \
+ (self.shape[0] * dq3 * self.costheta)
+ elif self.p.resolution_is_fix and not self.p.psize_is_fix:
+ dq1 = 2 * np.pi / \
+ (self.shape[1] * self.resolution[1] * self.costheta)
+ dq2 = 2 * np.pi / (self.shape[2] * self.resolution[2])
+ dq3 = 2 * np.pi / \
+ (self.shape[0] * self.resolution[0] * self.costheta)
+ self.p.psize[1] = dq1 * self.distance * self.lam / (2 * np.pi)
+ self.p.psize[2] = dq2 * self.distance * self.lam / (2 * np.pi)
+ self.p.psize[0] = np.rad2deg(
+ dq3 * self.lam / (4 * np.pi * self.sintheta))
+ else:
+ raise ValueError(
+ 'Both pixel size and sample resolution specified.')
+
+ # These are useful to have on hand
+ self.dq1 = dq1
+ self.dq2 = dq2
+ self.dq3 = dq3
+
+ # Establish transforms between coordinate systems
+ # ...from {x z y} to {r3 r1 r2}
+ self.A_r3r1r2 = [[1 / self.costheta, 0, 0],
+ [-self.sintheta / self.costheta, 1, 0],
+ [0, 0, 1]]
+ # ...from {r3 r1 r2} to {x z y}
+ self.A_xzy = [[self.costheta, 0, 0],
+ [self.sintheta, 1, 0],
+ [0, 0, 1]]
+ # ...from {qx qz qy} to {q3 q1 q2}
+ self.A_q3q1q2 = [[1, self.sintheta / self.costheta, 0],
+ [0, 1 / self.costheta, 0],
+ [0, 0, 1]]
+ # ...from {q3 q1 q2} to {qx qz qy}
+ self.A_qxqzqy = [[1, -self.sintheta, 0],
+ [0, self.costheta, 0],
+ [0, 0, 1]]
+
+ # Update the propagator too
+ if update_propagator:
+ self.propagator.update()
+
+ @property
+ def theta_bragg(self):
+ return self.p.theta_bragg
+
+ @theta_bragg.setter
+ def theta_bragg(self, v):
+ self.p.theta_bragg = v
+ if self.interact:
+ self.update()
+
+ @property
+ def sintheta(self):
+ return np.sin(np.deg2rad(self.theta_bragg))
+
+ @property
+ def costheta(self):
+ return np.cos(np.deg2rad(self.theta_bragg))
+
+ @property
+ def tantheta(self):
+ return np.tan(np.deg2rad(self.theta_bragg))
+
+ @property
+ def resolution(self):
+ return self.p.resolution
+
+ @resolution.setter
+ def resolution(self, v):
+ self.p.resolution[:] = u.expect3(v)
+ if self.interact:
+ self.update()
+
+ @property
+ def psize(self):
+ return self.p.psize
+
+ @psize.setter
+ def psize(self, v):
+ self.p.psize[:] = u.expect3(v)
+ if self.interact:
+ self.update()
+
+ @property
+ def shape(self):
+ return self.p.shape
+
+ @shape.setter
+ def shape(self, v):
+ self.p.shape[:] = u.expect3(v).astype(int)
+ if self.interact:
+ self.update()
+
+ def _get_propagator(self):
+ """
+ The real space pixel size in the cartesian system.
+ """
+ prop = BasicBragg3dPropagator(self)
+ return prop
+
+ def _r3r1r2(self, p):
+ """
+ Transforms a single point from [x z y] to [r3 r1 r2]
+ """
+ return np.dot(self.A_r3r1r2, p)
+
+ def _xzy(self, p):
+ """
+ Transforms a single point from [r3 r1 r2] to [x z y]
+ """
+ return np.dot(self.A_xzy, p)
+
+ def _q3q1q2(self, p):
+ """
+ Transforms a single point from [qx qz qy] to [q3 q1 q2]
+ """
+ return np.dot(self.A_q3q1q2, p)
+
+    def _qxqzqy(self, p):
+ """
+ Transforms a single point from [q3 q1 q2] to [qx qz qy]
+ """
+ return np.dot(self.A_qxqzqy, p)
+
+ def transformed_grid(self, grids, input_space='real', input_system='natural'):
+ """
+
+ Transforms a coordinate grid between the cartesian and natural
+ coordinate systems in real or reciprocal space.
+
+ Parameters
+ ----------
+ grids : 3-tuple of 3-dimensional arrays: (x, z, y),
+ (r3, r1, r2), (qx, qz, qy), or (q3, q1, q2),
+ or a 3-dimensional Storage instance.
+
+ input_space: `real` or `reciprocal`
+
+ input_system: `cartesian` or `natural`
+
+ """
+
+ if isinstance(grids, Storage):
+ grids = grids.grids()
+
+ # choose transformation operator: 4 cases
+ if input_space == 'real' and input_system == 'natural':
+ r3, r1, r2 = grids
+ z = r1 + self.sintheta * r3
+ y = r2
+ x = self.costheta * r3
+ return x, z, y
+ elif input_space == 'real' and input_system == 'cartesian':
+ x, z, y = grids
+ r1 = z - self.sintheta / self.costheta * x
+ r2 = y
+ r3 = 1 / self.costheta * x
+ return r3, r1, r2
+ elif input_space == 'reciprocal' and input_system == 'natural':
+ q3, q1, q2 = grids
+ qz = self.costheta * q1
+ qy = q2
+ qx = q3 - self.sintheta * q1
+ return qx, qz, qy
+ elif input_space == 'reciprocal' and input_system == 'cartesian':
+ qx, qz, qy = grids
+ q1 = 1 / self.costheta * qz
+ q2 = qy
+ q3 = qx + self.sintheta / self.costheta * qz
+ return q3, q1, q2
+ else:
+ raise ValueError('invalid options')
+
+
+ def coordinate_shift(self, input_storage, input_space='real',
+ input_system='natural', keep_dims=True,
+ layer=0):
+ """
+ Transforms a 3D storage between the cartesian and natural
+ coordinate systems in real or reciprocal space by simply rolling
+ the axes. It tries to do this symmetrically so that the center
+ is maintained.
+
+ Note that this transform can be done in any way, and always
+ involves the choice of a new grid. This method (arbitrarily)
+        chooses the grid which results from skewing along the
+ z/qx direction for real/reciprocal space.
+
+ The shifting is identical to doing a nearest neighbor
+ interpolation, and it would not be difficult to use other
+ interpolation orders by instead shifting an index array and
+ using scipy.ndimage.interpolation.map_coordinates(). But then
+ you have to decide how to interpolate complex numbers.
+
+ Parameters
+ ----------
+ input_storage : The storage to operate on
+
+ input_space: `real` or `reciprocal`
+
+ input_system: `cartesian` or `natural`
+
+ keep_dims : If True, maintain pixel size and number of pixels.
+ If False, keeps all the data of the input storage, which means
+ that the shape of the output storage will be larger than the
+ input.
+
+ """
+
+ C_ = Container(data_type=input_storage.dtype, data_dims=3)
+ S = input_storage
+
+ # Four cases. In real and reciprocal space, these skewing
+ # operations are done along different axes. For each space, the
+ # direction of the transform is taken care of.
+
+ if input_space == 'real':
+ # create a padded copy of the data array
+ shape = S.shape[1:]
+ pad = int(np.ceil(self.sintheta *
+ shape[0] * S.psize[0] / S.psize[1]))
+ d = np.pad(S.data[layer], pad_width=(
+ (0, 0), (0, pad), (0, 0)), mode='constant')
+ # walk along the r3/x axis and roll the r1/z axis. the
+ # array is padded at the bottom (high indices) so the
+ # actual shifts have to be positive.
+ for i in range(shape[0]):
+ if input_system == 'cartesian':
+ # roll the z axis in the negative direction for more
+ # positive x
+ shift = int(
+ round((shape[0] - i) * S.psize[0] * self.sintheta / S.psize[1]))
+ elif input_system == 'natural':
+ # roll the r1 axis in the positive direction for more
+ # positive r3
+ shift = int(
+ round(i * S.psize[0] * self.sintheta / S.psize[1]))
+ d[i, :, :] = np.roll(d[i, :, :], shift, axis=0)
+
+ # optionally crop the new array
+ if keep_dims:
+ d = d[:, pad / 2:shape[1] + pad / 2, :]
+ # construct a new Storage
+ if input_system == 'cartesian':
+ new_psize = S.psize * np.array([1 / self.costheta, 1, 1])
+ elif input_system == 'natural':
+ new_psize = S.psize * np.array([self.costheta, 1, 1])
+ old_center = S.origin + S.psize * shape / 2
+ S_out = C_.new_storage(ID='S0', psize=new_psize,
+ padonly=False, shape=None)
+ V = View(container=C_, storageID='S0', coord=old_center,
+ shape=d.shape, psize=new_psize)
+ S_out.reformat()
+ # should use the view here, but there is a bug (#74)
+ S_out.data[0] = d
+
+ elif input_space == 'reciprocal':
+ # create a padded copy of the data array
+ shape = S.shape[1:]
+ pad = int(np.ceil(self.sintheta * shape[1]))
+ d = np.pad(S.data[layer], pad_width=(
+ (0, pad), (0, 0), (0, 0)), mode='constant')
+ # walk along the q1/qz axis and roll the q3/qx axis. the
+ # array is padded at the right (high indices) so the
+ # actual shifts have to be positive.
+ for i in range(shape[1]):
+ if input_system == 'cartesian':
+ # roll the qx axis in the positive direction for more
+ # positive qz
+ shift = int(round(i * self.sintheta))
+ elif input_system == 'natural':
+ # roll the q3 axis in the positive direction for more
+ # negative q1
+ shift = int(round((shape[1] - i) * self.sintheta))
+ d[:, i, :] = np.roll(d[:, i, :], shift, axis=0)
+ # optionally crop the new array
+ if keep_dims:
+ d = d[pad / 2:shape[0] + pad / 2, :, :]
+ # construct a new Storage
+ if input_system == 'cartesian':
+ new_psize = S.psize * np.array([1, 1 / self.costheta, 1])
+ elif input_system == 'natural':
+ new_psize = S.psize * np.array([1, self.costheta, 1])
+ old_center = S.origin + S.psize * shape / 2
+ S_out = C_.new_storage(ID='S0', psize=new_psize,
+ padonly=False, shape=None)
+ V = View(container=C_, storageID='S0', coord=old_center,
+ shape=d.shape, psize=new_psize)
+ S_out.reformat()
+ # should use the view here, but there is a bug (#74)
+ S_out.data[0] = d
+
+ return S_out
+
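
# --- Editor's illustration (not part of the diff) ---------------------------
# The nearest-neighbour skew that coordinate_shift() above performs: walk
# along one axis and roll another by a shift that grows with the index.
# Toy numbers only.
import numpy as np

d = np.zeros((4, 8))
d[:, 0] = 1                                # a straight vertical "edge"
for i in range(d.shape[0]):
    d[i] = np.roll(d[i], i)                # shift grows with i -> shear
print(d)                                   # the edge now runs diagonally
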
+ def prepare_3d_probe(self, S_2d, auto_center=False, system='cartesian', layer=0):
+ """
+
+ Prepare a three-dimensional probe from a two-dimensional incident wavefront.
+
+ Parameters
+ ----------
+ S_2d : Two-dimensional storage holding a (typically complex)
+ wavefront. The absolute positions are not important, only the
+ pixel sizes. The first index is interpreted as the quasi-
+ vertical axis zi (coincident with z at theta=0), and the second
+ index as yi (coincident always with y and r2).
+
+        auto_center : If True, the input wavefront is centered at the
+ intensity center of mass.
+
+ system : `cartesian` or `natural`, the coordinate system of the
+ returned object.
+
+ layer : which layer of the 2d probe to use
+
+ """
+
+ if auto_center:
+ raise NotImplementedError
+
+ # storage in natural space to fill with the probe
+ C = Container(data_type=S_2d.dtype, data_dims=3)
+ if system == 'natural':
+ View(C, storageID='S0000', psize=self.resolution, shape=self.shape)
+ S_3d = C.storages['S0000']
+ elif system == 'cartesian':
+ View(C, storageID='S0000', psize=self.resolution * np.array([self.costheta, 1, 1]), shape=self.shape)
+ S_3d = C.storages['S0000']
+
+ # center both storages (meaning that the central pixel is the
+ # physical origin)
+ S_3d.center = np.array(S_3d.shape[1:]) / 2
+ S_2d.center = np.array(S_2d.shape[1:]) / 2
+
+ # find the physical coordinates (zi, yi) of each point in the 3d probe
+ if system == 'natural':
+ r3, r1, r2 = S_3d.grids()
+ r3, r1, r2 = r3[0], r1[0], r2[0] # layer 0
+ x, z, y = self.transformed_grid((r3, r1, r2), input_space='real', input_system='natural')
+ zi = x * self.sintheta + z * (1/self.costheta - self.sintheta * self.tantheta)
+ yi = y
+ elif system == 'cartesian':
+ x, z, y = S_3d.grids()
+ x, z, y = x[0], z[0], y[0]
+ zi = x * self.sintheta + z * (1/self.costheta - self.sintheta * self.tantheta)
+ yi = y
+
+ # find the corresponding indices into S.data[layer]
+ zi[:] = zi / S_2d.psize[1] + S_2d.center[1]
+ yi[:] = yi / S_2d.psize[1] + S_2d.center[1]
+
+ # interpolate
+ if np.iscomplexobj(S_2d.data):
+ S_3d.data[0][:] = map_coordinates(np.abs(S_2d.data[layer]), (zi, yi))
+ S_3d.data[0][:] *= np.exp(1j * map_coordinates(np.angle(S_2d.data[layer]), (zi, yi)))
+ else:
+ S_3d.data[0][:] = map_coordinates(S_2d.data[layer], (zi, yi))
+
+ return S_3d
+
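
# --- Editor's illustration (not part of the diff) ---------------------------
# Interpolating a complex field via separate modulus and phase maps, as
# prepare_3d_probe() does above, since map_coordinates (at least in the
# scipy versions targeted here) does not accept complex arrays.
import numpy as np
from scipy.ndimage.interpolation import map_coordinates

field = np.exp(1j * np.linspace(0, np.pi, 16)).reshape(4, 4)
coords = np.array([[1.5], [2.5]])          # one fractional sample point (row, col)
val = (map_coordinates(np.abs(field), coords)
       * np.exp(1j * map_coordinates(np.angle(field), coords)))
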
+ def probe_extent_vs_fov(self):
+ """
+ Calculates the extent of the field of view as seen from the
+ incoming beam. This is the size of the smallest probe (along its
+ vertical direction zi and horizontal direction yi) which
+ completely covers the field of view.
+
+ Returns: zi_extent, yi_extent
+ """
+ g = self
+ b, a, c = g.shape * g.resolution
+ ap = a + b * g.sintheta
+ bp = b * g.costheta
+ y = np.sqrt(ap**2 + bp**2)
+ gamma = np.arcsin(ap / y)
+ phi = (np.pi / 2 - gamma - np.deg2rad(g.theta_bragg))
+ zi_extent = np.cos(phi) * y
+ yi_extent = c
+
+ return zi_extent, yi_extent
+
+
+class BasicBragg3dPropagator(object):
+ """
+ Just a wrapper for the n-dimensional FFT, no other Bragg-specific
+ magic applied here (at the moment).
+ """
+
+ def __init__(self, geo=None, ffttype='numpy'):
+ self.geo = geo
+ if ffttype == 'numpy':
+ self.fft = np.fft.fftn
+ self.ifft = np.fft.ifftn
+ elif ffttype == 'fftw':
+ import pyfftw
+ self.fft = pyfftw.interfaces.numpy_fft.fftn
+ self.ifft = pyfftw.interfaces.numpy_fft.ifftn
+
+ def update(self):
+ """
+ Update any internal buffers.
+ """
+ return
+
+ def fw(self, a):
+ return np.fft.fftshift(self.fft(a))
+
+ def bw(self, a):
+ return self.ifft(np.fft.ifftshift(a))
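
# --- Editor's illustration (not part of the diff) ---------------------------
# The skew matrices set up in Geo_Bragg.update() above come in mutually
# inverse pairs; a quick numpy check for the real-space pair.
import numpy as np

theta = np.deg2rad(6.89)                   # default theta_bragg
sin, cos = np.sin(theta), np.cos(theta)
A_r3r1r2 = np.array([[1 / cos, 0, 0], [-sin / cos, 1, 0], [0, 0, 1]])
A_xzy = np.array([[cos, 0, 0], [sin, 1, 0], [0, 0, 1]])
assert np.allclose(np.dot(A_r3r1r2, A_xzy), np.eye(3))
assert np.allclose(np.dot(A_xzy, A_r3r1r2), np.eye(3))
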
diff --git a/ptypy/core/illumination.py b/ptypy/core/illumination.py
index 3445c4700..8c2b563fb 100644
--- a/ptypy/core/illumination.py
+++ b/ptypy/core/illumination.py
@@ -16,133 +16,205 @@
from ..core import geometry
from ..utils.verbose import logger
from .. import resources
+from ..utils.descriptor import EvalDescriptor
TEMPLATES = dict()
-DEFAULT_aperture = u.Param(
- # Aperture form (str). One of:
- # - None: no aperture, this may be useful for nearfield
- # - 'rect': rectangular aperture
- # - 'circ': circular aperture
- form='circ',
- # Static Noise in the transparent part of the aperture (float).
- # Can act like a diffuser but has no wavelength dependency
- # Can be either:
- # - None : no noise
- # - 2-tuple: noise in phase (amplitude (rms), minimum feature size)
- # - 4-tuple: noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
- diffuser=None,
- # Aperture width or diameter (float).
- # May also be a tuple (vertical, horizontal) for size
- # in case of an asymmetric aperture
- size=None,
- # Edge width of aperture in pixel to suppress aliasing (int).
- edge=2,
- # Size of central stop as a fraction of aperture.size (float).
- # If not None: places a central beam stop in aperture.
- # The value given here is the fraction of the stop compared to size
- central_stop=None,
- # Offset between center of aperture and optical axes (float).
- # May also be a tuple (vertical, horizontal) for size
- # in case of an asymmetric offset
- offset=0.,
- # Rotate aperture by this value (float).
- rotate=0.,
-)
-""" Default illumination.aperture parameters.
- See :py:data:`.scan.illumination.aperture` and a short listing below """
-
-DEFAULT_propagation = u.Param(
- # Parameters for propagation after aperture plane
- # Propagation to focus takes precedence to parallel propagation
- # if focused is not None
- # Parallel propagation distance (float).
- # If None or 0 : No parallel propagation
- parallel=None,
- # Propagation distance from aperture to focus (float).
- # If None or 0 : No focus propagation
- focussed=None,
- # Focal spot diameter (float).
- spot_size=None,
- # Antialiasing factor [not implemented] (float).
- # Antialiasing factor used when generating the probe.
- # (numbers larger than 2 or 3 are memory hungry)
- antialiasing=None,
-)
-
-DEFAULT_diversity = u.Param(
- # Noise added on top add the end of initialisation (float).
- # Can be either:
- # - None : no noise
- # - 2-tuple: noise in phase (amplitude (rms), minimum feature size)
- # - 4-tuple: noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
- noise=None,
- shift=None,
- power=1.0,
-)
-
-DEFAULT = u.Param(
- override=None,
- # User-defined probe (if type is None) (str).
- # `None`, path to a *.ptyr file or any python evaluable statement
- # yielding a 3d numpy array, If `None` illumination is modeled.
- model=None,
- # Number of photons in the incident illumination (int, float, None).
- photons=None,
- recon=u.Param(
- rfile='*.ptyr',
- ID=None,
- layer=None,
- ),
- stxm=u.Param(
- # Label of the scan of whose diffraction data to initialize stxm.
- # If None, use own scan_label
- label=None,
- ),
- # Diversity parameters, can be None = no diversity
- diversity=DEFAULT_diversity,
- # Aperture parameters, can be None = no aperture
- aperture=DEFAULT_aperture,
- # Propagation parameters, can be None = no propagation
- propagation=DEFAULT_propagation,
-)
-""" Default illumination parameters. See :py:data:`.scan.illumination`
- and a short listing below """
-
-__all__ = ['DEFAULT', 'init_storage', 'aperture']
-
-
-def rectangle(grids, dims=None, ew=2):
- if dims is None:
- dims = (grids.shape[-2] / 2., grids.shape[-1] / 2.)
- v, h = dims
- V, H = grids
- return (u.smooth_step(-np.abs(V) + v/2, ew)
- * u.smooth_step(-np.abs(H) + h/2, ew))
-
-
-def ellipsis(grids, dims=None, ew=2):
- if dims is None:
- dims = (grids.shape[-2] / 2., grids.shape[-1] / 2.)
- v, h = dims
- V, H = grids
- return u.smooth_step(
- 0.5 - np.sqrt(V**2/v**2 + H**2/h**2), ew/np.sqrt(v * h))
+# Local, module-level defaults. These can be appended to the defaults of
+# other classes.
+illumination_desc = EvalDescriptor('illumination')
+illumination_desc.from_string(r"""
+ [aperture]
+ type = Param
+ default =
+ help = Beam aperture parameters
+
+ [aperture.rotate]
+ type = float
+ default = 0.
+ help = Rotate aperture by this value
+ doc =
+
+ [aperture.central_stop]
+ help = size of central stop as a fraction of aperture.size
+ default = None
+ doc = If not None: places a central beam stop in aperture. The value given here is the fraction of the beam stop compared to `size`
+ lowlim = 0.
+ uplim = 1.
+ userlevel = 1
+ type = float
+
+ [aperture.diffuser]
+    help = Noise in the transparent part of the aperture
+ default = None
+ doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+ userlevel = 2
+ type = tuple
+
+ [aperture.edge]
+ help = Edge width of aperture (in pixels!)
+ type = float
+ default = 2.0
+ userlevel = 2
+
+ [aperture.form]
+ default = circ
+ type = None, str
+ help = One of None, 'rect' or 'circ'
+ doc = One of:
+ - ``None`` : no aperture, this may be useful for nearfield
+ - ``'rect'`` : rectangular aperture
+ - ``'circ'`` : circular aperture
+ choices = None,'rect','circ'
+ userlevel = 2
+
+ [aperture.offset]
+ default = 0.
+ type = float, tuple
+ help = Offset between center of aperture and optical axes
+ doc = May also be a tuple (vertical,horizontal) for size in case of an asymmetric offset
+ userlevel = 2
+
+ [aperture.size]
+ default = None
+ type = float, tuple
+ help = Aperture width or diameter
+ doc = May also be a tuple *(vertical,horizontal)* in case of an asymmetric aperture
+ lowlim = 0.
+ userlevel = 0
+
+ [diversity]
+ default = None
+ type = Param, None
+ help = Probe mode(s) diversity parameters
+ doc = Can be ``None`` i.e. no diversity
+ userlevel = 1
+
+ [diversity.noise]
+ default = (0.5,1.0)
+ type = tuple
+ help = Noise in each non-primary mode of the illumination.
+ doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+ userlevel = 1
+
+ [diversity.power]
+ default = 0.1
+ type = tuple, float
+ help = Power of modes relative to main mode (zero-layer)
+ uplim = 1.0
+ lowlim = 0.0
+ userlevel = 1
+
+ [diversity.shift]
+ default = None
+ type = float
+ help = Lateral shift of modes relative to main mode
+ doc = **[not implemented]**
+ userlevel = 2
+
+ [model]
+ default = None
+ type = str, ndarray
+ help = Type of illumination model
+ doc = One of:
+       - ``None`` : model initialization defaults to a flat array filled with the specified number of photons
+       - ``'recon'`` : load model from previous reconstruction, see `recon` parameters
+       - ``'stxm'`` : estimate model from autocorrelation of mean diffraction data
+       - *<resource>* : one of ptypy's internal image resource strings
+       - *<template>* : one of the templates in the illumination module
+
+       In script, you may pass a numpy.ndarray here directly as the model. It is considered an incoming wavefront and will be propagated according to `propagation`, with an optional `aperture` applied first.
+ userlevel = 0
+
+ [photons]
+ type = int, float, None
+ default = None
+ help = Number of photons in the incident illumination
+ doc = A value specified here will take precedence over calculated statistics from the loaded data.
+ lowlim = 0
+ userlevel = 2
+
+ [propagation]
+ type = Param
+ default =
+ help = Parameters for propagation after aperture plane
+    doc = Propagation to focus takes precedence over parallel propagation if `focussed` is not ``None``
+
+ [propagation.antialiasing]
+ default = 1
+ type = float
+ help = Antialiasing factor
+ doc = Antialiasing factor used when generating the probe. (numbers larger than 2 or 3 are memory hungry)
+ **[Untested]**
+ userlevel = 2
+
+ [propagation.focussed]
+ default = None
+ type = None, float
+ lowlim =
+ help = Propagation distance from aperture to focus
+ doc = If ``None`` or ``0`` : No focus propagation
+ userlevel = 0
+
+ [propagation.parallel]
+ default = None
+ type = None, float
+ help = Parallel propagation distance
+ doc = If ``None`` or ``0`` : No parallel propagation
+ userlevel = 0
+
+ [propagation.spot_size]
+ default = None
+ type = None, float
+ help = Focal spot diameter
+ doc = If not ``None``, this parameter is used to generate the appropriate aperture size instead of :py:data:`size`
+ lowlim = 0
+ userlevel = 1
+
+ [recon]
+ default =
+ type = Param
+ help = Parameters to load from previous reconstruction
+
+ [recon.label]
+ default = None
+ type = None, str
+ help = Scan label of diffraction that is to be used for probe estimate
+ doc = If ``None``, own scan label is used
+ userlevel = 1
+
+ [recon.rfile]
+ default = \*.ptyr
+ type = str
+ help = Path to a ``.ptyr`` compatible file
+ userlevel = 0
+ """)
+
+DEFAULT = illumination_desc.make_default(99)
+DEFAULT_aperture = DEFAULT.aperture
+
+__all__ = ['init_storage', 'aperture', 'DEFAULT']
def aperture(A, grids=None, pars=None, **kwargs):
"""
Creates an aperture in the shape and dtype of `A` according
to x,y-grids `grids`. Keyword Arguments may be any of
- :any:`DEFAULT`.aperture.
+ (*FIXME* link to kwargs)
Parameters
----------
A : ndarray
Model array (at least 2-dimensional) to place aperture on top.
- pars: dict or ptypy.utils.Param
- Parameters, see :any:`DEFAULT`.aperture
+ pars : dict or ~ptypy.utils.Param
+ Parameters, *FIXME* link to parameters
grids : ndarray
Cartesian coordinate grids, if None, they will be created with
@@ -183,9 +255,9 @@ def aperture(A, grids=None, pars=None, **kwargs):
grids[1] = cgrid.imag / psize[1]
if str(p.form) == 'circ':
- apert = lambda x: ellipsis(grids, x, p.edge)
+ apert = lambda x: u.ellipsis(grids, x, p.edge)
elif str(p.form) == 'rect':
- apert = lambda x: rectangle(grids, x, p.edge)
+ apert = lambda x: u.rectangle(grids, x, p.edge)
else:
raise NotImplementedError(
'Only elliptical `circ` or rectangular `rect` apertures are'
@@ -210,22 +282,24 @@ def init_storage(storage, pars, energy=None, **kwargs):
Parameters
----------
- storage : ptypy.core.Storage
+ storage : ~ptypy.core.classes.Storage
A :any:`Storage` instance in the *probe* container of :any:`Ptycho`
pars : Param
Parameter structure for creating a probe / illumination.
- See :any:`DEFAULT`
+ See :py:data:`~ptypy.core.illumination.DEFAULT`
Also accepted as argument:
- * string giving the filename of a previous reconstruction to
- extract storage from.
- * string giving the name of an available TEMPLATE
- * FIXME: document other string cases.
- * numpy array: interpreted as initial illumination.
+
+ * string giving the filename of a previous reconstruction to
+ extract storage from.
+ * string giving the name of an available TEMPLATE
+ * FIXME: document other string cases.
+ * numpy array: interpreted as initial illumination.
energy : float, optional
Energy associated with this storage. If None, tries to retrieve
the energy from the already initialized ptypy network.
+
"""
s = storage
prefix = "[Object %s] " % str(s.ID)
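
# --- Editor's illustration (not part of the diff) ---------------------------
# The descriptor pattern introduced above, on a toy parameter. Assumes the
# EvalDescriptor API exactly as used in this diff (from_string, make_default).
from ptypy.utils.descriptor import EvalDescriptor

desc = EvalDescriptor('demo')
desc.from_string("""
[edge]
type = float
default = 2.0
help = Edge width in pixels
""")
defaults = desc.make_default(99)           # depth-99 Param tree, as above
print(defaults.edge)                       # -> 2.0
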
diff --git a/ptypy/core/manager.py b/ptypy/core/manager.py
index eeb71ce45..2dc154984 100644
--- a/ptypy/core/manager.py
+++ b/ptypy/core/manager.py
@@ -5,9 +5,8 @@
The main task of this module is to prepare the data structure for
reconstruction, taking a data feed and connecting individual diffraction
measurements to the other containers. The way this connection is done
-is defined by the user through a model definition. The connections are
-described by the POD objects. This module also takes care of initializing
-containers according to user-defined rules.
+is defined by ScanModel and its subclasses. The connections are
+described by the POD objects.
This file is part of the PTYPY package.
@@ -15,253 +14,398 @@
:license: GPLv2, see LICENSE for details.
"""
import numpy as np
-import illumination
-import sample
-import geometry
-import model
-import xy
-import data
+import time
+from collections import OrderedDict
+from . import illumination
+from . import sample
+from . import geometry
+from . import data
from .. import utils as u
from ..utils.verbose import logger, headerline, log
-from classes import *
-from classes import DEFAULT_ACCESSRULE
-from classes import MODEL_PREFIX
+from .classes import *
+from .classes import DEFAULT_ACCESSRULE
+from .classes import MODEL_PREFIX
from ..utils import parallel
+from ..utils.descriptor import EvalDescriptor
+from .. import defaults_tree
# Please set these globally later
FType = np.float64
CType = np.complex128
-__all__ = ['DEFAULT', 'ModelManager']
-
-DESCRIPTION = u.Param()
-
-DEFAULT_coherence = u.Param(
- # Number of mutually spatially incoherent probes per diffraction pattern
- num_probe_modes=1,
- # Number of mutually spatially incoherent objects per diffraction pattern
- num_object_modes=1,
- energies=[1.0],
- # List of energies / wavelength relative to mean energy / wavelength
- spectrum=[1.0],
- # If True, the same probe is used for all energies
- probe_dispersion=None,
- # If True, the same object is used for all energies
- object_dispersion=None
-)
-
-DEFAULT_sharing = u.Param(
- # (69) number of scans per object
- # scan_per_probe = 1,
- # (70) number of scans per probe
- # scan_per_object = 1,
- # (71) `scan_label` of scan for the shared object
- object_share_with=None,
- # (72) contribution to the shared object
- object_share_power=1,
- # (73) `scan_label` of scan for the shared probe
- probe_share_with=None,
- # (74) contribution to the shared probe
- probe_share_power=1,
- # Empty Probe sharing switch
- EP_sharing=False,
-)
-
-DEFAULT = u.Param(
- # All information about the probe
- illumination=u.Param(),
- # All information about the object
- sample=u.Param(),
- # Geometry of experiment - most of it provided by data
- geometry=geometry.DEFAULT.copy(),
- xy=u.Param(),
- # Information on scanning parameters to yield position arrays
- # If positions are provided by the DataScan object, set xy.scan_type to None
- coherence=DEFAULT_coherence.copy(),
- sharing=DEFAULT_sharing.copy(),
- # if_conflict_use_meta=False,
- # Take geometric and position information from incoming meta_data
- # if possible parameters are specified both in script and in meta data
- # source=None,
- # For now only used to declare an empty scan
- tags="",
-)
+__all__ = ['ModelManager', 'ScanModel', 'Full', 'Vanilla', 'Bragg3dModel']
-class ModelManager(object):
+@defaults_tree.parse_doc('scan.ScanModel')
+class ScanModel(object):
"""
- Manages ptypy objects creation and update.
-
- The main task of ModelManager is to follow the rules for a given
- reconstruction model and create:
-
- - the probe, object, exit, diff and mask containers
- - the views
- - the PODs
-
- A ptychographic problem is defined by the combination of one or
-    multiple scans. ModelManager encapsulates
-    scan-specific elements in .scans and .scans_pars
+ Abstract base class for models. Override at least these methods:
+ _create_pods(self)
+ _initialize_geo(self, common)
+ _initialize_probe(self, probe_ids)
+ _initialize_object(self, object_ids)
+
+ Defaults:
+
+ [tags]
+ default = ['dummy']
+    help = Comma-separated string tags describing the data input
+ doc = [deprecated?]
+ type = list
+ userlevel = 2
+
+ [propagation]
+ type = str
+ default = farfield
+ help = Propagation type
+ doc = Either "farfield" or "nearfield"
+ userlevel = 1
+
+ [data]
+ default =
+ type = @scandata.*
+ help = Link to container for data preparation
+ doc =
+
+ [data.name]
+ default =
+ type = str
+ help = Name of the PtyScan subclass to use
+
+ [illumination]
+ type = Param
+ default =
+ help = Container for probe initialization model
+
+ [sample]
+ type = Param
+ default =
+ help = Container for sample initialization model
- Note
- ----
- This class is densely connected to :any:`Ptycho` the separation
- in two classes is more history than reason and these classes may get
- merged in future releases
"""
- DEFAULT = DEFAULT
- """ Default scan parameters. See :py:data:`.scan`
- and a short listing below """
-
- _PREFIX = MODEL_PREFIX
-
- def __init__(self, ptycho, pars=None, scans=None, **kwargs):
+ def __init__(self, ptycho=None, pars=None, label=None):
"""
+        Create scan model object.
+
Parameters
----------
- ptycho: Ptycho
- The parent Ptycho object
-
pars : dict or Param
- Input parameters (see :py:attr:`DEFAULT`)
- If None uses defaults
+ Input parameter tree.
- scans : dict or Param
- Scan-specific parameters, Values should be dict Param that
- follow the structure of `pars`.
- If None, tries in ptycho.p.scans else becomes empty dict
+ ptycho : Ptycho instance
+ Ptycho instance to which this scan belongs
+
+ label : str
+ Unique label
"""
- # Initialize the input parameters
- p = u.Param(self.DEFAULT.copy())
+        # Load default parameter structure and update it from the input
+ p = self.DEFAULT.copy(99)
p.update(pars, in_place_depth=4)
self.p = p
-
+ self.label = label
self.ptycho = ptycho
- # Abort if ptycho is None:
- if self.ptycho is None:
- return
-
- # Prepare the list of scan_labels (important because they are sorted)
- # FIXME: BE I don't think this is the way to go. This is only needed for sharing
- # For the user it might be better to mark the sharing behavior directly
- self.scan_labels = []
-
- # Store scan specifics
- if scans is not None:
- self.scans_pars = scans
- else:
- self.scans_pars = self.ptycho.p.get('scans', u.Param())
+ # Create Associated PtyScan object
+ self.ptyscan = self.makePtyScan(self.p.data)
+
+ # Initialize instance attributes
+ self.mask = None
+ self.diff = None
+ self.positions = []
+ self.mask_views = []
+ self.diff_views = []
+ self.new_positions = None
+ self.new_diff_views = None
+ self.new_mask_views = None
+
+ self.geometries = []
+ self.shape = None
+ self.psize = None
+
+ # Object flags and constants
+ self.containers_initialized = False
+ self.data_available = True
+ self.CType = CType
+ self.FType = FType
+ self.frames_per_call = 100000
- # Scan dictionary
- # This will store everything scan specific and will hold
- # references to the storages of the respective scan.
- self.scans = u.Param()
+ @classmethod
+ def makePtyScan(cls, pars):
+ """
+ Factory for PtyScan object. Return an instance of the appropriate PtyScan
+ subclass based on the input parameters.
- # Update self.scans from information already available
- for label in self.scans_pars.keys():
- self.prepare_scan(label)
+ Parameters
+ ----------
+ pars: dict or Param
+ Input parameters according to :py:data:`.scan.data`.
+ """
- # Sharing dictionary that stores sharing behavior
- self.sharing = {'probe_ids': {}, 'object_ids': {}}
+ # Extract information on the type of object to build
+ name = pars.name
- # Initialize sharing rules for POD creations
- self.sharing_rules = model.parse_model(p.sharing, self.sharing)
+ from .. import experiment
- # This start is a little arbitrary
- self.label_idx = len(self.scans)
+ if name in (u.all_subclasses(data.PtyScan, names=True)) \
+ or name == 'PtyScan':
+ ps_class = experiment.PTYSCANS.get(name, None)
+ if ps_class is None:
+ raise RuntimeError('Unknown PtyScan subclass: "%s". Did you import it?' % name)
+ logger.info('Scan will be prepared with the PtyScan subclass "%s"' % name)
+ ps_instance = ps_class(pars)
+ else:
+ raise RuntimeError('Could not manage source "%s"' % str(name))
- def _to_dict(self):
- # Delete the model class. We do not really need to store it.
- del self.sharing_rules
- return self.__dict__.copy()
+ return ps_instance
- @classmethod
- def _from_dict(cls, dct):
- # create instance
- inst = cls(None, None)
- # overwrite internal dictionary
- inst.__dict__ = dct
- return inst
-
- def prepare_scan(self, label=None):
+ def new_data(self):
"""
- Prepare scan specific parameters and create a label if necessary.
+ Feed data from ptyscan object.
+ :return: None if no data is available, True otherwise.
"""
- if label is None:
- label = 'Scan%05d' % self.label_idx
- self.label_idx += 1
-
- try:
- # Return the scan if already prepared
- return self.scans[label]
- except KeyError:
- # Get standard parameters
- # Create a dictionary specific for the scan.
- scan = u.Param()
- self.scans[label] = scan
- scan.label = label
- # Make a copy of model dictionary
- scan.pars = self.p.copy(depth=5)
-
- # Look for a scan-specific entry in the input parameters
- scan_specific_parameters = self.scans_pars.get(label, None)
- scan.pars.update(scan_specific_parameters, in_place_depth=5)
- # Prepare the tags
- t = scan.pars.tags
- if str(t) == t:
- scan.pars.tags = [tag.strip().lower() for tag in t.split(',')]
-
- # Also create positions
- scan.pos_theory = xy.from_pars(scan.pars.xy)
+        self._t = time.time()
+        def report_time():
+            logger.info('Time %.2f' % (time.time() - self._t))
+            self._t = time.time()
+
+ # Initialize if that has not been done yet
+ if not self.ptyscan.is_initialized:
+ self.ptyscan.initialize()
+
+ report_time()
+ # Get data
+ logger.info('Importing data from scan %s.' % self.label)
+ dp = self.ptyscan.auto(self.frames_per_call)
+
+ self.data_available = (dp != data.EOS)
+ logger.debug(u.verbose.report(dp))
+
+ if dp == data.WAIT or not self.data_available:
+ return None
+
+ label = self.label
+ report_time()
+        logger.info('Creating views and storages.')
+ # Prepare the scan geometry if not already done.
+ if not self.geometries:
+ self._initialize_geo(dp['common'])
+
+ # Create containers if not already done
+ if not self.containers_initialized:
+ self._initialize_containers()
+
+ # Generalized shape which works for 2d and 3d cases
+ sh = (1,) + tuple(self.shape)
+
+ # Storage generation if not already existing
+ if self.diff is None:
+ # This scan is brand new so we create storages for it
+ self.diff = self.Cdiff.new_storage(shape=sh, psize=self.psize, padonly=True,
+ layermap=None)
+ old_diff_views = []
+ old_diff_layers = []
+ else:
+            # Ok, storage exists already. Views most likely also. We store
+            # them so we can update their status later.
+ old_diff_views = self.Cdiff.views_in_storage(self.diff, active_only=False)
+ old_diff_layers = []
+ for v in old_diff_views:
+ old_diff_layers.append(v.layer)
+
+ # Same for mask
+ if self.mask is None:
+ self.mask = self.Cmask.new_storage(shape=sh, psize=self.psize, padonly=True,
+ layermap=None)
+ old_mask_views = []
+ old_mask_layers = []
+ else:
+ old_mask_views = self.Cmask.views_in_storage(self.mask, active_only=False)
+ old_mask_layers = []
+ for v in old_mask_views:
+ old_mask_layers.append(v.layer)
+
+ # this is a hack for now
+ dp = self._new_data_extra_analysis(dp)
+ if dp is None:
+ return None
+
+ # Prepare for View generation
+ AR_diff_base = DEFAULT_ACCESSRULE.copy()
+ AR_diff_base.shape = self.shape
+ AR_diff_base.coord = 0.0
+ AR_diff_base.psize = self.psize
+ AR_mask_base = AR_diff_base.copy()
+ AR_diff_base.storageID = self.diff.ID
+ AR_mask_base.storageID = self.mask.ID
+
+ diff_views = []
+ mask_views = []
+ positions = []
+
+ # First pass: create or update views and reformat corresponding storage
+ for dct in dp['iterable']:
+
+ index = dct['index']
+ active = dct['data'] is not None
+
+ pos = dct.get('position')
+
+ if pos is None:
+ logger.warning('No position set to scan point %d of scan %s' % (index, label))
+
+ AR_diff = AR_diff_base
+ AR_mask = AR_mask_base
+ AR_diff.layer = index
+ AR_mask.layer = index
+ AR_diff.active = active
+ AR_mask.active = active
+
+ # check here: is there already a view to this layer? Is it active?
+ try:
+ old_view = old_diff_views[old_diff_layers.index(index)]
+ old_active = old_view.active
+ old_view.active = active
+
+ logger.debug(
+ 'Diff view with layer/index %s of scan %s exists. \nSetting view active state from %s to %s' % (
+ index, label, old_active, active))
+ except ValueError:
+ v = View(self.Cdiff, accessrule=AR_diff)
+ diff_views.append(v)
+ logger.debug(
+ 'Diff view with layer/index %s of scan %s does not exist. \nCreating view with ID %s and set active state to %s' % (
+ index, label, v.ID, active))
+ # append position also
+ positions.append(pos)
+
+ try:
+ old_view = old_mask_views[old_mask_layers.index(index)]
+ old_view.active = active
+ except ValueError:
+ v = View(self.Cmask, accessrule=AR_mask)
+ mask_views.append(v)
+
+        # Now we should have the right views to these storages. Let them
+        # reformat(), which creates the right sizes and the datalist access.
+ self.diff.reformat()
+ self.mask.reformat()
+ report_time()
+ logger.info('Inserting data in diff and mask storages')
+
+ # Second pass: copy the data
+        # Benchmark: scales quadratically (!!) with number of frames per node.
+ for dct in dp['iterable']:
+ if dct['data'] is None:
+ continue
+ diff_data = dct['data']
+ idx = dct['index']
+
+ # FIXME: Find a more transparent way than this.
+ self.diff.data[self.diff.layermap.index(idx)][:] = diff_data
+ self.mask.data[self.mask.layermap.index(idx)][:] = dct.get('mask', np.ones_like(diff_data))
+
+ self.diff.nlayers = parallel.MPImax(self.diff.layermap) + 1
+ self.mask.nlayers = parallel.MPImax(self.mask.layermap) + 1
+
+ self.new_positions = positions
+ self.new_diff_views = diff_views
+ self.new_mask_views = mask_views
+ self.positions += positions
+ self.diff_views += diff_views
+ self.mask_views += mask_views
+ report_time()
+ logger.info('Data organization complete, updating stats')
+
+ self._update_stats()
+
+ # Create new views on object, probe, and exit wave, and connect
+ # these through new pods.
+ new_pods, new_probe_ids, new_object_ids = self._create_pods()
+ for pod_ in new_pods:
+ if pod_.model is not None:
+ continue
+ pod_.model = self
+ logger.info('Process %d created %d new PODs, %d new probes and %d new objects.' % (
+ parallel.rank, len(new_pods), len(new_probe_ids), len(new_object_ids)), extra={'allprocesses': True})
+
+ report_time()
+ # Adjust storages
+ self.ptycho.probe.reformat(True)
+ self.ptycho.obj.reformat(True)
+ self.ptycho.exit.reformat()
+
+ self._initialize_probe(new_probe_ids)
+ self._initialize_object(new_object_ids)
+ self._initialize_exit(new_pods)
+
+ return True
+
+ def _new_data_extra_analysis(self, dp):
+ """
+ This is a hack for 3d Bragg. Extra analysis on the incoming
+        data package. Returns modified dp, or None if no complete data
+ is available for pod creation.
+ """
+ return dp
- return scan
+ def _initialize_containers(self):
+ """
+ Initialize containers appropriate for the model. This
+ implementation works for 2d models, override if necessary.
+ """
+ if self.ptycho is None:
+ # Stand-alone use
+ self.Cdiff = Container(self, ID='Cdiff', data_type='real')
+ self.Cmask = Container(self, ID='Cmask', data_type='bool')
+ else:
+ # Use with a Ptycho instance
+ self.Cdiff = self.ptycho.diff
+ self.Cmask = self.ptycho.mask
+ self.containers_initialized = True
- def _update_stats(self, scan, mask_views=None, diff_views=None):
+ @staticmethod
+ def _initialize_exit(pods):
"""
- (Re)compute the statistics for the data stored in a scan.
+ Initializes exit waves using the pods.
+ """
+ logger.info('\n' + headerline('Creating exit waves', 'l'))
+ for pod in pods:
+ if not pod.active:
+ continue
+ pod.exit = pod.probe * pod.object
+ def _update_stats(self):
+ """
+ (Re)compute the statistics for the data stored in the scan.
These statistics are:
* Itotal: The integrated power per frame
* max/min/mean_frame: pixel-by-pixel maximum, minimum and
average among all frames.
"""
- if mask_views is None:
- mask_views = scan.mask_views
- if diff_views is None:
- diff_views = scan.diff_views
+ mask_views = self.mask_views
+ diff_views = self.diff_views
+
+        # Nothing to do if no views exist
+        if not self.diff:
+            return
# Reinitialize containers
Itotal = []
- # DPCX = []
- # DPCY = []
- max_frame = np.zeros(scan.diff_views[0].shape)
+ max_frame = np.zeros(self.diff_views[0].shape)
min_frame = np.zeros_like(max_frame)
mean_frame = np.zeros_like(max_frame)
norm = np.zeros_like(max_frame)
- # Useful quantities
- # sh0, sh1 = scan.geo.N
- # x = np.arange(s1, dtype=float)
- # y = np.arange(s0, dtype=float)
-
for maview, diview in zip(mask_views, diff_views):
if not diview.active:
continue
dv = diview.data
m = maview.data
v = m * dv
- # pods may not yet exist, since mask & data are not linked yet
- # mv = dv.pod.ma_view.data
- # S0 = np.sum(mv*dv, axis=0)
- # S1 = np.sum(mv*dv, axis=1)
- # I0 = np.sum(S0)
Itotal.append(np.sum(v))
- # DPCX.append(np.sum(S0*x)/I0 - sh1/2.)
- # DPCY.append(np.sum(S1*y)/I0 - sh0/2.)
max_frame[max_frame < v] = v[max_frame < v]
min_frame[min_frame > v] = v[min_frame > v]
mean_frame += v
@@ -269,393 +413,479 @@ def _update_stats(self, scan, mask_views=None, diff_views=None):
parallel.allreduce(mean_frame)
parallel.allreduce(norm)
- parallel.allreduce(max_frame, parallel.MPI.MAX)
- parallel.allreduce(max_frame, parallel.MPI.MIN)
+ if parallel.MPIenabled:
+ parallel.allreduce(max_frame, parallel.MPI.MAX)
+ parallel.allreduce(min_frame, parallel.MPI.MIN)
mean_frame /= (norm + (norm == 0))
- scan.diff.norm = norm
- scan.diff.max_power = parallel.MPImax(Itotal)
- scan.diff.tot_power = parallel.MPIsum(Itotal)
- scan.diff.pbound_stub = scan.diff.max_power / mean_frame.shape[-1]**2
- scan.diff.mean = mean_frame
- scan.diff.max = max_frame
- scan.diff.min = min_frame
-
- info = {'label': scan.label,
- 'max': scan.diff.max_power,
- 'tot': scan.diff.tot_power,
- 'mean': mean_frame.sum()}
+ self.diff.norm = norm
+ self.diff.max_power = parallel.MPImax(Itotal)
+ self.diff.tot_power = parallel.MPIsum(Itotal)
+ self.diff.mean_power = self.diff.tot_power / (len(diff_views) * mean_frame.shape[-1]**2)
+ self.diff.pbound_stub = self.diff.max_power / mean_frame.shape[-1]**2
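+        # e.g. for 64x64 frames, pbound_stub = max_power / 64**2, i.e. the
+        # maximum frame power spread evenly over all pixels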
+ self.diff.mean = mean_frame
+ self.diff.max = max_frame
+ self.diff.min = min_frame
+        info = {'label': self.label,
+                'max': self.diff.max_power,
+                'tot': self.diff.tot_power,
+                'mean': mean_frame.sum()}
logger.info(
- '\n--- Scan %(label)s photon report ---\n'
- 'Total photons : %(tot).2e \n'
- 'Average photons : %(mean).2e\n'
- 'Maximum photons : %(max).2e\n' % info + '-' * 29)
+            '\n--- Scan %(label)s photon report ---\n'
+            'Total photons   : %(tot).2e \n'
+            'Average photons : %(mean).2e\n'
+            'Maximum photons : %(max).2e\n' % info + '-' * 29)
+
+ def _create_pods(self):
+ """
+ Create all new pods as specified in the new_positions,
+ new_diff_views and new_mask_views object attributes. Also create
+ all necessary views on object, probe, and exit wave.
+
+ Return the list of new pods, and dicts of new probe and object
+ ids (to allow for initialization).
+ """
+ raise NotImplementedError
+
+ def _initialize_geo(self, common):
+ """
+        Initialize the geometry/geometries based on the input data package.
+
+ Parameters
+ ----------
+ common: dict
+ metadata part of the data package passed into new_data.
- def make_datasource(self, data_pars=None):
"""
- Creates a static datasource from parameters in the self.scans dict.
+ raise NotImplementedError
+
+ def _initialize_probe(self, probe_ids):
+ raise NotImplementedError
+
+ def _initialize_object(self, object_ids):
+ raise NotImplementedError
+
+
+@defaults_tree.parse_doc('scan.Vanilla')
+class Vanilla(ScanModel):
+ """
+    Dummy model for testing. More than one ScanModel subclass must exist
+    for parameter validation to be able to react to invalid model names.
+
+ Defaults:
+
+ [name]
+ default = Vanilla
+ type = str
+ help =
+
+ [illumination.size]
+ default = None
+ type = float
+ help = Initial probe size
+ doc = The probe is initialized as a flat circle.
+
+ [sample.fill]
+ default = 1
+ type = float, complex
+ help = Initial sample value
+ doc = The sample is initialized with this value everywhere.
+
+ """
- For any additional file in data.filelist it will create a new entry in
- self.scans with generic parameters given by the current model.
+ def _create_pods(self):
"""
+ Create all new pods as specified in the new_positions,
+ new_diff_views and new_mask_views object attributes.
"""
- if data_pars is not None:
- filelist = data_pars.get('filelist')
- if filelist is not None:
- for f in filelist:
- scan = self.prepare_scan()
- scan.pars.data_file = f
+ logger.info('\n' + headerline('Creating PODS', 'l'))
+ new_pods = []
+ new_probe_ids = {}
+ new_object_ids = {}
+
+ # One probe / object storage per scan.
+        ID = 'S' + self.label
+
+ # We need to return info on what storages are created
+        if ID not in self.ptycho.probe.storages.keys():
+            new_probe_ids[ID] = True
+        if ID not in self.ptycho.obj.storages.keys():
+            new_object_ids[ID] = True
+
+ geometry = self.geometries[0]
+
+ # Loop through diffraction patterns
+ for i in range(len(self.new_diff_views)):
+ dv, mv = self.new_diff_views.pop(0), self.new_mask_views.pop(0)
+
+ # Create views
+ ndim = self.Cdiff.ndim
+ pv = View(container=self.ptycho.probe,
+ accessrule={'shape': geometry.shape,
+ 'psize': geometry.resolution,
+ 'coord': u.expectN(0.0, ndim),
+ 'storageID': ID,
+ 'layer': 0,
+ 'active': True})
+
+ ov = View(container=self.ptycho.obj,
+ accessrule={'shape': geometry.shape,
+ 'psize': geometry.resolution,
+ 'coord': self.new_positions[i],
+ 'storageID': ID,
+ 'layer': 0,
+ 'active': True})
+
+ ev = View(container=self.ptycho.exit,
+ accessrule={'shape': geometry.shape,
+ 'psize': geometry.resolution,
+ 'coord': u.expectN(0.0, ndim),
+ 'storageID': dv.storageID,
+ 'layer': dv.layer,
+ 'active': dv.active})
+
+ views = {'probe': pv,
+ 'obj': ov,
+ 'diff': dv,
+ 'mask': mv,
+ 'exit': ev}
+
+ pod = POD(ptycho=self.ptycho,
+ ID=None,
+ views=views,
+ geometry=geometry)
+ pod.probe_weight = 1
+ pod.object_weight = 1
+
+ new_pods.append(pod)
+
+ return new_pods, new_probe_ids, new_object_ids
+
+ def _initialize_geo(self, common):
+ """
+        Initialize the geometry based on the input data package.
"""
- # Now there should be little surprises.
- # Every scan is listed in self.scans
- for label, scan in self.scans.items():
- # if scan.pars.get('data_file') is None:
- # scan.pars['data_file'] = self.ptycho.paths.get_data_file(
- # label=label)
- scan.pars['label'] = label
- return data.DataSource(self.scans)
+ # Collect geometry parameters
+ get_keys = ['distance', 'center', 'energy', 'psize', 'shape']
+ geo_pars = u.Param({key: common[key] for key in get_keys})
+ geo_pars.propagation = self.p.propagation
- def new_data(self):
+ # make a Geo instance and fix resolution
+ g = geometry.Geo(owner=self.ptycho, pars=geo_pars)
+ g.p.resolution_is_fix = True
+
+ # save the geometry
+ self.geometries = [g]
+
+ # Store frame shape
+ self.shape = np.array(common.get('shape', g.shape))
+ self.psize = g.psize
+
+ return
+
+ def _initialize_probe(self, probe_ids):
"""
- Get all new diffraction patterns and create all views and pods
- accordingly.
+ Initialize the probe storage referred to by probe_ids.keys()[0]
"""
- parallel.barrier()
- # Nothing to do if there are no new data.
- if not self.ptycho.datasource.data_available:
- return 'No Data'
+ logger.info('\n'+headerline('Probe initialization', 'l'))
- logger.info('Processing new data.')
- used_scans = []
- not_initialized = []
-
- # For some funny reason the Generator construct used to fail.
- while True:
- dp = self.ptycho.datasource.feed_data()
- if dp is None:
- break
- """
- A dp (data package) contains the following:
-
- common : dict or Param
- Meta information common to all datapoints in the
- data package. Variable names need to be consistent with
- those in the rest of ptypy package.
- (TODO further description)
-
- Important info:
- ------------------------
- shape : (tuple or array)
- expected frame shape
- label : (string)
- Script label. This label is matched to the parameter
- tree, a string signifying to which scan this package
- belongs to.
-
-
- iterable : An iterable structure that yields for each iteration
- a dict with the following fields:
-
- data : (np.2darray, float)
- diffraction data
- In MPI case, data can be None if distributed
- to other nodes
- mask : (np.2darray, bool)
- masked out areas in diffraction data array
- index : (int)
- diffraction datapoint index in scan
- position : (tuple or array)
- scan position
- """
- meta = dp['common']
- label = meta['ptylabel']
-
- # We expect a string for the label.
- assert label == str(label)
-
- used_scans.append(label)
- logger.info('Importing data from %s as scan %s.'
- % (meta['label'], label))
-
- # Prepare scan dictionary or dig up the already prepared one
- scan = self.prepare_scan(label)
- scan.meta = meta
-
- # Empty buffer
- scan.iterable = []
-
- # Prepare the scan geometry if not already done.
- if scan.get('geometries') is None:
- # Ok now that we have meta we can check if the geometry fits
- scan.geometries = []
- geo = scan.pars.geometry
- for key in geometry.DEFAULT.keys():
- # scan.pars.if_conflict_use_meta:
- if geo.get(key) is None or not (geo.precedence == 'meta'):
- mk = scan.meta.get(key)
- if mk is not None:
- # None existing key or None values in meta dict
- # are treated alike
- geo[key] = mk
-
- # Make a spectrum
- energies = np.asarray(scan.pars.coherence.energies)
- spec = scan.pars.coherence.spectrum
- spec = [1.0] if spec is None else spec
- if type(spec).__name__ == 'function':
- spectrum = spec(energies)
+ # pick storage from container, there's only one probe
+ pid = probe_ids.keys()[0]
+ s = self.ptycho.probe.S.get(pid)
+ logger.info('Initializing probe storage %s' % pid)
+
+ # use the illumination module as a utility
+ logger.info('Initializing as circle of size ' + str(self.p.illumination.size))
+ illu_pars = u.Param({'aperture':
+ {'form': 'circ', 'size': self.p.illumination.size}})
+ illumination.init_storage(s, illu_pars)
+
+ s.model_initialized = True
+
+ def _initialize_object(self, object_ids):
+ """
+        Initializes the object storage referred to by object_ids.keys()[0]
+ """
+ logger.info('\n'+headerline('Object initialization', 'l'))
+
+ # pick storage from container, there's only one object
+ oid = object_ids.keys()[0]
+ s = self.ptycho.obj.S.get(oid)
+        logger.info('Initializing object storage %s' % oid)
+
+ # simple fill, no need to use the sample module for this
+ s.fill(self.p.sample.fill)
+
+ s.model_initialized = True
+
+
+@defaults_tree.parse_doc('scan.Full')
+class Full(ScanModel):
+ """
+ Manage a single scan model (sharing, coherence, propagation, ...)
+
+ Defaults:
+
+ # note: this class also imports the module-level defaults for sample
+ # and illumination, below.
+
+ [name]
+ default = Full
+ type = str
+ help =
+ doc =
+
+ [coherence]
+ default =
+ help = Coherence parameters
+ doc =
+ type = Param
+ userlevel =
+ lowlim = 0
+
+ [coherence.num_probe_modes]
+ default = 1
+ help = Number of probe modes
+ doc =
+ type = int
+ userlevel = 0
+ lowlim = 0
+
+ [coherence.num_object_modes]
+ default = 1
+ help = Number of object modes
+ doc =
+ type = int
+ userlevel = 0
+ lowlim = 0
+
+ [coherence.energies]
+ default = [1.0]
+ type = list
+    help = List of energies / wavelength relative to mean energy / wavelength
+    doc =
+
+ [coherence.spectrum]
+ default = [1.0]
+ help = Amplitude of relative energy bins if the probe modes have a different energy
+ doc =
+ type = list
+ userlevel = 2
+ lowlim = 0
+
+ [coherence.object_dispersion]
+ default = None
+ help = Energy dispersive response of the object
+ doc = One of:
+ - ``None`` or ``'achromatic'``: no dispersion
+ - ``'linear'``: linear response model
+ - ``'irregular'``: no assumption
+ **[not implemented]**
+ type = str
+ userlevel = 2
+
+ [coherence.probe_dispersion]
+ default = None
+ help = Energy dispersive response of the probe
+ doc = One of:
+ - ``None`` or ``'achromatic'``: no dispersion
+ - ``'linear'``: linear response model
+ - ``'irregular'``: no assumption
+ **[not implemented]**
+ type = str
+ userlevel = 2
+
+ """
+
+ _PREFIX = MODEL_PREFIX
+
+ def _create_pods(self):
+ """
+ Create all new pods as specified in the new_positions,
+ new_diff_views and new_mask_views object attributes.
+ """
+ logger.info('\n' + headerline('Creating PODS', 'l'))
+ new_pods = []
+ new_probe_ids = {}
+ new_object_ids = {}
+
+ label = self.label
+
+ # Get a list of probe and object that already exist
+ existing_probes = self.ptycho.probe.storages.keys()
+ existing_objects = self.ptycho.obj.storages.keys()
+ logger.info('Found these probes : ' + ', '.join(existing_probes))
+ logger.info('Found these objects: ' + ', '.join(existing_objects))
+
+ object_id = 'S' + self.label
+ probe_id = 'S' + self.label
+
+ positions = self.new_positions
+ di_views = self.new_diff_views
+ ma_views = self.new_mask_views
+
+ # Loop through diffraction patterns
+ for i in range(len(di_views)):
+ dv, mv = di_views.pop(0), ma_views.pop(0)
+
+ index = dv.layer
+
+ # Object and probe position
+ pos_pr = u.expect2(0.0)
+ pos_obj = positions[i] if 'empty' not in self.p.tags else 0.0
+
+ # For multiwavelength reconstructions: loop here over
+ # geometries, and modify probe_id and object_id.
+ for ii, geometry in enumerate(self.geometries):
+ # Make new IDs and keep them in record
+ # sharing_rules is not aware of IDs with suffix
+
+ pdis = self.p.coherence.probe_dispersion
+
+ if pdis is None or str(pdis) == 'achromatic':
+ gind = 0
else:
- spectrum = np.resize(np.asarray(spec), (len(energies), 1))
-
- spectrum /= spectrum.sum()
- scan.spectrum = spectrum
-
- for ii, fac in enumerate(energies):
- geoID = geometry.Geo._PREFIX + '%02d' % ii + label
- g = geometry.Geo(self.ptycho, geoID, pars=geo)
- # Fix the sample pixel size.
- # This will make the frame size adapt.
- g.p.resolution_is_fix = True
- # Save old energy value:
- g.p.energy_orig = g.energy
- # Change energy
- g.energy *= fac
- # Attach spectral contribution
- g.p.spectral = spectrum[ii]
- # Append the geometry
- scan.geometries.append(g)
-
- # Create a buffer
- scan.iterable = []
-
- scan.diff_views = []
- scan.mask_views = []
-
- # Remember the order in which these scans were fed to manager
- self.scan_labels.append(label)
-
- # Remember that these new scans are probably not initialized yet
- not_initialized.append(label)
-
- # Buffer incoming data and evaluate if we got Nones in data
- for dct in dp['iterable']:
- dct['active'] = dct['data'] is not None
- scan.iterable.append(dct)
-
- # Ok data transmission is over for now.
- # Let's see what data scans has received and create the views for those
- for label in used_scans:
-
- # Get scan Param
- scan = self.scans[label]
-
- # Pick one of the geometries for calculating the frame shape
- geo = scan.geometries[0]
- sh = np.array(scan.meta.get('shape', geo.shape))
-
- # Storage generation if not already existing
- if scan.get('diff') is None:
- # This scan is brand new so we create storages for it
- scan.diff = self.ptycho.diff.new_storage(
- shape=(1, sh[-2], sh[-1]),
- psize=geo.psize,
- padonly=True,
- layermap=None)
-
- old_diff_views = []
- old_diff_layers = []
- else:
- # Ok storage exists already. Views most likely also.
- # Let's do some analysis and deactivate the old views.
- old_diff_views = self.ptycho.diff.views_in_storage(scan.diff,
- active=False)
- old_diff_layers = []
- for v in old_diff_views:
- old_diff_layers.append(v.layer)
- # v.active = False
-
- # Same for mask
- if scan.get('mask') is None:
- scan.mask = self.ptycho.mask.new_storage(
- shape=(1, sh[-2], sh[-1]),
- psize=geo.psize,
- padonly=True,
- layermap=None)
-
- old_mask_views = []
- old_mask_layers = []
- else:
- old_mask_views = self.ptycho.mask.views_in_storage(scan.mask,
- active=False)
- old_mask_layers = []
- for v in old_mask_views:
- old_mask_layers.append(v.layer)
- # v.active = False
-
- # Prepare for View generation
- AR_diff_base = DEFAULT_ACCESSRULE.copy()
- AR_diff_base.shape = geo.shape
- AR_diff_base.coord = 0.0
- AR_diff_base.psize = geo.psize
- AR_mask_base = AR_diff_base.copy()
- AR_diff_base.storageID = scan.diff.ID
- AR_mask_base.storageID = scan.mask.ID
-
- diff_views = []
- mask_views = []
- positions = []
- # positions_theory = xy.from_pars(scan.pars.xy)
-
- for dct in scan.iterable:
- index = dct['index']
- active = dct['active']
- # tpos = positions_theory[index]
- if (scan.pars.geometry.precedence == 'meta'
- and scan.pos_theory is not None):
- pos = scan.pos_theory[index]
+ gind = ii
+
+ probe_id_suf = probe_id + 'G%02d' % gind
+ if (probe_id_suf not in new_probe_ids.keys()
+ and probe_id_suf not in existing_probes):
+ new_probe_ids[probe_id_suf] = True
+
+ odis = self.p.coherence.object_dispersion
+
+ if odis is None or str(odis) == 'achromatic':
+ gind = 0
else:
- pos = dct.get('position') # ,positions_theory[index])
-
- if pos is None:
- logger.warning('No position set to scan point %d of scan %s'
- % (index, label))
-
- AR_diff = AR_diff_base # .copy()
- AR_mask = AR_mask_base # .copy()
- AR_diff.layer = index
- AR_mask.layer = index
- AR_diff.active = active
- AR_mask.active = active
-
- # Check: is there already a view to this layer? Is it active?
- try:
- old_view = old_diff_views[old_diff_layers.index(index)]
- old_active = old_view.active
- old_view.active = active
- # Also set this for the attached pods' exit views
- # for pod in old_view.pods.itervalues():
- # pod.ex_view.active = active
-
- logger.debug(
- 'Diff view with layer/index %s of scan %s exists.\n'
- 'Setting view active state from %s to %s'
- % (index, label, old_active, active))
- except ValueError:
- v = View(self.ptycho.diff, accessrule=AR_diff)
- diff_views.append(v)
- logger.debug(
- 'Diff view with layer/index %s of scan %s does not '
- 'exist.\n Creating view with ID %s and set active '
- 'state to %s' % (index, label, v.ID, active))
- # Append position also
- positions.append(pos)
-
- try:
- old_view = old_mask_views[old_mask_layers.index(index)]
- old_view.active = active
- except ValueError:
- v = View(self.ptycho.mask, accessrule=AR_mask)
- mask_views.append(v)
-
- # Now we should have the right views to these storages. Let them
- # reformat(), which creates the right sizes and the datalist access
- scan.diff.reformat()
- scan.mask.reformat()
- # parallel.barrier()
-
- for dct in scan.iterable:
- parallel.barrier()
- if not dct['active']:
- continue
- data = dct['data']
- idx = dct['index']
- # scan.diff.datalist[idx][:] = data #.astype(scan.diff.dtype)
- # scan.mask.datalist[idx][:] = dct.get(
- # 'mask', np.ones_like(data)) # .astype(scan.mask.dtype)
- scan.diff.data[scan.diff.layermap.index(idx)][:] = data
- scan.mask.data[scan.mask.layermap.index(idx)][:] = dct.get(
- 'mask', np.ones_like(data))
- # del dct['data']
-
- scan.diff.nlayers = parallel.MPImax(scan.diff.layermap) + 1
- scan.mask.nlayers = parallel.MPImax(scan.mask.layermap) + 1
- # Empty iterable buffer
- # scan.iterable = []
- scan.new_positions = positions
- scan.new_diff_views = diff_views
- scan.new_mask_views = mask_views
- scan.diff_views += diff_views
- scan.mask_views += mask_views
-
- self._update_stats(scan)
- # Create PODs .. but only if data has arrived
-
- if used_scans:
- new_pods, new_probe_ids, new_object_ids = (
- self._create_pods(used_scans))
- logger.info('Process %d created %d new PODs, %d new probes and %d '
- 'new objects.' % (parallel.rank,
- len(new_pods),
- len(new_probe_ids),
- len(new_object_ids)),
- extra={'allprocesses': True})
-
- # Adjust storages
- self.ptycho.probe.reformat(True)
- self.ptycho.obj.reformat(True)
- self.ptycho.exit.reformat()
-
- self._initialize_probe(new_probe_ids)
- self._initialize_object(new_object_ids)
- self._initialize_exit(new_pods)
+ gind = ii
+
+ object_id_suf = object_id + 'G%02d' % gind
+ if (object_id_suf not in new_object_ids.keys()
+ and object_id_suf not in existing_objects):
+ new_object_ids[object_id_suf] = True
+
+ # Loop through modes
+ for pm in range(self.p.coherence.num_probe_modes):
+ for om in range(self.p.coherence.num_object_modes):
+ # Make a unique layer index for exit view
+ # The actual number does not matter due to the
+ # layermap access
+ exit_index = index * 10000 + pm * 100 + om
+
+ # Create views
+ # Please note that mostly references are passed,
+ # i.e. the views do mostly not own the accessrule
+ # contents
+ pv = View(container=self.ptycho.probe,
+ accessrule={'shape': geometry.shape,
+ 'psize': geometry.resolution,
+ 'coord': pos_pr,
+ 'storageID': probe_id_suf,
+ 'layer': pm,
+ 'active': True})
+
+ ov = View(container=self.ptycho.obj,
+ accessrule={'shape': geometry.shape,
+ 'psize': geometry.resolution,
+ 'coord': pos_obj,
+ 'storageID': object_id_suf,
+ 'layer': om,
+ 'active': True})
+
+ ev = View(container=self.ptycho.exit,
+ accessrule={'shape': geometry.shape,
+ 'psize': geometry.resolution,
+ 'coord': pos_pr,
+ 'storageID': (dv.storageID +
+ 'G%02d' % ii),
+ 'layer': exit_index,
+ 'active': dv.active})
+
+ views = {'probe': pv,
+ 'obj': ov,
+ 'diff': dv,
+ 'mask': mv,
+ 'exit': ev}
+
+ pod = POD(ptycho=self.ptycho,
+ ID=None,
+ views=views,
+ geometry=geometry) # , meta=meta)
+
+ new_pods.append(pod)
+
+ pod.probe_weight = 1
+ pod.object_weight = 1
+
+ return new_pods, new_probe_ids, new_object_ids
+
+ def _initialize_geo(self, common):
+ """
+ Initialize the geometry/geometries.
+ """
+ # Extract necessary info from the received data package
+ get_keys = ['distance', 'center', 'energy', 'psize', 'shape']
+ geo_pars = u.Param({key: common[key] for key in get_keys})
+
+ # Add propagation info from this scan model
+ geo_pars.propagation = self.p.propagation
+
+ # The multispectral case will have multiple geometries
+ for ii, fac in enumerate(self.p.coherence.energies):
+ geoID = geometry.Geo._PREFIX + '%02d' % ii + self.label
+ g = geometry.Geo(self.ptycho, geoID, pars=geo_pars)
+            # Fix the sample pixel size; this will make the frame size adapt.
+ g.p.resolution_is_fix = True
+ # save old energy value:
+ g.p.energy_orig = g.energy
+ # change energy
+ g.energy *= fac
+ # append the geometry
+ self.geometries.append(g)
+
+ # Store frame shape
+ self.shape = np.array(common.get('shape', self.geometries[0].shape))
+ self.psize = self.geometries[0].psize
+
+ return
def _initialize_probe(self, probe_ids):
"""
- Initializes the probe storages referred to by the probe_ids.
+ Initialize the probe storages referred to by the probe_ids.
+
+ For this case the parameter interface of the illumination module
+ matches the illumination parameters of this class, so they are
+ just fed in directly.
"""
- logger.info('\n' + headerline('Probe initialization', 'l'))
+ logger.info('\n'+headerline('Probe initialization', 'l'))
+
+ # Loop through probe ids
for pid, labels in probe_ids.items():
- # Pick scanmanagers from scan_label for illumination parameters
- # For now, the scanmanager of the first label is chosen
- scan = self.scans[labels[0]]
- # Pick storage from container
- s = self.ptycho.probe.storages.get(pid)
+            illu_pars = self.p.illumination
+            if type(illu_pars) is u.Param:
+                # modify a deep copy, not the input parameter tree itself
+                illu_pars = illu_pars.copy(depth=10)
+
+ # pick storage from container
+ s = self.ptycho.probe.S.get(pid)
+
if s is None:
continue
else:
logger.info('Initializing probe storage %s using scan %s.'
- % (pid, scan.label))
+ % (pid, self.label))
- illu_pars = scan.pars.illumination
- if type(illu_pars) is u.Param:
- # If not a short cut but a Param, modify content from deep copy
- illu_pars = illu_pars.copy(depth=10)
+ # if photon count is None, assign a number from the stats.
+ phot = illu_pars.get('photons')
+ phot_max = self.diff.max_power
- # If photon count is None, assign a number from the stats.
- phot = illu_pars.get('photons')
- phot_max = scan.diff.max_power
+ if phot is None:
+                logger.info(
+                    'Found no photon count for probe in parameters.\n'
+                    'Using photon count %.2e from photon report.' % phot_max)
+ illu_pars['photons'] = phot_max
+            elif np.abs(np.log10(phot) - np.log10(phot_max)) > 1:
+                logger.warn(
+                    'Photon count from input parameters (%.2e) differs from '
+                    'statistics (%.2e) by more than an order of magnitude.'
+                    % (phot, phot_max))
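+            # e.g. phot = 1e5 against phot_max = 1e7 gives |5 - 7| = 2 > 1
+            # and triggers the warning above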
- if phot is None:
- logger.info(
- 'Found no photon count for probe in parameters.\n'
- 'Using photon count %.2e from photon report.'
- % phot_max)
- illu_pars['photons'] = phot_max
- elif np.abs(np.log10(phot) - np.log10(phot_max)) > 1:
- logger.warn(
- 'Photon count from input parameters (%.2e) differs '
- 'from statistics (%.2e) by more than a magnitude.'
- % (phot, phot_max))
+            if (self.p.coherence.num_probe_modes > 1) and (type(illu_pars) is not np.ndarray):
- # Quickfix spectral contribution.
- if (scan.pars.coherence.probe_dispersion
- not in [None, 'achromatic']):
- logger.info('Applying spectral distribution input to probe')
- illu_pars['photons'] *= s.views[0].pod.geometry.p.spectral
+                if (illu_pars.diversity is None) or (None in [illu_pars.diversity.noise, illu_pars.diversity.power]):
+                    log(2, "You are doing a multimodal reconstruction with little or no diversity between the modes!\n"
+                           "This will likely not reconstruct well. Set .scan.illumination.diversity.power and "
+                           ".scan.illumination.diversity.noise for best results.")
illumination.init_storage(s, illu_pars)
@@ -666,344 +896,417 @@ def _initialize_object(self, object_ids):
"""
         Initializes the object storages referred to by the object_ids.
"""
- logger.info('\n' + headerline('Object initialization', 'l'))
+
+ logger.info('\n'+headerline('Object initialization', 'l'))
+
+ # Loop through object IDs
for oid, labels in object_ids.items():
- # Pick scanmanagers from scan_label for illumination parameters
- # For now, the scanmanager of the first label is chosen
- scan = self.scans[labels[0]]
- # Pick storage from container
- s = self.ptycho.obj.storages.get(oid)
+ # pick storage from container
+ s = self.ptycho.obj.S.get(oid)
+
if s is None or s.model_initialized:
continue
else:
logger.info('Initializing object storage %s using scan %s.'
- % (oid, scan.label))
+ % (oid, self.label))
- sample_pars = scan.pars.sample
+ sample_pars = self.p.sample
if type(sample_pars) is u.Param:
# Deep copy
sample_pars = sample_pars.copy(depth=10)
# Quickfix spectral contribution.
- if (scan.pars.coherence.object_dispersion
+ if (self.p.coherence.object_dispersion
not in [None, 'achromatic']
- and scan.pars.coherence.probe_dispersion
+ and self.p.coherence.probe_dispersion
in [None, 'achromatic']):
logger.info(
'Applying spectral distribution input to object fill.')
sample_pars['fill'] *= s.views[0].pod.geometry.p.spectral
- sample.init_storage(s,sample_pars)
-
- """"
- if sample_pars.get('source') == 'diffraction':
- logger.info('STXM initialization using diffraction data')
- trans, dpc_row, dpc_col = u.stxm_analysis(s)
- s.fill(trans * np.exp(1j * u.phase_from_dpc(dpc_row, dpc_col)))
- else:
- # Find out energy or wavelength.
- # Maybe store that information in the storages too in future
- lam = s.views[0].pod.geometry.lam
- # Make this a single call in future
- obj = sample.from_pars(s.shape[-2:], lam, sample_pars)
- obj = sample.create_modes(s.shape[-3], obj)
- s.fill(obj.obj)
- """
+ sample.init_storage(s, sample_pars)
+ s.reformat() # maybe not needed
- s.reformat() # Maybe not needed
s.model_initialized = True
- def _initialize_exit(self, pods):
+# Append illumination and sample defaults
+defaults_tree['scan.Full'].add_child(illumination.illumination_desc)
+defaults_tree['scan.Full'].add_child(sample.sample_desc)
+
+# Update defaults
+Full.DEFAULT = defaults_tree['scan.Full'].make_default(99)
+
+
+from . import geometry_bragg
+defaults_tree['scan'].add_child(EvalDescriptor('Bragg3dModel'))
+defaults_tree['scan.Bragg3dModel'].add_child(illumination.illumination_desc, copy=True)
+defaults_tree['scan.Bragg3dModel.illumination'].prune_child('diversity')
+@defaults_tree.parse_doc('scan.Bragg3dModel')
+class Bragg3dModel(Vanilla):
+ """
+ Model for 3D Bragg ptycho data, where a set of rocking angles are
+ measured for each scanning position. The result is pods carrying
+ 3D diffraction patterns and 3D Views into a 3D object.
+
+ Inherits from Vanilla because _create_pods and the object init
+ is identical.
+
+ Frames for each position are assembled according to the actual
+ xyz data, so it will not work if two acquisitions are done at the
+ same position.
+
+ Defaults:
+
+ [name]
+ default = Bragg3dModel
+ type = str
+ help =
+
+ """
+
+ def __init__(self, ptycho=None, pars=None, label=None):
+ super(Bragg3dModel, self).__init__(ptycho, pars, label)
+ # This model holds on to incoming frames until a complete 3d
+ # diffraction pattern can be built for that position.
+ self.buffered_frames = {}
+ self.buffered_positions = []
+ #self.frames_per_call = 216 # just for testing
+
+ def _new_data_extra_analysis(self, dp):
"""
- Initializes exit waves using the pods.
+        The heavy override is new_data; this extra method exists for now
+        so as not to duplicate all of the new_data code.
+
+ The PtyScans give 2d diff images at 4d (angle, x, z, y)
+ positions in the sample frame. These need to be assembled into
+ 3d (q3, q1, q2) at 3d positions. This means receiving images,
+ holding on to them, and only calling _create_pods once a
+ complete 3d diff View has been created.
+
+ The xyz axes are those specified in Geo_Bragg, and the angle
+ parameter defined such that a more positive angle corresponds to
+ a more positive q3. That is, it is the angle between the xy
+ plane of the sample with respect to the incident beam.
"""
- logger.info('\n' + headerline('Creating exit waves', 'l'))
- for pod in pods:
- if not pod.active:
+
+ logger.info(
+ 'Redistributing incoming frames so that all data from each scanning position is on the same node.')
+ dp = self._mpi_redistribute_raw_frames(dp)
+
+ logger.info(
+ 'Buffering incoming frames and binning these by scanning position.')
+ self._buffer_incoming_frames(dp)
+
+ logger.info(
+ 'Repackaging complete buffered rocking curves as 3d data packages.')
+ dp_new = self._make_3d_data_package()
+
+ # continue to pod creation if there is data for it
+ if len(dp_new['iterable']):
+ logger.info('Will continue with POD creation for %d complete positions.'
+ % len(dp_new['iterable']))
+ return dp_new
+ else:
+ return None
+
+ def _mpi_redistribute_raw_frames(self, dp):
+ """
+ Linear decomposition of incoming frames based on the scan
+ dimension that varies the most. Modifies a data package in-place
+ so that the angles of each unique scanning position end up on
+ the same node.
+ """
+
+ # work out the xyz range, we have all positions here
+ pos = []
+ for dct in dp['iterable']:
+ pos.append(dct['position'][1:])
+ pos = np.array(pos)
+ xmin, xmax = pos[:,0].min(), pos[:,0].max()
+ ymin, ymax = pos[:,2].min(), pos[:,2].max()
+ zmin, zmax = pos[:,1].min(), pos[:,1].max()
+ diffs = [xmax - xmin, zmax - zmin, ymax - ymin]
+
+ # the axis along which to slice
+ axis = diffs.index(max(diffs))
+ logger.info(
+ 'Will slice incoming frames along axis %d (%s).' % (axis, ['x', 'z', 'y'][axis]))
+
+ # pick the relevant limits and expand slightly to avoid edge effects
+ lims = {0: [xmin, xmax], 1: [zmin, zmax], 2: [ymin, ymax]}[axis]
+ lims = np.array(lims) + np.array([-1, 1]) * np.diff(lims) * .01
+ domain_width = np.diff(lims) / parallel.size
+
+ # now we can work out which node should own a certain position
+ def __node(pos):
+ return int((pos - lims[0]) / domain_width)
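+        # e.g. with 4 ranks and x spanning roughly [0, 4) um, domain_width
+        # is ~1 um, so __node maps x = 0.5 um to rank 0 and x = 3.9 um to
+        # rank 3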
+
+ # work out which node should have each of my buffered frames
+ N = parallel.size
+ senditems = {}
+ for idx in range(len(dp['iterable'])):
+ if dp['iterable'][idx]['data'] is None:
continue
- pod.exit = pod.probe * pod.object
+ pos_ = pos[idx][axis]
+            if __node(pos_) != parallel.rank:
+ senditems[idx] = __node(pos_)
+
+ # transfer data as a list corresponding to dp['iterable']
+ for sending_node in range(parallel.size):
+ if sending_node == parallel.rank:
+ # My turn to send
+ for receiver in range(parallel.size):
+ if receiver == parallel.rank:
+ continue
+ lst = []
+ for idx, rec in senditems.iteritems():
+ if rec == receiver:
+ lst.append(dp['iterable'][idx])
+ parallel.send(lst, dest=receiver)
+ else:
+ received = parallel.receive(source=sending_node)
+ for frame in received:
+ idx = frame['index']
+ dp['iterable'][idx] = frame.copy()
- def _create_pods(self, new_scans):
+        # Mark sent frames disabled. It would be nice to do this in the loop
+        # above, but you can't trust that communication will be blocking.
+ for idx in senditems.keys():
+ dp['iterable'][idx]['data'] = None
+ dp['iterable'][idx]['mask'] = None
+
+ return dp
+
+ def _buffer_incoming_frames(self, dp):
+ """
+ Store incoming frames in an internal buffer, binned by scanning
+ position.
+ """
+ for dct in dp['iterable']:
+ pos = dct['position'][1:]
+ try:
+ # index into the frame buffer where this frame belongs
+ idx = np.where(np.prod(np.isclose(pos, self.buffered_positions), axis=1))[0][0]
+ logger.debug('Frame %d belongs in frame buffer %d'
+ % (dct['index'], idx))
+            except (IndexError, ValueError):
+ # this position hasn't been encountered before, so create a buffer entry
+ idx = len(self.buffered_positions)
+ logger.debug('Frame %d doesn\'t belong in an existing frame buffer, creating buffer %d' % (dct['index'], idx))
+ self.buffered_positions.append(pos)
+ self.buffered_frames[idx] = {
+ 'position': pos,
+ 'frames': [],
+ 'masks': [],
+ 'angles': [],
+ }
+
+ # buffer the frame, mask, and angle
+ self.buffered_frames[idx]['frames'].append(dct['data'])
+ self.buffered_frames[idx]['masks'].append(dct['mask'])
+ self.buffered_frames[idx]['angles'].append(dct['position'][0])
+
+ def _make_3d_data_package(self):
"""
- Create all pods associated with the scan labels in 'scans'.
+ Go through the internal buffer to see if any positions have all
+ their 2d frames, and create a new dp-compatible structure with
+ complete 3d positions.
+ """
+ dp_new = {'iterable': []}
+ for idx, dct in self.buffered_frames.iteritems():
+ if len(dct['angles']) == self.geometries[0].shape[0]:
+ # this one is ready to go
+ logger.debug('3d diffraction data for position %d ready, will create POD' % idx)
+
+ if dct['frames'][0] is not None:
+ # First sort the frames in increasing angle (increasing
+ # q3) order. Also assume the images came in as (-q1, q2)
+ # from PtyScan. We want (q3, q1, q2) as required by
+ # Geo_Bragg, so flip the q1 dimension.
+ order = [i[0] for i in sorted(enumerate(dct['angles']), key=lambda x:x[1])]
+ dct['frames'] = [dct['frames'][i][::-1,:] for i in order]
+ dct['masks'] = [dct['masks'][i][::-1,:] for i in order]
+ diffdata = np.array(dct['frames'], dtype=self.ptycho.FType)
+ maskdata = np.array(dct['masks'], dtype=bool)
+ else:
+ # this buffer belongs to another node
+ diffdata = None
+ maskdata = None
+
+ # then assemble the data and masks
+ dp_new['iterable'].append({
+ 'index': idx,
+ 'position': dct['position'],
+ 'data': diffdata,
+ 'mask': maskdata,
+ })
- Return the list of new pods, probe and object ids (to allow for
- initialization).
+ else:
+ logger.debug('3d diffraction data for position %d isn\'t ready, have %d out of %d frames'
+ % (idx, len(dct['angles']), self.geometries[0].shape[0]))
+
+ # delete complete entries from the buffer
+ for dct in dp_new['iterable']:
+ del self.buffered_frames[dct['index']]
+
+ # make the indices on the 3d dp contiguous and unique
+ cnt = {parallel.rank: sum([(d['data'] is not None) for d in dp_new['iterable']])}
+ parallel.allgather_dict(cnt)
+ offset = sum([cnt[i] for i in range(parallel.rank)])
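+        # e.g. if ranks 0, 1, 2 hold 3, 2 and 4 complete positions, their
+        # offsets become 0, 3 and 5, giving globally unique indices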
+ idx = 0
+ for dct in dp_new['iterable']:
+ dct['index'] = offset + idx
+ idx += 1
+ return dp_new
+
+ def _initialize_containers(self):
+ """
+ Override to get 3D containers.
+ """
+ self.ptycho._pool['C'].pop('Cprobe')
+ self.ptycho.probe = Container(self.ptycho, ID='Cprobe', data_type='complex', data_dims=3)
+ self.ptycho._pool['C'].pop('Cobj')
+ self.ptycho.obj = Container(self.ptycho, ID='Cobj', data_type='complex', data_dims=3)
+ self.ptycho._pool['C'].pop('Cexit')
+ self.ptycho.exit = Container(self.ptycho, ID='Cexit', data_type='complex', data_dims=3)
+ self.ptycho._pool['C'].pop('Cdiff')
+ self.ptycho.diff = Container(self.ptycho, ID='Cdiff', data_type='real', data_dims=3)
+ self.ptycho._pool['C'].pop('Cmask')
+ self.ptycho.mask = Container(self.ptycho, ID='Cmask', data_type='bool', data_dims=3)
+ self.Cdiff = self.ptycho.diff
+ self.Cmask = self.ptycho.mask
+ self.containers_initialized = True
+
+ def _initialize_geo(self, common):
+ """
+ Initialize the geometry based on parameters from a PtyScan.auto
+ data package. Now psize and shape change meanings: from referring
+ to raw data frames, they now refer to 3-dimensional diffraction
+ patterns as specified by Geo_Bragg.
"""
- logger.info('\n' + headerline('Creating PODS', 'l'))
- new_pods = []
- new_probe_ids = {}
- new_object_ids = {}
- # Get a list of probe and object that already exist
- existing_probes = self.ptycho.probe.storages.keys()
- # SC: delete? self.sharing_rules.probe_ids.keys()
- existing_objects = self.ptycho.obj.storages.keys()
- # SC: delete? self.sharing_rules.object_ids.keys()
- logger.info('Found these probes : ' + ', '.join(existing_probes))
- logger.info('Found these objects: ' + ', '.join(existing_objects))
- # exit_index = 0
-
- # Loop through scans
- for label in new_scans:
- scan = self.scans[label]
- # Store probe and object weights in meta
- # meta = {'probe_weight': scan.pars.probe_weight,
- # 'object_weight': scan.pars.object_weight}
-
- positions = scan.new_positions
- di_views = scan.new_diff_views
- ma_views = scan.new_mask_views
-
- # Compute sharing rules
- share = scan.pars.sharing
- alt_obj = share.object_share_with if share is not None else None
- alt_pr = share.probe_share_with if share is not None else None
-
- obj_label = label if alt_obj is None else alt_obj
- pr_label = label if alt_pr is None else alt_pr
-
- # Loop through diffraction patterns
- for i in range(len(di_views)):
- dv, mv = di_views.pop(0), ma_views.pop(0)
- index = dv.layer
-
- # Object and probe position
- pos_pr = u.expect2(0.0)
- pos_obj = positions[i] if 'empty' not in scan.pars.tags else 0.0
-
- t, object_id = self.sharing_rules(obj_label, index)
- probe_id, t = self.sharing_rules(pr_label, index)
-
- # For multiwavelength reconstructions: loop here over
- # geometries, and modify probe_id and object_id.
- for ii, geometry in enumerate(scan.geometries):
- # Make new IDs and keep them in record
- # sharing_rules is not aware of IDs with suffix
-
- pdis = scan.pars.coherence.probe_dispersion
- if pdis is None or str(pdis) == 'achromatic':
- gind = 0
- else:
- gind = ii
-
- probe_id_suf = probe_id + 'G%02d' % gind
- if (probe_id_suf not in new_probe_ids.keys()
- and probe_id_suf not in existing_probes):
- new_probe_ids[probe_id_suf] = (
- self.sharing_rules.probe_ids[probe_id])
-
- odis = scan.pars.coherence.object_dispersion
- if odis is None or str(odis) == 'achromatic':
- gind = 0
- else:
- gind = ii
-
- object_id_suf = object_id + 'G%02d' % gind
- if (object_id_suf not in new_object_ids.keys()
- and object_id_suf not in existing_objects):
- new_object_ids[object_id_suf] = (
- self.sharing_rules.object_ids[object_id])
-
- # Loop through modes
- for pm in range(scan.pars.coherence.num_probe_modes):
- for om in range(scan.pars.coherence.num_object_modes):
- # Make a unique layer index for exit view
- # The actual number does not matter due to the
- # layermap access
- exit_index = index * 10000 + pm * 100 + om
-
- # Create views
- # Please note that mostly references are passed,
- # i.e. the views do mostly not own the accessrule
- # contents
- pv = View(container=self.ptycho.probe,
- accessrule={'shape': geometry.shape,
- 'psize': geometry.resolution,
- 'coord': pos_pr,
- 'storageID': probe_id_suf,
- 'layer': pm,
- 'active': True})
-
- ov = View(container=self.ptycho.obj,
- accessrule={'shape': geometry.shape,
- 'psize': geometry.resolution,
- 'coord': pos_obj,
- 'storageID': object_id_suf,
- 'layer': om,
- 'active': True})
-
- ev = View(container=self.ptycho.exit,
- accessrule={'shape': geometry.shape,
- 'psize': geometry.resolution,
- 'coord': pos_pr,
- 'storageID': (probe_id +
- object_id[1:] +
- 'G%02d' % ii),
- 'layer': exit_index,
- 'active': dv.active})
-
- views = {'probe': pv,
- 'obj': ov,
- 'diff': dv,
- 'mask': mv,
- 'exit': ev}
-
- # views = {'probe': pv,
- # 'obj': ov,
- # 'diff': dv,
- # 'mask': mv}
-
- pod = POD(ptycho=self.ptycho,
- ID=None,
- views=views,
- geometry=geometry) # , meta=meta)
-
- new_pods.append(pod)
-
- # If Empty Probe sharing is enabled,
- # adjust POD accordingly.
- if share is not None:
- pod.probe_weight = share.probe_share_power
- pod.object_weight = share.object_share_power
- if share.EP_sharing:
- pod.is_empty = True
- else:
- pod.is_empty = False
- else:
- pod.probe_weight = 1
- pod.object_weight = 1
-
- # if 'empty' in scan.pars.tags:
- # pod.is_empty = True
- # else:
- # pod.is_empty = False
- # exit_index += 1
-
- # Delete buffer & meta (meta may be filled with a lot of stuff)
- scan.iterable = []
- # scan.meta = {}
+ # Collect and assemble geometric parameters
+ get_keys = ['distance', 'center', 'energy']
+ geo_pars = u.Param({key: common[key] for key in get_keys})
+ geo_pars.propagation = self.p.propagation
+ # take extra Bragg information into account
+ psize = tuple(common['psize'])
+ geo_pars.psize = (self.ptyscan.common.rocking_step,) + psize
+ sh = tuple(common['shape'])
+ geo_pars.shape = (self.ptyscan.common.n_rocking_positions,) + sh
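+        # e.g. raw 256x256 frames at 18 rocking positions give a 3d
+        # diffraction shape of (18, 256, 256)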
+ geo_pars.theta_bragg = self.ptyscan.common.theta_bragg
+
+ # make a Geo instance and fix resolution
+ g = geometry_bragg.Geo_Bragg(owner=self.ptycho, pars=geo_pars)
+ logger.info('Reconstruction will use these geometric parameters:')
+ logger.info(g)
+ g.p.resolution_is_fix = True
+
+ # save the geometry
+ self.geometries = [g]
+
+ # Store frame shape
+ self.shape = g.shape
+ self.psize = g.psize
- return new_pods, new_probe_ids, new_object_ids
+ def _initialize_probe(self, probe_ids):
+ """
+ Initialize the probe storage referred to by probe_ids.keys()[0]
+ """
+ logger.info('\n'+headerline('Probe initialization', 'l'))
+
+ # pick storage from container, there's only one probe
+ pid = probe_ids.keys()[0]
+ s = self.ptycho.probe.S.get(pid)
+ logger.info('Initializing probe storage %s' % pid)
- def collect_diff_mask_meta(self, label=None, filename=None, save=False,
- dtype=None, **kwargs):
+ # create an oversampled probe perpendicular to its incoming
+ # direction, using the illumination module as a utility.
+ geo = self.geometries[0]
+ extent = max(geo.probe_extent_vs_fov())
+ psize = min(geo.resolution) / 5
+ shape = int(np.ceil(extent / psize))
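+        # e.g. extent = 3e-6 m with min(geo.resolution) = 50e-9 m gives
+        # psize = 1e-8 m and a 300 x 300 incoming probe grid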
+ logger.info('Generating incoming probe %d x %d (%.3e x %.3e) with psize %.3e...'
+ % (shape, shape, extent, extent, psize))
+ t0 = time.time()
+
+ Cprobe = Container(data_dims=2, data_type='float')
+ Sprobe = Cprobe.new_storage(psize=psize, shape=shape)
+
+ # fill the incoming probe
+ illumination.init_storage(Sprobe, self.p.illumination, energy=geo.energy)
+ logger.info('...done in %.3f seconds' % (time.time() - t0))
+
+ # Extrude the incoming probe in the right direction and frame
+ s.data[:] = geo.prepare_3d_probe(Sprobe, system='natural').data
+
+ s.model_initialized = True
+
+
+class ModelManager(object):
+ """
+ Thin wrapper class which now just interfaces Ptycho with ScanModel.
+ This should probably all be done directly in Ptycho.
+ """
+
+ def __init__(self, ptycho, pars):
"""
- *DEPRECATED*
- attempt to save diffraction data
Parameters
----------
+ ptycho: Ptycho
+ The parent Ptycho object
- label : str
- ptypy label of the scan to save
- if None, tries to save ALL diffraction data
+ pars : dict or Param
+ The .scans tree of the :any:`Ptycho` parameters.
+ """
+ assert ptycho is not None
+ self.ptycho = ptycho
- filename : str
- override the file path to write to
- will change `data_filename` in `scan_info` dict
+ # Create scan model objects
+ self.scans = OrderedDict()
+ for label, scan_pars in pars.iteritems():
+ # find out which scan model class to instantiate
+ if scan_pars.name in u.all_subclasses(ScanModel, names=True):
+ cls = eval(scan_pars.name)
+ else:
+ raise RuntimeError('Could not manage model %s' % scan_pars.name)
+ # instantiate!
+ self.scans[label] = cls(ptycho=self.ptycho, pars=scan_pars, label=label)
+
+ def _to_dict(self):
+ return self.__dict__.copy()
- all other kwargs are added to 'scan_info' key in the '.h5' file
+ @classmethod
+ def _from_dict(cls, dct):
+ # create instance
+ inst = cls(None, None)
+ # overwrite internal dictionary
+ inst.__dict__ = dct
+ return inst
+
+ @property
+ def data_available(self):
+ return any(s.data_available for s in self.scans.values())
+
+ def new_data(self):
"""
+ Get all new diffraction patterns and create all views and pods
+ accordingly.
+ """
+ parallel.barrier()
- if label is None:
- scans = {}
- for l in self.scans.keys():
- scans[l] = self.collect_diff_mask_meta(l,
- filename,
- save,
- dtype,
- **kwargs)
- return scans
- else:
- dct = {}
- # get the scan
- scan = self.scans[label]
- for kind in ['mask', 'diff']:
- storage = scan[kind]
- # fresh copy
- new = [data.copy() if data is not None else None
- for data in storage.datalist]
- Nframes = len(new)
- if parallel.MPIenabled:
- logger.info('Using MPI to gather arrays for storing %s'
- % kind)
- for i in range(Nframes):
- if parallel.master:
-
- # Root receives the data if it doesn't have it yet
- if new[i] is None:
- new[i] = parallel.receive()
- logger.info(
- '%s :: Frame %d/%d received at process %d'
- % (kind.upper(), i, Nframes, parallel.rank),
- extra={'allprocesses': True})
-
- parallel.barrier()
-
- else:
- if new[i] is not None:
- # Send data to root.
- parallel.send(new[i])
- # logger.info('Process %d - Send frame %d of %s'
- # % (parallel.rank, i, kind),
- # extra={'allprocesses':True})
-
- sender = parallel.rank
- logger.info(
- '%s :: Frame %d/%d send from process %d'
- % (kind.upper(), i, Nframes, parallel.rank),
- extra={'allprocesses': True})
-
- parallel.barrier()
-
- parallel.barrier()
-
- # storing as arrays
- if parallel.master:
- key = 'data' if kind == 'diff' else kind
- dct[key] = np.asarray(new)
-
- # save if you are master
- if parallel.master:
-
- # get meta data
- meta = self.scans[label]['meta']
- # update with geometric info
- meta.update(scan.pars.geometry.copy())
-
- # translate to scan_info and ditch variables
- # not in data.DEFAULT_scan_info
- from data import MT as LeTraducteur
-
- dct['scan_info'] = LeTraducteur.as_scan_info(
- self.scans[label]['meta'])
-
- # overwrite filename
- if filename is not None:
- dct['scan_info']['data_filename'] = filename
-
- filename = dct['scan_info'].get('data_filename')
-
- # add other kwargs to scan_info
- dct['scan_info'].update(kwargs)
- dct['scan_info']['shape'] = dct['data'].shape
-
- # switch data type for data if wanted (saves space)
- if dtype is not None:
- dct['data'] = dct['data'].astype(dtype)
-
- if save:
- # cropping
- from .. import io
-
- filename = u.clean_path(filename)
- logger.info('Saving to ' + filename)
- io.h5write(filename, dct)
- logger.info('Saved')
- return filename
-
- return dct
+ # Nothing to do if there are no new data.
+ if not self.data_available:
+ return 'No data'
+
+ logger.info('Processing new data.')
+
+ # Attempt to get new data
+ for label, scan in self.scans.iteritems():
+ new_data = scan.new_data()
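For orientation: `ModelManager.__init__` above resolves the scan-model class from the parameter `name` via `u.all_subclasses(ScanModel, names=True)` and `eval`. The following is a minimal, self-contained sketch of that dispatch pattern; the toy classes and the `all_subclasses` helper are illustrative stand-ins, not ptypy's own code.

```python
# Sketch of name-based model dispatch (stand-in classes and helper,
# not ptypy's implementation).
class ScanModel(object):
    pass

class Vanilla(ScanModel):
    pass

class Bragg3dModel(ScanModel):
    pass

def all_subclasses(cls, names=False):
    """Recursively collect subclasses, optionally returning their names."""
    subs = cls.__subclasses__() + [g for s in cls.__subclasses__()
                                   for g in all_subclasses(s)]
    return [c.__name__ for c in subs] if names else subs

name = 'Vanilla'
if name in all_subclasses(ScanModel, names=True):
    # A registry lookup gives the same result as eval() without executing text.
    cls = dict((c.__name__, c) for c in all_subclasses(ScanModel))[name]
else:
    raise RuntimeError('Could not manage model %s' % name)
```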
diff --git a/ptypy/core/model.py b/ptypy/core/model.py
deleted file mode 100644
index 9b4882de4..000000000
--- a/ptypy/core/model.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Data sharing models.
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-from .. import utils as u
-from ..utils.verbose import logger
-from classes import STORAGE_PREFIX
-
-__all__ = ['parse_model']
-
-DEFAULT = u.Param(
- model_type='basic',
- scan_per_probe=1,
- scan_per_object=1,
- npts=None
-)
-
-MAX_SCAN_COUNT = 100
-
-
-def parse_model(pars, sharing_dct):
- """
- This factory function takes a model description in the input parameters
- and returns an object that can be called with a scan_label (or index) and
- a diffraction pattern index, and returns probe and object ids.
- """
- p = u.Param(DEFAULT)
- p.update(pars)
- if p.model_type.lower() == 'basic':
- return BasicSharingModel(sharing_dct,
- p.scan_per_probe,
- p.scan_per_object,
- p.npts)
- else:
- raise RuntimeError('model type %s not supported.' % p.model_type)
-
-
-class BasicSharingModel(object):
- """
- BasicSharingModel: implements the most common scan-sharing patterns.
- """
-
- def __init__(self, sharing_dct, scan_per_probe, scan_per_object, npts=None):
- """
- BasicSharingModel: implements the most common scan-sharing patterns.
-
- Parameters:
- -----------
- scan_per_probe: float < 1 or int
- number of contiguous scans using the same probe. If a int, the
- number of scans. If a float < 0, split the scans into
- 1/scan_per_probe independent probes. For instance,
- scan_per_probe = .5 will split all scans in two and assign a
- different probe to each.
- scan_per_object: int
- number of contiguous scans using the same object.
- npts: int
- number of diffraction patterns in a given scan. Needed only if
- scan_per_probe < 1.
- """
- # Prepare probe sharing
- if scan_per_probe == 0:
- self.shared_probe = True
- self.single_probe = True
- logger.info('Sharing a single probe for ALL scans.')
- elif scan_per_probe >= 1:
- self.shared_probe = True
- self.single_probe = False
- self.scan_per_probe = int(scan_per_probe)
- logger.info(
- 'Model: sharing probe between scans '
- '(one new probe every %d scan)' % self.scan_per_probe)
- else:
- self.shared_probe = False
- self.single_probe = False
- # The following will fail if npts wasn't provided.
- self.diff_per_probe = int(npts * scan_per_probe)
- self.npts = npts
- logger.info(
- 'Model: splitting scans (every %d diffraction patter)'
- % self.diff_per_probe)
-
- # Prepare object sharing
- if scan_per_object == 0:
- self.single_object = True
- self.shared_object = True
- logger.info('Sharing a single object for ALL scans.')
- elif scan_per_object >= 1:
- self.single_object = False
- self.shared_object = True
- self.scan_per_object = int(scan_per_object)
- logger.info(
- 'Model: sharing object between scans '
- '(one new object every %d scan)' % self.scan_per_object)
- else:
- raise RuntimeError(
- 'scan_per_object < 1. not supported. What does it mean anyway?')
-
- self.scan_labels = []
- self.probe_ids = sharing_dct['probe_ids']
- self.object_ids = sharing_dct['object_ids']
-
- def __call__(self, scan_label, diff_index):
- """
- Return probe and object ids given a scan and diffraction pattern index.
-
- Parameters:
- -----------
- scan_label: str or int
- An identifier for a scan (label or index)
- diff_index: int
- The index of the diffraction pattern in the given scan.
- """
- # Get an index for the scan_label
- if str(scan_label) == scan_label:
- # If it is a string, look it up
- if scan_label in self.scan_labels:
- scan_index = self.scan_labels.index(scan_label)
- else:
- scan_index = len(self.scan_labels)
- self.scan_labels.append(scan_label)
- else:
- # Nothing to do if it is an index
- scan_index = scan_label
-
- # Apply the rules for probe sharing
- if self.single_probe:
- probe_id = 0
- elif self.shared_probe:
- probe_id = scan_index // self.scan_per_probe
- else:
- probe_id = (scan_index * (self.npts // self.diff_per_probe)
- + diff_index // self.diff_per_probe)
-
- # Follow the format specified in Viewmanager
- probe_id = STORAGE_PREFIX + '%02d' % probe_id
-
- # ... and the rules for object sharing
- if self.single_object:
- object_id = 0
- else:
- object_id = scan_index // self.scan_per_object
-
- # Follow the format specified in Viewmanager
- object_id = STORAGE_PREFIX + '%02d' % object_id
-
- logger.debug(
- "Model assigned frame %d of scan %s to probe %s & object %s"
- % (diff_index, str(scan_label), probe_id, object_id))
-
- # Store sharing info
- pl = self.probe_ids.get(probe_id, [])
- if scan_label not in pl:
- pl.append(scan_label)
- self.probe_ids[probe_id] = pl
-
- ol = self.object_ids.get(object_id, [])
- if scan_label not in ol:
- ol.append(scan_label)
- self.object_ids[object_id] = ol
-
- return probe_id, object_id
diff --git a/ptypy/core/paths.py b/ptypy/core/paths.py
index dbf76c0ab..943bf3596 100644
--- a/ptypy/core/paths.py
+++ b/ptypy/core/paths.py
@@ -10,12 +10,7 @@
import sys
import os
-# for solo use ##########
-if __name__ == "__main__":
- from ptypy import utils as u
-# for in package use #####
-else:
- from .. import utils as u
+from .. import utils as u
__all__ = ['DEFAULT', 'Paths']
@@ -97,7 +92,6 @@ def plot_file(self, runtime=None):
File path for plot file
"""
p = self.get_path(self.autoplot, runtime)
- print p
return self.get_path(self.autoplot, runtime)
def get_path(self, path, runtime):
diff --git a/ptypy/core/pattern.py b/ptypy/core/pattern.py
deleted file mode 100644
index 676010351..000000000
--- a/ptypy/core/pattern.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module generates the scan patterns.
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-from .. import utils as u
-#from ..utils import prop
-from ..utils.verbose import logger
-import numpy as np
-
-__all__=['DEFAULT','from_pars','round_scan','round_scan_roi'\
- 'raster_scan','spiral_scan','spiral_scan_roi']
-DEFAULT=u.Param(
- #### Paramaters for popular scan methods
- scan_type = None, # [None,'round', 'raster', 'round_roi','spiral','spiral_roi','custom']
- dr = 1.5e-6, # round,round_roi :width of shell
- nr = 5, # round : number of intervals (# of shells - 1)
- nth = 5, # round,round_roi: number of points in the first shell
- lx = 15e-6, # round_roi: Width of ROI
- ly = 15e-6, # round_roi: Height of ROI
- nx = 10, # raster scan: number of steps in x
- ny = 10, # raster scan: number of steps in y
- dx = 1.5e-6, # raster scan: step size (grid spacing)
- dy = 1.5e-6, # raster scan: step size (grid spacing)
- #### other
- positions = None, # fill this list with your own script if you want other scan patterns, choose 'custom' as san type
-)
-
-import warnings
-warnings.simplefilter('always', DeprecationWarning)
-warnings.warn('This module is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
-DEFAULT = u.validator.make_sub_default('.scan.xy',depth=3)
-"""Default pattern parameters. See :py:data:`.scan.xy` and a short listing below"""
-
-def from_pars(pars=None):
- """
- Creates position array from parameter tree `pars`. See :py:data:`DEFAULT`
-
- :param Param pars: Input parameters
- :returns ndarray pos: A numpy.ndarray of shape ``(N,2)`` for *N* positios
- """
- p=u.Param(DEFAULT)
- if pars is not None: # and (isinstance(pars,dict) or isinstance(pars,u.Param)):
- p.update(pars)
-
- if p.type is None:
- logger.debug('Scan_type `None` is chosen . Will use positions provided by meta information')
- return None
-
- elif p.type=='round':
- pos=round_scan(**p.round)
- elif p.type=='round_roi':
- pos=round_scan_roi(**p.round_roi)
- elif p.type=='spiral':
- pos=spiral_scan(**p.spiral)
- elif p.type=='spiral_roi':
- pos=spiral_scan_roi(**p.spiral_roi)
- elif p.type=='raster':
- pos=raster(**p.raster)
- else:
- pos = p.positions
- pos=np.asarray(pos)
- logger.info('Prepared %d positions' % len(pos))
- return pos
-
-
-
-def augment_to_coordlist(a,Npos):
-
- # force into a 2 column matrix
- # drop element if size is not a modulo of 2
- a = np.asarray(a)
- if a.size == 1:
- a=np.atleast_2d([a,a])
-
- if a.size % 2 != 0:
- a=a.flatten()[:-1]
-
- a=a.reshape(a.size//2,2)
- # append multiples of a until length is greater equal than Npos
- if a.shape[0] < Npos:
- b=np.concatenate((1+Npos//a.shape[0])*[a],axis=0)
- else:
- b=a
-
- return b[:Npos,:2]
-
-def raster_scan(ny=10,nx=10,dy=1.5e-6,dx=1.5e-6):
- """
- Generates as raster scan.
-
- Parameters
- ----------
- ny, nx : int
- Number of steps in *y* (vertical) and *x* (horizontal) direction
- *x* is the fast axis
-
- dy, dx : float
- Step size (grid spacinf) in *y* and *x*
-
- Returns
- -------
- pos : ndarray
- A (N,2)-array of positions.
-
- Examples
- --------
- >>> from ptypy.core import import xy
- >>> from matplotlib import pyplot as plt
- >>> plt.plot(xy.raster_scan());plt.show()
- """
- iix, iiy = np.indices((nx+1,ny+1))
- positions = [(sx*i, sy*j) for i,j in zip(iix.ravel(), iiy.ravel())]
- return positions
-
-def round_scan(r_in, r_out, nr, nth):
- """\
- Round scan positions, defined as in spec and matlab.
- """
- dr = (r_out - r_in)/ nr
- positions = []
- for ir in range(1,nr+2):
- rr = r_in + ir*dr
- dth = 2*np.pi / (nth*ir)
- positions.extend([(rr*np.sin(ith*dth), rr*np.cos(ith*dth)) for ith in range(nth*ir)])
- return positions
-
-def round_scan_roi(dr, lx, ly, nth):
- """\
- Round scan positions with ROI, defined as in spec and matlab.
- """
- rmax = np.sqrt( (lx/2)**2 + (ly/2)**2 )
- nr = np.floor(rmax/dr) + 1
- positions = []
- for ir in range(1,int(nr+2)):
- rr = ir*dr
- dth = 2*np.pi / (nth*ir)
- th = 2*np.pi*np.arange(nth*ir)/(nth*ir)
- x1 = rr*np.sin(th)
- x2 = rr*np.cos(th)
- positions.extend([(xx1,xx2) for xx1,xx2 in zip(x1,x2) if (np.abs(xx1) <= ly/2) and (np.abs(xx2) <= lx/2)])
- return positions
-
-
-def spiral_scan(dr,r_out=None,maxpts=None):
- """\
- Spiral scan positions.
- """
- alpha = np.sqrt(4*np.pi)
- beta = dr/(2*np.pi)
-
- if maxpts is None:
- assert r_out is not None
- maxpts = 100000000
-
- if r_out is None:
- r_out = np.inf
-
- positions = []
- for k in xrange(maxpts):
- theta = alpha*np.sqrt(k)
- r = beta * theta
- if r > r_out: break
- positions.append( (r*np.sin(theta), r*np.cos(theta)) )
- return positions
-
-def spiral_scan_roi(dr,lx,ly):
- """\
- Spiral scan positions. ROI
- """
- alpha = np.sqrt(4*np.pi)
- beta = dr/(2*np.pi)
-
- rmax = .5*np.sqrt(lx**2 + ly**2)
- positions = []
- for k in xrange(1000000000):
- theta = alpha*np.sqrt(k)
- r = beta * theta
- if r > rmax: break
- x,y = r*np.sin(theta), r*np.cos(theta)
- if abs(x) > lx/2: continue
- if abs(y) > ly/2: continue
- positions.append( (x,y) )
- return positions
-
diff --git a/ptypy/core/ptycho.py b/ptypy/core/ptycho.py
index 6ca6c49e9..8f02ba1e5 100644
--- a/ptypy/core/ptycho.py
+++ b/ptypy/core/ptycho.py
@@ -10,79 +10,26 @@
import numpy as np
import time
import paths
-import os
-import sys
+from collections import OrderedDict
from .. import utils as u
from ..utils.verbose import logger, _, report, headerline, log
from ..utils import parallel
from .. import engines
-from ..io import interaction
-from classes import Base, Container, Storage, PTYCHO_PREFIX
-from manager import ModelManager
-from . import model
-
-__all__ = ['Ptycho', 'DEFAULT', 'DEFAULT_io']
-
-DEFAULT_runtime = u.Param(
- run=os.path.split(sys.argv[0])[1].split('.')[0],
- engine="None",
- iteration=0,
- iterations=0,
-)
-
-DEFAULT_autoplot = u.Param(
- imfile="plots/%(run)s/%(run)s_%(engine)s_%(iteration)04d.png",
- threaded=True,
- interval=1,
- layout='default',
- dump=True,
- make_movie=False,
-)
-
-DEFAULT_autosave = u.Param(
- # If None or False : no saving else save with this given interval
- interval=10,
- # List of probe IDs for autosaving, if None save all [Not implemented]
- probes=None,
- # List of object IDs for autosaving, if None, save all [Not implemented]
- objects=None,
- rfile="dumps/%(run)s/%(run)s_%(engine)s_%(iteration)04d.ptyr",
-)
-
-DEFAULT_io = u.Param(
- # Auto save options: (None or False, int). If an integer,
- # specifies autosave interval, if None or False: no autosave
- autosave=DEFAULT_autosave,
- # Plotting parameters for a client
- autoplot=DEFAULT_autoplot,
- # Client-server communication
- interaction=interaction.Server_DEFAULT.copy(),
- home='./',
- rfile="recons/%(run)s/%(run)s_%(engine)s.ptyr"
-)
-"""Default io parameters. See :py:data:`.io` and a short listing below"""
-
-DEFAULT = u.Param(
- # Verbosity level
- verbose_level=3,
- # Start an ipython kernel for debugging
- ipython_kernel=False,
- # Precision for reconstruction: 'single' or 'double'.
- data_type='single',
- # Do actually nothing if True [not implemented]
- dry_run=False,
- run=None,
- # POD creation rules.
- scan=u.Param(),
- scans=u.Param(),
- # Reconstruction algorithms
- engines={},
- engine=engines.DEFAULTS.copy(),
- io=DEFAULT_io.copy(depth=2)
-)
+from .classes import Base, Container, Storage, PTYCHO_PREFIX
+from .manager import ModelManager
+from .. import defaults_tree
+# This needs to be done here as it populates the defaults tree
+from .. import __has_zmq__
+if __has_zmq__:
+ from ..io import interaction
+__all__ = ['Ptycho']
+
+
+
+@defaults_tree.parse_doc('ptycho')
class Ptycho(Base):
"""
Ptycho : A ptychographic data holder and reconstruction manager.
@@ -92,7 +39,7 @@ class Ptycho(Base):
If MPI is enabled, this class acts both as a manager (rank = 0) and
a worker (any rank), and most information exists on all processes.
- In principle the only part that is divided between processes is the
+ In its original design, the only part that is divided between processes is the
diffraction data.
By default Ptycho is instantiated once per process, but it can also
@@ -111,17 +58,211 @@ class Ptycho(Base):
runtime : Param
Runtime information, e.g. errors, iteration etc.
- modelm : ModelManager
+ ~Ptycho.model : ModelManager
THE managing instance for :any:`POD`, :any:`View` and
:any:`Geo` instances
- probe,obj,exit,diff,mask : Container
+    ~Ptycho.probe, ~Ptycho.obj, ~Ptycho.exit, ~Ptycho.diff, ~Ptycho.mask : Container
Container instances for illuminations, samples, exit waves,
diffraction data and detector masks / weights
+
+
+ Defaults:
+
+ [verbose_level]
+ default = 1
+ help = Verbosity level
+ doc = Verbosity level for information logging.
+ - ``0``: Only critical errors
+ - ``1``: All errors
+ - ``2``: Warning
+ - ``3``: Process Information
+ - ``4``: Object Information
+ - ``5``: Debug
+ type = int
+ userlevel = 0
+ lowlim = 0
+ uplim = 5
+
+ [data_type]
+ default = 'single'
+    help = Reconstruction floating-point precision
+    doc = Reconstruction floating-point precision (``'single'`` or
+      ``'double'``)
+ type = str
+ userlevel = 1
+
+ [run]
+ default = None
+ help = Reconstruction identifier
+ doc = Reconstruction run identifier. If ``None``, the run name will
+ be constructed at run time from other information.
+ type = str
+ userlevel = 0
+
+ [dry_run]
+ default = False
+ help = Dry run switch
+    doc = Run everything, skipping all memory- and CPU-heavy steps (and
+      file saving). **NOT IMPLEMENTED**
+ type = bool
+ userlevel = 2
+
+ [ipython_kernel]
+ default = False
+ type = bool
+ help = Start an ipython kernel for debugging
+ doc = Start an ipython kernel for debugging.
+
+ [io]
+ default = None
+ type = Param
+ help = Global parameters for I/O
+ doc = Global parameter container for I/O settings.
+
+ [io.home]
+ default = "./"
+ type = str
+ help = Base directory for all I/O
+    doc = home is the root directory for all input/output operations. All other path parameters
+      given as relative paths are interpreted relative to this directory.
+
+ [io.rfile]
+ default = "recons/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr"
+ type = str
+ help = Reconstruction file name (or format string)
+ doc = Reconstruction file name or format string (constructed against runtime dictionary)
+
+ [io.interaction]
+ default = None
+ type = Param
+ help = ZeroMQ interactor options
+ doc = Options for the communications server
+
+ [io.interaction.active]
+ default = True
+ type = bool
+    help = Turns on the interaction
+    doc = If ``True`` the interaction server starts; if ``False`` all interaction is turned off.
+
+ [io.interaction.server]
+ default =
+ type = @io.interaction.server
+ help = Link to server parameter tree
+
+ [io.interaction.client]
+ default =
+ type = @io.interaction.client
+ help = Link to client parameter tree
+
+ [io.autosave]
+ default = Param
+ type = Param
+ help = Auto-save options
+ doc = Options for automatic saving during reconstruction.
+
+ [io.autosave.active]
+ default = True
+ type = bool
+ help = Activation switch
+ doc = If ``True`` the current reconstruction will be saved at regular intervals.
+
+ [io.autosave.interval]
+ default = 10
+ type = int
+ help = Auto-save interval
+    doc = If ``>0`` the current reconstruction will be saved at regular intervals according to the
+      pattern in :py:data:`paths.autosave`. If ``<=0``, no automatic saving is performed.
+ lowlim = -1
+
+ [io.autosave.rfile]
+ default = "dumps/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr"
+ type = str
+ help = Auto-save file name (or format string)
+ doc = Auto-save file name or format string (constructed against runtime dictionary)
+
+ [io.autoplot]
+ default = Param
+ type = Param
+ help = Plotting client parameters
+    doc = Container for the plotting client parameters.
+
+ [io.autoplot.active]
+ default = True
+ type = bool
+ help = Activation switch
+ doc = If ``True`` the current reconstruction will be plotted at regular intervals.
+
+ [io.autoplot.imfile]
+ default = "plots/%(run)s/%(run)s_%(engine)s_%(iterations)04d.png"
+ type = str
+ help = Plot images file name (or format string)
+ doc = Plot images file name (or format string).
+ userlevel = 1
+
+ [io.autoplot.interval]
+ default = 1
+ type = int
+ help = Number of iterations between plot updates
+    doc = Requests to the server will happen at this iteration interval. Note that this will work
+      only if interaction.polling_interval is smaller than or equal to this number. If ``interval
+      = 0``, plotting is disabled; this should be used when ptypy is run on a cluster.
+ lowlim = -1
+
+ [io.autoplot.threaded]
+ default = True
+ type = bool
+ help = Live plotting switch
+ doc = If ``True``, a plotting client will be spawned in a new thread and connected at
+ initialization. If ``False``, the master node will carry out the plotting, pausing the
+ reconstruction. This option should be set to ``True`` when ptypy is run on an isolated
+ workstation.
+
+ [io.autoplot.layout]
+ default = "default"
+ type = str
+ help = Options for default plotter or template name
+    doc = A flexible layout for the default plotter is not implemented yet. Please choose one of the
+      templates ``'default'``, ``'black_and_white'``, ``'nearfield'``, ``'minimal'`` or ``'weak'``
+ userlevel = 2
+
+ [io.autoplot.dump]
+ default = False
+ type = bool
+ help = Switch to dump plots as image files
+ doc = Switch to dump plots as image files during reconstruction.
+
+ [io.autoplot.make_movie]
+ default = False
+ type = bool
+    help = Produce a reconstruction movie after the reconstruction.
+ doc = Switch to request the production of a movie from the dumped plots at the end of the
+ reconstruction.
+
+ [scans]
+ default = None
+ type = Param
+ help = Container for instances of scan parameters
+ doc =
+
+ [scans.*]
+ default =
+ type = @scan.*
+ help = Wildcard entry for list of scans to load. See :py:data:`scan`
+
+ [engines]
+ default = None
+ type = Param
+ help = Container for instances of engine parameters
+ doc =
+
+ [engines.*]
+ default =
+ type = @engine.*
+ help = Wildcard entry for list of engines to run. See :py:data:`engine`
+ doc = The value of engines.*.name is used to choose among the available engines.
+
"""
- DEFAULT = DEFAULT
- """ Default ptycho parameters which is the trunk of the
- default :ref:`ptypy parameter tree `"""
_PREFIX = PTYCHO_PREFIX
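
The ``Defaults:`` block in the class docstring above is consumed by the ``@defaults_tree.parse_doc('ptycho')`` decorator, which parses it into the global defaults tree and attaches a ``DEFAULT`` Param to the class. A hedged sketch of that pattern; ``engine.MyEngine`` and ``example_flag`` are invented names for illustration only.

```python
# Hypothetical sketch of the parse_doc pattern; 'engine.MyEngine' and
# 'example_flag' are invented names, used only in this sketch.
from ptypy import defaults_tree

@defaults_tree.parse_doc('engine.MyEngine')
class MyEngine(object):
    """
    Defaults:

    [example_flag]
    default = False
    type = bool
    help = A made-up switch, used only in this sketch
    """

p = MyEngine.DEFAULT.copy(99)                  # deep copy of the parsed defaults
defaults_tree['engine.MyEngine'].validate(p)   # same check Ptycho performs on self.p
```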
@@ -142,7 +283,7 @@ def __init__(self, pars=None, level=2, **kwargs):
- 2 : also configures Containers, initializes ModelManager
see :py:meth:`init_data`
- 3 : also initializes ZeroMQ-communication
- :py:meth:`init_communication`
+ see :py:meth:`init_communication`
- 4 : also initializes reconstruction engines,
see :py:meth:`init_engine`
- >= 4 : also and starts reconstruction
@@ -150,21 +291,24 @@ def __init__(self, pars=None, level=2, **kwargs):
"""
super(Ptycho, self).__init__(None, 'Ptycho')
+        # Create a parameter structure from the class-level defaults
+ self.p = self.DEFAULT.copy(99)
+
# Abort if we load complete structure
if level <= 0:
return
- self.p = self.DEFAULT.copy(depth=99)
- """ Reference for parameter tree, with which
- this instance was constructed. """
-
# Continue with initialization from parameters
if pars is not None:
- self.p.update(pars, in_place_depth=3)
+ self.p.update(pars, in_place_depth=99)
# That may be a little dangerous
self.p.update(kwargs)
+ # Validate the incoming parameters
+ # FIXME : Validation should maybe happen for each class that uses the
+        # parameters, i.e. like a depth=1 validation
+ defaults_tree['ptycho'].validate(self.p)
# Instance attributes
# Structures
@@ -173,11 +317,8 @@ def __init__(self, pars=None, level=2, **kwargs):
self.exit = None
self.diff = None
self.mask = None
- self.modelm = None
-
- # Data
- self.datasource = None
-
+ self.model = None
+
# Communication
self.interactor = None
self.plotter = None
@@ -185,6 +326,19 @@ def __init__(self, pars=None, level=2, **kwargs):
# Early boot strapping
self._configure()
+ # Keep a bibliography
+ self.citations = u.Bibliography()
+ self.citations.add_article(
+ title='A computational framework for ptychographic reconstructions',
+ author='Enders B. and Thibault P.',
+ journal='Proc. Royal Soc. A',
+ volume=472,
+ year=2016,
+ page=20160640,
+ doi='10.1098/rspa.2016.0640',
+ comment='The Ptypy framework',
+ )
+
if level >= 1:
logger.info('\n' + headerline('Ptycho init level 1', 'l'))
self.init_structures()
@@ -234,9 +388,12 @@ def _configure(self):
if not hasattr(self, 'runtime'):
self.runtime = u.Param() # DEFAULT_runtime.copy()
+ if not hasattr(self, 'scans'):
+ # Create a scans entry if it does not already exist
+ self.scans = OrderedDict()
+
if not hasattr(self, 'engines'):
# Create an engines entry if it does not already exist
- from collections import OrderedDict
self.engines = OrderedDict()
# Generate all the paths
@@ -255,9 +412,9 @@ def init_communication(self):
iaction = self.p.io.interaction
autoplot = self.p.io.autoplot
- if parallel.master and iaction:
+ if __has_zmq__ and parallel.master and iaction.active:
# Create the interaction server
- self.interactor = interaction.Server(iaction)
+ self.interactor = interaction.Server(iaction.server)
# Register self as an accessible object for the client
self.interactor.objects['Ptycho'] = self
@@ -274,7 +431,7 @@ def init_communication(self):
self.plotter = None
else:
# Modify port
- iaction.port = port
+ iaction.server.port = port
# Inform the audience
log(4, 'Started interaction got the following parameters:'
@@ -282,12 +439,12 @@ def init_communication(self):
# Start automated plot client
self.plotter = None
- if (parallel.master and autoplot and autoplot.threaded and
+ if (parallel.master and autoplot.active and autoplot.threaded and
autoplot.interval > 0):
from multiprocessing import Process
logger.info('Spawning plot client in new Process.')
self.plotter = Process(target=u.spawn_MPLClient,
- args=(iaction, autoplot,))
+ args=(iaction.client, autoplot,))
self.plotter.start()
else:
# No interaction wanted
@@ -301,81 +458,36 @@ def init_structures(self):
Called on __init__ if ``level >= 1``.
Prepare everything for reconstruction. Creates attributes
- :py:attr:`modelm` and the containers :py:attr:`probe` for
+ :py:attr:`model` and the containers :py:attr:`probe` for
illumination, :py:attr:`obj` for the samples, :py:attr:`exit` for
the exit waves, :py:attr:`diff` for diffraction data and
- :py:attr:`mask` for detectors masks
+ :py:attr:`Ptycho.mask` for detectors masks
"""
- p = self.p
-
- # Initialize the reconstruction containers
- self.probe = Container(ptycho=self, ID='Cprobe', data_type='complex')
- self.obj = Container(ptycho=self, ID='Cobj', data_type='complex')
- self.exit = Container(ptycho=self, ID='Cexit', data_type='complex')
- self.diff = Container(ptycho=self, ID='Cdiff', data_type='real')
- self.mask = Container(ptycho=self, ID='Cmask', data_type='bool')
-
- ###################################
- # Initialize data sources load data
- ###################################
-
- # Initialize the model manager
- self.modelm = ModelManager(self, p.scan)
-
+ self.probe = Container(self, ID='Cprobe', data_type='complex')
+ self.obj = Container(self, ID='Cobj', data_type='complex')
+ self.exit = Container(self, ID='Cexit', data_type='complex')
+ self.diff = Container(self, ID='Cdiff', data_type='real')
+ self.mask = Container(self, ID='Cmask', data_type='bool')
+ # Initialize the model manager. This also initializes the
+ # containers.
+ self.model = ModelManager(self, self.p.scans)
+
def init_data(self, print_stats=True):
"""
Called on __init__ if ``level >= 2``.
- Creates a datasource and calls for :py:meth:`ModelManager.new_data()`
+ Call :py:meth:`ModelManager.new_data()`
Prints statistics on the ptypy structure if ``print_stats=True``
"""
- # Create the data source object, which give diffraction frames one
- # at a time, supporting MPI sharing.
- self.datasource = self.modelm.make_datasource()
-
# Load the data. This call creates automatically the scan managers,
# which create the views and the PODs.
- self.modelm.new_data()
+ self.model.new_data()
# Print stats
parallel.barrier()
if print_stats:
self.print_stats()
- # Create plotting instance (maybe)
-
- def _init_engines(self):
- """
- * deprecated*
- Initialize engines from parameters. Sets :py:attr:`engines`
- """
- # Store the engines in a dict
- self.engines = {}
-
- # Store the run labels in a list to ensure precedence is preserved.
- self.run_labels = []
-
- # Loop through p.engines sub-dictionaries
- for run_label, pars in self.p.engines.iteritems():
- # Copy common parameters
- engine_pars = self.p.engine.common.copy()
-
- # Identify engine by name
- engine_class = engines.by_name(pars.name)
-
- # Update engine type specific parameters
- engine_pars.update(self.p.engine[pars.name])
-
- # Update engine instance specific parameters
- engine_pars.update(pars)
-
- # Create instance
- engine = engine_class(self, engine_pars)
-
- # Store info
- self.engines[run_label] = engine
- self.run_labels.append(run_label)
-
def init_engine(self, label=None, epars=None):
"""
Called on __init__ if ``level >= 4``.
@@ -414,20 +526,12 @@ def init_engine(self, label=None, epars=None):
except KeyError('No parameter set available for engine label %s\n'
'Skipping..' % label):
pass
- # Copy common parameters
- engine_pars = self.p.engine.common.copy()
# Identify engine by name
engine_class = engines.by_name(pars.name)
- # Update engine type specific parameters
- engine_pars.update(self.p.engine[pars.name])
-
- # Update engine instance specific parameters
- engine_pars.update(pars)
-
# Create instance
- engine = engine_class(self, engine_pars)
+ engine = engine_class(self, pars)
# Attach label
engine.label = label
@@ -437,6 +541,11 @@ def init_engine(self, label=None, epars=None):
else:
# No label = prepare all engines
for label in sorted(self.p.engines.keys()):
+ # FIXME workaround to avoid parameter trees that are just meant as templates.
+ try:
+ self.p.engines[label].name
+ except:
+ continue
self.init_engine(label)
@property
@@ -469,7 +578,7 @@ def run(self, label=None, epars=None, engine=None):
using :py:meth:`init_engine` and run immediately afterwards.
For parameters see :py:data:`.engine`
- engine : BaseEngine, optional
+ engine : ~ptypy.engines.base.BaseEngine, optional
An engine instance that should be a subclass of
:py:class:`BaseEngine` or have the same methods.
"""
@@ -502,7 +611,7 @@ def run(self, label=None, epars=None, engine=None):
parallel.barrier()
# Check for new data
- self.modelm.new_data()
+ self.model.new_data()
# Last minute preparation before a contiguous block of
# iterations
@@ -587,85 +696,11 @@ def finalize(self):
except BaseException:
pass
- def _run(self, run_label=None):
- """
- *deprecated*
- Start the reconstruction. Former method
- """
- # Time the initialization
- if self.runtime.get('start') is None:
- self.runtime.start = time.asctime()
-
- # Check if there is already a runtime info collector
- if self.runtime.get('iter_info') is None:
- self.runtime.iter_info = []
-
- # Note when the last autosave was carried out
- if self.runtime.get('last_save') is None:
- self.runtime.last_save = 0
-
- # Maybe not needed
- if self.runtime.get('last_plot') is None:
- self.runtime.last_plot = 0
-
- # Run all engines sequentially
- for run_label in self.run_labels:
-
- # Set a new engine
- engine = self.engines[run_label]
- # self.current_engine = engine
-
- # Prepare the engine
- engine.initialize()
-
- # Start the iteration loop
- while not engine.finished:
- # Check for client requests
- if parallel.master and self.interactor is not None:
- self.interactor.process_requests()
-
- parallel.barrier()
-
- # Check for new data
- self.modelm.new_data()
-
- # Last minute preparation before a contiguous block of
- # iterations
- engine.prepare()
-
- if self.p.autosave is not None and self.p.autosave.interval > 1:
- if engine.curiter % self.p.autosave.interval == 0:
- auto = self.paths.auto_file(self.runtime)
- logger.info(headerline('Autosaving'), 'l')
- self.save_run(auto, 'dump')
- self.runtime.last_save = engine.curiter
- logger.info(headerline())
-
- # One iteration
- engine.iterate()
-
- # Display runtime information and do saving
- if parallel.master:
- info = self.runtime.iter_info[-1]
- # Calculate error:
- err = np.array(info['error'].values()).mean(0)
- logger.info('Iteration #%(iteration)d of %(engine)s :: '
- 'Time %(duration).2f' % info)
- logger.info('Errors :: Fourier %.2e, Photons %.2e, '
- 'Exit %.2e' % tuple(err))
-
- parallel.barrier()
- # Done. Let the engine finish up
- engine.finalize()
-
- # Save
- # Deactivated for now as something fishy happens through MPI
- self.save_run()
-
- # Clean up - if needed.
-
- # Time the initialization
- self.runtime.stop = time.asctime()
+ # Hint at citations (for all log levels)
+ citation_info = '\n'.join([headerline('This reconstruction relied on the following work', 'l', '='),
+ str(self.citations),
+ headerline('', 'l', '=')])
+ logger.warn(citation_info)
@classmethod
def _from_dict(cls, dct):
@@ -707,7 +742,7 @@ def load_run(cls, runfile, load_data=True):
header = u.Param(io.h5read(runfile, 'header')['header'])
if header['kind'] == 'minimal':
logger.info('Found minimal ptypy dump')
- content = u.Param(io.h5read(runfile, 'content')['content'])
+ content = io.h5read(runfile, 'content')['content']
logger.info('Creating new Ptycho instance')
P = Ptycho(content.pars, level=1)
@@ -735,25 +770,10 @@ def load_run(cls, runfile, load_data=True):
P._configure()
- logger.info('Reconfiguring sharing rules') # and loading data')
- print u.verbose.report(P.p)
- P.modelm.sharing_rules = model.parse_model(P.modelm.p['sharing'],
- P.modelm.sharing)
-
logger.info('Regenerating exit waves')
P.exit.reformat()
- P.modelm._initialize_exit(P.pods.values())
- """
- logger.info('Attaching datasource')
- P.datasource = P.modelm.make_datasource(P.p.data)
+ P.model._initialize_exit(P.pods.values())
- logger.info('Reconfiguring sharing rules and loading data')
- P.modelm.sharing_rules = model.parse_model(P.p.model['sharing'],
- P.modelm.sharing)
- P.modelm.new_data()
-
-
- """
if load_data:
logger.info('Loading data')
P.init_data()
@@ -781,6 +801,9 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True):
import save_load
from .. import io
+
+
+
dest_file = None
if parallel.master:
@@ -809,10 +832,9 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True):
if kind == 'fullflat':
self.interactor.stop()
- logger.info('Deleting references for interactor, '
- 'datasource and engines.')
+ logger.info('Deleting references for interactor '
+                        'and engines.')
del self.interactor
- del self.datasource
del self.paths
try:
del self.engines
@@ -847,6 +869,10 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True):
for ID, S in self.probe.storages.items()}
dump.obj = {ID: S._to_dict()
for ID, S in self.obj.storages.items()}
+ try:
+ defaults_tree.validate(self.p) # check the parameters are actually able to be read back in
+ except RuntimeError:
+ logger.warn("The parameters we are saving won't pass a validator check!")
dump.pars = self.p.copy() # _to_dict(Recursive=True)
dump.runtime = self.runtime.copy()
# Discard some bits of runtime to save space
@@ -865,8 +891,13 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True):
for ID, S in self.probe.storages.items()}
minimal.obj = {ID: S._to_dict()
for ID, S in self.obj.storages.items()}
+ try:
+ defaults_tree.validate(self.p) # check the parameters are actually able to be read back in
+ except RuntimeError:
+ logger.warn("The parameters we are saving won't pass a validator check!")
minimal.pars = self.p.copy() # _to_dict(Recursive=True)
minimal.runtime = self.runtime.copy()
+
content = minimal
h5opt = io.h5options['UNSUPPORTED']
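Taken together, the changes to `save_run` and `load_run` keep reconstruction files round-trippable between runs. A hedged usage sketch follows; the `.ptyr` paths are placeholders, not files from this repository.

```python
# Hedged round-trip sketch; the .ptyr paths are placeholders.
from ptypy.core import Ptycho

# Rebuild a Ptycho instance from a previous dump without reloading data.
P = Ptycho.load_run('recons/myrun/myrun_DM.ptyr', load_data=False)

# Write a fresh minimal dump of the restored state.
P.save_run(alt_file='recons/myrun/myrun_copy.ptyr', kind='minimal')
```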
diff --git a/ptypy/core/sample.py b/ptypy/core/sample.py
index 1202b9449..197b192b4 100644
--- a/ptypy/core/sample.py
+++ b/ptypy/core/sample.py
@@ -10,69 +10,166 @@
:license: GPLv2, see LICENSE for details.
"""
import numpy as np
-# import os
-# from matplotlib import pyplot as plt
-if __name__ == '__main__':
- from ptypy import utils as u
- from ptypy import resources
-else:
- from .. import utils as u
- from .. import resources
+from .. import utils as u
+from .. import resources
+from ..utils.descriptor import EvalDescriptor
logger = u.verbose.logger
TEMPLATES = dict()
-DEFAULT_process = u.Param(
- # Offset between center of object array and scan pattern (in pixel);
- # (float, tuple).
- offset=0,
- # Zoom value for object simulation; (float, tuple).
- zoom=None,
- # Chemical formula; (str).
- formula=None,
- # Density in [g/ccm]; (None, float).
- density=None,
- # Maximum thickness of sample in meter; (float).
- thickness=None,
- # Assigned refractive index (maximum) relative to air; (complex float)
- ref_index=0.5 + 0.j,
- # Gaussian filter smoothing with this FWHM (pixel); (float, tuple)
- smoothing=2,
-)
-
-DEFAULT_diversity = u.Param(
- noise=None,
- shift=None,
- power=1.0,
-)
-
-DEFAULT = u.Param(
- # 'scripting option to override'
- override=None,
- # Type of object model; (None, 'sim', 'stxm', 'recon')
- model=None,
- fill=1.0,
- recon=u.Param(
- ID=None,
- layer=None,
- rfile='*.ptyr',
- ),
- # STXM analysis parameters
- stxm=u.Param(
- # Label of the scan of whose diffraction data to initialize stxm.
- # If None, use own scan_label
- label=None,
- ),
- process=DEFAULT_process,
- # See other for noise
- diversity=DEFAULT_diversity,
-)
-""" Default sample parameters. See :py:data:`.scan.sample`
- and a short listing below """
-
-__all__ = ['DEFAULT', 'init_storage', 'simulate']
+# Local, module-level defaults. These can be appended to the defaults of
+# other classes.
+sample_desc = EvalDescriptor('sample')
+sample_desc.from_string(r"""
+ [model]
+ default = None
+ help = Type of initial object model
+ doc = One of:
+      - ``None`` : model initialization defaults to a flat array filled with `fill`
+      - ``'recon'`` : load model from a previous reconstruction
+      - ``'stxm'`` : estimate model from the autocorrelation of the mean diffraction data
+      - ** : one of ptypy's internal model resource strings
+      - ** : one of the templates in the sample module
+      In a script, you may pass a numpy array here directly as the model. This array will be
+      processed according to `process` in order to *simulate* a sample from e.g. a thickness
+      profile.
+ type = str, array
+ userlevel = 0
+
+ [fill]
+ default = 1
+ help = Default fill value
+ doc =
+ type = float, complex
+ userlevel =
+
+ [recon]
+ default =
+ help = Parameters to load from previous reconstruction
+ doc =
+ type = Param
+ userlevel =
+
+ [recon.rfile]
+ default = \*.ptyr
+ help = Path to a ``.ptyr`` compatible file
+ doc =
+ type = file
+ userlevel = 0
+
+ [stxm]
+ default =
+ help = STXM analysis parameters
+ doc =
+ type = Param
+ userlevel = 1
+
+ [stxm.label]
+ default = None
+    help = Scan label of the diffraction data to be used for the object estimate
+    doc = If ``None``, the scan's own label is used
+ type = str
+ userlevel = 1
+
+ [process]
+ default = None
+ help = Model processing parameters
+ doc = Can be ``None``, i.e. no processing
+ type = Param
+ userlevel =
+
+ [process.offset]
+ default = (0, 0)
+ help = Offset between center of object array and scan pattern
+ doc =
+ type = tuple, list
+ userlevel = 2
+ lowlim = 0
+
+ [process.zoom]
+ default = None
+ help = Zoom value for object simulation.
+ doc = If ``None``, leave the array untouched. Otherwise the modeled or loaded image will be
+ resized using :py:func:`zoom`.
+ type = list, tuple, float
+ userlevel = 2
+ lowlim = 0
+
+ [process.formula]
+ default = None
+ help = Chemical formula
+    doc = A formula compatible with a CXRO database query, e.g. ``'Au'``, ``'NaCl'`` or ``'H2O'``
+ type = str
+ userlevel = 2
+
+ [process.density]
+ default = 1
+ help = Density in [g/ccm]
+ doc = Only used if `formula` is not None
+ type = float
+ userlevel = 2
+
+ [process.thickness]
+ default = 1.00E-06
+ help = Maximum thickness of sample
+    doc = If ``None``, the absolute values of the loaded source array will be used
+ type = float
+ userlevel = 2
+
+ [process.ref_index]
+ default = (0.5, 0.0)
+ help = Assigned refractive index, tuple of format (real, complex)
+    doc = If ``None``, treat the source array as a projection of the refractive index a+bj for (a, b). If a
+      refractive index is provided, the array's absolute value will be used to scale the refractive index.
+ type = list, tuple
+ userlevel = 2
+ lowlim = 0
+
+ [process.smoothing]
+ default = 2
+ help = Smoothing scale
+    doc = Smooth the projection with a gaussian kernel of this width
+ type = int
+ userlevel = 2
+ lowlim = 0
+
+ [diversity]
+ default =
+    help = Object mode(s) diversity parameters
+ doc = Can be ``None`` i.e. no diversity
+ type = Param
+ userlevel =
+
+ [diversity.noise]
+ default = None
+    help = Noise in the generated modes of the object
+ doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+ type = tuple
+ userlevel = 1
+
+ [diversity.power]
+ default = 0.1
+ help = Power of modes relative to main mode (zero-layer)
+ doc =
+ type = tuple, float
+ userlevel = 1
+
+ [diversity.shift]
+ default = None
+ help = Lateral shift of modes relative to main mode
+ doc = **[not implemented]**
+ type = float
+ userlevel = 2
+ """)
+
+DEFAULT = sample_desc.make_default(99)
+
+DEFAULT_process = DEFAULT.process
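This module-level pattern recurs throughout the refactor: an `EvalDescriptor` is filled from a parameter string, and `make_default(depth)` materializes the corresponding `Param` tree. A minimal sketch follows; the `toy` root and `demo` entry are invented for illustration.

```python
# Minimal sketch of the EvalDescriptor pattern; the 'toy' root and
# 'demo' entry are invented for illustration.
from ptypy.utils.descriptor import EvalDescriptor

toy_desc = EvalDescriptor('toy')
toy_desc.from_string(r"""
    [demo]
    default = 1.0
    type = float
    help = An invented entry for this sketch
    """)

DEFAULT = toy_desc.make_default(99)
print DEFAULT.demo  # -> 1.0
```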
def init_storage(storage, sample_pars=None, energy=None):
@@ -86,12 +183,12 @@ def init_storage(storage, sample_pars=None, energy=None):
sample_pars : Param
Parameter structure that defines how the sample is created.
- See :any:`DEFAULT` for the parameters.
+ *FIXME* Link to parameters
energy : float, optional
Energy associated in the experiment for this sample object.
If None, the ptypy structure is searched for the appropriate
- :any:`Geo` instance: ``storage.views[0].pod.geometry.energy``
+ :py:class:`Geo` instance: ``storage.views[0].pod.geometry.energy``
"""
s = storage
prefix = "[Object %s] " % s.ID
@@ -202,7 +299,7 @@ def simulate(A, pars, energy, fill=1.0, prefix="", **kwargs):
Numpy array as buffer. Must be at least two-dimensional
pars : Param
- Simulation parameters. See :any:`DEFAULT` .simulate
+        Simulation parameters. *FIXME* link to parameters.
"""
lam = u.keV2m(energy)
p = DEFAULT_process.copy()
diff --git a/ptypy/core/save_load.py b/ptypy/core/save_load.py
index ff1e7088a..f4a93e834 100644
--- a/ptypy/core/save_load.py
+++ b/ptypy/core/save_load.py
@@ -1,15 +1,16 @@
-from ptypy import utils as u
-from ..io import h5write, h5read
-from classes import *
-from geometry import Geo
-from manager import ModelManager
-from classes import get_class
-from ..utils.verbose import logger
-from ptycho import Ptycho
-from weakref import WeakValueDictionary as WVD
import numpy as np
import time
+from weakref import WeakValueDictionary as WVD
+
+from .. import utils as u
+from ..io import h5write, h5read
+from .classes import *
+from .geometry import Geo
+from .manager import ModelManager
+from .classes import get_class
+from ..utils.verbose import logger
+from .ptycho import Ptycho
__all__ = ['link', 'unlink', 'to_h5', 'from_h5']
diff --git a/ptypy/core/xy.py b/ptypy/core/xy.py
index da83e5393..3aeddebcf 100644
--- a/ptypy/core/xy.py
+++ b/ptypy/core/xy.py
@@ -7,33 +7,66 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-from .. import utils as u
-from ..utils.verbose import logger
import numpy as np
import warnings
+
+from .. import utils as u
+from ..utils.verbose import logger
+from ..utils.descriptor import EvalDescriptor
+
warnings.simplefilter('always', DeprecationWarning)
-__all__ = ['DEFAULT', 'from_pars', 'round_scan', 'raster_scan', 'spiral_scan']
+
+__all__ = ['xy_desc', 'from_pars', 'round_scan', 'raster_scan', 'spiral_scan']
TEMPLATES = u.Param()
-DEFAULT = u.Param(
- # Parameters for popular scan methods
- override=None,
- # Model: [None, 'round', 'raster', 'spiral' or array-like]
- model=None,
- # round_roi: Width of ROI
- extent=15e-6,
- # raster scan: step size (grid spacing)
- spacing=1.5e-6,
- steps=10,
- # raster scan: step size (grid spacing)
- offset=0,
- jitter=None,
- count=None,
-)
-""" Default pattern parameters. See :py:data:`.scan.xy`
- and a short listing below """
+# Local, module-level defaults. These can be appended to the defaults of
+# other classes.
+xy_desc = EvalDescriptor('xy')
+xy_desc.from_string(r"""
+ [override]
+ default =
+ type = array
+ help =
+
+ [model]
+ default =
+ type = str
+ help = None, 'round', 'raster', 'spiral' or array-like
+
+ [extent]
+ default = 15e-6
+ type = float, tuple
+ help =
+
+ [spacing]
+ default = 1.5e-6
+ type = float
+ help = Step size (grid spacing)
+
+ [steps]
+ default = 10
+ type = int
+ help =
+
+ [offset]
+ default = 0.
+ type = float
+ help =
+
+ [jitter]
+ default =
+ type = float
+ help =
+
+ [count]
+ default =
+ type = int
+ help =
+ """)
+
+DEFAULT = xy_desc.make_default(99)
def from_pars(xypars=None):
"""
@@ -152,7 +185,7 @@ def augment_to_coordlist(a, Npos):
return b[:Npos, :2]
-def raster_scan(ny=10, nx=10, dy=1.5e-6, dx=1.5e-6):
+def raster_scan(dy=1.5e-6, dx=1.5e-6, ny=10, nx=10):
"""
Generates a raster scan.
@@ -177,7 +210,7 @@ def raster_scan(ny=10, nx=10, dy=1.5e-6, dx=1.5e-6):
>>> pos = xy.raster_scan()
>>> plt.plot(pos[:, 1], pos[:, 0], 'o-'); plt.show()
"""
- iix, iiy = np.indices((nx+1, ny+1))
+ iix, iiy = np.indices((nx, ny))
positions = [(dx*i, dy*j) for i, j in zip(iix.ravel(), iiy.ravel())]
return np.asarray(positions)
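Note that this hunk shrinks the raster grid from ``(nx+1, ny+1)`` to ``(nx, ny)`` points, so the arguments now count positions per axis rather than intervals. For example (doctest-style, mirroring the docstring above):

```python
>>> from ptypy.core import xy
>>> len(xy.raster_scan(dy=1.5e-6, dx=1.5e-6, ny=2, nx=2))  # was 9 before this change
4
```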
@@ -268,105 +301,3 @@ def spiral_scan(dr=1.5e-6, r=7.5e-6, maxpts=None):
return np.asarray(positions)
-def raster_scan_legacy(nx, ny, dx, dy):# pragma: no cover
- """
- Generates a raster scan.
-
- Legacy function. May get deprecated in future.
- """
-
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
- iix, iiy = np.indices((nx+1, ny+1))
- positions = [(dx*i, dy*j) for i, j in zip(iix.ravel(), iiy.ravel())]
- return positions
-
-
-def round_scan_legacy(r_in, r_out, nr, nth):# pragma: no cover
- """
- Generates a round scan,
-
- Legacy function. May get deprecated in future.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
- dr = (r_out - r_in) / nr
- positions = []
- for ir in range(1, nr+2):
- rr = r_in + ir*dr
- dth = 2*np.pi / (nth*ir)
- positions.extend([(rr * np.sin(ith*dth), rr * np.cos(ith*dth))
- for ith in range(nth*ir)])
- return positions
-
-
-def round_scan_roi_legacy(dr, lx, ly, nth):# pragma: no cover
- """
- Round scan positions with ROI, defined as in spec and matlab.
-
- Legacy function. May get deprecated in future.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
- rmax = np.sqrt((lx/2)**2 + (ly/2)**2)
- nr = np.floor(rmax/dr) + 1
- positions = []
- for ir in range(1, int(nr+2)):
- rr = ir * dr
- dth = 2 * np.pi / (nth*ir)
- th = 2 * np.pi * np.arange(nth*ir) / (nth*ir)
- x1 = rr * np.sin(th)
- x2 = rr * np.cos(th)
- positions.extend([(xx1, xx2) for xx1, xx2 in zip(x1, x2)
- if (np.abs(xx1) <= ly/2) and (np.abs(xx2) <= lx/2)])
- return positions
-
-
-def spiral_scan_legacy(dr, r_out=None, maxpts=None):# pragma: no cover
- """
- Spiral scan positions.
-
- Legacy function. May get deprecated in future.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- alpha = np.sqrt(4 * np.pi)
- beta = dr / (2*np.pi)
-
- if maxpts is None:
- assert r_out is not None
- maxpts = 100000000
-
- if r_out is None:
- r_out = np.inf
-
- positions = []
- for k in xrange(maxpts):
- theta = alpha * np.sqrt(k)
- r = beta * theta
- if r > r_out:
- break
- positions.append((r * np.sin(theta), r * np.cos(theta)))
- return positions
-
-
-def spiral_scan_roi_legacy(dr, lx, ly):# pragma: no cover
- """\
- Spiral scan positions. ROI
-
- Legacy function. May get deprecated in future.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
- alpha = np.sqrt(4 * np.pi)
- beta = dr / (2*np.pi)
- rmax = .5 * np.sqrt(lx**2 + ly**2)
- positions = []
- for k in xrange(1000000000):
- theta = alpha * np.sqrt(k)
- r = beta * theta
- if r > rmax:
- break
- x, y = r * np.sin(theta), r * np.cos(theta)
- if abs(x) > lx/2:
- continue
- if abs(y) > ly/2:
- continue
- positions.append((x, y))
- return positions
diff --git a/ptypy/core/xy_old.py b/ptypy/core/xy_old.py
deleted file mode 100644
index b53518797..000000000
--- a/ptypy/core/xy_old.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module generates the scan patterns
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-from .. import utils as u
-#from ..utils import prop
-from ..utils.verbose import logger
-import numpy as np
-import os
-import warnings
-warnings.simplefilter('always', DeprecationWarning)
-warnings.warn('This module is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
-DEFAULT=u.Param(
- #### Paramaters for popular scan methods
- scan_type = None, # [None,'round', 'raster', 'round_roi','spiral','spiral_roi','custom']
- dr = 1.5e-6, # round,round_roi :width of shell
- nr = 5, # round : number of intervals (# of shells - 1)
- nth = 5, # round,round_roi: number of points in the first shell
- lx = 15e-6, # round_roi: Width of ROI
- ly = 15e-6, # round_roi: Height of ROI
- nx = 10, # raster scan: number of steps in x
- ny = 10, # raster scan: number of steps in y
- dx = 1.5e-6, # raster scan: step size (grid spacing)
- dy = 1.5e-6, # raster scan: step size (grid spacing)
- #### other
- positions = None, # fill this list with your own script if you want other scan patterns, choose 'custom' as san type
-)
-"""Default pattern parameters. See :py:data:`.scan.xy` and a short listing below"""
-
-def from_pars(pars=None):
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- p=u.Param(DEFAULT)
- if pars is not None: # and (isinstance(pars,dict) or isinstance(pars,u.Param)):
- p.update(pars)
-
- if p.scan_type is None:
- logger.debug('Scan_type `None` is chosen . Will use positions provided by meta information')
- return None
-
- elif p.scan_type=='round':
- pos=round_scan_positions(0,p.dr*p.nr,p.nr,p.nth)
- elif p.scan_type=='round_roi':
- pos=round_scan_ROI_positions(p.dr,p.lx,p.ly,p.nth)
- elif p.scan_type=='spiral':
- pos=spiral_scan_positions(p.dr,p.dr*p.nr)
- elif p.scan_type=='spiral_roi':
- pos=spiral_scan_ROI_positions(p.dr,p.lx,p.ly)
- elif p.scan_type=='raster':
- pos=raster_scan_positions(p.nx, p.ny, p.dx,p.dy)
- else:
- pos = p.positions
- pos=np.asarray(pos)
- logger.info('Prepared %d positions' % len(pos))
- return pos
-
-def scanpositions(scandict):
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- sd=scandict
- if sd['scan_type']=='round':
- positions=round_scan_positions(0,sd['dr']*sd['nr'],sd['nr'],sd['nth'])
- elif sd['scan_type']=='round_roi':
- positions=round_scan_ROI_positions(sd['dr'],sd['lx'],sd['ly'],sd['nth'])
- elif sd['scan_type']=='spiral':
- positions=spiral_scan_positions(sd['dr'],sd['dr']*sd['nr'])
- elif sd['scan_type']=='spiral_roi':
- positions=spiral_scan_ROI_positions(sd['dr'],sd['lx'],sd['ly'])
- elif sd['scan_type']=='raster':
- positions=raster_scan_positions(sd['nx'], sd['ny'], sd['dx'],sd['dy'])
- else:
- positions=sd['positions']
- return np.asarray(positions)
-
-def augment_to_coordlist(a,Npos):
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- # force into a 2 column matrix
- # drop element if size is not a modulo of 2
- a = np.asarray(a)
- if a.size == 1:
- a=np.atleast_2d([a,a])
-
- if a.size % 2 != 0:
- a=a.flatten()[:-1]
-
- a=a.reshape(a.size//2,2)
- # append multiples of a until length is greater equal than Npos
- if a.shape[0] < Npos:
- b=np.concatenate((1+Npos//a.shape[0])*[a],axis=0)
- else:
- b=a
-
- return b[:Npos,:2]
-
-def raster_scan_positions(nx,ny,sx,sy):
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
- iix, iiy = np.indices((nx+1,ny+1))
- positions = [(sx*i, sy*j) for i,j in zip(iix.ravel(), iiy.ravel())]
- return positions
-
-def round_scan_positions(r_in, r_out, nr, nth):
- """\
- Round scan positions, defined as in spec and matlab.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- dr = (r_out - r_in)/ nr
- positions = []
- for ir in range(1,nr+2):
- rr = r_in + ir*dr
- dth = 2*np.pi / (nth*ir)
- positions.extend([(rr*np.sin(ith*dth), rr*np.cos(ith*dth)) for ith in range(nth*ir)])
- return positions
-
-def round_scan_ROI_positions(dr, lx, ly, nth):
- """\
- Round scan positions with ROI, defined as in spec and matlab.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- rmax = np.sqrt( (lx/2)**2 + (ly/2)**2 )
- nr = np.floor(rmax/dr) + 1
- positions = []
- for ir in range(1,int(nr+2)):
- rr = ir*dr
- dth = 2*np.pi / (nth*ir)
- th = 2*np.pi*np.arange(nth*ir)/(nth*ir)
- x1 = rr*np.sin(th)
- x2 = rr*np.cos(th)
- positions.extend([(xx1,xx2) for xx1,xx2 in zip(x1,x2) if (np.abs(xx1) <= ly/2) and (np.abs(xx2) <= lx/2)])
- return positions
-
-
-def spiral_scan_positions(dr,r_out=None,maxpts=None):
- """\
- Spiral scan positions.
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
- alpha = np.sqrt(4*np.pi)
- beta = dr/(2*np.pi)
-
- if maxpts is None:
- assert r_out is not None
- maxpts = 100000000
-
- if r_out is None:
- r_out = np.inf
-
- positions = []
- for k in xrange(maxpts):
- theta = alpha*np.sqrt(k)
- r = beta * theta
- if r > r_out: break
- positions.append( (r*np.sin(theta), r*np.cos(theta)) )
- return positions
-
-def spiral_scan_ROI_positions(dr,lx,ly):
- """\
- Spiral scan positions. ROI
- """
- warnings.warn('This function is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-
- alpha = np.sqrt(4*np.pi)
- beta = dr/(2*np.pi)
-
- rmax = .5*np.sqrt(lx**2 + ly**2)
- positions = []
- for k in xrange(1000000000):
- theta = alpha*np.sqrt(k)
- r = beta * theta
- if r > rmax: break
- x,y = r*np.sin(theta), r*np.cos(theta)
- if abs(x) > lx/2: continue
- if abs(y) > ly/2: continue
- positions.append( (x,y) )
- return positions
-
diff --git a/ptypy/debug/__init__.py b/ptypy/debug/__init__.py
index 993223308..9431effbe 100644
--- a/ptypy/debug/__init__.py
+++ b/ptypy/debug/__init__.py
@@ -11,7 +11,7 @@
:license: GPLv2, see LICENSE for details.
"""
-from embedded_shell import ipshell
-import ipython_kernel
+from .embedded_shell import ipshell
+from . import ipython_kernel
diff --git a/ptypy/engines/Bragg3d_engines.py b/ptypy/engines/Bragg3d_engines.py
new file mode 100644
index 000000000..5336fe130
--- /dev/null
+++ b/ptypy/engines/Bragg3d_engines.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains experimental DM engines for 3d Bragg ptycho.
+
+This file is part of the PTYPY package.
+
+ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :license: GPLv2, see LICENSE for details.
+"""
+from .DM import DM
+from .. import defaults_tree
+from ..core.manager import Bragg3dModel
+from ..utils import parallel
+from ..utils.verbose import logger
+import time
+import numpy as np
+
+__all__ = ['DM_3dBragg']
+
+@defaults_tree.parse_doc('engine.DM_3dBragg')
+class DM_3dBragg(DM):
+ """
+ DM engine adapted to 3d Bragg reconstruction. Specifically, sample
+    supports are added (other regularizers might get added later). The
+ engine is highly experimental and not based on any published
+ algorithms, but on the notion that DM works well for 3d Bragg data
+ except that the positioning along the beam path is ambiguous.
+
+ Defaults:
+
+ [name]
+ default = DM_3dBragg
+ type = str
+ help =
+ doc =
+
+ [sample_support]
+ default =
+ type = Param
+ help = Sample support settings
+ doc =
+
+ [sample_support.type]
+ default = 'thinlayer'
+ type = str
+ help = Sample support geometry
+    doc = Options are 'thinlayer' for one-dimensional support as a
+      function of z, or 'rod' for one-dimensional radial support around
+      the z axis.
+
+ [sample_support.size]
+ default = 200e-9
+ type = float
+ help = Support thickness or radius
+ doc = This parameter is ignored when shrink wrapping is used.
+
+ [sample_support.coefficient]
+ default = 0.1
+ type = float
+ help = Scaling of region outside the support
+ lowlim = 0.0
+ uplim = 1.0
+ doc = Sample amplitude is multiplied by this value outside the support region
+
+ [sample_support.shrinkwrap]
+ default =
+ type = Param
+ help = Shrink wrap settings. None for no shrink wrap.
+ doc =
+
+ [sample_support.shrinkwrap.smooth]
+ default = 1.0
+ type = float
+ lowlim = .3
+ help = Shrink wrap smoothing parameter in pixels
+ doc = Sigma of gaussian with which to smooth object profile before applying shrink wrap. Pass None for no smoothing. Values < .3 make little sense.
+
+ [sample_support.shrinkwrap.cutoff]
+ default = .5
+ type = float
+ help = Shrink wrap cutoff parameter
+ doc = The support is truncated where the object profile has decayed to this value relative to the maximum.
+
+ [sample_support.shrinkwrap.monotonic]
+ default = True
+ type = bool
+ help = Require the object profile to be monotonic
+ doc = If the object profile increases again after the maximum, then the support is cut off. Set the cutoff parameter low to make this the dominating criterion.
+
+ [sample_support.shrinkwrap.start]
+ default = 10
+ type = int
+ help = Start shrink wrap after this iteration
+ doc =
+
+ [sample_support.shrinkwrap.plot]
+ default = False
+ type = bool
+ help = Pass shrink wrap information to the plot client
+ doc = Puts shrink wrap information in the runtime dict. The plot client can choose to plot it if it likes.
+
+ """
+
+ SUPPORTED_MODELS = [Bragg3dModel, ]
+
+ def __init__(self, ptycho_parent, pars):
+ """
+ Need to override here, to copy the grouped "sample_support."
+ parameters properly.
+ """
+ p = self.DEFAULT.copy(99)
+ if pars is not None:
+ p.update(pars, in_place_depth=99)
+
+ super(DM_3dBragg, self).__init__(ptycho_parent, p)
+
+ def object_update(self):
+ """
+ DM object update, modified with sample support. We work with a
+ generalized coordinate "s", along which we calculate sample
+ density profiles and apply cutoffs. The support type switches
+ how this coordinate is calculated and used for cutoff. More
+ types can easily be added.
+ """
+ super(DM_3dBragg, self).object_update()
+
+ # no support
+ if self.p.sample_support is None:
+ return
+
+ # access object storage and geometry through any active pod
+ for name, pod in self.pods.iteritems():
+ if pod.active:
+ break
+ geo = pod.geometry
+ S = pod.ob_view.storage
+ layer = pod.ob_view.layer
+
+ # fixed support
+ if not self.p.sample_support.shrinkwrap:
+ shigh = self.p.sample_support.size / 2.0
+ slow = -shigh
+
+ # shrink wrap
+ elif self.curiter >= self.p.sample_support.shrinkwrap.start:
+ logger.info('Shrink wrapping...')
+ t0 = time.time()
+
+ # transform to cartesian (r3, r1, r2) -> (x, z, y)
+ Scart = geo.coordinate_shift(S, input_space='real',
+ input_system='natural', keep_dims=True,
+ layer=layer)
+ x, z, y = Scart.grids()
+
+ # here we calculate the object profile in the coordinate of interest
+ if self.p.sample_support.type == 'thinlayer':
+ s = z[layer][0, :, 0]
+ sprofile = np.mean(np.abs(Scart.data[layer]), axis=(0,2))
+ icenter = np.argmax(sprofile)
+            elif self.p.sample_support.type == 'rod':
+ arr = np.sum(np.abs(Scart.data[layer]), axis=1)
+ ijcenter = np.unravel_index(np.argmax(arr), arr.shape)
+ xcenter = x[layer][ijcenter[0], 0, 0]
+ ycenter = y[layer][0, 0, ijcenter[1]]
+ # radial integration
+ x_, y_ = x[layer][:, 0, :], y[layer][:, 0, :]
+ r = np.sqrt((x_ - xcenter)**2 + (y_ - ycenter)**2)
+ scaling = np.min(geo.resolution)
+ r /= scaling
+ r = r.astype(np.int)
+ tbin = np.bincount(r.ravel(), arr.ravel())
+ nr = np.bincount(r.ravel())
+ s = np.arange(len(tbin)) * scaling
+ sprofile = tbin / nr
+ icenter = 0
+
+            # gaussian smoothing of the profile before applying the cutoff
+            sigma = self.p.sample_support.shrinkwrap.smooth
+            if sigma is not None and sigma > 0:
+                n = int(3 * sigma)
+                nn = np.arange(2 * n + 1) - n
+                ssmooth = (1.0 / (sigma * np.sqrt(2 * np.pi))
+                           * np.exp(-nn**2 / (2.0 * sigma**2)))
+                sprofile = np.convolve(sprofile, ssmooth, mode='same')
+
+ # walk around from the maximum to determine where to cut off
+ # the support, in each direction find the first values slow
+ # and shigh that lie outside the support.
+ cutoff = self.p.sample_support.shrinkwrap.cutoff
+ # walk negative
+ slow = s[0]
+ for i in range(1, icenter):
+ if (sprofile[icenter-i] / sprofile[icenter] < cutoff
+ or (self.p.sample_support.shrinkwrap.monotonic and
+ sprofile[icenter-i] > sprofile[icenter-i+1])):
+ slow = s[icenter-i]
+ break
+
+ # walk positive
+ shigh = s[len(sprofile) - 1]
+ for i in range(1, len(sprofile)-icenter-1):
+                if parallel.master:
+                    logger.debug('relative profile value: %f'
+                                 % (sprofile[icenter+i] / sprofile[icenter]))
+ if (sprofile[icenter+i] / sprofile[icenter] < cutoff
+ or (self.p.sample_support.shrinkwrap.monotonic and
+ sprofile[icenter+i] > sprofile[icenter+i-1])):
+ shigh = s[icenter+i]
+ break
+
+ # save info for runtime dict
+ self.sx = s
+ self.sprofile = sprofile
+ self.slow = slow
+ self.shigh = shigh
+            logger.info('Cutting off the support coordinate at %.1e, %.1e' % (slow, shigh))
+ logger.info('...done in %.2f seconds.' % (time.time() - t0))
+
+ # shrink wrap, but not yet
+ else:
+ return
+
+ # apply the support according to the coordinate of interest
+ r3, r1, r2 = S.grids()
+ x, z, y = pod.geometry.transformed_grid(
+ (r3[layer], r1[layer], r2[layer]),
+ input_space='real', input_system='natural')
+ if self.p.sample_support.type == 'thinlayer':
+ s = z
+ elif self.p.sample_support.type == 'rod':
+            try:
+                # use the center found by the shrink wrap step, if available
+                s = np.sqrt((x-xcenter)**2 + (y-ycenter)**2)
+            except NameError:
+                s = np.sqrt(x**2 + y**2)
+
+ S.data[layer][(s > shigh) | (s < slow)] \
+ *= self.p.sample_support.coefficient
+
+ def _fill_runtime(self):
+ """
+ Hack to get shrinkwrap info into the runtime dict.
+ """
+ super(DM_3dBragg, self)._fill_runtime()
+
+ try:
+ assert self.p.sample_support.shrinkwrap.plot
+        except (AttributeError, AssertionError):
+ return
+
+ try:
+ self.ptycho.runtime.iter_info[-1]['shrinkwrap'] = [self.sx, self.sprofile, self.slow, self.shigh]
+        except (AttributeError, KeyError, IndexError):
+ pass
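A minimal sketch of how the sample-support options defined above might be filled in. The parameter names follow the Defaults block of DM_3dBragg; how the Param block is attached to a reconstruction run is an assumption, not shown in this diff.

from ptypy import utils as u

# Hypothetical engine parameter block for DM_3dBragg (names taken from the
# Defaults docstring above; values are illustrative only).
pars = u.Param()
pars.sample_support = u.Param()
pars.sample_support.type = 'thinlayer'      # support as a function of z
pars.sample_support.size = 200e-9           # ignored once shrink wrap runs
pars.sample_support.coefficient = 0.1       # damping outside the support

# Leaving shrinkwrap as None keeps the fixed support; a Param enables it.
pars.sample_support.shrinkwrap = u.Param()
pars.sample_support.shrinkwrap.smooth = 1.0     # gaussian sigma in pixels
pars.sample_support.shrinkwrap.cutoff = 0.5     # relative profile cutoff
pars.sample_support.shrinkwrap.monotonic = True
pars.sample_support.shrinkwrap.start = 10       # first shrink-wrap iteration
pars.sample_support.shrinkwrap.plot = False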
diff --git a/ptypy/engines/DM.py b/ptypy/engines/DM.py
index ba887a0b1..a894237f8 100644
--- a/ptypy/engines/DM.py
+++ b/ptypy/engines/DM.py
@@ -12,50 +12,120 @@
from .. import utils as u
from ..utils.verbose import logger, log
from ..utils import parallel
-from utils import basic_fourier_update
-from . import BaseEngine
+from .utils import basic_fourier_update
+from . import BaseEngine, register
+from .. import defaults_tree
+from ..core.manager import Full, Vanilla, Bragg3dModel
__all__ = ['DM']
-DEFAULT = u.Param(
- # Difference map parameter
- alpha=1,
- # Number of iterations before probe update starts
- probe_update_start=2,
- # If True update object before probe
- update_object_first=True,
- # Threshold for interruption of the inner overlap loop
- overlap_converge_factor=0.05,
- # Maximum of iterations for the overlap constraint inner loop
- overlap_max_iterations=10,
- # Weight of the current probe estimate in the update, formally cfact
- probe_inertia=1e-9,
- # Weight of the current object in the update, formally DM_smooth_amplitude
- object_inertia=1e-4,
- # If rms error of model vs diffraction data is smaller than this fraction,
- # Fourier constraint is met
- fourier_relax_factor=0.05,
- # Gaussian smoothing (pixel) of the current object prior to update
- obj_smooth_std=None,
- # None or tuple(min, max) of desired limits of the object modulus,
- # currently in under common in documentation
- clip_object=None,
-)
-
-
+@register()
class DM(BaseEngine):
-
- DEFAULT = DEFAULT
+ """
+ A full-fledged Difference Map engine.
+
+
+ Defaults:
+
+ [name]
+ default = DM
+ type = str
+ help =
+ doc =
+
+ [alpha]
+ default = 1
+ type = float
+ lowlim = 0.0
+ help = Difference map parameter
+
+ [probe_update_start]
+ default = 2
+ type = int
+ lowlim = 0
+ help = Number of iterations before probe update starts
+
+ [subpix_start]
+ default = 0
+ type = int
+ lowlim = 0
+ help = Number of iterations before starting subpixel interpolation
+
+ [subpix]
+ default = 'linear'
+ type = str
+ help = Subpixel interpolation; 'fourier','linear' or None for no interpolation
+
+ [update_object_first]
+ default = True
+ type = bool
+ help = If True update object before probe
+
+ [overlap_converge_factor]
+ default = 0.05
+ type = float
+ lowlim = 0.0
+ help = Threshold for interruption of the inner overlap loop
+ doc = The inner overlap loop refines the probe and the object simultaneously. This loop is escaped as soon as the overall change in probe, relative to the first iteration, is less than this value.
+
+ [overlap_max_iterations]
+ default = 10
+ type = int
+ lowlim = 1
+    help = Maximum number of iterations for the overlap constraint inner loop
+
+ [probe_inertia]
+ default = 1e-9
+ type = float
+ lowlim = 0.0
+ help = Weight of the current probe estimate in the update
+
+ [object_inertia]
+ default = 1e-4
+ type = float
+ lowlim = 0.0
+ help = Weight of the current object in the update
+
+ [fourier_relax_factor]
+ default = 0.05
+ type = float
+ lowlim = 0.0
+    help = If the rms error of the model vs the diffraction data is smaller than this fraction, the Fourier constraint is met
+ doc = Set this value higher for noisy data.
+
+ [obj_smooth_std]
+ default = None
+ type = int
+ lowlim = 0
+ help = Gaussian smoothing (pixel) of the current object prior to update
+ doc = If None, smoothing is deactivated. This smoothing can be used to reduce the amplitude of spurious pixels in the outer, least constrained areas of the object.
+
+ [clip_object]
+ default = None
+ type = tuple
+ help = Clip object amplitude into this interval
+
+ [probe_center_tol]
+ default = None
+ type = float
+ lowlim = 0.0
+    help = Pixel radius around the optical axis that the probe mass center must reside in
+
+ """
+
+ SUPPORTED_MODELS = [Full, Vanilla, Bragg3dModel]
def __init__(self, ptycho_parent, pars=None):
"""
Difference map reconstruction engine.
"""
- if pars is None:
- pars = DEFAULT.copy()
-
super(DM, self).__init__(ptycho_parent, pars)
+ p = self.DEFAULT.copy()
+ if pars is not None:
+ p.update(pars)
+ self.p = p
+
# Instance attributes
self.error = None
@@ -73,6 +143,17 @@ def __init__(self, ptycho_parent, pars=None):
# Another possibility would be to use the maximum value of all probe storages.
self.mean_power = None
+ self.ptycho.citations.add_article(
+ title='Probe retrieval in ptychographic coherent diffractive imaging',
+ author='Thibault et al.',
+ journal='Ultramicroscopy',
+ volume=109,
+ year=2009,
+ page=338,
+ doi='10.1016/j.ultramic.2008.12.011',
+ comment='The difference map reconstruction algorithm',
+ )
+
def engine_initialize(self):
"""
Prepare for reconstruction.
@@ -99,7 +180,7 @@ def engine_prepare(self):
for name, s in self.di.storages.iteritems():
self.pbound[name] = (
.25 * self.p.fourier_relax_factor**2 * s.pbound_stub)
- mean_power += s.tot_power/np.prod(s.shape)
+ mean_power += s.mean_power
self.mean_power = mean_power / len(self.di.storages)
# Fill object with coverage of views
@@ -239,8 +320,7 @@ def object_update(self):
# power of the probe (which is estimated from the power in diffraction patterns).
# This estimate assumes that the probe power is uniformly distributed through the
# array and therefore underestimate the strength of the probe terms.
- cfact = self.p.object_inertia * self.mean_power *\
- (self.ob_viewcover.storages[name].data + 1.)
+ cfact = self.p.object_inertia * self.mean_power
if self.p.obj_smooth_std is not None:
logger.info(
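As a usage sketch, the DM defaults above would typically be overridden through ptypy's engine parameter block; the p.engines.engine00 layout is standard ptypy convention, assumed here rather than shown in this diff, and the values are illustrative.

from ptypy import utils as u

p = u.Param()
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
p.engines.engine00.numiter = 80
p.engines.engine00.fourier_relax_factor = 0.1  # raise for noisy data (see doc above)
p.engines.engine00.obj_smooth_std = 10         # smooth object before each update
p.engines.engine00.clip_object = (0., 1.)      # clamp the object amplitude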
diff --git a/ptypy/engines/DM_simple.py b/ptypy/engines/DM_simple.py
index fe25256a2..aedd3ee73 100644
--- a/ptypy/engines/DM_simple.py
+++ b/ptypy/engines/DM_simple.py
@@ -7,36 +7,59 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-from .. import utils as u
-from utils import basic_fourier_update
-from . import BaseEngine
-from ..utils.verbose import logger
import time
import numpy as np
-from ..utils import parallel
-DEFAULT = u.Param(
- alpha=1.0,
- overlap_max_iterations=10,
- overlap_converge_factor=0.05,
- # the following BaseEngine parameters are not used
- probe_support=None,
- clip_object=None,
- subpix_start=None,
- subpix=None,
- probe_update_start=None,
- probe_inertia=None,
- object_inertia=None,
- obj_smooth_std=None,
- probe_center_tol=None,
-)
+from .. import utils as u
+from .utils import basic_fourier_update
+from . import BaseEngine, register
+from ..utils.verbose import logger
+from ..utils import parallel
+from .. import defaults_tree
+from ..core.manager import Full, Vanilla
__all__ = ['DM_simple']
-
+@register()
class DM_simple(BaseEngine):
+ """
+ Bare-bones DM reconstruction engine.
+
- DEFAULT = DEFAULT
+ Defaults:
+
+ [name]
+ default = DM_simple
+ type = str
+ help =
+ doc =
+
+ [numiter]
+ default = 123
+    type = int
+    help = Total number of iterations
+
+ [alpha]
+ default = 1
+ type = float
+ lowlim = 0.0
+ help = Difference map parameter
+
+ [overlap_converge_factor]
+ default = 0.05
+ type = float
+ lowlim = 0.0
+ help = Threshold for interruption of the inner overlap loop
+ doc = The inner overlap loop refines the probe and the object simultaneously. This loop is escaped as soon as the overall change in probe, relative to the first iteration, is less than this value.
+
+ [overlap_max_iterations]
+ default = 10
+ type = int
+ lowlim = 1
+    help = Maximum number of iterations for the overlap constraint inner loop
+
+ """
+
+ SUPPORTED_MODELS = [Full, Vanilla]
def __init__(self, ptycho, pars=None):
"""
@@ -44,6 +67,11 @@ def __init__(self, ptycho, pars=None):
"""
super(DM_simple, self).__init__(ptycho, pars)
+ p = self.DEFAULT.copy()
+ if pars is not None:
+ p.update(pars)
+ self.p = p
+
self.ptycho = ptycho
self.ob_nrm = None
self.pr_nrm = None
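The constructor pattern repeated across these engines, as a standalone sketch: the class-level defaults are deep-copied, then selectively overridden by the user-supplied parameters. u.Param is ptypy's attribute-access dict; the helper name make_pars is invented for illustration.

from ptypy import utils as u

DEFAULT = u.Param(alpha=1.0, overlap_max_iterations=10)

def make_pars(pars=None):
    # copy the defaults, then override matching entries with user input
    p = DEFAULT.copy()
    if pars is not None:
        p.update(pars)
    return p

p = make_pars(u.Param(alpha=0.9))
assert p.alpha == 0.9 and p.overlap_max_iterations == 10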
diff --git a/ptypy/engines/ML.py b/ptypy/engines/ML.py
index 39f4c1670..950c72bcd 100644
--- a/ptypy/engines/ML.py
+++ b/ptypy/engines/ML.py
@@ -2,8 +2,9 @@
"""
Maximum Likelihood reconstruction engine.
-TODO:
- * Implement other regularizers
+TODO.
+
+ * Implement other regularizers
This file is part of the PTYPY package.
@@ -12,40 +13,107 @@
"""
import numpy as np
import time
+
from .. import utils as u
from ..utils.verbose import logger
from ..utils import parallel
-from utils import Cnorm2, Cdot
-from . import BaseEngine
+from .utils import Cnorm2, Cdot
+from . import BaseEngine, register
+from .. import defaults_tree
+from ..core.manager import Full, Vanilla
__all__ = ['ML']
-DEFAULT = u.Param(
- ML_type='gaussian',
- floating_intensities=False,
- intensity_renormalization=1.,
- reg_del2=False,
- reg_del2_amplitude=.01,
- smooth_gradient=0,
- smooth_gradient_decay=0,
- scale_precond=False,
- scale_probe_object=1.
-)
-
+@register()
class ML(BaseEngine):
+ """
+ Maximum likelihood reconstruction engine.
+
+
+ Defaults:
+
+ [name]
+ default = ML
+ type = str
+ help =
+ doc =
+
+ [ML_type]
+ default = 'gaussian'
+ type = str
+ help = Likelihood model
+ choices = ['gaussian','poisson','euclid']
+    doc = One of 'gaussian', 'poisson' or 'euclid'. Only 'gaussian' is implemented.
+
+ [floating_intensities]
+ default = False
+ type = bool
+ help = Adaptive diffraction pattern rescaling
+    doc = If True, allow for adaptive rescaling of the diffraction pattern intensities (to correct for incident beam intensity fluctuations).
+
+ [intensity_renormalization]
+ default = 1.
+ type = float
+ lowlim = 0.0
+ help = Rescales the intensities so they can be interpreted as Poisson counts.
+
+ [reg_del2]
+ default = False
+ type = bool
+ help = Whether to use a Gaussian prior (smoothing) regularizer
+
+ [reg_del2_amplitude]
+ default = .01
+ type = float
+ lowlim = 0.0
+ help = Amplitude of the Gaussian prior if used
+
+ [smooth_gradient]
+ default = 0.0
+ type = float
+ help = Smoothing preconditioner
+ doc = Sigma for gaussian filter (turned off if 0.)
+
+ [smooth_gradient_decay]
+ default = 0.
+ type = float
+ help = Decay rate for smoothing preconditioner
+    doc = The sigma of the Gaussian filter is reduced exponentially at this rate
+
+ [scale_precond]
+ default = False
+ type = bool
+ help = Whether to use the object/probe scaling preconditioner
+ doc = This parameter can give faster convergence for weakly scattering samples.
+
+ [scale_probe_object]
+ default = 1.
+ type = float
+ lowlim = 0.0
+ help = Relative scale of probe to object
+
+ [probe_update_start]
+ default = 2
+ type = int
+ lowlim = 0
+ help = Number of iterations before probe update starts
- DEFAULT = DEFAULT
+ """
+
+ SUPPORTED_MODELS = [Full, Vanilla]
def __init__(self, ptycho_parent, pars=None):
"""
Maximum likelihood reconstruction engine.
"""
- if pars is None:
- pars = DEFAULT.copy()
-
super(ML, self).__init__(ptycho_parent, pars)
+ p = self.DEFAULT.copy()
+ if pars is not None:
+ p.update(pars)
+ self.p = p
+
# Instance attributes
# Object gradient
@@ -67,6 +135,17 @@ def __init__(self, ptycho_parent, pars=None):
self.scale_p_o = None
self.scale_p_o_memory = .9
+ self.ptycho.citations.add_article(
+ title='Maximum-likelihood refinement for coherent diffractive imaging',
+ author='Thibault P. and Guizar-Sicairos M.',
+ journal='New Journal of Physics',
+ volume=14,
+ year=2012,
+ page=63004,
+ doi='10.1088/1367-2630/14/6/063004',
+ comment='The maximum likelihood reconstruction algorithm',
+ )
+
def engine_initialize(self):
"""
Prepare for ML reconstruction.
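A small numpy sketch of the statistical weights the Gaussian likelihood model builds further down (see ML_Gaussian in MLtmp.py below): w = r * mask / (1/r + I), with r the intensity_renormalization parameter. The array shape and the all-ones mask are illustrative assumptions.

import numpy as np

r = 1.0                                              # p.intensity_renormalization
I = np.random.poisson(100., (64, 64)).astype(float)  # measured intensities
mask = np.ones_like(I)                               # valid-pixel mask (ma_view.data)

# per-pixel weights, roughly the inverse variance for Poisson counts
w = r * mask / (1. / r + I)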
diff --git a/ptypy/engines/ML_old.py b/ptypy/engines/ML_old.py
deleted file mode 100644
index 7b78da3ed..000000000
--- a/ptypy/engines/ML_old.py
+++ /dev/null
@@ -1,485 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Maximum Likelihood reconstruction engine.
-
-TODO:
- * Implement other regularizers
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-import numpy as np
-from .. import utils as u
-from ..utils.verbose import logger
-from ..utils import parallel
-from engine_utils import Cnorm2, Cdot
-from . import BaseEngine
-#from .. import core
-
-__all__=['ML']
-import warnings
-warnings.simplefilter('always', DeprecationWarning)
-warnings.warn('This module is deprecated and will be removed from the package on 30/11/16',DeprecationWarning)
-DEFAULT = u.Param(
- ML_type = 'gaussian',
- floating_intensities = False,
- intensity_renormalization = 1.,
- reg_del2 = False,
- reg_del2_amplitude = .01,
- smooth_gradient = 0,
- scale_precond = False,
- scale_probe_object = 1.
-
-
- #overlap_converge_factor = .1,
- #overlap_max_iterations = 10,
- #probe_inertia = 1e-9, # Portion of probe that is kept from iteraiton to iteration, formally cfact
- #object_inertia = 1e-4, # Portion of object that is kept from iteraiton to iteration, formally DM_smooth_amplitude
- #obj_smooth_std = None, # Standard deviation for smoothing of object between iterations
- #clip_object = None, # None or tuple(min,max) of desired limits of the object modulus
-)
-
-
-class ML(BaseEngine):# pragma: no cover
-
- DEFAULT = DEFAULT
-
- def __init__(self, ptycho_parent, pars=None):
- """
- Maximum likelihood reconstruction engine.
- """
- if pars is None:
- pars = DEFAULT.copy()
- super(ML, self).__init__(ptycho_parent, pars)
-
- def engine_initialize(self):
- """
- Prepare for ML reconstruction.
- """
- # Container for the "errors"
- #self.error = np.
-
- # Object gradient and minimization direction
- self.ob_grad = self.ob.copy(self.ob.ID+'_grad',fill=0.) # Object gradient
- self.ob_h = self.ob.copy(self.ob.ID+'_h',fill=0.) # Object minimization direction
-
- # Probe gradient
- self.pr_grad = self.pr.copy(self.pr.ID+'_grad',fill=0.) # Probe gradient
- self.pr_h = self.pr.copy(self.pr.ID+'_h',fill=0.) # Probe minimization direction
-
- # Create noise model
- if self.p.ML_type.lower() == "gaussian":
- self.ML_model = ML_Gaussian(self)
- elif self.p.ML_type.lower() == "poisson":
- self.ML_model = ML_Gaussian(self)
- elif self.p.ML_type.lower() == "euclid":
- self.ML_model = ML_Gaussian(self)
- else:
- raise RuntimeError("Unsupported ML_type: '%s'" % self.p.ML_type)
-
- # Other options
- self.smooth_gradient = prepare_smoothing_preconditioner(self.p.smooth_gradient)
-
- def engine_prepare(self):
- """
- last minute initialization, everything, that needs to be recalculated, when new data arrives
- """
- #- # fill object with coverage of views
- #- for name,s in self.ob_viewcover.S.iteritems():
- #- s.fill(s.get_view_coverage())
- pass
-
-
- def engine_iterate(self, num=1):
- """
- Compute `num` iterations.
- """
- ########################
- # Compute new gradient
- ########################
- for it in range(num):
- new_ob_grad, new_pr_grad, error_dct = self.ML_model.new_grad()
-
- if (self.p.probe_update_start <= self.curiter):
- # Apply probe support if needed
- for name,s in new_pr_grad.S.iteritems():
- support = self.probe_support.get(name)
- if support is not None:
- s.data *= support
- else:
- new_pr_grad.fill(0.)
-
- # Smoothing preconditioner
- # !!! Lets make this consistent with the smoothing already done in DM
- #if self.smooth_gradient:
- # for name,s in new_ob_grad.S.iteritems()
- # s.data[:] = self.smooth_gradient(s.data)
-
- # probe/object rescaling
- if self.p.scale_precond:
- scale_p_o = self.p.scale_probe_object * Cnorm2(new_ob_grad) / Cnorm2(new_pr_grad)
- logger.debug('Scale P/O: %6.3g' % scale_p_o)
- else:
- scale_p_o = self.p.scale_probe_object
-
- ############################
- # Compute next conjugate
- ############################
- if self.curiter == 0:
- bt = 0.
- else:
- bt_num = scale_p_o * ( Cnorm2(new_pr_grad) - np.real(Cdot(new_pr_grad, self.pr_grad))) +\
- ( Cnorm2(new_ob_grad) - np.real(Cdot(new_ob_grad, self.ob_grad)))
- bt_denom = scale_p_o * Cnorm2(self.pr_grad) + Cnorm2(self.ob_grad)
-
- bt = max(0,bt_num/bt_denom)
-
- #verbose(3,'Polak-Ribiere coefficient: %f ' % bt)
-
- # It would be nice to have something more elegant than the following
- for name,s in self.ob_grad.S.iteritems():
- s.data[:] = new_ob_grad.S[name].data
- for name,s in self.pr_grad.S.iteritems():
- s.data[:] = new_pr_grad.S[name].data
-
- # 3. Next conjugate
- for name,s in self.ob_h.S.iteritems():
- s.data *= bt
- s.data -= self.ob_grad.S[name].data
-
- for name,s in self.pr_h.S.iteritems():
- s.data *= bt
- s.data -= scale_p_o * self.pr_grad.S[name].data
-
- # 3. Next conjugate
- #ob_h = self.ob_h
- #ob_h *= bt
-
- # Smoothing preconditioner not implemented.
- #if self.smooth_gradient:
- # ob_h -= object_smooth_filter(grad_obj)
- #else:
- # ob_h -= ob_grad
-
- #ob_h -= ob_grad
- #pr_h *= bt
- #pr_h -= scale_p_o * pr_grad
-
- # Minimize - for now always use quadratic approximation (i.e. single Newton-Raphson step)
- # In principle, the way things are now programmed this part could be iterated over in
- # a real NR style.
- B = self.ML_model.poly_line_coeffs(self.ob_h, self.pr_h)
-
- if np.isinf(B).any() or np.isnan(B).any():
- print 'Warning! inf or nan found! Trying to continue...'
- B[np.isinf(B)] = 0.
- B[np.isnan(B)] = 0.
-
- tmin = -.5*B[1]/B[2]
-
- for name,s in self.ob.S.iteritems():
- s.data += tmin*self.ob_h.S[name].data
- for name,s in self.pr.S.iteritems():
- s.data += tmin*self.pr_h.S[name].data
- # Newton-Raphson loop would end here
-
- return error_dct #np.array([[self.ML_model.LL[0]] * 3])
-
- def engine_finalize(self):
- """
- Delete temporary containers.
- """
- del self.ptycho.containers[self.ob_grad.ID]
- del self.ob_grad
- del self.ptycho.containers[self.ob_h.ID]
- del self.ob_h
- del self.ptycho.containers[self.pr_grad.ID]
- del self.pr_grad
- del self.ptycho.containers[self.pr_h.ID]
- del self.pr_h
-
-class ML_Gaussian(object):
- """
- """
-
- def __init__(self, MLengine):
- """
- Core functions for ML computation using a Gaussian model.
- """
- self.engine = MLengine
-
- # Transfer commonly used attributes from ML engine
- self.di = self.engine.di
- self.p = self.engine.p
- self.ob = self.engine.ob
- self.pr = self.engine.pr
-
- # Create working variables
- self.ob_grad = self.engine.ob.copy(self.ob.ID+'_ngrad',fill=0.) # New object gradient
- self.pr_grad = self.engine.pr.copy(self.pr.ID+'_ngrad',fill=0.) # New probe gradient
- self.LL = 0.
-
- # Gaussian model requires weights
- self.weights = self.engine.di.copy(self.engine.di.ID+'_weights')
- for name,di_view in self.di.V.iteritems():
- if not di_view.active: continue
- self.weights[di_view] = di_view.pod.ma_view.data / (1. + di_view.data)
-
- # Useful quantities
- self.tot_measpts = len(self.di.V)
- self.tot_power = sum(s.tot_power for s in self.di.S.values())
-
- # Prepare regularizer
- if self.p.reg_del2:
- obj_Npix = self.ob.size
- expected_obj_var = obj_Npix / self.tot_power # Poisson
- reg_rescale = self.tot_measpts / (8. * obj_Npix * expected_obj_var)
- logger.debug('Rescaling regularization amplitude using the Poisson distribution assumption.')
- logger.debug('Factor: %8.5g' % reg_rescale)
- reg_del2_amplitude = self.p.reg_del2_amplitude * reg_rescale
- self.regularizer = Regul_del2(amplitude=reg_del2_amplitude)
- else:
- self.regularizer = None
-
- def __del__(self):
- """
- Clean up routine
- """
- # Delete containers
- del self.engine.ptycho.containers[self.weights.ID]
- del self.weights
- del self.engine.ptycho.containers[self.ob_grad.ID]
- del self.ob_grad
- del self.engine.ptycho.containers[self.pr_grad.ID]
- del self.pr_grad
-
- # Remove working attributes
- for name,diff_view in self.di.V.iteritems():
- if not diff_view.active: continue
- try:
- del diff_view.float_intens_coeff
- del diff_view.error
- except:
- pass
-
- def new_grad(self):
- """
- Compute a new gradient direction according to a Gaussian noise model.
-
- Note: The negative log-likelihood and local errors are also computed
- here.
- """
- ob_grad = self.ob_grad
- pr_grad = self.pr_grad
- ob_grad.fill(0.)
- pr_grad.fill(0.)
-
- LL = np.array([0.]) # We need an array for MPI
- error_dct={}
-
- # Outer loop: through diffraction patterns
- for dname,diff_view in self.di.V.iteritems():
- if not diff_view.active: continue
-
- # Weights and intensities for this view
- w = self.weights[diff_view]
- I = diff_view.data
-
- Imodel = np.zeros_like(I)
- f = {}
-
- # First pod loop: compute total intensity
- for name,pod in diff_view.pods.iteritems():
- if not pod.active: continue
- f[name] = pod.fw(pod.probe*pod.object)
- Imodel += u.abs2(f[name])
-
- # Floating intensity option
- if self.p.floating_intensities:
- diff_view.float_intens_coeff = (w * Imodel * I).sum() / (w * Imodel**2).sum()
- Imodel *= diff_view.float_intens_coeff
-
- DI = Imodel - I
-
- # Second pod loop: gradients computation
- LLL = np.sum((w * DI**2).astype(np.float64))
- #print LLL
- for name,pod in diff_view.pods.iteritems():
- if not pod.active: continue
- xi = pod.bw(w*DI*f[name])
- ob_grad[pod.ob_view] += 2. * xi * pod.probe.conj()
- pr_grad[pod.pr_view] += 2. * xi * pod.object.conj()
-
- # Negative log-likelihood term
- #LLL += (w * DI**2).sum()
-
- #LLL
- diff_view.error = LLL
- error_dct[dname]= np.array([0,LLL / np.prod(DI.shape),0])
- LL += LLL
-
- # MPI reduction of gradients
- for name,s in ob_grad.S.iteritems():
- parallel.allreduce(s.data)
- for name,s in pr_grad.S.iteritems():
- parallel.allreduce(s.data)
- parallel.allreduce(LL)
-
- # Object regularizer
- if self.regularizer:
- for name,s in self.ob.S.iteritems():
- ob_grad.S[name].data += self.regularizer.grad(s.data)
-
- self.LL = LL / self.tot_measpts
-
- return ob_grad, pr_grad, error_dct
-
- def poly_line_coeffs(self, ob_h, pr_h):
- """
- Compute the coefficients of the polynomial for line minimization
- in direction h
- """
-
- B = np.zeros((3,))
- Brenorm = 1./ self.LL[0]**2
-
- # Outer loop: through diffraction patterns
- for dname,diff_view in self.di.V.iteritems():
- if not diff_view.active: continue
-
- # Weights and intensities for this view
- w = self.weights[diff_view]
- I = diff_view.data
-
- A0 = None
- A1 = None
- A2 = None
-
- for name,pod in diff_view.pods.iteritems():
- if not pod.active: continue
- f = pod.fw(pod.probe*pod.object)
- a = pod.fw(pod.probe * ob_h[pod.ob_view] + pr_h[pod.pr_view] * pod.object)
- b = pod.fw(pr_h[pod.pr_view] * ob_h[pod.ob_view])
-
- if A0 is None:
- A0 = u.abs2(f)
- A1 = 2*np.real(f*a.conj())
- A2 = 2*np.real(f*b.conj()) + u.abs2(a)
- else:
- A0 += u.abs2(f)
- A1 += 2*np.real(f*a.conj())
- A2 += 2*np.real(f*b.conj()) + u.abs2(a)
-
- if self.p.floating_intensities:
- A0 *= diff_view.float_intens_coeff
- A1 *= diff_view.float_intens_coeff
- A2 *= diff_view.float_intens_coeff
- A0 -= I
-
- B[0] += np.dot(w.flat,(A0**2).flat) * Brenorm
- B[1] += np.dot(w.flat,(2*A0*A1).flat) * Brenorm
- B[2] += np.dot(w.flat,(A1**2 + 2*A0*A2).flat) * Brenorm
-
- parallel.allreduce(B)
-
- # Object regularizer
- if self.regularizer:
- for name,s in self.ob.S.iteritems():
- B += Brenorm * self.regularizer.poly_line_coeffs(ob_h.S[name].data, s.data)
-
- self.B = B
-
- return B
-
-# Regul class does not exist, replace by objectclass
-#class Regul_del2(Regul):
-class Regul_del2(object):
- """\
- Squared gradient regularizer (Gaussian prior).
- This class applies to any numpy array.
- """
- def __init__(self, amplitude, axes=[-2,-1]):
- #Regul.__init__(self, axes)
- self.axes = axes
- self.amplitude = amplitude
- self.delxy = None
-
- def grad(self, x):
- """
- Compute and return the regularizer gradient given the array x.
- """
- ax0,ax1 = self.axes
- del_xf = u.delxf(x,axis=ax0)
- del_yf = u.delxf(x,axis=ax1)
- del_xb = u.delxb(x,axis=ax0)
- del_yb = u.delxb(x,axis=ax1)
-
- self.delxy = [del_xf, del_yf, del_xb, del_yb]
- self.g = 2. * self.amplitude*(del_xb + del_yb - del_xf - del_yf)
-
- return self.g
-
- def poly_line_coeffs(self, h, x=None):
- ax0,ax1 = self.axes
- if x is None:
- del_xf,del_yf,del_xb,del_yb = self.delxy
- else:
- del_xf = u.delxf(x,axis=ax0)
- del_yf = u.delxf(x,axis=ax1)
- del_xb = u.delxb(x,axis=ax0)
- del_yb = u.delxb(x,axis=ax1)
-
- hdel_xf = u.delxf(h,axis=ax0)
- hdel_yf = u.delxf(h,axis=ax1)
- hdel_xb = u.delxb(h,axis=ax0)
- hdel_yb = u.delxb(h,axis=ax1)
-
- c0 = self.amplitude * (u.norm2(del_xf) + u.norm2(del_yf) + u.norm2(del_xb) + u.norm2(del_yb))
- c1 = 2 * self.amplitude * np.real(np.vdot(del_xf, hdel_xf) + np.vdot(del_yf, hdel_yf) +\
- np.vdot(del_xb, hdel_xb) + np.vdot(del_yb, hdel_yb))
- c2 = self.amplitude * (u.norm2(hdel_xf) + u.norm2(hdel_yf) + u.norm2(hdel_xb) + u.norm2(hdel_yb))
-
- self.coeff = np.array([c0,c1,c2])
- return self.coeff
-
-def prepare_smoothing_preconditioner(amplitude):
- """\
- Factory for smoothing preconditioner.
- """
- if amplitude == 0.: return None
-
- class GaussFilt:
- def __init__(self,sigma):
- self.sigma = sigma
- def __call__(self,x):
- y = np.empty_like(x)
- sh = x.shape
- xf = x.reshape((-1,)+sh[-2:])
- yf = y.reshape((-1,)+sh[-2:])
- for i in range(len(xf)):
- yf[i] = gaussian_filter(xf[i], self.sigma)
- return y
-
- from scipy.signal import correlate2d
- class HannFilt:
- def __call__(self,x):
- y = np.empty_like(x)
- sh = x.shape
- xf = x.reshape((-1,)+sh[-2:])
- yf = y.reshape((-1,)+sh[-2:])
- for i in range(len(xf)):
- yf[i] = correlate2d(xf[i], np.array([[.0625, .125, .0625], [.125, .25, .125], [.0625, .125, .0625]]), mode='same')
- return y
-
- if object_smooth_gradient > 0.:
- logger.debug('Using a smooth gradient filter (Gaussian blur - only for ML)')
- return GaussFilt(object_smooth_gradient)
-
- elif object_smooth_gradient < 0.:
- logger.debug('Using a smooth gradient filter (Hann window - only for ML)')
- return HannFilt()
-
-
-
diff --git a/ptypy/engines/MLtmp.py b/ptypy/engines/MLtmp.py
new file mode 100644
index 000000000..f76ec81ce
--- /dev/null
+++ b/ptypy/engines/MLtmp.py
@@ -0,0 +1,659 @@
+# -*- coding: utf-8 -*-
+"""
+Maximum Likelihood reconstruction engine.
+
+TODO:
+ * Implement other regularizers
+
+This file is part of the PTYPY package.
+
+ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :license: GPLv2, see LICENSE for details.
+"""
+import numpy as np
+import time
+from .. import utils as u
+from ..utils.verbose import logger
+from ..utils import parallel
+from .utils import Cnorm2, Cdot
+from . import BaseEngine, register
+from .. import defaults_tree
+from ..core.manager import Full, Vanilla
+
+__all__ = ['ML']
+
+@register()
+class ML(BaseEngine):
+ """
+ Maximum likelihood reconstruction engine.
+
+
+ Defaults:
+
+ [name]
+ default = ML
+ type = str
+ help =
+ doc =
+
+ [ML_type]
+ default = 'gaussian'
+ type = str
+ help = Likelihood model
+ choices = ['gaussian','poisson','euclid']
+    doc = One of 'gaussian', 'poisson' or 'euclid'. Only 'gaussian' is implemented.
+
+ [floating_intensities]
+ default = False
+ type = bool
+ help = Adaptive diffraction pattern rescaling
+    doc = If True, allow for adaptive rescaling of the diffraction pattern intensities (to correct for incident beam intensity fluctuations).
+
+ [intensity_renormalization]
+ default = 1.
+ type = float
+ lowlim = 0.0
+ help = Rescales the intensities so they can be interpreted as Poisson counts.
+
+ [reg_del2]
+ default = False
+ type = bool
+ help = Whether to use a Gaussian prior (smoothing) regularizer
+
+ [reg_del2_amplitude]
+ default = .01
+ type = float
+ lowlim = 0.0
+ help = Amplitude of the Gaussian prior if used
+
+ [smooth_gradient]
+ default = 0.0
+ type = float
+ help = Smoothing preconditioner
+ doc = Sigma for gaussian filter (turned off if 0.)
+
+ [smooth_gradient_decay]
+ default = 0.
+ type = float
+ help = Decay rate for smoothing preconditioner
+    doc = The sigma of the Gaussian filter is reduced exponentially at this rate
+
+ [scale_precond]
+ default = False
+ type = bool
+ help = Whether to use the object/probe scaling preconditioner
+ doc = This parameter can give faster convergence for weakly scattering samples.
+
+ [scale_probe_object]
+ default = 1.
+ type = float
+ lowlim = 0.0
+ help = Relative scale of probe to object
+
+ [probe_update_start]
+ default = 2
+ type = int
+ lowlim = 0
+ help = Number of iterations before probe update starts
+
+ """
+
+    SUPPORTED_MODELS = [Full, Vanilla]
+
+ def __init__(self, ptycho_parent, pars=None):
+ """
+ Maximum likelihood reconstruction engine.
+ """
+ super(ML, self).__init__(ptycho_parent, pars)
+
+ p = self.DEFAULT.copy()
+ if pars is not None:
+ p.update(pars)
+ self.p = p
+
+ # Instance attributes
+
+ # Object gradient
+ self.ob_grad = None
+
+ # Object minimization direction
+ self.ob_h = None
+
+ # Probe gradient
+ self.pr_grad = None
+
+ # Probe minimization direction
+ self.pr_h = None
+
+ # Other
+ self.tmin = None
+ self.ML_model = None
+ self.smooth_gradient = None
+ self.scale_p_o = None
+ self.scale_p_o_memory = .9
+
+ def engine_initialize(self):
+ """
+ Prepare for ML reconstruction.
+ """
+
+ # Object gradient and minimization direction
+ self.ob_grad = self.ob.copy(self.ob.ID + '_grad', fill=0.)
+ self.ob_h = self.ob.copy(self.ob.ID + '_h', fill=0.)
+
+ # Probe gradient and minimization direction
+ self.pr_grad = self.pr.copy(self.pr.ID + '_grad', fill=0.)
+ self.pr_h = self.pr.copy(self.pr.ID + '_h', fill=0.)
+
+ self.tmin = 1.
+
+ # Create noise model
+ if self.p.ML_type.lower() == "gaussian":
+ self.ML_model = ML_Gaussian(self)
+ elif self.p.ML_type.lower() == "poisson":
+ self.ML_model = ML_Gaussian(self)
+ elif self.p.ML_type.lower() == "euclid":
+ self.ML_model = ML_Gaussian(self)
+ else:
+ raise RuntimeError("Unsupported ML_type: '%s'" % self.p.ML_type)
+
+ # Other options
+ self.smooth_gradient = prepare_smoothing_preconditioner(
+ self.p.smooth_gradient)
+
+ def engine_prepare(self):
+ """
+        Last-minute initialization; everything that needs to be recalculated
+        when new data arrives.
+ """
+ # - # fill object with coverage of views
+ # - for name,s in self.ob_viewcover.S.iteritems():
+ # - s.fill(s.get_view_coverage())
+ pass
+
+ def engine_iterate(self, num=1):
+ """
+ Compute `num` iterations.
+ """
+ ########################
+ # Compute new gradient
+ ########################
+ tg = 0.
+ tc = 0.
+ ta = time.time()
+ for it in range(num):
+ t1 = time.time()
+ new_ob_grad, new_pr_grad, error_dct = self.ML_model.new_grad()
+ tg += time.time() - t1
+
+ if self.p.probe_update_start <= self.curiter:
+ # Apply probe support if needed
+ for name, s in new_pr_grad.storages.iteritems():
+ support = self.probe_support.get(name)
+ if support is not None:
+ s.data *= support
+ else:
+ new_pr_grad.fill(0.)
+
+ # Smoothing preconditioner
+ if self.smooth_gradient:
+ self.smooth_gradient.sigma *= (1. - self.p.smooth_gradient_decay)
+ for name, s in new_ob_grad.storages.iteritems():
+ s.data[:] = self.smooth_gradient(s.data)
+
+ # probe/object rescaling
+ if self.p.scale_precond:
+ cn2_new_pr_grad = Cnorm2(new_pr_grad)
+ if cn2_new_pr_grad > 1e-5:
+ scale_p_o = (self.p.scale_probe_object * Cnorm2(new_ob_grad)
+ / Cnorm2(new_pr_grad))
+ else:
+ scale_p_o = self.p.scale_probe_object
+ if self.scale_p_o is None:
+ self.scale_p_o = scale_p_o
+ else:
+ self.scale_p_o = self.scale_p_o ** self.scale_p_o_memory
+ self.scale_p_o *= scale_p_o ** (1-self.scale_p_o_memory)
+ logger.debug('Scale P/O: %6.3g' % scale_p_o)
+ else:
+ self.scale_p_o = self.p.scale_probe_object
+
+ ############################
+ # Compute next conjugate
+ ############################
+ if self.curiter == 0:
+ bt = 0.
+ else:
+ bt_num = (self.scale_p_o
+ * (Cnorm2(new_pr_grad)
+ - np.real(Cdot(new_pr_grad, self.pr_grad)))
+ + (Cnorm2(new_ob_grad)
+ - np.real(Cdot(new_ob_grad, self.ob_grad))))
+
+ bt_denom = self.scale_p_o*Cnorm2(self.pr_grad) + Cnorm2(self.ob_grad)
+
+ bt = max(0, bt_num/bt_denom)
+
+ # verbose(3,'Polak-Ribiere coefficient: %f ' % bt)
+
+ self.ob_grad << new_ob_grad
+ self.pr_grad << new_pr_grad
+ """
+ for name, s in self.ob_grad.storages.iteritems():
+ s.data[:] = new_ob_grad.storages[name].data
+ for name, s in self.pr_grad.storages.iteritems():
+ s.data[:] = new_pr_grad.storages[name].data
+ """
+ # 3. Next conjugate
+ self.ob_h *= bt / self.tmin
+
+ # Smoothing preconditioner
+ if self.smooth_gradient:
+ for name, s in self.ob_h.storages.iteritems():
+ s.data[:] -= self.smooth_gradient(self.ob_grad.storages[name].data)
+ else:
+ self.ob_h -= self.ob_grad
+ self.pr_h *= bt / self.tmin
+ self.pr_grad *= self.scale_p_o
+ self.pr_h -= self.pr_grad
+ """
+ for name,s in self.ob_h.storages.iteritems():
+ s.data *= bt
+ s.data -= self.ob_grad.storages[name].data
+
+ for name,s in self.pr_h.storages.iteritems():
+ s.data *= bt
+ s.data -= scale_p_o * self.pr_grad.storages[name].data
+ """
+ # 3. Next conjugate
+ # ob_h = self.ob_h
+ # ob_h *= bt
+
+ # Smoothing preconditioner not implemented.
+ # if self.smooth_gradient:
+ # ob_h -= object_smooth_filter(grad_obj)
+ # else:
+ # ob_h -= ob_grad
+
+ # ob_h -= ob_grad
+ # pr_h *= bt
+ # pr_h -= scale_p_o * pr_grad
+
+ # Minimize - for now always use quadratic approximation
+ # (i.e. single Newton-Raphson step)
+ # In principle, the way things are now programmed this part
+ # could be iterated over in a real NR style.
+ t2 = time.time()
+ B = self.ML_model.poly_line_coeffs(self.ob_h, self.pr_h)
+ tc += time.time() - t2
+
+ if np.isinf(B).any() or np.isnan(B).any():
+ logger.warning(
+                    'inf or nan found in line coefficients! Trying to continue...')
+ B[np.isinf(B)] = 0.
+ B[np.isnan(B)] = 0.
+
+ self.tmin = -.5 * B[1] / B[2]
+ self.ob_h *= self.tmin
+ self.pr_h *= self.tmin
+ self.ob += self.ob_h
+ self.pr += self.pr_h
+ """
+ for name,s in self.ob.storages.iteritems():
+ s.data += tmin*self.ob_h.storages[name].data
+ for name,s in self.pr.storages.iteritems():
+ s.data += tmin*self.pr_h.storages[name].data
+ """
+ # Newton-Raphson loop would end here
+
+ # increase iteration counter
+            self.curiter += 1
+
+ logger.info('Time spent in gradient calculation: %.2f' % tg)
+ logger.info(' .... in coefficient calculation: %.2f' % tc)
+ return error_dct # np.array([[self.ML_model.LL[0]] * 3])
+
+ def engine_finalize(self):
+ """
+ Delete temporary containers.
+ """
+ del self.ptycho.containers[self.ob_grad.ID]
+ del self.ob_grad
+ del self.ptycho.containers[self.ob_h.ID]
+ del self.ob_h
+ del self.ptycho.containers[self.pr_grad.ID]
+ del self.pr_grad
+ del self.ptycho.containers[self.pr_h.ID]
+ del self.pr_h
+
+
+class ML_Gaussian(object):
+ """
+ """
+
+ def __init__(self, MLengine):
+ """
+ Core functions for ML computation using a Gaussian model.
+ """
+ self.engine = MLengine
+
+ # Transfer commonly used attributes from ML engine
+ self.di = self.engine.di
+ self.p = self.engine.p
+ self.ob = self.engine.ob
+ self.pr = self.engine.pr
+
+ if self.p.intensity_renormalization is None:
+ self.Irenorm = 1.
+ else:
+ self.Irenorm = self.p.intensity_renormalization
+
+ # Create working variables
+ # New object gradient
+ self.ob_grad = self.engine.ob.copy(self.ob.ID + '_ngrad', fill=0.)
+ # New probe gradient
+ self.pr_grad = self.engine.pr.copy(self.pr.ID + '_ngrad', fill=0.)
+ self.LL = 0.
+
+ # Gaussian model requires weights
+ # TODO: update this part of the code once actual weights are passed in the PODs
+ self.weights = self.engine.di.copy(self.engine.di.ID + '_weights')
+ # FIXME: This part needs to be updated once statistical weights are properly
+ # supported in the data preparation.
+ for name, di_view in self.di.views.iteritems():
+ if not di_view.active:
+ continue
+ self.weights[di_view] = (self.Irenorm * di_view.pod.ma_view.data
+ / (1./self.Irenorm + di_view.data))
+
+ # Useful quantities
+ self.tot_measpts = sum(s.data.size
+ for s in self.di.storages.values())
+ self.tot_power = self.Irenorm * sum(s.tot_power
+ for s in self.di.storages.values())
+ # Prepare regularizer
+ if self.p.reg_del2:
+ obj_Npix = self.ob.size
+ expected_obj_var = obj_Npix / self.tot_power # Poisson
+ reg_rescale = self.tot_measpts / (8. * obj_Npix * expected_obj_var)
+ logger.debug(
+ 'Rescaling regularization amplitude using '
+ 'the Poisson distribution assumption.')
+ logger.debug('Factor: %8.5g' % reg_rescale)
+ reg_del2_amplitude = self.p.reg_del2_amplitude * reg_rescale
+ self.regularizer = Regul_del2(amplitude=reg_del2_amplitude)
+ else:
+ self.regularizer = None
+
+ def __del__(self):
+ """
+ Clean up routine
+ """
+ # Delete containers
+ del self.engine.ptycho.containers[self.weights.ID]
+ del self.weights
+ del self.engine.ptycho.containers[self.ob_grad.ID]
+ del self.ob_grad
+ del self.engine.ptycho.containers[self.pr_grad.ID]
+ del self.pr_grad
+
+ # Remove working attributes
+ for name, diff_view in self.di.views.iteritems():
+ if not diff_view.active:
+ continue
+ try:
+ del diff_view.float_intens_coeff
+ del diff_view.error
+ except:
+ pass
+
+ def new_grad(self):
+ """
+ Compute a new gradient direction according to a Gaussian noise model.
+
+ Note: The negative log-likelihood and local errors are also computed
+ here.
+ """
+ self.ob_grad.fill(0.)
+ self.pr_grad.fill(0.)
+
+ # We need an array for MPI
+ LL = np.array([0.])
+ error_dct = {}
+
+ # Outer loop: through diffraction patterns
+ for dname, diff_view in self.di.views.iteritems():
+ if not diff_view.active:
+ continue
+
+ # Weights and intensities for this view
+ w = self.weights[diff_view]
+ I = diff_view.data
+
+ Imodel = np.zeros_like(I)
+ f = {}
+
+ # First pod loop: compute total intensity
+ for name, pod in diff_view.pods.iteritems():
+ if not pod.active:
+ continue
+ f[name] = pod.fw(pod.probe * pod.object)
+ Imodel += u.abs2(f[name])
+
+ # Floating intensity option
+ if self.p.floating_intensities:
+ diff_view.float_intens_coeff = ((w * Imodel * I).sum()
+ / (w * Imodel**2).sum())
+ Imodel *= diff_view.float_intens_coeff
+
+ DI = Imodel - I
+
+ # Second pod loop: gradients computation
+ LLL = np.sum((w * DI**2).astype(np.float64))
+ for name, pod in diff_view.pods.iteritems():
+ if not pod.active:
+ continue
+ xi = pod.bw(w * DI * f[name])
+ self.ob_grad[pod.ob_view] += 2. * xi * pod.probe.conj()
+ self.pr_grad[pod.pr_view] += 2. * xi * pod.object.conj()
+
+ # Negative log-likelihood term
+ # LLL += (w * DI**2).sum()
+
+ # LLL
+ diff_view.error = LLL
+ error_dct[dname] = np.array([0, LLL / np.prod(DI.shape), 0])
+ LL += LLL
+
+ # MPI reduction of gradients
+ self.ob_grad.allreduce()
+ self.pr_grad.allreduce()
+ """
+ for name, s in ob_grad.storages.iteritems():
+ parallel.allreduce(s.data)
+ for name, s in pr_grad.storages.iteritems():
+ parallel.allreduce(s.data)
+ """
+ parallel.allreduce(LL)
+
+ # Object regularizer
+ if self.regularizer:
+ for name, s in self.ob.storages.iteritems():
+ self.ob_grad.storages[name].data += self.regularizer.grad(
+ s.data)
+ LL += self.regularizer.LL
+
+ self.LL = LL / self.tot_measpts
+
+ return self.ob_grad, self.pr_grad, error_dct
+
+ def poly_line_coeffs(self, ob_h, pr_h):
+ """
+ Compute the coefficients of the polynomial for line minimization
+ in direction h
+ """
+
+ B = np.zeros((3,), dtype=np.longdouble)
+ Brenorm = 1. / self.LL[0]**2
+
+ # Outer loop: through diffraction patterns
+ for dname, diff_view in self.di.views.iteritems():
+ if not diff_view.active:
+ continue
+
+ # Weights and intensities for this view
+ w = self.weights[diff_view]
+ I = diff_view.data
+
+ A0 = None
+ A1 = None
+ A2 = None
+
+ for name, pod in diff_view.pods.iteritems():
+ if not pod.active:
+ continue
+ f = pod.fw(pod.probe * pod.object)
+ a = pod.fw(pod.probe * ob_h[pod.ob_view]
+ + pr_h[pod.pr_view] * pod.object)
+ b = pod.fw(pr_h[pod.pr_view] * ob_h[pod.ob_view])
+
+ if A0 is None:
+ A0 = u.abs2(f).astype(np.longdouble)
+ A1 = 2 * np.real(f * a.conj()).astype(np.longdouble)
+ A2 = (2 * np.real(f * b.conj()).astype(np.longdouble)
+ + u.abs2(a).astype(np.longdouble))
+ else:
+ A0 += u.abs2(f)
+ A1 += 2 * np.real(f * a.conj())
+ A2 += 2 * np.real(f * b.conj()) + u.abs2(a)
+
+ if self.p.floating_intensities:
+ A0 *= diff_view.float_intens_coeff
+ A1 *= diff_view.float_intens_coeff
+ A2 *= diff_view.float_intens_coeff
+ A0 -= I
+
+ B[0] += np.dot(w.flat, (A0**2).flat) * Brenorm
+ B[1] += np.dot(w.flat, (2 * A0 * A1).flat) * Brenorm
+ B[2] += np.dot(w.flat, (A1**2 + 2*A0*A2).flat) * Brenorm
+
+ parallel.allreduce(B)
+
+ # Object regularizer
+ if self.regularizer:
+ for name, s in self.ob.storages.iteritems():
+ B += Brenorm * self.regularizer.poly_line_coeffs(
+ ob_h.storages[name].data, s.data)
+
+ self.B = B
+
+ return B
+
+# Regul class does not exist, replace by objectclass
+# class Regul_del2(Regul):
+
+
+class Regul_del2(object):
+ """\
+ Squared gradient regularizer (Gaussian prior).
+
+ This class applies to any numpy array.
+ """
+ def __init__(self, amplitude, axes=[-2, -1]):
+ # Regul.__init__(self, axes)
+ self.axes = axes
+ self.amplitude = amplitude
+ self.delxy = None
+ self.g = None
+ self.LL = None
+
+ def grad(self, x):
+ """
+ Compute and return the regularizer gradient given the array x.
+ """
+ ax0, ax1 = self.axes
+ del_xf = u.delxf(x, axis=ax0)
+ del_yf = u.delxf(x, axis=ax1)
+ del_xb = u.delxb(x, axis=ax0)
+ del_yb = u.delxb(x, axis=ax1)
+
+ self.delxy = [del_xf, del_yf, del_xb, del_yb]
+ self.g = 2. * self.amplitude*(del_xb + del_yb - del_xf - del_yf)
+
+ self.LL = self.amplitude * (u.norm2(del_xf)
+ + u.norm2(del_yf)
+ + u.norm2(del_xb)
+ + u.norm2(del_yb))
+
+ return self.g
+
+ def poly_line_coeffs(self, h, x=None):
+ ax0, ax1 = self.axes
+ if x is None:
+ del_xf, del_yf, del_xb, del_yb = self.delxy
+ else:
+ del_xf = u.delxf(x, axis=ax0)
+ del_yf = u.delxf(x, axis=ax1)
+ del_xb = u.delxb(x, axis=ax0)
+ del_yb = u.delxb(x, axis=ax1)
+
+ hdel_xf = u.delxf(h, axis=ax0)
+ hdel_yf = u.delxf(h, axis=ax1)
+ hdel_xb = u.delxb(h, axis=ax0)
+ hdel_yb = u.delxb(h, axis=ax1)
+
+ c0 = self.amplitude * (u.norm2(del_xf)
+ + u.norm2(del_yf)
+ + u.norm2(del_xb)
+ + u.norm2(del_yb))
+
+ c1 = 2 * self.amplitude * np.real(np.vdot(del_xf, hdel_xf)
+ + np.vdot(del_yf, hdel_yf)
+ + np.vdot(del_xb, hdel_xb)
+ + np.vdot(del_yb, hdel_yb))
+
+ c2 = self.amplitude * (u.norm2(hdel_xf)
+ + u.norm2(hdel_yf)
+ + u.norm2(hdel_xb)
+ + u.norm2(hdel_yb))
+
+ self.coeff = np.array([c0, c1, c2])
+ return self.coeff
+
+
+def prepare_smoothing_preconditioner(amplitude):
+ """
+ Factory for smoothing preconditioner.
+ """
+ if amplitude == 0.:
+ return None
+
+ class GaussFilt:
+ def __init__(self, sigma):
+ self.sigma = sigma
+
+ def __call__(self, x):
+ return u.c_gf(x, [0, self.sigma, self.sigma])
+
+ # from scipy.signal import correlate2d
+ # class HannFilt:
+ # def __call__(self, x):
+ # y = np.empty_like(x)
+ # sh = x.shape
+ # xf = x.reshape((-1,) + sh[-2:])
+ # yf = y.reshape((-1,) + sh[-2:])
+ # for i in range(len(xf)):
+ # yf[i] = correlate2d(xf[i],
+ # np.array([[.0625, .125, .0625],
+ # [.125, .25, .125],
+ # [.0625, .125, .0625]]),
+ # mode='same')
+ # return y
+
+ if amplitude > 0.:
+ logger.debug(
+ 'Using a smooth gradient filter (Gaussian blur - only for ML)')
+ return GaussFilt(amplitude)
+
+ elif amplitude < 0.:
+ raise RuntimeError('Hann filter not implemented (negative smoothing amplitude not supported)')
+ # logger.debug(
+ # 'Using a smooth gradient filter (Hann window - only for ML)')
+ # return HannFilt()
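The quadratic line search used in engine_iterate above, reduced to its core: poly_line_coeffs returns the coefficients of an error parabola B[0] + B[1]*t + B[2]*t**2 along the search direction, and a single Newton-Raphson step lands at its vertex. The coefficient values here are made up for illustration.

import numpy as np

B = np.array([2.0, -3.0, 4.0], dtype=np.longdouble)  # as from poly_line_coeffs
tmin = -.5 * B[1] / B[2]                             # vertex of the parabola
assert np.isclose(float(tmin), 0.375)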
diff --git a/ptypy/engines/__init__.py b/ptypy/engines/__init__.py
index b2ac451d3..84a2e5a28 100644
--- a/ptypy/engines/__init__.py
+++ b/ptypy/engines/__init__.py
@@ -11,37 +11,23 @@
:license: GPLv2, see LICENSE for details.
"""
from .. import utils as u
-from base import BaseEngine, DEFAULT_iter_info
-from base import DEFAULT as COMMON
-import DM
-import DM_simple
-import ML
-import dummy
-import ePIE
-
-__all__ = ['DM', 'ML', 'ePIE', 'BaseEngine']
-
-# List of supported engines
-engine_names = ['Dummy', 'DM_simple', 'DM', 'ML', 'ML_new', 'ePIE']
-
-# Supported engines defaults
-DEFAULTS = u.Param(
- common=COMMON,
- Dummy=dummy.DEFAULT,
- DM_simple=DM_simple.DEFAULT,
- DM=DM.DEFAULT,
- ML=ML.DEFAULT,
- ePIE=ePIE.DEFAULT
-)
-
-# Engine objects
-ENGINES = u.Param(
- Dummy=dummy.Dummy,
- DM_simple=DM_simple.DM_simple,
- DM=DM.DM,
- ML=ML.ML,
- ePIE=ePIE.EPIE
-)
+from .. import defaults_tree
+from .utils import *
+
+ENGINES = dict()
+
+
+def register(name=None):
+ """Engine registration decorator"""
+ return lambda cls: _register_engine_class(cls, name)
+
+
+def _register_engine_class(cls, name=None):
+ from ptypy import defaults_tree
+ name = cls.__name__ if name is None else name
+ ENGINES[name] = cls
+ cls = defaults_tree.parse_doc('engine.' + name, True)(cls)
+ return cls
def by_name(name):
@@ -49,82 +35,19 @@ def by_name(name):
raise RuntimeError('Unknown engine: %s' % name)
return ENGINES[name]
-try:
- import os
- import glob
- import re
- import imp
-
- # Default search paths for engines
- DEFAULT_ENGINE_PATHS = ['./', '~/.ptypy/']
-
- def dynamic_load(path=None):
- """
- Load an engine dynamically from the given paths
-
- :param path: Path or list of paths
- """
-
- # Update list of paths to search for
- if path is not None:
- if str(path) == path:
- path_list = [path] + DEFAULT_ENGINE_PATHS
- else:
- path_list = path + DEFAULT_ENGINE_PATHS
- else:
- path_list = DEFAULT_ENGINE_PATHS
-
- # List of base classes an engine could derive from
- baselist = ['BaseEngine'] + engine_names
-
- # Loop through paths
- engine_path = {}
- for path in path_list:
- # Complete directory path
- directory = os.path.abspath(os.path.expanduser(path))
-
- if not os.path.exists(directory):
- # Continue silently
- continue
- # raise IOError('Engine path %s does not exist.'
- # % str(directory))
-
- # Get list of python files
- py_files = glob.glob(directory + '/*.py')
- if not py_files:
- continue
-
- # Loop through files to find engines
- for filename in py_files:
- modname = os.path.splitext(os.path.split(filename)[-1])[0]
-
- # Find classes
- res = re.findall(
- '^class (.*)\((.*)\)', file(filename, 'r').read(), re.M)
-
- for classname, basename in res:
- if (basename in baselist) and classname not in baselist:
- # Match!
- engine_path[classname] = (modname, filename)
- u.logger.info("Found Engine '%s' in file '%s'"
- % (classname, filename))
-
- # Load engines that have been found
- for classname, mf in engine_path.iteritems():
-
- # Import module
- modname, filename = mf
- engine_module = imp.load_source(modname, filename)
-
- # Update list
- DEFAULTS[classname] = getattr(engine_module, 'DEFAULT')
- ENGINES[classname] = getattr(engine_module, classname)
- engine_names.append(classname)
-
- dynamic_load()
-except Exception as e:
- u.logger.warning("Attempt at loading Engines dynamically failed")
- import sys
- import traceback
- ex_type, ex, tb = sys.exc_info()
- u.logger.warning(traceback.format_exc(tb))
+from .base import BaseEngine
+
+# These imports should be executable separately
+from . import DM
+from . import DM_simple
+from . import ML
+from . import dummy
+from . import ePIE
+
+# dynamic load, maybe discarded in future
+dynamic_load('./', ['BaseEngine'] + ENGINES.keys(), True)
+dynamic_load('~/.ptypy/', ['BaseEngine'] + ENGINES.keys(), True)
+
+
+
+
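A hedged sketch of what the new registration mechanism enables: a user-defined engine registered under its class name, with its Defaults docstring parsed into defaults_tree under 'engine.MyEngine'. The engine body is a placeholder, not a working reconstruction engine.

from ptypy.engines import BaseEngine, register

@register()
class MyEngine(BaseEngine):
    """
    A placeholder engine.

    Defaults:

    [name]
    default = MyEngine
    type = str
    help =

    [numiter]
    default = 10
    type = int
    help = Total number of iterations
    """
    SUPPORTED_MODELS = []  # fill in with the supported model classes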
diff --git a/ptypy/engines/base.py b/ptypy/engines/base.py
index 50b4e827c..eaca87f69 100644
--- a/ptypy/engines/base.py
+++ b/ptypy/engines/base.py
@@ -13,36 +13,9 @@
from .. import utils as u
from ..utils import parallel
from ..utils.verbose import logger, headerline
+from ..utils.descriptor import EvalDescriptor
-__all__ = ['BaseEngine', 'DEFAULT_iter_info']
-
-DEFAULT = u.Param(
- # Total number of iterations
- numiter=20,
- # Number of iterations without interruption
- numiter_contiguous=1,
- # Fraction of valid probe area (circular) in probe frame
- probe_support=0.7,
- # Note: if probe support becomes of more complex nature,
- # consider putting it in illumination.py
- # Clip object amplitude into this interval
- clip_object=None,
- # Number of iterations before starting subpixel interpolation
- subpix_start=0,
- # Subpixel interpolation; 'fourier','linear' or None for no interpolation
- subpix='linear',
- # Number of iterations before probe update starts
- probe_update_start=2,
- # Weight of the current probe estimate in the update
- probe_inertia=0.001,
- # Weight of the current object in the update
- object_inertia=0.1,
- # Gaussian smoothing (pixel) of the current object prior to update
- obj_smooth_std=20,
- # Pixel radius around optical axes that the probe mass center must reside in
- # None or float
- probe_center_tol=None,
-)
+__all__ = ['BaseEngine', 'Base3dBraggEngine', 'DEFAULT_iter_info']
DEFAULT_iter_info = u.Param(
iteration=0,
@@ -53,6 +26,8 @@
)
+#local_tree = EvalDescriptor('')
+#@local_tree.parse_doc('engine.common')
class BaseEngine(object):
"""
Base reconstruction engine.
@@ -63,9 +38,34 @@ class BaseEngine(object):
engine_prepare
engine_iterate
engine_finalize
+
+
+ Defaults:
+
+ [numiter]
+ default = 20
+ type = int
+ lowlim = 1
+ help = Total number of iterations
+
+ [numiter_contiguous]
+ default = 1
+ type = int
+ lowlim = 1
+ help = Number of iterations without interruption
+    doc = The engine will not return control to the caller until this number of iterations is completed; server requests, I/O operations etc. are not processed in between.
+
+ [probe_support]
+ default = 0.7
+ type = float
+ lowlim = 0.0
+ help = Valid probe area as fraction of the probe frame
+ doc = Defines a circular area centered on the probe frame, in which the probe is allowed to be nonzero.
+
"""
- DEFAULT = DEFAULT.copy()
+ # Define with which models this engine can work.
+ COMPATIBLE_MODELS = []
def __init__(self, ptycho, pars=None):
"""
@@ -80,8 +80,8 @@ def __init__(self, ptycho, pars=None):
Initialization parameters
"""
self.ptycho = ptycho
- p = u.Param(self.DEFAULT)
+ p = self.DEFAULT.copy()
if pars is not None:
p.update(pars)
self.p = p
@@ -152,6 +152,11 @@ def prepare(self):
support = (np.pi * (xx**2 + yy**2) < supp * sh[1] * sh[2])
self.probe_support[name] = support
+ # Make sure all the pods are supported
+ for label_, pod_ in self.pods.iteritems():
+            if pod_.model.__class__ not in self.SUPPORTED_MODELS:
+ raise Exception('Model %s not supported by engine' % pod_.model.__class__)
+
# Call engine specific preparation
self.engine_prepare()
@@ -275,3 +280,33 @@ def engine_finalize(self):
self.finalize()
"""
raise NotImplementedError()
+
+
+class Base3dBraggEngine(BaseEngine):
+ """
+ 3d Bragg engines need a slightly different prepare() method, because
+ a 2d probe support makes no sense (at least not yet...)
+
+ Defaults:
+
+ [probe_support]
+ default = None
+ """
+
+ def prepare(self):
+ """
+ Last-minute preparation before iterating.
+ """
+ self.finished = False
+ # Simple 2d probe support isn't applicable to the 3d case.
+ supp = self.p.probe_support
+ if supp is not None:
+ raise NotImplementedError
+
+ # Make sure all the pods are supported
+ for label_, pod_ in self.pods.iteritems():
+            if pod_.model.__class__ not in self.SUPPORTED_MODELS:
+ raise Exception('Model %s not supported by engine' % pod_.model.__class__)
+
+ # Call engine specific preparation
+ self.engine_prepare()
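The circular probe support built in prepare() (see the hunk above), as a standalone numpy sketch: a centered disk whose area is probe_support times the frame area. Only the final inequality is taken from the code; the grid construction around it and the frame shape are assumptions.

import numpy as np

sh = (1, 256, 256)                 # (modes, rows, cols); illustrative shape
supp = 0.7                         # p.probe_support
yy, xx = np.indices(sh[1:], dtype=float)
yy -= (sh[1] - 1) / 2.             # center the grid on the frame
xx -= (sh[2] - 1) / 2.
support = (np.pi * (xx**2 + yy**2) < supp * sh[1] * sh[2])
assert abs(support.mean() - supp) < 0.01   # the disk covers ~70% of the frame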
diff --git a/ptypy/engines/dummy.py b/ptypy/engines/dummy.py
index bfcb8dd4c..de531a36b 100644
--- a/ptypy/engines/dummy.py
+++ b/ptypy/engines/dummy.py
@@ -11,32 +11,47 @@
import time
import numpy as np
-import ptypy
-from .. import utils as u
from ..utils import parallel
-from . import BaseEngine
-
-#import utils
+from . import BaseEngine, register
+from .. import defaults_tree
+from ..core.manager import Full, Vanilla
__all__ = ['Dummy']
-DEFAULT = u.Param(
- itertime = 2., # Sleep time for a single iteration (in seconds)
-)
-
+@register()
class Dummy(BaseEngine):
-
- """
- Minimum implementation of BaseEngine
"""
+ Dummy reconstruction engine.
+
+
+ Defaults:
+
+ [name]
+ default = Dummy
+ type = str
+ help =
+ doc =
+
+ [itertime]
+ default = .2
+ type = float
+ help = Sleep time for a single iteration (in seconds)
- DEFAULT = DEFAULT
+ """
+ SUPPORTED_MODELS = [Full, Vanilla]
+
def __init__(self, ptycho_parent, pars=None):
"""
Dummy reconstruction engine.
"""
- super(Dummy,self).__init__(ptycho_parent,pars)
+ super(Dummy,self).__init__(ptycho_parent, pars)
self.ntimescalled = 0
def engine_initialize(self):
@@ -73,8 +88,3 @@ def engine_finalize(self):
Clean up after iterations are done
"""
pass
-
-
-
-
-
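
The @register() decorator applied to Dummy (and to every engine and PtyScan subclass in this PR) follows a standard registry pattern. A hedged sketch of the idea, not ptypy's actual implementation:

    # Toy registry: map an engine name to its class for lookup by 'name'.
    ENGINES = {}

    def register(name=None):
        def _register(cls):
            ENGINES[name if name is not None else cls.__name__] = cls
            return cls
        return _register

    @register()
    class Dummy(object):
        pass

    @register(name='ePIE')
    class EPIE(object):
        pass

    assert ENGINES['Dummy'] is Dummy and ENGINES['ePIE'] is EPIE
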
diff --git a/ptypy/engines/ePIE.py b/ptypy/engines/ePIE.py
index 61b0cd330..5a1267d6c 100644
--- a/ptypy/engines/ePIE.py
+++ b/ptypy/engines/ePIE.py
@@ -25,64 +25,110 @@
import numpy as np
import time
import random
+
from .. import utils as u
from ..utils.verbose import logger
from ..utils import parallel
-from . import BaseEngine
+from . import BaseEngine, register
+from .. import defaults_tree
+from ..core.manager import Full, Vanilla
__all__ = ['EPIE']
-DEFAULT = u.Param(
- # ePIE-specific defaults:
- alpha=1., # ePIE object update parameter
- beta=1., # ePIE probe update parameter
- synchronization=1, # Period with which to synchronize the
- # object (and optionally the probe)
- # among nodes.
- redistribute_data=True, # Whether or not to redistribute data
- # among nodes to keep each node's
- # views in a contiguous geographic
- # block, even if new data is added
- # during reconstruction.
- average_probe=False, # Whether or not to average the probe
- # among nodes, otherwise each node
- # has its own probe as in the
- # original publication. Averaging
- # seems to work the best.
- random_order=True, # Whether to cycle through the positions
- # in random order on each ePIE
- # iteration. Otherwise does the pods
- # in alphabetical order as per
- # list.sort(). Disabling is useful for
- # debugging.
- # Overridden base class defaults:
- object_inertia=None, # Not used in ePIE.
- probe_inertia=None, # Not used in ePIE.
- obj_smooth_std=None, # Object smoothing prior to all updates.
- probe_center_tol=3, # See base class
-
-)
-
+@register(name = 'ePIE')
class EPIE(BaseEngine):
-
- DEFAULT = DEFAULT
+ """
+ ePIE reconstruction engine.
+
+
+ Defaults:
+
+ [name]
+ default = ePIE
+ type = str
+ help =
+ doc =
+
+ [alpha]
+ default = 1.
+ type = float
+ lowlim = 0.0
+ uplim = 1.0
+ help = ePIE object update parameter
+ doc = Step size for the object update, a higher value will give faster change.
+
+ [beta]
+ default = 1.
+ type = float
+ lowlim = 0.0
+ uplim = 1.0
+ help = ePIE probe update parameter
+ doc = Step size for the probe update, a higher value will give faster change.
+
+ [probe_update_start]
+ default = 2
+ type = int
+ lowlim = 0
+ help = Number of iterations before probe update starts
+
+ [synchronization]
+ default = 1
+ type = int
+ lowlim = 1
+ help = Probe/object synchronization period
+ doc = Period with which to synchronize the object (and optionally the probe) among parallel nodes.
+
+ [redistribute_data]
+ default = True
+ type = bool
+ help = Redistribute views to form blocks
+ doc = Whether or not to redistribute data among nodes to keep each node's views in a contiguous geographic block, even if new data is added during reconstruction.
+
+ [average_probe]
+ default = False
+ type = bool
+ help = Average probe among nodes
+ doc = Whether or not to average the probe among nodes, otherwise each node has its own probe as in the original publication. Averaging seems to work the best.
+
+ [random_order]
+ default = True
+ type = bool
+ help = Visit positions in random order
+ doc = Whether to cycle through the positions in random order on each ePIE iteration. Otherwise does the pods in alphabetical order as per list.sort(). Disabling is useful for debugging.
+
+ [clip_object]
+ default = None
+ type = tuple
+ help = Clip object amplitude into this interval
+
+ [obj_smooth_std]
+ default = None
+ type = int
+ lowlim = 0
+ help = Gaussian smoothing (pixel) of the current object prior to update
+ doc = If None, smoothing is deactivated. This smoothing can be used to reduce the amplitude of spurious pixels in the outer, least constrained areas of the object.
+
+ [probe_center_tol]
+ default = 3
+ type = float
+ lowlim = 0.0
+ help = Pixel radius around optical axes that the probe mass center must reside in
+
+ """
+
+ SUPPORTED_MODELS = [Full, Vanilla]
def __init__(self, ptycho_parent, pars=None):
"""
ePIE reconstruction engine.
"""
- if pars is None:
- pars = DEFAULT.copy()
-
super(EPIE, self).__init__(ptycho_parent, pars)
- # Check that none of the base class parameters that aren't used
- # for ePIE are specified.
- if ((self.p.object_inertia is not None) or
- (self.p.probe_inertia is not None)):
- logger.warning(
- 'Probe and/or object inertias were specified, but will be ignored by the ePIE engine.')
# Check that smoothing doesn't outrun object sharing
if ((self.p.obj_smooth_std is not None) and
@@ -93,6 +139,17 @@ def __init__(self, ptycho_parent, pars=None):
# Instance attributes
self.ob_nodecover = None
+ self.ptycho.citations.add_article(
+ title='An improved ptychographical phase retrieval algorithm for diffractive imaging',
+ author='Maiden A. and Rodenburg J.',
+ journal='Ultramicroscopy',
+ volume=109,
+ year=2009,
+ page=1256,
+ doi='10.1016/j.ultramic.2009.05.012',
+ comment='The ePIE reconstruction algorithm',
+ )
+
def engine_initialize(self):
"""
Prepare for reconstruction.
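
Each engine now declares its parameters in a structured "Defaults:" block inside the class docstring, which the descriptor machinery parses into the class DEFAULT tree. A toy parser, to illustrate the format only (ptypy's real EvalDescriptor does much more, including type checking and defaults_tree registration):

    import re

    # Parse '[name]' section headers and 'key = value' lines into a dict.
    doc = """
    Defaults:

    [alpha]
    default = 1.
    type = float
    help = ePIE object update parameter

    [probe_update_start]
    default = 2
    type = int
    help = Number of iterations before probe update starts
    """

    params = {}
    current = None
    for line in doc.splitlines():
        line = line.strip()
        m = re.match(r'^\[(.+)\]$', line)
        if m:
            current = m.group(1)
            params[current] = {}
        elif current and '=' in line:
            key, _, value = line.partition('=')
            params[current][key.strip()] = value.strip()

    assert params['alpha']['help'] == 'ePIE object update parameter'
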
diff --git a/ptypy/engines/utils.py b/ptypy/engines/utils.py
index c83baf81a..8e39981d3 100644
--- a/ptypy/engines/utils.py
+++ b/ptypy/engines/utils.py
@@ -11,6 +11,67 @@
import numpy as np
from .. import utils as u
+# This dynamic load could easily be generalized to other types.
+def dynamic_load(path, baselist, fail_silently=True):
+ """
+ Load derivatives of `baselist` dynamically from .py files in the given path.
+
+ :param path: Path to Python files.
+ :param baselist: List of base class names to match against.
+ :param fail_silently: If False, log a traceback when loading fails.
+ """
+
+ import os
+ import glob
+ import re
+ import imp
+
+ # Maps class name -> (module name, file name)
+ engine_path = {}
+
+ try:
+ # Complete directory path
+ directory = os.path.abspath(os.path.expanduser(path))
+
+ if not os.path.exists(directory):
+ # Raised here, swallowed silently below by default
+ raise IOError('Engine path %s does not exist.'
+ % str(directory))
+
+ # Get list of python files
+ py_files = glob.glob(directory + '/*.py')
+ if not py_files:
+ raise IOError('Directory %s does not contain Python files.'
+ % str(directory))
+
+ # Loop through files to find engines
+ for filename in py_files:
+ modname = os.path.splitext(os.path.split(filename)[-1])[0]
+
+ # Find classes
+ res = re.findall(
+ r'^class (.*)\((.*)\)', open(filename, 'r').read(), re.M)
+
+ for classname, basename in res:
+ if (basename in baselist) and classname not in baselist:
+ # Match!
+ engine_path[classname] = (modname, filename)
+ u.logger.info("Found Engine '%s' in file '%s'"
+ % (classname, filename))
+
+ # Load engines that have been found
+ for classname, mf in engine_path.iteritems():
+
+ # Import module
+ modname, filename = mf
+ engine_module = imp.load_source(modname, filename)
+
+ except Exception as e:
+ if not fail_silently:
+ u.logger.warning("Attempt to load engines dynamically failed")
+ import traceback
+ u.logger.warning(traceback.format_exc())
+
def basic_fourier_update(diff_view, pbound=None, alpha=1., LL_error=True):
"""\
diff --git a/ptypy/experiment/ALS_5321.py b/ptypy/experiment/ALS_5321.py
index af65c51c6..3f959448d 100644
--- a/ptypy/experiment/ALS_5321.py
+++ b/ptypy/experiment/ALS_5321.py
@@ -4,78 +4,118 @@
future experiments at that beamline.
"""
-import ptypy
-from ptypy.core.data import PtyScan
-import ptypy.utils as u
import h5py
import numpy as np
-import time
-logger = u.verbose.logger
+from ..core.data import PtyScan
+from .. import utils as u
+from . import register
-# Default recipe tree.
-RECIPE = u.Param()
-RECIPE.dataPath = None
-RECIPE.datafile = None
-RECIPE.maskfile = None
-RECIPE.energy = 0.820
-RECIPE.CXI_PATHS = u.Param()
-RECIPE.CXI_PATHS.CXI_DATA_PATH = 'entry_1/data_1/data'
-RECIPE.CXI_PATHS.CXI_MASK_PATH = 'mask'
-RECIPE.CXI_PATHS.CXI_POS_PATH = 'entry_1/data_1/translation'
-RECIPE.CXI_PATHS.CXI_DISTANCE = 'entry_1/instrument_1/detector_1/distance'
-RECIPE.CXI_PATHS.CXI_PSIZES = [
- 'entry_1/instrument_1/detector_1/x_pixel_size',
- 'entry_1/instrument_1/detector_1/y_pixel_size'
-]
+logger = u.verbose.logger
+@register()
class ALS5321Scan(PtyScan):
"""
Basic class to load 5.3.2.1 data offline.
+
+ Defaults:
+
+ [name]
+ default = ALS5321Scan
+ type = str
+ help =
+
+ [dataPath]
+ default = None
+ type = str
+ help = Path to folder containing the data
+
+ [datafile]
+ default = None
+ type = str
+ help = CXI data file
+
+ [maskfile]
+ default = None
+ type = str
+ help = Optional mask file
+ doc = Should contain an array called 'mask' at the root level.
+
+ [energy]
+ default = 0.820
+
+ [CXI_PATHS]
+ default = None
+ type = Param
+ help = Container for CXI path options
+
+ [CXI_PATHS.CXI_DATA_PATH]
+ default = 'entry_1/data_1/data'
+ type = str
+ help = Data path within the CXI file
+
+ [CXI_PATHS.CXI_MASK_PATH]
+ default = 'mask'
+ type = str
+ help = Mask path within the CXI file
+
+ [CXI_PATHS.CXI_POS_PATH]
+ default = 'entry_1/data_1/translation'
+ type = str
+ help = Positions path within the CXI file
+
+ [CXI_PATHS.CXI_DISTANCE]
+ default = 'entry_1/instrument_1/detector_1/distance'
+ type = str
+ help = Distance path within the CXI file
+
+ [CXI_PATHS.CXI_PSIZES]
+ default = ['entry_1/instrument_1/detector_1/x_pixel_size', 'entry_1/instrument_1/detector_1/y_pixel_size']
+ type = list
+ help = Pixel size path within the CXI file
+
"""
def __init__(self, pars=None, **kwargs):
- # Get the default parameter tree and add a recipe branch.
- p = PtyScan.DEFAULT.copy(depth=10)
- p.recipe = RECIPE.copy()
- p.update(pars, in_place_depth=10)
+ # Get the default parameter tree.
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+
+ # Call the base class constructor with the new updated parameters.
+ # This constructor only reads params, it doesn't modify them.
+ super(ALS5321Scan, self).__init__(p, **kwargs)
# Extract geometrical information
- filename = p.recipe.dataPath + p.recipe.datafile
+ filename = p.dataPath + p.datafile
with h5py.File(filename, 'r') as hf:
- p.energy = RECIPE.energy
- p.distance = hf.get(RECIPE.CXI_PATHS.CXI_DISTANCE).value
- p.psize = [hf.get(RECIPE.CXI_PATHS.CXI_PSIZES[0]).value,
- hf.get(RECIPE.CXI_PATHS.CXI_PSIZES[1]).value]
-
- # Call the base class constructor with the new updated parameters.
- # This constructor only reads params, it doesn't modify them. We've
- # already put the kwargs in p, so we don't need to pass them here.
- super(ALS5321Scan, self).__init__(p)
+ self.info.energy = p.energy
+ self.info.distance = hf.get(p.CXI_PATHS.CXI_DISTANCE).value
+ self.info.psize = [hf.get(p.CXI_PATHS.CXI_PSIZES[0]).value,
+ hf.get(p.CXI_PATHS.CXI_PSIZES[1]).value]
def load_positions(self):
- filename = self.info.recipe.dataPath + self.info.recipe.datafile
+ filename = self.info.dataPath + self.info.datafile
# first get the total number of positions
with h5py.File(filename, 'r') as hf:
- data = hf.get(self.info.recipe.CXI_PATHS.CXI_POS_PATH)
+ data = hf.get(self.info.CXI_PATHS.CXI_POS_PATH)
positions = np.asarray(data)[:,:2]
positions = np.fliplr(positions)
return positions
def load(self, indices):
raw, weights, positions = {}, {}, {}
- filename = self.info.recipe.dataPath + self.info.recipe.datafile
+ filename = self.info.dataPath + self.info.datafile
with h5py.File(filename) as hf:
- data = hf.get(self.info.recipe.CXI_PATHS.CXI_DATA_PATH)
+ data = hf.get(self.info.CXI_PATHS.CXI_DATA_PATH)
for i in indices:
raw[i] = np.asarray(data[i])
return raw, positions, weights
def load_weight(self):
- filename = self.info.recipe.dataPath + self.info.recipe.maskfile
+ filename = self.info.dataPath + self.info.maskfile
with h5py.File(filename) as hf:
- mask = hf.get(self.info.recipe.CXI_PATHS.CXI_MASK_PATH)
+ mask = hf.get(self.info.CXI_PATHS.CXI_MASK_PATH)
mask = np.asarray(mask)
return mask
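
The constructor above pulls the detector geometry straight out of the CXI file. A hedged standalone sketch with a made-up file name; note that Dataset.value (used in the diff) is deprecated in newer h5py in favour of [()] indexing:

    import h5py

    filename = '/tmp/scan.cxi'  # hypothetical
    with h5py.File(filename, 'r') as hf:
        distance = hf['entry_1/instrument_1/detector_1/distance'][()]
        psize = [hf['entry_1/instrument_1/detector_1/x_pixel_size'][()],
                 hf['entry_1/instrument_1/detector_1/y_pixel_size'][()]]
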
diff --git a/ptypy/experiment/AMO_LCLS.py b/ptypy/experiment/AMO_LCLS.py
index 9d0cf585f..56978b264 100644
--- a/ptypy/experiment/AMO_LCLS.py
+++ b/ptypy/experiment/AMO_LCLS.py
@@ -10,14 +10,16 @@
import numpy as np
import os
+
from .. import utils as u
from .. import io
from .. import core
-from ..utils import parallel
-from ..core.data import PtyScan
from ..utils.verbose import log
from ..core.paths import Paths
-from ..core import DEFAULT_io as io_par
+from ..core import Ptycho
+from . import register
+
+IO_par = Ptycho.DEFAULT['io']
logger = u.verbose.logger
@@ -25,49 +27,101 @@
H5_PATHS = u.Param()
H5_PATHS.frame_pattern = 'data/photons'
-# Recipe defaults
-RECIPE = u.Param()
-RECIPE.experimentID = None # Experiment identifier
-RECIPE.scan_number = None # scan number
-RECIPE.dark_number = None
-RECIPE.flat_number = None
-RECIPE.energy = None
-RECIPE.lam = None # 1.2398e-9 / RECIPE.energy
-RECIPE.z = None # Distance from object to screen
-RECIPE.detector_name = None # Name of the detector as specified in the nexus file
-RECIPE.motors = ['t1_sx', 't1_sy'] # Motor names to determine the sample translation
-RECIPE.motors_multiplier = 1e-3 #for AMO #1e-6 #for CXI # Motor conversion factor to meters
-RECIPE.base_path = './'
-RECIPE.data_file_pattern = '%(base_path)s' + 'input/r%(scan_number)04d.h5'
-RECIPE.dark_file_pattern = '%(base_path)s' + 'input/r%(dark_number)04d.h5'
-RECIPE.flat_file_pattern = '%(base_path)s' + 'input/r%(flat_number)04d.h5'
-RECIPE.mask_file = None # '%(base_path)s' + 'processing/mask.h5'
-RECIPE.averaging_number = 1 # Number of frames to be averaged
-
-
-# Generic defaults
-AMODEFAULT = core.data.PtyScan.DEFAULT.copy()
-AMODEFAULT.recipe = RECIPE
-AMODEFAULT.auto_center = False
-AMODEFAULT.orientation = (False, False, False)
-
+@register()
class AMOScan(core.data.PtyScan):
- DEFAULT = AMODEFAULT
+ """
+ Defaults:
+
+ [name]
+ default = AMOScan
+ type = str
+ help =
+
+ [experimentID]
+ default = None
+
+ [scan_number]
+ default = None
+ type = int
+ help =
+
+ [dark_number]
+ default = None
+ type = int
+ help =
+
+ [flat_number]
+ default = None
+ type = int
+ help =
+
+ [detector_name]
+ default = None
+ type = str
+ help = Name of the detector as specified in the nexus file
+
+ [motors]
+ default = ['t1_sx', 't1_sy']
+ type = list
+ help = Motor names to determine the sample translation
+
+ [motors_multiplier]
+ default = 1e-3
+ type = float
+ help = Motor conversion factor to meters
+ doc = 1e-3 for AMO, 1e-6 for CXI
+
+ [base_path]
+ default = './'
+ type = str
+ help =
+
+ [data_file_pattern]
+ default = '%(base_path)sinput/r%(scan_number)04d.h5'
+ type = str
+ help =
+
+ [dark_file_pattern]
+ default = '%(base_path)sinput/r%(dark_number)04d.h5'
+ type = str
+ help =
+
+ [flat_file_pattern]
+ default = '%(base_path)sinput/r%(flat_number)04d.h5'
+ type = str
+ help =
+
+ [mask_file]
+ default = None
+ type = str
+ help =
+
+ [averaging_number]
+ default = 1
+ type = int
+ help = Number of frames to be averaged
+
+ [auto_center]
+ default = False
+
+ [orientation]
+ default = (False, False, False)
+
+ """
def __init__(self, pars=None, **kwargs):
"""
AMO (Atomic Molecular and Optical Science, LCLS) data preparation class.
"""
# Initialise parent class
- RDEFAULT = RECIPE.copy()
- RDEFAULT.update(pars.recipe)
- pars.recipe.update(RDEFAULT)
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
- super(AMOScan, self).__init__(pars, **kwargs)
+ super(AMOScan, self).__init__(p, **kwargs)
# Try to extract base_path to access data files
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
d = os.getcwd()
base_path = None
while True:
@@ -80,32 +134,32 @@ def __init__(self, pars=None, **kwargs):
if base_path is None:
raise RuntimeError('Could not guess base_path.')
else:
- self.info.recipe.base_path = base_path
+ self.info.base_path = base_path
# Default scan label
# if self.info.label is None:
# self.info.label = 'S%5d' % rinfo.scan_number
# Construct file names
- self.data_file = self.info.recipe.data_file_pattern % self.info.recipe
+ self.data_file = self.info.data_file_pattern % self.info
log(3, 'Will read data from file %s' % self.data_file)
- if self.info.recipe.dark_number is None:
+ if self.info.dark_number is None:
self.dark_file = None
log(3, 'No data for dark')
else:
- self.dark_file = self.info.recipe.dark_file_pattern % self.info.recipe
+ self.dark_file = self.info.dark_file_pattern % self.info
log(3, 'Will read dark from file %s' % self.dark_file)
- if self.info.recipe.flat_number is None:
+ if self.info.flat_number is None:
self.flat_file = None
log(3, 'No data for flat')
else:
- self.flat_file = self.info.recipe.flat_file_pattern % self.info.recipe
+ self.flat_file = self.info.flat_file_pattern % self.info
log(3, 'Will read flat from file %s' % self.flat_file)
# Create the ptyd file name if not specified
if self.info.dfile is None:
- home = Paths(io_par).home
- self.info.dfile = '%s/prepdata/data_%05d.ptyd' % (home, self.info.recipe.scan_number)
+ home = Paths(IO_par).home
+ self.info.dfile = '%s/prepdata/data_%05d.ptyd' % (home, self.info.scan_number)
log(3, 'Save file is %s' % self.info.dfile)
log(4, u.verbose.report(self.info))
@@ -118,8 +172,8 @@ def load_common(self):
# FIXME: do something better here. (detector-dependent)
# Load mask
# common.weight2d = None
- if self.info.recipe.mask_file is not None:
- common.weight2d = io.h5read(self.info.recipe.mask_file, 'mask')['mask'].astype(float)
+ if self.info.mask_file is not None:
+ common.weight2d = io.h5read(self.info.mask_file, 'mask')['mask'].astype(float)
return common
@@ -129,12 +183,12 @@ def load_positions(self):
"""
# Load positions from file if possible.
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
x = mmult[0] * io.h5read(self.data_file, 'data.posx')['posx']
y = mmult[1] * io.h5read(self.data_file, 'data.posy')['posy']
pos_list = []
- for i in range(0,len(x),self.info.recipe.averaging_number):
+ for i in range(0,len(x),self.info.averaging_number):
pos_list.append([y[i],x[i]])
positions = np.array(pos_list)
@@ -169,14 +223,14 @@ def load(self, indices):
i = 0
h = 0
- key = H5_PATHS.frame_pattern % self.info.recipe
+ key = H5_PATHS.frame_pattern % self.info
for j in indices:
mean = []
- while h < (i+self.info.recipe.averaging_number):
- mean.append(io.h5read(self.data_file, H5_PATHS.frame_pattern % self.info.recipe, slice=h)[key].astype(np.float32))
+ while h < (i+self.info.averaging_number):
+ mean.append(io.h5read(self.data_file, H5_PATHS.frame_pattern % self.info, slice=h)[key].astype(np.float32))
h+=1
raw[j] = np.array(mean).mean(0).T
- i+=self.info.recipe.averaging_number
+ i+=self.info.averaging_number
log(3, 'Data loaded successfully.')
return raw, pos, weights
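
The data_file_pattern machinery above relies on old-style %-formatting against the parameter tree, which behaves like a mapping. For example:

    # The pattern keys resolve against the parameter tree (dict-like).
    info = {'base_path': './', 'scan_number': 42}
    print('%(base_path)sinput/r%(scan_number)04d.h5' % info)
    # -> ./input/r0042.h5
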
diff --git a/ptypy/experiment/Bragg3dSim.py b/ptypy/experiment/Bragg3dSim.py
new file mode 100644
index 000000000..9f5ba5056
--- /dev/null
+++ b/ptypy/experiment/Bragg3dSim.py
@@ -0,0 +1,293 @@
+"""
+This module provides simulated 3D Bragg data.
+"""
+
+import ptypy
+from ptypy.core.data import PtyScan
+import ptypy.utils as u
+from ptypy import defaults_tree
+from ptypy.experiment import register
+from ptypy.utils.descriptor import EvalDescriptor
+from ptypy.core import geometry_bragg
+from ptypy.core import illumination
+from ptypy.core import xy
+
+import numpy as np
+import time
+
+logger = u.verbose.logger
+
+@register()
+class Bragg3dSimScan(PtyScan):
+ """
+ Provides simulated 3D Bragg data based on the numerical
+ experiment in Berenguer et al., PRB 88 (2013) 144101. The settings
+ from that paper are very expensive, so the defaults here are simpler
+ and do not necessarily make physical sense.
+
+ Defaults:
+
+ [name]
+ default = Bragg3dSimScan
+ type = str
+ help = PtyScan subclass identifier
+
+ [shape]
+ default = 256
+
+ [distance]
+ default = 2
+
+ [psize]
+ default = 13e-6
+
+ [energy]
+ default = 8.5
+
+ [rocking_step]
+ default = .0025
+ type = float
+ help = Step size in the rocking curve in degrees
+
+ [n_rocking_positions]
+ default = 40
+ type = int
+ help = Number of rocking positions
+
+ [theta_bragg]
+ default = 22.32
+ type = float
+ help = Bragg angle in degrees
+
+ [shuffle]
+ default = False
+ type = bool
+ help = Shuffles all diffraction patterns
+ doc = Mainly to test that they are still assembled correctly.
+
+ [dump]
+ default = None
+ type = str
+ help = Dump raw simulated 3d diffraction data to npz file
+
+ [dry_run]
+ default = False
+ type = bool
+ help = Don't calculate diffraction patterns
+ doc = Skips the heavy FFT and just returns empty diff patterns.
+
+ [illumination.aperture.form]
+ default = 'rect'
+ type = str
+ help = default override, see :any:`scan.Full.illumination`
+ doc =
+
+ [illumination.aperture.size]
+ default = 3e-6
+ type = float, tuple
+ help = default override, see :any:`scan.Full.illumination`
+ doc =
+
+ [scantype]
+ default = '1d'
+ type = str
+ help = Type of position scan
+ doc = '1d' for scan along y as in the paper, '2d' for xy spiral scan
+
+ [stepsize]
+ default = .5e-6
+ type = float
+ help = Step size of the spiral scan
+
+ """
+
+ def __init__(self, pars=None, **kwargs):
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars)
+ self.p.update(kwargs)
+ super(Bragg3dSimScan, self).__init__(self.p)
+
+ # do the simulation
+ self.simulate()
+
+ def simulate(self):
+ ### Set up a 3D geometry
+ shape = tuple(u.expect2(self.p.shape))
+ psize = tuple(u.expect2(self.p.psize))
+ g = ptypy.core.geometry_bragg.Geo_Bragg(
+ psize=(self.p.rocking_step,) + psize,
+ shape=(self.p.n_rocking_positions,) + shape,
+ energy=self.p.energy,
+ distance=self.p.distance,
+ theta_bragg=self.p.theta_bragg)
+
+ logger.info('Data will be simulated with these geometric parameters:')
+ logger.info(g)
+
+ ### Set up scan positions in the xy plane
+ if self.p.scantype == '2d':
+ pos = u.Param()
+ pos.spacing = self.p.stepsize
+ pos.extent = (2e-6, 5e-6)
+ pos.model = 'spiral'
+ pos = xy.from_pars(pos)
+ Npos = pos.shape[0]
+ positions = np.zeros((Npos, 3))
+ positions[:, 0] = pos[:, 0]
+ positions[:, 2] = pos[:, 1]
+ elif self.p.scantype == '1d':
+ pos = np.arange(-2.5e-6, 2.5e-6, self.p.stepsize)
+ Npos = len(pos)
+ positions = np.zeros((Npos, 3))
+ positions[:, 2] = pos
+
+ ### Set up the object and its views
+
+ # Create a container for the object array, which will represent
+ # the object in the non-orthogonal coordinate system conjugate
+ # to the q-space measurement frame.
+ C = ptypy.core.Container(data_type=np.complex128, data_dims=3)
+
+ # For each scan position in the orthogonal coordinate system,
+ # find the natural coordinates and create a View instance there.
+ views = []
+ for pos in positions:
+ pos_ = g._r3r1r2(pos)
+ views.append(ptypy.core.View(C, storageID='Sobj',
+ psize=g.resolution, coord=pos_, shape=g.shape))
+ S = C.storages['Sobj']
+ C.reformat()
+
+ # Define the test sample based on the orthogonal position of
+ # each voxel. First, the cartesian grid is obtained from the
+ # geometry object, then this grid is used as a condition for the
+ # sample's magnitude.
+ xx, zz, yy = g.transformed_grid(S, input_space='real',
+ input_system='natural')
+ S.fill(0.0)
+ S.data[(zz >= -90e-9) & (zz < 90e-9) & (yy + .3*zz >= 1e-6) &
+ (yy - .3*zz < 2e-6) & (xx < 1e-6)] = 1
+ S.data[(zz >= -90e-9) & (zz < 90e-9) & (yy + .3*zz >= -2e-6) &
+ (yy - .3*zz < -1e-6)] = 1
+
+ # save this for possible export
+ self.simulated_object = S
+
+ ### Set up the probe and calculate diffraction patterns
+
+ # First set up a two-dimensional representation of the incoming
+ # probe, with arbitrary pixel spacing.
+ extent = max(g.probe_extent_vs_fov())
+ psize = g.resolution.min() / 5
+ shape = int(np.ceil(extent / psize))
+ logger.info('Generating incoming probe %d x %d (%.3e x %.3e) with psize %.3e...'
+ % (shape, shape, extent, extent, psize))
+ t0 = time.time()
+
+ Cprobe = ptypy.core.Container(data_dims=2, data_type='float')
+ Sprobe = Cprobe.new_storage(psize=psize, shape=shape)
+
+ # fill the incoming probe
+ illumination.init_storage(Sprobe, self.p.illumination, energy=g.energy)
+ logger.info('...done in %.3f seconds' % (time.time() - t0))
+
+ # The Bragg geometry has a method to prepare a 3d Storage by extruding
+ # the 2d probe and interpolating to the right grid. The returned storage
+ # contains a single view compatible with the object views.
+ Sprobe_3d = g.prepare_3d_probe(Sprobe, system='natural')
+ probeView = Sprobe_3d.views[0]
+
+ # Calculate diffraction patterns by using the geometry's propagator.
+ diff = []
+ if self.p.dry_run:
+ for v in views:
+ diff.append(np.zeros(probeView.shape))
+ else:
+ for v in views:
+ diff.append(np.abs(g.propagator.fw(
+ v.data * probeView.data))**2)
+
+ # dump the 3d arrays for testing
+ if self.p.dump is not None:
+ np.savez(self.p.dump,
+ **{'diff%02d'%i : diff[i] for i in range(len(diff))})
+
+ # stack the 2d diffraction patterns and save
+ self.diff = []
+ for i in range(len(diff)):
+ for j in range(len(diff[i])):
+ self.diff.append(diff[i][j,:,:])
+
+ # Convert the positions from (x, z, y) to (angle, x, z, y) and save
+ # them. We need the angle, and in the future we won't know in which
+ # plane the scan was done (although here it is in xy). These xyz
+ # axes still follow Berenguer et al., PRB 2013.
+ self.positions = np.empty((g.shape[0] * Npos, 4), dtype=float)
+ angles = (np.arange(g.shape[0]) - g.shape[0] / 2.0 + 1.0/2) * g.psize[0]
+ for i in range(Npos):
+ for j in range(g.shape[0]):
+ self.positions[i * g.shape[0] + j, 1:] = positions[i, :]
+ self.positions[i * g.shape[0] + j, 0] = angles[j]
+
+ # shuffle everything as a test
+ if self.p.shuffle:
+ order = range(len(self.diff))
+ from random import shuffle
+ shuffle(order)
+ self.diff = [self.diff[i] for i in order]
+ new_pos = np.empty_like(self.positions)
+ for i in range(len(new_pos)):
+ new_pos[i] = self.positions[order[i]]
+ self.positions = new_pos
+
+ def load_common(self):
+ """
+ We have to communicate the number of rocking positions that the
+ model should expect, otherwise it never knows when there is data
+ for a complete POD.
+ """
+ return {
+ 'rocking_step': self.p.rocking_step,
+ 'n_rocking_positions': self.p.n_rocking_positions,
+ 'theta_bragg': self.p.theta_bragg,
+ }
+
+ def load_positions(self):
+ """
+ For the 3d Bragg model, load_positions returns N-by-4 positions,
+ (angle, x, z, y). The angle can be relative or absolute, the
+ model doesn't care, but it does have to be uniformly spaced for
+ the analysis to make any sense.
+ """
+ return self.positions
+
+ def load(self, indices):
+ """
+ This function returns diffraction images indexed from top left as
+ viewed along the beam, i.e. they have (-q1, q2) indexing. PtyScan
+ can always flip/rotate images.
+ """
+ raw, positions, weights = {}, {}, {}
+
+ # pick out the requested indices
+ for i in indices:
+ raw[i] = self.diff[i][::-1,:]
+
+ return raw, positions, weights
+
+ def load_weight(self):
+ return np.ones_like(self.diff[0])
+
+defaults_tree['scandata.Bragg3dSimScan'].add_child(illumination.illumination_desc, copy=True)
+defaults_tree['scandata.Bragg3dSimScan.illumination'].prune_child('diversity')
+
+
+if __name__ == '__main__':
+ u.verbose.set_level(3)
+ ps = Bragg3dSimScan()
+ ps.initialize()
+ while True:
+ msg = ps.auto(23)
+ if msg == ps.EOS:
+ break
+ logger.info('Got %d images' % len(msg['iterable']))
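
The positions array assembled in simulate() interleaves rocking angles with in-plane scan points. A small numeric sketch of that bookkeeping, with illustrative values:

    import numpy as np

    # Each (x, z, y) scan point is repeated once per rocking angle; angles
    # are centred on zero and spaced by rocking_step.
    rocking_step, n_rock = .0025, 40
    positions = np.zeros((5, 3))        # five dummy (x, z, y) scan positions
    angles = (np.arange(n_rock) - n_rock / 2.0 + 1.0 / 2) * rocking_step
    full = np.empty((n_rock * len(positions), 4))
    for i in range(len(positions)):
        for j in range(n_rock):
            full[i * n_rock + j, 1:] = positions[i]
            full[i * n_rock + j, 0] = angles[j]
    print(full.shape)   # (200, 4)
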
diff --git a/ptypy/experiment/DiProI_FERMI.py b/ptypy/experiment/DiProI_FERMI.py
index ccdf81a85..c15d6459c 100644
--- a/ptypy/experiment/DiProI_FERMI.py
+++ b/ptypy/experiment/DiProI_FERMI.py
@@ -10,13 +10,17 @@
:license: GPLv2, see LICENSE for details.
"""
+
import numpy as np
import os
+
from .. import utils as u
+from . import register
from .. import io
from ..core.data import PtyScan
-from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
+from ..core import Ptycho
+
+IO_par = Ptycho.DEFAULT['io']
# testing commits again
# Parameters for the h5 file
H5_PATHS = u.Param()
@@ -27,67 +31,133 @@
FLAT_PATHS = u.Param()
FLAT_PATHS.key = "flat"
-# DiProI recipe default parameters
-RECIPE = u.Param()
-RECIPE.base_path = None
-RECIPE.scan_name = None # this has to be a string (e.g. 'Cycle001')
-RECIPE.run_ID = None # this has to be a string (e.g. 'Scan018')
-RECIPE.dark_name = None # this has to be a string (e.g. 'Dark')
-RECIPE.dark_value = 200. # Used if dark_number is None
-RECIPE.detector_flat_file = None
-RECIPE.h5_file_pattern = '%(base_path)s/imported/%(run_ID)s/%(scan_name)s/rawdata/'
-RECIPE.dark_h5_file_pattern = '%(base_path)s/imported/%(run_ID)s/%(dark_name)s/rawdata/'
-RECIPE.date = None
-RECIPE.motors = ['sample_x', 'sample_y'] # check orientation
-RECIPE.energy = None
-RECIPE.lam = None
-RECIPE.z = None
-RECIPE.motors_multiplier = 1e-3 # DiProI-specific
-RECIPE.mask_file = None # Mask file name
-RECIPE.use_refined_positions = False
-RECIPE.refined_positions_pattern = '%(base_path)s/imported/%(run_ID)s/%(scan_name)s/'
-RECIPE.flat_division = False # Switch for flat division
-RECIPE.dark_subtraction = False # Switch for dark subtraction
-
-# Default generic parameter set from
-DiProI_FERMIDEFAULT = PtyScan.DEFAULT.copy()
-DiProI_FERMIDEFAULT.recipe = RECIPE
-DiProI_FERMIDEFAULT.auto_center = False
-
+@register()
class DiProIFERMIScan(PtyScan):
- DEFAULT = DiProI_FERMIDEFAULT
+ """
+ DiProI (FERMI) data preparation class.
+
+ Defaults:
+
+ [name]
+ default = DiProIFERMIScan
+ type = str
+ help =
+
+ [base_path]
+ default = None
+ type = str
+ help =
+
+ [scan_name]
+ default = None
+ type = str
+ help = has to be a string (e.g. 'Cycle001')
+
+ [run_ID]
+ default = None
+ type = str
+ help = has to be a string (e.g. 'Scan018')
+
+ [dark_name]
+ default = None
+ type = str
+ help = has to be a string (e.g. 'Dark')
+
+ [dark_value]
+ default = 200.0
+ type = float
+ help = Used if dark_name is None
+
+ [detector_flat_file]
+ default = None
+ type = str
+ help =
+
+ [h5_file_pattern]
+ default = '%(base_path)s/imported/%(run_ID)s/%(scan_name)s/rawdata/'
+ type = str
+ help =
+
+ [dark_h5_file_pattern]
+ default = '%(base_path)s/imported/%(run_ID)s/%(dark_name)s/rawdata/'
+ type = str
+ help =
+
+ [date]
+ default = None
+ type = str
+ help =
+
+ [motors]
+ default = ['sample_x', 'sample_y']
+ type = list
+ help = check orientation
+
+ [motors_multiplier]
+ default = 1e-3
+ type = float
+ help = DiProI-specific
+
+ [mask_file]
+ default = None
+ type = str
+ help = Mask file name
+
+ [use_refined_positions]
+ default = False
+ type = bool
+ help =
+
+ [refined_positions_pattern]
+ default = '%(base_path)s/imported/%(run_ID)s/%(scan_name)s/'
+ type = str
+ help =
+
+ [flat_division]
+ default = False
+ type = bool
+ help = Switch for flat division
+
+ [dark_subtraction]
+ default = False
+ type = bool
+ help = Switch for dark subtraction
+
+ [auto_center]
+ default = False
+
+ """
def __init__(self, pars=None, **kwargs):
"""
DiProI (FERMI) data preparation class.
"""
- # Initialize parent class. All updated parameters are now in self.info
- recipe_default = RECIPE.copy()
- recipe_default.update(pars.recipe, in_place_depth=1)
- pars.recipe.update(recipe_default)
- super(DiProIFERMIScan, self).__init__(pars, **kwargs)
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+
+ # Initialize parent class. All updated parameters are now in self.info
+ super(DiProIFERMIScan, self).__init__(p, **kwargs)
# Check whether base_path exists
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
raise RuntimeError('Base path missing.')
# Construct the file names
self.h5_filename_list = sorted([i for i in os.listdir(
- self.info.recipe.h5_file_pattern % self.info.recipe)
+ self.info.h5_file_pattern % self.info)
if not i.startswith('.')])
# Path to data files
- self.data_path = (self.info.recipe.h5_file_pattern %
- self.info.recipe)
+ self.data_path = (self.info.h5_file_pattern % self.info)
u.log(3, 'Will read data from h5 files in {data_path}'.format(
data_path=self.data_path))
# Path to data files
- self.dark_path = (self.info.recipe.dark_h5_file_pattern %
- self.info.recipe)
+ self.dark_path = (self.info.dark_h5_file_pattern %
+ self.info)
u.log(3, 'Will read dark from h5 files in {dark_path}'.format(
dark_path=self.dark_path))
@@ -103,21 +173,21 @@ def load_weight(self):
"""
# FIXME: do something better here. (detector-dependent)
# Load mask as weight
- if self.info.recipe.mask_file is not None:
- return io.h5read(self.info.recipe.mask_file, 'mask')['mask'].astype(
+ if self.info.mask_file is not None:
+ return io.h5read(self.info.mask_file, 'mask')['mask'].astype(
np.float32)
def load_positions(self):
"""
Load the positions and return as an (N, 2) array.
"""
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
# Load positions
- if self.info.recipe.use_refined_positions:
+ if self.info.use_refined_positions:
# From prepared .h5 file
- positions = io.h5read(self.info.recipe.refined_positions_pattern %
- self.info.recipe + '/Fermi_reconstruction.h5',
+ positions = io.h5read(self.info.refined_positions_pattern %
+ self.info + '/Fermi_reconstruction.h5',
'data.probe_positions')['probe_positions']
positions = [(positions[0, i], positions[1, i])
@@ -145,14 +215,14 @@ def load_common(self):
common = u.Param()
key = H5_PATHS.frame_pattern
- if self.info.recipe.dark_name is not None:
+ if self.info.dark_name is not None:
dark = [io.h5read(self.dark_path + i, key)[key].astype(np.float32)
for i in os.listdir(self.dark_path) if i.startswith('Dark')]
else:
- dark = self.info.recipe.dark_value
+ dark = self.info.dark_value
- if self.info.recipe.detector_flat_file is not None:
- flat = io.h5read(self.info.recipe.detector_flat_file,
+ if self.info.detector_flat_file is not None:
+ flat = io.h5read(self.info.detector_flat_file,
FLAT_PATHS.key)[FLAT_PATHS.key]
else:
flat = 1.
@@ -190,12 +260,12 @@ def correct(self, raw, weights, common):
:return:
"""
# Apply flat and dark, only dark, or no correction
- if self.info.recipe.flat_division and self.info.recipe.dark_subtraction:
+ if self.info.flat_division and self.info.dark_subtraction:
for j in raw:
raw[j] = (raw[j] - common.dark) / (common.flat - common.dark)
raw[j][raw[j] < 0] = 0
data = raw
- elif self.info.recipe.dark_subtraction:
+ elif self.info.dark_subtraction:
for j in raw:
raw[j] = raw[j] - common.dark
raw[j][raw[j] < 0] = 0
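
The correct() method above implements standard flat/dark correction. A minimal numeric sketch of the flat-division branch:

    import numpy as np

    # Divide by the dark-subtracted flat, then clip negatives to zero.
    raw = np.random.poisson(100, (64, 64)).astype(float)
    dark = np.full((64, 64), 5.)
    flat = np.full((64, 64), 200.)
    corrected = (raw - dark) / (flat - dark)
    corrected[corrected < 0] = 0
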
diff --git a/ptypy/experiment/I13DLS_legacy.py b/ptypy/experiment/I13DLS_legacy.py
deleted file mode 100644
index 04f5ddd05..000000000
--- a/ptypy/experiment/I13DLS_legacy.py
+++ /dev/null
@@ -1,358 +0,0 @@
-# -*- coding: utf-8 -*-
-"""\
-Tools specific to the I13 beamline, Diamond.
-
-TODO:
- * smarter way to guess parameters.
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-
-__all__ = ['prepare_data', 'DataReader']
-
-from matplotlib import pyplot as plt
-import ptypy.utils as u
-import numpy as np
-import glob
-import os
-import fnmatch
-import time
-import re
-
-from ptypy.utils.scripts import expect2, mass_center
-from ptypy.utils.verbose import logger
-from ptypy import io
-
-lastDR = None
-
-# I13DLS defaults
-SAVE_FILENAME_PATTERN = '{write_path}/{scan_label}_data_{dpsize[0]:03d}x{dpsize[1]:03d}.h5'
-WRITE_PATH_PATTERN = '{base_path}/processing/analysis/{scan_label}'
-#READ_PATH_PATTERN = '{base_path}/raw/{scan_number:05d}/mpx'
-NEXUS_PATTERN = '{base_path}/raw/{scan_number:05d}.nxs'
-FRAME_IN_NEXUS_FILE = 'entry1/instrument/{detector_name}/data'
-EXPOSURE_IN_NEXUS_FILE = 'entry1/instrument/{detector_name}/count_time'
-MOTORS_IN_NEXUS_FILE = 'entry1/instrument/t1_sxy'
-COMMAND_IN_NEXUS_FILE = 'entry1/scan_command'
-LABEL_IN_NEXUS_FILE = 'entry1/entry_identifier'
-EXPERIMENT_IN_NEXUS_FILE = 'entry1/experiment_identifier'
-#FILENAME_PATTERN = '{read_path}/{index}.{file_extension}'
-#INDEX_REPLACEMENT_STRING = '????'
-
-# Standard parameter structure for prepare_data
-DEFAULT = u.Param(
- verbose_level = 2, # Verbosity level
- base_path = None, #'The root directory where all experiment information can be found'
- detector_name = None, # That is the name of the detector in the nexus file
- experimentID = None, #'Experiment identifier'
- scan = None, #'Scan number' or file
- dpsize = None, #'Cropped array size for one frame'
- ctr = None, #'Center of the diffraction pattern.'
- energy = None, #'Nominal Radiation energy in keV'
- detector_pixel_size = None, #'Detector pixel size in meters'
- detector_distance = None, #'Detector distance to sample in meters'
- motors = ['t1_sy','t1_sx'], #'Motor names to determine the sample translation'
- motors_multiplier = 1e-6, #'Motor conversion factor to meters'
- mask = None, #'Mask or name or number of the file containing the mask'
- dark = None, #'Dark frame or name or number of the file containing the dark'
- flat = None, #'Flat frame or name of number the file containing the flat'
- flip = None, #None, 'lr','ud'
- rebin = 1,
- rotate = False,
-)
-
-def flip(A,what):
- if what is None:
- return A
- elif what == 'lr':
- return A[...,::-1,:]
- elif what == 'ud':
- return A[...,::-1]
- elif what in ['all', 'udlr']:
- return A[...,::-1,::-1]
- else:
- raise RuntimeError('Unknown flipping option %s' % str(what))
-
-def load(filename,name=None):
- """\
- Helper function to read mask, flat and dark.
- """
-
- #try:
- # a = io.h5read(filename)
- #except:
- # a = io.loadmat(filename)
-
- if name is not None:
- return io.h5read(filename,name)[name]
- else:
- return io.h5read(filename).values()[0] # Take first array we find - hint: better have a single array in the file.
-
-class DataReader(object):
- """\
- I13 DLS data reading class.
- Set flat, dark and mask correction for all data read
- """
-
- def __init__(self, pars=None):#base_path, experimentID=None, mask=None, flat=None, dark=None):
- """\
- I13 DLS data reading class. The only mandatory argument for this constructor are 'base_path' and 'user'.
-
- Mask, flat or dark should be either None, scan numbers (integer) or file paths
- """
- # Semi-smart base path detection: we are looking for a "raw" directory
- p = u.Param(DEFAULT)
- if pars is not None:
- p.update(pars)
-
- try:
- verbose_level = p.verbose_level
- verbose.set_level(verbose_level)
- except:
- pass
- self.p=p
- base_path = p.base_path if p.base_path.endswith('/') else p.base_path + '/'
- experimentID = p.experimentID
-
- if base_path is None:
- d = os.getcwd()
- while True:
- if 'raw' in os.listdir(d):
- base_path = d
- break
- d,rest = os.path.split(d)
- if not rest:
- break
- if base_path is None:
- raise RuntimeError('Could not figure out base_path')
- logger.debug( 'base_path: "%s" (automatically set).' % base_path)
- else:
- logger.debug( 'base_path: "%s".' % base_path)
- self.base_path = base_path
-
- self.nxs = u.Param()
- self.nxs.frame = FRAME_IN_NEXUS_FILE.format(**p)
- self.nxs.exp = EXPOSURE_IN_NEXUS_FILE.format(**p)
- self.nxs.motors = MOTORS_IN_NEXUS_FILE.format(**p)
- self.nxs.command = COMMAND_IN_NEXUS_FILE.format(**p)
- self.nxs.experiment = EXPERIMENT_IN_NEXUS_FILE.format(**p)
- self.nxs.label = LABEL_IN_NEXUS_FILE.format(**p)
- # Load mask, flat and dark
-
- try:
- self.mask = load(self.get_nexus_file(p.mask),self.nxs.frame)
- except:
- self.mask = p.mask
- #assert self.mask.shape[-2:] == sh
-
- try:
- self.dark = load(self.get_nexus_file(p.dark),self.nxs.frame)
- except:
- self.dark = p.dark
- #assert self.dark.shape[-2:] == sh
-
- try:
- self.flat = load(self.get_nexus_file(p.flat),self.nxs.frame)
- except:
- self.flat = p.flat
- #assert self.flat.shape[-2:] == sh
-
-
- def read(self, scan=None, **kwargs):
- """\
- Read in the data
- TODO: (maybe?) MPI to avoid loading all data in a single process for large scans.
- """
-
- scan=scan if scan is not None else self.p.scan
- logger.info( 'Processing scan number %s' % str(scan))
-
- self.scan = self.get_nexus_file(scan)
- logger.debug( 'Data will be read from path: %s' % self.scan)
-
- self.exp = load(self.scan,self.nxs.frame)
- try:
- self.motors = load(self.scan,self.nxs.motors)
- except:
- self.motors=None
- self.command = load(self.scan,self.nxs.command)
- self.data = load(self.scan, self.nxs.frame).astype(float)
- self.label = load(self.scan, self.nxs.label)[0]
-
-
- if self.p.experimentID is None:
- try:
- experimentID = load(self.scan, self.nxs.experiment)[0]
- except:
- logger.debug('Could not find experiment ID from nexus file %s.' % self.scan)
- experimentID = os.path.split(base_path[:-1])[1]
- logger.debug( 'experimentID: "%s" (automatically set).' % experimentID)
- else:
- logger.debug( 'experimentID: "%s".' % self.p.experimentID)
- self.experimentID = self.p.experimentID
-
-
-
- dpsize = self.p.dpsize
- ctr = self.p.ctr
-
- sh = self.data.shape[-2:]
- fullframe = False
- if dpsize is None:
- dpsize = sh
- logger.debug( 'Full frames (%d x %d) will be saved (so no recentering).' % (sh))
- fullframe = True
- self.p.dpsize = expect2(dpsize)
-
- #data_filename = self.get_save_filename(scan_number, dpsize)
- #logger.info( 'Data will be saved to %s' % data_filename)
- f = self.data
- if not fullframe:
- # Compute center of mass
- if self.mask is None:
- ctr_auto = mass_center(f.sum(0))
- else:
- ctr_auto = mass_center(f.sum(0)*self.mask)
- print ctr_auto
- # Check for center position
- if ctr is None:
- ctr = ctr_auto
- logger.debug( 'Using center: (%d, %d)' % (ctr[0],ctr[1]))
-
- #elif ctr == 'inter':
- #import matplotlib as mpl
- #fig = mpl.pyplot.figure()
- #ax = fig.add_subplot(1,1,1)
- #ax.imshow(np.log(f))
- #ax.set_title('Select center point (hit return to finish)')
- #s = u.Multiclicks(ax, True, mode='replace')
- #mpl.pyplot.show()
- #s.wait_until_closed()
- #ctr = np.round(np.array(s.pts[0][::-1]));
- #logger.debug( 'Using center: (%d, %d) - I would have guessed it is (%d, %d)' % (ctr[0], ctr[1], ctr_auto[0], ctr_auto[1]))
-
- else:
- logger.debug( 'Using center: (%d, %d) - I would have guessed it is (%d, %d)' % (ctr[0], ctr[1], ctr_auto[0], ctr_auto[1]))
-
- self.dpsize = dpsize
- self.ctr = np.array(ctr)
- lim_inf = -np.ceil(ctr - dpsize/2.).astype(int)
- lim_sup = np.ceil(ctr + dpsize/2.).astype(int) - np.array(sh)
- hplane_list = [(lim_inf[0], lim_sup[0]), (lim_inf[1], lim_sup[1])]
- logger.debug( 'Going from %s to %s (hplane_list = %s)' % (str(sh), str(dpsize), str(hplane_list)))
- if self.mask is not None: self.mask = u.crop_pad(self.mask, hplane_list).astype(bool)
- if self.flat is not None: self.flat = u.crop_pad(self.flat, hplane_list,fillpar=1.)
- if self.dark is not None: self.dark = u.crop_pad(self.dark, hplane_list)
- if self.data is not None: self.data = u.crop_pad(self.data, hplane_list)
- # obsolete
-
- #return data, self.mask, self.flat, self.dark
-
- def prepare(self,scan=None,filename=None,dtype=np.uint32,**kwargs):
-
- self.p.update(kwargs) #rebin = rebin if rebin is not None else self.p.rebin
-
- scan = scan if scan is not None else self.p.scan
-
- self.read( scan, **kwargs)
- DS = u.Param()
-
- dark = self.dark
- data = self.data
- if dark is not None:
- if dark.ndim == 3:
- dark = dark.mean(0)
- dark = np.resize(dark,data.shape)
-
- flat = self.flat
- if flat is not None:
- if flat.ndim == 3:
- flat = flat.mean(0)
- flat = np.resize(flat,self.data.shape)
- #plt.ion();
- #plt.figure();plt.imshow(dark[0]);plt.colorbar()
- #plt.figure();plt.imshow((flat-dark)[0]);plt.colorbar()
- #plt.figure();plt.imshow(data[0]);plt.colorbar()
- if flat is not None and dark is not None:
- data = (data-dark )/(flat-dark)
- elif dark is not None:
- data = data - dark
- else:
- data = data
- # remove negative values
- data[data<0]=0
- #
- #plt.figure();plt.imshow(DS.data[0],vmin=0);plt.colorbar()
- #
- if self.mask is None:
- mask = np.ones_like(data,dtype=np.bool)
- #
- #DS.flat = self.flat
- #DS.dark = self.dark
- DS.scan_info = u.Param()
- s = DS.scan_info
- p = self.p
- #s.scan_number = p.scan_number
- s.scan_label = 'S'+self.label #'S%05d' % p.scan_number
- s.data_filename = self.scan #scandict['data_filename']
- s.wavelength = u.keV2m( p.energy )
- s.energy = p.energy
- rebin = self.p.rebin
- s.detector_pixel_size = p.detector_pixel_size * rebin if p.detector_pixel_size is not None else None
- p.dpsize = p.dpsize / rebin
- s.detector_distance = p.detector_distance
- s.initial_ctr = self.ctr / rebin
- if rebin!=1:
- sh = data.shape
- data = u.rebin(data,sh[0],sh[1]/rebin,sh[2]/rebin)
- mask = u.rebin(mask.astype(int),sh[0],sh[1]/rebin,sh[2]/rebin).astype(bool)
-
- data = flip(data,self.p.flip)
- mask = flip(mask,self.p.flip)
-
- DS.data = data.astype(dtype)
- DS.mask = mask
- DS.data[np.invert(DS.mask)]=0
- #s.date_collected = scandict['date']
- s.date_processed = time.asctime()
- s.exposure_time = self.exp
-
- #if meta is not None: s.raw_filenames = meta['filename']
- s.preparation_basepath = self.base_path
- s.preparation_other = {}
- s.shape = DS.data.shape
-
- s.positions_theory = None
-
- s.scan_command = self.command
-
- motors = p.motors
- if self.motors is not None:
- Nmotors = len(motors)
- logger.debug( 'Motors are : %s' % str(p.motors))
- mmult = u.expect2(p.motors_multiplier)
-
- pos_list = [mmult[i]*np.array(self.motors[motors[i]]) for i in range(Nmotors)]
- s.positions = np.array(pos_list).T
- else:
- s.positions = None
-
- self.p.scan_label = s.scan_label
- if filename is None:
- p.write_path = WRITE_PATH_PATTERN.format(**p)
- filename = SAVE_FILENAME_PATTERN.format(**p)
-
- s.data_filename = u.clean_path(filename)
- io.h5write(filename,DS)
- return DS
-
- def get_nexus_file(self, number):
- if str(number)==number or number is None:
- # ok this is not a number but a filename
- return number
- else:
- return NEXUS_PATTERN.format(base_path=self.base_path,
- scan_number=number)
diff --git a/ptypy/experiment/ID16Anfp.py b/ptypy/experiment/ID16Anfp.py
index 6cae55df6..d1835298d 100644
--- a/ptypy/experiment/ID16Anfp.py
+++ b/ptypy/experiment/ID16Anfp.py
@@ -10,14 +10,16 @@
import numpy as np
import os
+
from .. import utils as u
from .. import io
-from ..utils import parallel
+from . import register
from ..core.data import PtyScan
from ..utils.verbose import log
from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
-from .. import core
+from ..core import Ptycho
+
+IO_par = Ptycho.DEFAULT['io']
logger = u.verbose.logger
@@ -38,48 +40,114 @@
H5_PATHS.frames = '{entry}/ptycho/data'
H5_PATHS.motors = '{entry}/ptycho/motors'
-# Recipe defaults
-RECIPE = u.Param()
-RECIPE.experimentID = None # Experiment identifier - will be read from h5
-RECIPE.energy = None # Energy in keV - will be read from h5
-RECIPE.lam = None # 1.2398e-9 / RECIPE.energy
-RECIPE.z = None # Distance from object to screen
-RECIPE.motors = ['spy', 'spz'] # 'Motor names to determine the sample translation'
-RECIPE.motors_multiplier = 1e-6 # 'Motor conversion factor to meters'
-RECIPE.base_path = None # Base path to read and write data - can be guessed.
-RECIPE.sample_name = None # Sample name - will be read from h5
-RECIPE.scan_label = None # Scan label - will be read from h5
-RECIPE.flat_label = None # Flat label - equal to scan_label by default
-RECIPE.dark_label = None # Dark label - equal to scan_label by default
-RECIPE.mask_file = None # Mask file name
-RECIPE.use_h5 = False # Load data from prepared h5 file
-RECIPE.flat_division = False # Switch for flat division
-RECIPE.dark_subtraction = False # Switch for dark subtraction
-
-# These are home-made wrapped data
-RECIPE.data_file_pattern = '{[base_path]}/{[sample_name]}/{[scan_label]}_data.h5'
-RECIPE.flat_file_pattern = '{[base_path]}/{[sample_name]}/{[flat_label]}_flat.h5'
-RECIPE.dark_file_pattern = '{[base_path]}/{[sample_name]}/{[dark_label]}_dark.h5'
-
-# The h and v are inverted here - that's on purpose!
-RECIPE.distortion_h_file = '/data/id16a/inhouse1/instrument/img1/optique_peter_distortion/detector_distortion2d_v.edf'
-RECIPE.distortion_v_file = '/data/id16a/inhouse1/instrument/img1/optique_peter_distortion/detector_distortion2d_h.edf'
-RECIPE.whitefield_file = '/data/id16a/inhouse1/instrument/whitefield/white.edf'
-
-# Generic defaults
-ID16A_DEFAULT = PtyScan.DEFAULT.copy()
-ID16A_DEFAULT.recipe = RECIPE
-ID16A_DEFAULT.auto_center = False
-# Orientation of Frelon frame - only LR flip
-ID16A_DEFAULT.orientation = (False, True, False)
-
+@register()
class ID16AScan(PtyScan):
"""
Subclass of PtyScan for ID16A beamline (specifically for near-field
ptychography).
+
+ Defaults:
+
+ [name]
+ default = 'ID16AScan'
+ type = str
+ help =
+
+ [experimentID]
+ default = None
+
+ [motors]
+ default = ['spy', 'spz']
+ type = list
+ help = Motor names to determine the sample translation
+
+ [motors_multiplier]
+ default = 1e-6
+ type = float
+ help = Motor conversion factor to meters
+
+ [base_path]
+ default = None
+ type = str
+ help = Base path to read and write data - can be guessed
+
+ [sample_name]
+ default = None
+ type = str
+ help = Sample name - will be read from h5
+
+ [scan_label]
+ default = None
+ type = int
+ help = Scan label - will be read from h5
+
+ [flat_label]
+ default = None
+ type = int
+ help = Flat label - equal to scan_label by default
+
+ [dark_label]
+ default = None
+ type = int
+ help = Dark label - equal to scan_label by default
+
+ [mask_file]
+ default = None
+ type = str
+ help = Mask file name
+
+ [use_h5]
+ default = False
+ type = bool
+ help = Load data from prepared h5 file
+
+ [flat_division]
+ default = False
+ type = bool
+ help = Switch for flat division
+
+ [dark_subtraction]
+ default = False
+ type = bool
+ help = Switch for dark subtraction
+
+ [data_file_pattern]
+ default = '{[base_path]}/{[sample_name]}/{[scan_label]}_data.h5'
+ type = str
+ help =
+
+ [flat_file_pattern]
+ default = '{[base_path]}/{[sample_name]}/{[flat_label]}_flat.h5'
+ type = str
+ help =
+
+ [dark_file_pattern]
+ default = '{[base_path]}/{[sample_name]}/{[dark_label]}_dark.h5'
+ type = str
+ help =
+
+ [distortion_h_file]
+ default = '/data/id16a/inhouse1/instrument/img1/optique_peter_distortion/detector_distortion2d_v.edf'
+ type = str
+ help = The h and v are inverted here - that's on purpose!
+
+ [distortion_v_file]
+ default = '/data/id16a/inhouse1/instrument/img1/optique_peter_distortion/detector_distortion2d_h.edf'
+ type = str
+ help = The h and v are inverted here - that's on purpose!
+
+ [whitefield_file]
+ default = '/data/id16a/inhouse1/instrument/whitefield/white.edf'
+ type = str
+ help =
+
+ [auto_center]
+ default = False
+
+ [orientation]
+ default = (False, True, False)
"""
- DEFAULT = ID16A_DEFAULT
def __init__(self, pars=None, **kwargs):
"""
@@ -88,59 +156,15 @@ def __init__(self, pars=None, **kwargs):
:param pars: preparation parameters
:param kwargs: Additive parameters
"""
+
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+
# Initialise parent class
- recipe_default = RECIPE.copy()
- recipe_default.update(pars.recipe, in_place_depth=1)
- pars.recipe.update(recipe_default)
-
- super(ID16AScan, self).__init__(pars, **kwargs)
-
- # Apply beamline-specific generic defaults
- #pars = PREP_DEFAULT.copy().update(pars)
- #pars.update(**kwargs)
-
- # Apply beamline parameters ("recipe")
- #rinfo = DEFAULT.copy()
- #rinfo.update(pars.recipe)
-
- # Initialise parent class with input parameters
- #super(self.__class__, self).__init__(pars)
-
- # Store recipe parameters in self.info
- #self.info.recipe = rinfo
-
- # Default scan label
- #if self.info.label is not None:
- # assert (self.info.label == rinfo.scan_label), (
- # 'Incompatible scan labels')
- #self.info.label = rinfo.scan_label
- #logger.info('Scan label: %s' % rinfo.scan_label)
-
- # Default flat and dark labels.
- #if rinfo.flat_label is None:
- # rinfo.flat_label = rinfo.scan_label
- #if rinfo.dark_label is None:
- # rinfo.dark_label = rinfo.scan_label
-
- # Attempt to extract base_path if missing
- #if rinfo.base_path is None:
- # d = os.getcwd()
- # base_path = None
- # while True:
- # if 'id16a' in os.listdir(d):
- # base_path = os.path.join(d, 'id16a')
- # break
- # d, rest = os.path.split(d)
- # if not rest:
- # break
- # if base_path is None:
- # raise RuntimeError('Could not guess base_path.')
- # else:
- # rinfo.base_path = base_path
- # logger.info('Base path: %s' % base_path)
+ super(ID16AScan, self).__init__(p, **kwargs)
# Try to extract base_path to access data files
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
d = os.getcwd()
base_path = None
while True:
@@ -153,51 +177,13 @@ def __init__(self, pars=None, **kwargs):
if base_path is None:
raise RuntimeError('Could not guess base_path.')
else:
- self.info.recipe.base_path = base_path
-
- # Data file names
- #rinfo.data_file = rinfo.data_file_pattern.format(rinfo)
- #rinfo.dark_file = rinfo.dark_file_pattern.format(rinfo)
- #rinfo.flat_file = rinfo.flat_file_pattern.format(rinfo)
-
- # Read metadata
- #h = io.h5read(rinfo.data_file)
- #entry = h.keys()[0]
- #rinfo.entry = entry
-
- # Energy
- #k = H5_PATHS.energy.format(rinfo)
- #energy = float(io.h5read(rinfo.data_file, k)[k])
- #if self.info.energy is not None:
- # assert (self.info.energy == energy), (
- # "Energy (%f keV) is read from file - please don't attempt to "
- # "overwrite it" % energy)
-
- # Attempt to extract experiment ID
- #if rinfo.experimentID is None:
- # # We use the path structure for this
- # experimentID = os.path.split(os.path.split(
- # rinfo.base_path[:-1])[0])[1]
- # logger.info('experiment ID: %s' % experimentID)
- # rinfo.experimentID = experimentID
-
- # Effective pixel size
-
- # Data file names
- #rinfo.data_file = rinfo.data_file_pattern.format(rinfo)
- #rinfo.dark_file = rinfo.dark_file_pattern.format(rinfo)
- #rinfo.flat_file = rinfo.flat_file_pattern.format(rinfo)
-
- #self.rinfo = rinfo
- #self.info.recipe = rinfo
-
- #logger.info(u.verbose.report(self.info))
+ self.info.base_path = base_path
# Create the ptyd file name if not specified
if self.info.dfile is None:
home = Paths(IO_par).home
self.info.dfile = '%s/prepdata/data_%d.ptyd' % (
- home, self.info.recipe.scan_label)
+ home, self.info.scan_label)
log(3, 'Save file is %s' % self.info.dfile)
log(4, u.verbose.report(self.info))
@@ -209,8 +195,8 @@ def load_weight(self):
"""
# FIXME: do something better here. (detector-dependent)
# Load mask as weight
- if self.info.recipe.mask_file is not None:
- return io.h5read(self.info.recipe.mask_file, 'mask')['mask'].astype(
+ if self.info.mask_file is not None:
+ return io.h5read(self.info.mask_file, 'mask')['mask'].astype(
np.float32)
def load_positions(self):
@@ -218,12 +204,12 @@ def load_positions(self):
Load the positions and return as an (N, 2) array.
"""
positions = []
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
# Load positions
- if self.info.recipe.use_h5:
+ if self.info.use_h5:
# From prepared .h5 file
- data = io.h5read(self.info.recipe.base_path + '/raw/data.h5')
+ data = io.h5read(self.info.base_path + '/raw/data.h5')
for i in np.arange(1, len(data) + 1, 1):
positions.append((data['data_%04d' % i]['positions'][0, 0],
data['data_%04d' % i]['positions'][0, 1]))
@@ -231,19 +217,19 @@ def load_positions(self):
# From .edf files
pos_files = []
# Count available images given through scan_label
- for i in os.listdir(self.info.recipe.base_path +
- self.info.recipe.scan_label):
- if i.startswith(self.info.recipe.scan_label):
+ for i in os.listdir(self.info.base_path +
+ self.info.scan_label):
+ if i.startswith(self.info.scan_label):
pos_files.append(i)
for i in np.arange(1, len(pos_files) + 1, 1):
- data, meta = io.edfread(self.info.recipe.base_path +
- self.info.recipe.scan_label + '/' +
- self.info.recipe.scan_label +
+ data, meta = io.edfread(self.info.base_path +
+ self.info.scan_label + '/' +
+ self.info.scan_label +
'_%04d.edf' % i)
- positions.append((meta['motor'][self.info.recipe.motors[0]],
- meta['motor'][self.info.recipe.motors[1]]))
+ positions.append((meta['motor'][self.info.motors[0]],
+ meta['motor'][self.info.motors[1]]))
return np.array(positions) * mmult[0]
@@ -285,23 +271,23 @@ def load_common(self):
#return common._to_dict()
# Load dark
- if self.info.recipe.use_h5:
+ if self.info.use_h5:
# From prepared .h5 file
- dark = io.h5read(self.info.recipe.base_path + '/raw/dark.h5')
+ dark = io.h5read(self.info.base_path + '/raw/dark.h5')
common.dark = dark['dark_avg']['avgdata'].astype(np.float32)
else:
# From .edf files
dark_files = []
# Count available dark given through scan_label
- for i in os.listdir(self.info.recipe.base_path +
- self.info.recipe.scan_label):
+ for i in os.listdir(self.info.base_path +
+ self.info.scan_label):
if i.startswith('dark'):
dark_files.append(i)
dark = []
for i in np.arange(1, len(dark_files) + 1, 1):
- data, meta = io.edfread(self.info.recipe.base_path +
- self.info.recipe.scan_label + '/' +
+ data, meta = io.edfread(self.info.base_path +
+ self.info.scan_label + '/' +
'dark_%04d.edf' % i)
dark.append(data.astype(np.float32))
@@ -310,23 +296,23 @@ def load_common(self):
log(3, 'Dark loaded successfully.')
# Load flat
- if self.info.recipe.use_h5:
+ if self.info.use_h5:
# From prepared .h5 file
- flat = io.h5read(self.info.recipe.base_path + '/raw/ref.h5')
+ flat = io.h5read(self.info.base_path + '/raw/ref.h5')
common.flat = flat['ref_avg']['avgdata'].astype(np.float32)
else:
# From .edf files
flat_files = []
# Count available dark given through scan_label
- for i in os.listdir(self.info.recipe.base_path +
- self.info.recipe.scan_label):
+ for i in os.listdir(self.info.base_path +
+ self.info.scan_label):
if i.startswith('flat'):
flat_files.append(i)
flat = []
for i in np.arange(1, len(flat_files) + 1, 1):
- data, meta = io.edfread(self.info.recipe.base_path +
- self.info.recipe.scan_label + '/' +
+ data, meta = io.edfread(self.info.base_path +
+ self.info.scan_label + '/' +
'ref_%04d.edf' % i)
flat.append(data.astype(np.float32))
@@ -344,9 +330,11 @@ def check(self, frames, start=0):
:param frames: Number of frames to load
:param start: starting point
:return: (frames_available, end_of_scan)
+
- the number of frames available from a starting point `start`
- bool if the end of scan was reached
(None if this routine doesn't know)
+
"""
npos = self.num_frames
frames_accessible = min((frames, npos - start))
@@ -369,9 +357,9 @@ def load(self, indices):
# self.rinfo, slice=j)[key].astype(np.float32)
# Load data
- if self.info.recipe.use_h5:
+ if self.info.use_h5:
# From prepared .h5 file
- data = io.h5read(self.info.recipe.base_path + '/raw/data.h5')
+ data = io.h5read(self.info.base_path + '/raw/data.h5')
for j in indices:
i = j + 1
raw[j] = data['data_%04d' % i]['data'].astype(np.float32)
@@ -379,9 +367,9 @@ def load(self, indices):
# From .edf files
for j in indices:
i = j + 1
- data, meta = io.edfread(self.info.recipe.base_path +
- self.info.recipe.scan_label + '/' +
- self.info.recipe.scan_label +
+ data, meta = io.edfread(self.info.base_path +
+ self.info.scan_label + '/' +
+ self.info.scan_label +
'_%04d.edf' % i)
raw[j] = data.astype(np.float32)
@@ -415,12 +403,12 @@ def correct(self, raw, weights, common):
#data = raw_wl_ml_ud
# Apply flat and dark, only dark, or no correction
- if self.info.recipe.flat_division and self.info.recipe.dark_subtraction:
+ if self.info.flat_division and self.info.dark_subtraction:
for j in raw:
raw[j] = (raw[j] - common.dark) / (common.flat - common.dark)
raw[j][raw[j] < 0] = 0
data = raw
- elif self.info.recipe.dark_subtraction:
+ elif self.info.dark_subtraction:
for j in raw:
raw[j] = raw[j] - common.dark
raw[j][raw[j] < 0] = 0
@@ -457,6 +445,7 @@ def undistort(frame, delta):
deltah, deltav = delta
+ sh = frame.shape
x, y = np.meshgrid(np.arange(sh[0]), np.arange(sh[1]))
-    sh = frame.shape
@@ -471,7 +460,7 @@ def undistort(frame, delta):
pts_in = (nyc < sh[0]) & (nyc > 0) & (nxc < sh[1]) & (nxc > 0)
pts_out = ~pts_in
- outf = np.zeros_like(ff)
+ outf = np.zeros_like(frame)
outf[pts_out] = frame.mean()
nxf = nxf[pts_in]
nxc = nxc[pts_in]
@@ -541,7 +530,7 @@ def undistort(frame, delta):
#
# def __init__(self, pars=None):
# super(ID16Scan, self).__init__(pars)
-# r = self.info.recipe
+# r = self.info
# # filename analysis
# body, ext = os.path.splitext(
# os.path.expanduser(r.base_path + r.first_frame))
@@ -562,7 +551,7 @@ def undistort(frame, delta):
# return self.frame_format % (index + 1)
#
# def _load_dark(self):
-# r = self.info.recipe
+# r = self.info
# print('Loading the dark files...')
# darklist = []
# for ff in sorted(glob.glob(r.base_path + 'dark*.edf')):
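The two undistort() hunks above are NameError fixes rather than behaviour changes: `sh` was read by the meshgrid call before it was assigned, and the output buffer was allocated from an undefined name `ff`. A minimal sketch of the corrected preamble (the helper name is ours; only the frame handling is taken from the patch):

    import numpy as np

    def undistort_preamble(frame):
        # Shape must be known before the coordinate grid is built.
        sh = frame.shape
        x, y = np.meshgrid(np.arange(sh[0]), np.arange(sh[1]))
        # Allocate the output from the input frame, not from an undefined array.
        outf = np.zeros_like(frame)
        return x, y, outf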
diff --git a/ptypy/experiment/UCL.py b/ptypy/experiment/UCL.py
index 616e5b7bb..21aa62f02 100644
--- a/ptypy/experiment/UCL.py
+++ b/ptypy/experiment/UCL.py
@@ -9,95 +9,187 @@
"""
import numpy as np
import os
+
from .. import utils as u
from .. import io
from ..core.data import PtyScan
from ..utils.verbose import log
from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
+from ..core import Ptycho
+from . import register
-logger = u.verbose.logger
+IO_par = Ptycho.DEFAULT['io']
-# Recipe defaults
-RECIPE = u.Param()
-# Experiment identifier
-RECIPE.experimentID = None
-# Scan number
-RECIPE.scan_number = None
-RECIPE.dark_number = None
-RECIPE.flat_number = None
-RECIPE.energy = None
-RECIPE.lam = None
-# Distance from object to screen
-RECIPE.z = None
-# Name of the detector as specified in the nexus file
-RECIPE.detector_name = None
-# Motor names to determine the sample translation
-RECIPE.motors = ['t1_sx', 't1_sy']
-# Motor conversion factor to meters
-RECIPE.motors_multiplier = 1e-3
-RECIPE.base_path = './'
-RECIPE.data_file_path = '%(base_path)s' + 'raw/%(scan_number)06d'
-RECIPE.dark_file_path = '%(base_path)s' + 'raw/%(dark_number)06d'
-RECIPE.flat_file_path = '%(base_path)s' + 'raw/%(flat_number)06d'
-RECIPE.mask_file = None # '%(base_path)s' + 'processing/mask.h5'
-# Use flat as Empty Probe (EP) for probe sharing;
-# needs to be set to True in the recipe of the scan that will act as EP
-RECIPE.use_EP = False
-# Apply hot pixel correction
-RECIPE.remove_hot_pixels = u.Param(
- # Initiate by setting to True;
- # DEFAULT parameters will be used if not specified otherwise
- apply=False,
- # Size of the window on which the median filter will be applied
- # around every data point
- size=3,
- # Tolerance multiplied with the standard deviation of the data array
- # subtracted by the blurred array (difference array)
- # yields the threshold for cutoff.
- tolerance=10,
- # If True, edges of the array are ignored, which speeds up the code
- ignore_edges=False,
-)
-
-# Apply Richardson Lucy deconvolution
-RECIPE.rl_deconvolution = u.Param(
- # Initiate by setting to True;
- # DEFAULT parameters will be used if not specified otherwise
- apply=False,
- # Number of iterations
- numiter=5,
- # Provide MTF from file; no loading procedure present for now,
- # loading through recon script required
- dfile=None,
- # Create fake psf as a sum of gaussians if no MTF provided
- gaussians=u.Param(
- # DEFAULT list of gaussians for Richardson Lucy deconvolution
- g1=u.Param(
- # Standard deviation in x direction
- std_x=1.0,
- # Standard deviation in y direction
- std_y=1.0,
- # Offset / shift in x direction
- off_x=0.,
- # Offset / shift in y direction
- off_y=0.,
- )
- ),
-)
-
-# Generic defaults
-UCLDEFAULT = PtyScan.DEFAULT.copy()
-UCLDEFAULT.recipe = RECIPE
-UCLDEFAULT.auto_center = False
-UCLDEFAULT.orientation = (False, False, False)
+logger = u.verbose.logger
+@register()
class UCLLaserScan(PtyScan):
"""
Laser imaging setup (UCL) data preparation class.
+
+ Defaults:
+
+ [name]
+ default = UCLLaserScan
+ type = str
+ help =
+
+ [auto_center]
+ default = False
+
+ [orientation]
+ default = (False, False, False)
+
+ [scan_number]
+ default = None
+ type = int
+ help = Scan number
+
+ [dark_number]
+ default = None
+ type = int
+ help =
+
+ [flat_number]
+ default = None
+ type = int
+ help =
+
+ [energy]
+ default = None
+
+ [lam]
+ default = None
+ type = float
+ help =
+
+ [z]
+ default = None
+ type = float
+ help = Distance from object to screen
+
+ [detector_name]
+ default = None
+ type = str
+ help = Name of the detector as specified in the nexus file
+
+ [motors]
+ default = ['t1_sx', 't1_sy']
+ type = list
+ help = Motor names to determine the sample translation
+
+ [motors_multiplier]
+ default = 1e-3
+ type = float
+ help = Motor conversion factor to meters
+
+ [base_path]
+ default = './'
+ type = str
+ help =
+
+ [data_file_path]
+ default = '%(base_path)s' + 'raw/%(scan_number)06d'
+ type = str
+ help =
+
+ [dark_file_path]
+ default = '%(base_path)s' + 'raw/%(dark_number)06d'
+ type = str
+ help =
+
+ [flat_file_path]
+ default = '%(base_path)s' + 'raw/%(flat_number)06d'
+ type = str
+ help =
+
+ [mask_file]
+ default = None
+ type = str
+ help =
+
+ [use_EP]
+ default = False
+ type = bool
+    help = Use flat as Empty Probe (EP) for probe sharing; needs to be set to True in the scan that will act as EP
+
+ [remove_hot_pixels]
+ default =
+ type = Param
+ help = Apply hot pixel correction
+
+ [remove_hot_pixels.apply]
+ default = False
+ type = bool
+ help = Initiate by setting to True
+
+ [remove_hot_pixels.size]
+ default = 3
+ type = int
+ help = Size of the window on which the median filter will be applied around every data point
+
+ [remove_hot_pixels.tolerance]
+ default = 10
+ type = int
+ help = Tolerance multiplied with the standard deviation of the data array subtracted by the blurred array (difference array) yields the threshold for cutoff.
+
+ [remove_hot_pixels.ignore_edges]
+ default = False
+ type = bool
+ help = If True, edges of the array are ignored, which speeds up the code
+
+ [rl_deconvolution]
+ default =
+ type = Param
+ help = Apply Richardson Lucy deconvolution
+
+ [rl_deconvolution.apply]
+ default = False
+ type = bool
+ help = Initiate by setting to True
+
+ [rl_deconvolution.numiter]
+ default = 5
+ type = int
+ help = Number of iterations
+
+ [rl_deconvolution.dfile]
+ default = None
+ type = str
+ help = Provide MTF from file; no loading procedure present for now, loading through recon script required
+
+ [rl_deconvolution.gaussians]
+ default =
+ type = Param
+ help = Create fake psf as a sum of gaussians if no MTF provided
+
+ [rl_deconvolution.gaussians.g1]
+ default =
+ type = Param
+ help = list of gaussians for Richardson Lucy deconvolution
+
+ [rl_deconvolution.gaussians.g1.std_x]
+ default = 1.0
+ type = float
+ help = Standard deviation in x direction
+
+ [rl_deconvolution.gaussians.g1.std_y]
+ default = 1.0
+ type = float
+ help = Standard deviation in y direction
+
+ [rl_deconvolution.gaussians.g1.off_x]
+ default = 0.0
+ type = float
+ help = Offset / shift in x direction
+
+ [rl_deconvolution.gaussians.g1.off_y]
+ default = 0.0
+ type = float
+ help = Offset / shift in y direction
+
"""
- DEFAULT = UCLDEFAULT
def __init__(self, pars=None, **kwargs):
"""
@@ -108,14 +200,14 @@ def __init__(self, pars=None, **kwargs):
:param kwargs: key-value pair
- additional parameters.
"""
- recipe_default = RECIPE.copy()
- recipe_default.update(pars.recipe, in_place_depth=1)
- pars.recipe.update(recipe_default)
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+ pars = p
super(UCLLaserScan, self).__init__(pars, **kwargs)
# Try to extract base_path to access data files
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
d = os.getcwd()
base_path = None
while True:
@@ -128,32 +220,32 @@ def __init__(self, pars=None, **kwargs):
if base_path is None:
raise RuntimeError('Could not guess base_path.')
else:
- self.info.recipe.base_path = base_path
+ self.info.base_path = base_path
# Construct path names
- self.data_path = self.info.recipe.data_file_path % self.info.recipe
+ self.data_path = self.info.data_file_path % self.info
log(3, 'Will read data from directory %s' % self.data_path)
- if self.info.recipe.dark_number is None:
+ if self.info.dark_number is None:
self.dark_file = None
log(3, 'No data for dark')
else:
- self.dark_path = self.info.recipe.dark_file_path % self.info.recipe
+ self.dark_path = self.info.dark_file_path % self.info
log(3, 'Will read dark from directory %s' % self.dark_path)
- if self.info.recipe.flat_number is None:
+ if self.info.flat_number is None:
self.flat_file = None
log(3, 'No data for flat')
else:
- self.flat_path = self.info.recipe.flat_file_path % self.info.recipe
+ self.flat_path = self.info.flat_file_path % self.info
log(3, 'Will read flat from file %s' % self.flat_path)
# Load data information
self.instrument = io.h5read(self.data_path + '/%06d_%04d.nxs'
- % (self.info.recipe.scan_number, 1),
+ % (self.info.scan_number, 1),
'entry.instrument')['instrument']
# Extract detector name if not set or wrong
- if (self.info.recipe.detector_name is None
- or self.info.recipe.detector_name
+ if (self.info.detector_name is None
+ or self.info.detector_name
not in self.instrument.keys()):
detector_name = None
for k in self.instrument.keys():
@@ -165,14 +257,14 @@ def __init__(self, pars=None, **kwargs):
raise RuntimeError(
'Not possible to extract detector name. '
'Please specify in recipe instead.')
- elif (self.info.recipe.detector_name is not None
- and detector_name is not self.info.recipe.detector_name):
+ elif (self.info.detector_name is not None
+ and detector_name is not self.info.detector_name):
u.log(2, 'Detector name changed from %s to %s.'
- % (self.info.recipe.detector_name, detector_name))
+ % (self.info.detector_name, detector_name))
else:
- detector_name = self.info.recipe.detector_name
+ detector_name = self.info.detector_name
- self.info.recipe.detector_name = detector_name
+ self.info.detector_name = detector_name
# Set up dimensions for cropping
try:
@@ -189,7 +281,7 @@ def __init__(self, pars=None, **kwargs):
# If center unset, extract offset from raw data
elif center == 'unset':
raw_shape = self.instrument[
- self.info.recipe.detector_name]['data'].shape
+ self.info.detector_name]['data'].shape
offset_x = raw_shape[-1] // 2
offset_y = raw_shape[-2] // 2
else:
@@ -200,13 +292,13 @@ def __init__(self, pars=None, **kwargs):
xdim = (offset_x - pars.shape // 2, offset_x + pars.shape // 2)
ydim = (offset_y - pars.shape // 2, offset_y + pars.shape // 2)
- self.info.recipe.array_dim = [xdim, ydim]
+ self.info.array_dim = [xdim, ydim]
# Create the ptyd file name if not specified
if self.info.dfile is None:
home = Paths(IO_par).home
self.info.dfile = ('%s/prepdata/data_%d.ptyd'
- % (home, self.info.recipe.scan_number))
+ % (home, self.info.scan_number))
log(3, 'Save file is %s' % self.info.dfile)
log(4, u.verbose.report(self.info))
@@ -222,9 +314,9 @@ def load_weight(self):
"""
# FIXME: do something better here. (detector-dependent)
# Load mask as weight
- if self.info.recipe.mask_file is not None:
+ if self.info.mask_file is not None:
return io.h5read(
- self.info.recipe.mask_file, 'mask')['mask'].astype(float)
+ self.info.mask_file, 'mask')['mask'].astype(float)
def load_positions(self):
"""
@@ -235,8 +327,8 @@ def load_positions(self):
"""
# Load positions from file if possible.
motor_positions = io.h5read(
- self.info.recipe.base_path + '/raw/%06d/%06d_metadata.h5'
- % (self.info.recipe.scan_number, self.info.recipe.scan_number),
+ self.info.base_path + '/raw/%06d/%06d_metadata.h5'
+ % (self.info.scan_number, self.info.scan_number),
'positions')['positions']
# If no positions are found at all, raise error.
@@ -244,7 +336,7 @@ def load_positions(self):
raise RuntimeError('Could not find motors.')
# Apply motor conversion factor and create transposed array.
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
positions = motor_positions * mmult[0]
return positions
@@ -259,14 +351,14 @@ def load_common(self):
common = u.Param()
# Load dark.
- if self.info.recipe.dark_number is not None:
+ if self.info.dark_number is not None:
dark = [io.h5read(self.dark_path + '/%06d_%04d.nxs'
- % (self.info.recipe.dark_number, j),
+ % (self.info.dark_number, j),
'entry.instrument.detector.data')['data'][0][
- self.info.recipe.array_dim[1][0]:
- self.info.recipe.array_dim[1][1],
- self.info.recipe.array_dim[0][0]:
- self.info.recipe.array_dim[0][1]].astype(np.float32)
+ self.info.array_dim[1][0]:
+ self.info.array_dim[1][1],
+ self.info.array_dim[0][0]:
+ self.info.array_dim[0][1]].astype(np.float32)
for j in np.arange(1, len(os.listdir(self.dark_path)))]
dark = np.array(dark).mean(0)
@@ -274,14 +366,14 @@ def load_common(self):
log(3, 'Dark loaded successfully.')
# Load flat.
- if self.info.recipe.flat_number is not None:
+ if self.info.flat_number is not None:
flat = [io.h5read(self.flat_path + '/%06d_%04d.nxs'
- % (self.info.recipe.flat_number, j),
+ % (self.info.flat_number, j),
'entry.instrument.detector.data')['data'][0][
- self.info.recipe.array_dim[1][0]:
- self.info.recipe.array_dim[1][1],
- self.info.recipe.array_dim[0][0]:
- self.info.recipe.array_dim[0][1]].astype(np.float32)
+ self.info.array_dim[1][0]:
+ self.info.array_dim[1][1],
+ self.info.array_dim[0][0]:
+ self.info.array_dim[0][1]].astype(np.float32)
for j in np.arange(1, len(os.listdir(self.flat_path)))]
flat = np.array(flat).mean(0)
@@ -307,12 +399,12 @@ def load(self, indices):
for j in np.arange(1, len(indices) + 1):
data = io.h5read(self.data_path + '/%06d_%04d.nxs'
- % (self.info.recipe.scan_number, j),
+ % (self.info.scan_number, j),
'entry.instrument.detector.data')['data'][0][
- self.info.recipe.array_dim[1][0]:
- self.info.recipe.array_dim[1][1],
- self.info.recipe.array_dim[0][0]:
- self.info.recipe.array_dim[0][1]].astype(np.float32)
+ self.info.array_dim[1][0]:
+ self.info.array_dim[1][1],
+ self.info.array_dim[0][0]:
+ self.info.array_dim[0][1]].astype(np.float32)
raw[j - 1] = data
log(3, 'Data loaded successfully.')
@@ -343,43 +435,43 @@ def correct(self, raw, weights, common):
- dict: contains modified weights.
"""
# Apply hot pixel removal
- if self.info.recipe.remove_hot_pixels.apply:
+ if self.info.remove_hot_pixels.apply:
u.log(3, 'Applying hot pixel removal...')
for j in raw:
raw[j] = u.remove_hot_pixels(
raw[j],
- self.info.recipe.remove_hot_pixels.size,
- self.info.recipe.remove_hot_pixels.tolerance,
- self.info.recipe.remove_hot_pixels.ignore_edges)[0]
+ self.info.remove_hot_pixels.size,
+ self.info.remove_hot_pixels.tolerance,
+ self.info.remove_hot_pixels.ignore_edges)[0]
- if self.info.recipe.flat_number is not None:
+ if self.info.flat_number is not None:
common.dark = u.remove_hot_pixels(
common.dark,
- self.info.recipe.remove_hot_pixels.size,
- self.info.recipe.remove_hot_pixels.tolerance,
- self.info.recipe.remove_hot_pixels.ignore_edges)[0]
+ self.info.remove_hot_pixels.size,
+ self.info.remove_hot_pixels.tolerance,
+ self.info.remove_hot_pixels.ignore_edges)[0]
- if self.info.recipe.flat_number is not None:
+ if self.info.flat_number is not None:
common.flat = u.remove_hot_pixels(
common.flat,
- self.info.recipe.remove_hot_pixels.size,
- self.info.recipe.remove_hot_pixels.tolerance,
- self.info.recipe.remove_hot_pixels.ignore_edges)[0]
+ self.info.remove_hot_pixels.size,
+ self.info.remove_hot_pixels.tolerance,
+ self.info.remove_hot_pixels.ignore_edges)[0]
u.log(3, 'Hot pixel removal completed.')
# Apply deconvolution
- if self.info.recipe.rl_deconvolution.apply:
+ if self.info.rl_deconvolution.apply:
u.log(3, 'Applying deconvolution...')
# Use mtf from a file if provided in recon script
- if self.info.recipe.rl_deconvolution.dfile is not None:
+ if self.info.rl_deconvolution.dfile is not None:
mtf = self.info.rl_deconvolution.dfile
# Create fake psf as a sum of gaussians from parameters
else:
gau_sum = 0
for k in (
- self.info.recipe.rl_deconvolution.gaussians.iteritems()):
+ self.info.rl_deconvolution.gaussians.iteritems()):
gau_sum += u.gaussian2D(raw[0].shape[0],
k[1].std_x,
k[1].std_y,
@@ -393,18 +485,18 @@ def correct(self, raw, weights, common):
raw[j] = u.rl_deconvolution(
raw[j],
mtf,
- self.info.recipe.rl_deconvolution.numiter)
+ self.info.rl_deconvolution.numiter)
u.log(3, 'Deconvolution completed.')
# Apply flat and dark, only dark, or no correction
- if (self.info.recipe.flat_number is not None
- and self.info.recipe.dark_number is not None):
+ if (self.info.flat_number is not None
+ and self.info.dark_number is not None):
for j in raw:
raw[j] = (raw[j] - common.dark) / (common.flat - common.dark)
raw[j][raw[j] < 0] = 0
data = raw
- elif self.info.recipe.dark_number is not None:
+ elif self.info.dark_number is not None:
for j in raw:
raw[j] = raw[j] - common.dark
raw[j][raw[j] < 0] = 0
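With the recipe sub-tree gone, the former RECIPE entries of UCLLaserScan are ordinary scan-data parameters parsed from the Defaults block. A minimal sketch of a driving script under that assumption (scan label, number and path are invented):

    from ptypy import utils as u
    from ptypy.core import Ptycho

    p = u.Param()
    p.verbose_level = 3
    p.scans = u.Param()
    p.scans.ucl = u.Param()
    p.scans.ucl.data = u.Param()
    p.scans.ucl.data.name = 'UCLLaserScan'       # selects the registered loader
    p.scans.ucl.data.scan_number = 42            # was p.scans.ucl.data.recipe.scan_number
    p.scans.ucl.data.base_path = '/data/laser/'  # invented path
    # Ptycho(p, level=2)  # level 2 runs up to and including data preparation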
diff --git a/ptypy/experiment/__init__.py b/ptypy/experiment/__init__.py
index 620e2c9e3..1dddea2a3 100644
--- a/ptypy/experiment/__init__.py
+++ b/ptypy/experiment/__init__.py
@@ -3,6 +3,7 @@
Beamline-specific data preparation modules.
Currently available:
+ * cSAXS
* I13DLS, FFP and NFP
* I08DLS, FFP and NFP
* ID16A ESRF, NFP
@@ -15,39 +16,53 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-#from data_structure import *
+from .. import defaults_tree
+from ..core.data import MoonFlowerScan, PtydScan, PtyScan, QuickScan
+from ..simulations import SimScan
+
+__all__ = ['MoonFlowerScan', 'PtydScan', 'PtyScan', 'QuickScan', 'SimScan']
+PTYSCANS = {'MoonFlowerScan': MoonFlowerScan,
+ 'PtydScan': PtydScan,
+ 'PtyScan': PtyScan,
+ 'QuickScan': QuickScan,
+ 'SimScan': SimScan}
+
+
+def register(name=None):
+ """PtyScan subclass registration decorator"""
+ return lambda cls: _register_PtyScan_class(cls, name)
+
+
+def _register_PtyScan_class(cls, name=None):
+ # Get class name
+ name = cls.__name__ if name is None else name
+ PTYSCANS[name] = cls
+
+ # Apply descriptor decorator
+ cls = defaults_tree.parse_doc('scandata.' + name, True)(cls)
+
+ # Add class to namespace
+ globals()[name] = cls
+ __all__.append(name)
+ return cls
+
# Import instrument-specific modules
-#import cSAXS
-from I13_ffp import I13ScanFFP
-from I13_nfp import I13ScanNFP
-from DLS import DlsScan
-from I08 import I08Scan
-from savu import Savu
-from plugin import makeScanPlugin
-from ID16Anfp import ID16AScan
-from AMO_LCLS import AMOScan
-from DiProI_FERMI import DiProIFERMIScan
-from optiklabor import FliSpecScanMultexp
-from UCL import UCLLaserScan
-from nanomax import NanomaxStepscanNov2016, NanomaxStepscanMay2017, NanomaxFlyscanJune2017
-from ALS_5321 import ALS5321Scan
-
-PtyScanTypes = dict(
- i13dls_ffp = I13ScanFFP,
- i13dls_nfp = I13ScanNFP,
- dls = DlsScan,
- i08dls = I08Scan,
- savu = Savu,
- plugin = makeScanPlugin,
- id16a_nfp = ID16AScan,
- amo_lcls = AMOScan,
- diproi_fermi = DiProIFERMIScan,
- fli_spec_multexp = FliSpecScanMultexp,
- laser_ucl = UCLLaserScan,
- nanomaxstepscannov2016 = NanomaxStepscanNov2016,
- nanomaxstepscanmay2017 = NanomaxStepscanMay2017,
- nanomaxflyscanjune2017 = NanomaxFlyscanJune2017,
- als5321 = ALS5321Scan,
-)
+try:
+ from cSAXS import cSAXSScan
+ from savu import Savu
+ from plugin import makeScanPlugin
+ from ID16Anfp import ID16AScan
+ from AMO_LCLS import AMOScan
+ from DiProI_FERMI import DiProIFERMIScan
+ from optiklabor import FliSpecScanMultexp
+ from UCL import UCLLaserScan
+ from nanomax import NanomaxStepscanMay2017, NanomaxStepscanNov2016, NanomaxFlyscanJune2017
+ from ALS_5321 import ALS5321Scan
+except ImportError:
+    # Optional beamline dependencies may be missing; in that case skip the
+    # remaining imports instead of breaking ptypy.experiment altogether.
+    pass
+#from I13_ffp import I13ScanFFP
+#from I13_nfp import I13ScanNFP
+#from DLS import DlsScan
+#from I08 import I08Scan
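The register() hook above replaces the hand-maintained PtyScanTypes dict: a decorated subclass has its Defaults block parsed by defaults_tree and becomes selectable through its class name. A minimal sketch of a third-party loader (the class and its one non-default parameter are invented for illustration):

    from ptypy.core.data import PtyScan
    from ptypy.experiment import register

    @register()  # parses the Defaults block and adds the class to PTYSCANS
    class MyBeamlineScan(PtyScan):
        """
        Hypothetical loader, shown only to illustrate the decorator.

        Defaults:

        [name]
        default = MyBeamlineScan
        type = str
        help =

        [motors_multiplier]
        default = 1e-3
        type = float
        help = Motor conversion factor to meters
        """

        def load(self, indices):
            raise NotImplementedError  # a real loader returns (raw, positions, weights)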
diff --git a/ptypy/experiment/cSAXS.py b/ptypy/experiment/cSAXS.py
index 2afc965a9..d67e930ea 100644
--- a/ptypy/experiment/cSAXS.py
+++ b/ptypy/experiment/cSAXS.py
@@ -1,639 +1,184 @@
# -*- coding: utf-8 -*-
-"""\
-Tools specific to the cSAXS beamline, Swiss Light Source.
+"""
+Data preparation class for cSAXS at the Swiss Light Source (SLS).
+This scan file was correct as of the end of 2017. It is not yet maintained by cSAXS,
+but if you raise a ticket when you notice any oddities, we will try to fix it up.
+"""
-TODO:
- * smarter way to guess parameters.
+import numpy as np
+import os
+import fabio
+from collections import OrderedDict
+from scipy.io import loadmat
+
+from .. import utils as u
+from ..core.data import PtyScan
+from ..utils.verbose import log
+from . import register
+logger = u.verbose.logger
+
+
+
+@register()
+class cSAXSScan(PtyScan):
+ def __init__(self, pars=None, **kwargs):
+ '''
+ Defaults:
+ [name]
+ default = cSAXS
+ type = str
+ help = the reference name of this loader
+ doc =
+
+ [base_path]
+ default = None
+ type = str
+ help = the base path for reconstruction
+ doc =
+
+ [visit]
+ default = None
+ type = str
+ help = the visit number
+ doc =
+
+ [detector]
+ default = pilatus_1
+ type = str
+ help = the detector used for acquisition
+ doc =
+
+ [scan_number]
+ default = None
+ type = int
+ help = the scan number for reconstruction
+ doc =
+
+ [motors]
+ default = ('Target_y', 'Target_x')
+ type = tuple
+    help = the motor names; this should be a 2-tuple
+ doc =
+
+
+ [mask_path]
+ default = None
+ type = str
+    help = the path to the directory containing the mask file
+ doc =
+
+ [mask_file]
+ default = None
+ type = str
+ help = the mask mat file
+ doc =
+
+ '''
+
+ # Initialise parent class
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars)
+ self.data_object = None
+ super(cSAXSScan, self).__init__(pars, **kwargs)
+
+ # if parallel.master: # populate the fabio object
+        self.data_object = get_data_object(self.info)  # the recipe tree is gone; parameters live directly on info
+ self.num_frames = self.data_object.shape[0]
+ log(4, u.verbose.report(self.info))
+
+ def load_weight(self):
+        return loadmat(get_mask_path(self.info))['mask']
+
+ def load_positions(self):
+        return get_positions(self.info)
+
+ def check(self, frames, start):
+ frames_accessible = min((frames, self.num_frames - start))
+        stop = frames_accessible + start
+ return frames_accessible, (stop >= self.num_frames)
+
+ def load(self, indices):
+ raw = {}
+ for i in indices:
+ raw[i] = self.data_object.getframe(i).data.astype(np.float)
+ return raw, {}, {}
+
+
+def get_position_path(inargs):
+ '''
+    Returns the path to the positions file for a given scan number, together with a flag indicating whether it exists on disk.
+ '''
+ pathargs = inargs.copy() # this is ok here
+ file_path = '%(base_path)s/specES1/scan_positions/' % pathargs
+ positions_path = file_path + 'scan_%(scan_number)05d.dat' % pathargs
+ # check that it exists
+ if not os.path.isfile(positions_path):
+ print "File:%s does not exist." % positions_path
+ exists = False
+ else:
+ exists = True
+ return positions_path, exists
+
+
+def get_positions(inargs):
+ path, exists = get_position_path(inargs)
+ k = 0
+ if exists:
+ with open(path) as f:
+ for line in f:
+ if k == 1:
+ keys = line.split()
+ break
+ k += 1
-This file is part of the PTYPY package.
+    _DataDict = OrderedDict.fromkeys(keys)
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
+ for i, key in enumerate(_DataDict.keys()):
+ _DataDict[key] = np.loadtxt(path, skiprows=2, usecols=[i]) # data[i]
+ positions = np.zeros((len(_DataDict[inargs.motors[0]]), 2))
+ positions[:, 0] = _DataDict[inargs.motors[0]] * inargs.motors_multiplier[0]
+ positions[:, 1] = _DataDict[inargs.motors[1]] * inargs.motors_multiplier[1]
+ return positions
+ else:
+ raise IOError('File:%s does not exist.' % path)
-__all__ = ['prepare_data', 'DataReader']
-import os
-import numpy as np
-import glob
-import fnmatch
-import time
-import re
-
-from ..utils import verbose
-from .. import parameters
-from data_structure import DataScan
-from .. import io
-import spec
-
-lastDR = None
-
-# cSAXS defaults
-SAVE_FILENAME_PATTERN = '{write_path}/S{scan_number:05d}_data_{dpsize[0]:03d}x{dpsize[1]:03d}.h5'
-WRITE_PATH_PATTERN = '{base_path}/analysis/S{scan_number:05d}'
-READ_PATH_PATTERN_CONVERT = '{base_path}/analysis/S{scan_number:05d}'
-READ_PATH_PATTERN = '{base_path}/{pilatus_dir}/S{smin:05d}-{smax:05d}/S{scan_number:05d}'
-FILENAME_PATTERN = '{read_path}/{prefix}{scan_number:05d}_{index}.{file_extension}'
-FILENAME_MULTEXP_PATTERN = '{read_path}/{prefix}{scan_number:05d}_{index}_{exp}.{file_extension}'
-PREFIX_PATTERN = '{user}_1_'
-INDEX_REPLACEMENT_STRING = '?????'
-_PILATUS_SLICES = {(407,487):(slice(636,1043), slice(494,981)),
- (407,1475):(slice(636,1043), slice(0,1475)),
- (831, 1475):(slice(424,1255), slice(0,1475)),
- (1679, 1475):(slice(0,1679), slice(0,1475))}
-
-
-def prepare_data(params):
- """\
- Prepare data according to cSAXS conventions.
-
- Returns a complete DataScan object.
-
- TODO: Fragment this code in smaller pieces to make it easier to combine
- with a GUI.
- """
- p = parameters.asParam(params, autoviv=False, none_if_absent=True)
-
- try:
- verbose_level = p.verbose_level
- verbose.set_level(verbose_level)
- except:
- pass
-
- base_path = p.base_path
- user = p.user
- pilatus_dir = p.pilatus_dir
- pilatus_mask = p.pilatus_mask
- if not p.spec_filename:
- spec_filename = p.spec_file
- else:
- spec_filename = p.spec_filename
- scan_number = p.scan_number
-
- DR = DataReader(base_path=base_path, user=user, pilatus_dir=pilatus_dir, pilatus_mask=pilatus_mask, spec_filename=spec_filename)
- DC = DataConverter(base_path=base_path, spec_filename=DR.specinfo)
-
- # Prepare data
-
- file_list, fpattern = DR.get_pilatus_files(scan_number)
- if file_list is None:
- verbose(1, 'Could not find the pilatus files (no match for %s or %s)' % fpattern)
- verbose(1, 'Trying to convert old data instead...')
- data, mask, scandict = DC.read(scan_number, dpsize=p.dpsize)
- meta = None
- #write_path = DR.get_write_path(scan_number)
- #verbose(2, 'Will write to path "%s"' % write_path)
+def get_mask_path(inargs):
+ pathargs = inargs.copy() # this is ok here
+ if pathargs.mask_path is None:
+ file_path = '%(base_path)s/matlab/ptycho/%(mask_file)s' % pathargs
else:
- data, mask, meta, scandict = DR.read(scan_number, dpsize=p.dpsize, ctr=p.ctr, multexp=False)
-
- s = parameters.Param()
- s.scan_number = p.scan_number
- s.scan_label = 'S%05d' % p.scan_number
- s.data_filename = scandict['data_filename']
- s.wavelength = 1.2398e-9 / p.energy
- s.energy = p.energy
- s.detector_pixel_size = p.detector_pixel_size
- s.detector_distance = p.detector_distance
- s.initial_ctr = scandict['ctr']
- s.date_collected = scandict['date'].ctime()
- s.date_processed = time.asctime()
- s.exposure_time = scandict['exposure_time']
- if scandict['exposure_time'] is None:
- s.exposure_time = p.exposure_time
-
- if meta is not None: s.raw_filenames = meta['filename']
- s.preparation_basepath = DR.base_path
- s.preparation_user = DR.user
- s.preparation_spec_file = DR.spec_filename
- s.shape = data.shape
-
- if p.scan_type is not None:
- sp = p.scan_params
- if p.scan_type == 'raster':
- #raise RuntimeError('Raster needs to be implemented')
- scanpos = np.array(scan.raster_scan_positions(sp.nx, sp.ny, sp.step_size_x, sp.step_size_y))
- elif p.scan_type == 'round':
- scanpos = np.array(scan.round_scan_positions(sp.radius_in, sp.radius_out, sp.nr, sp.nth))
- elif p.scan_type == 'round_roi':
- scanpos = np.array(scan.round_scan_ROI_positions(sp.dr, sp.lx, sp.ly, sp.nth))
- elif p.scan_type == 'custom':
- scanpos = np.asarray(sp.positions)
- s.positions_theory = scanpos
+ file_path = pathargs.mask_path + pathargs.mask_file
+ return file_path
+
+
+def get_data_path(inargs):
+ '''
+    Returns the path to the first frame of a given scan, the number of frames found, and a flag indicating whether that first frame exists.
+ '''
+ pathargs = inargs.copy() # this is ok here
+ pathargs['inner_number'] = 0
+ pathargs['frame_number'] = 0
+ scan_int = pathargs['scan_number'] // 1000
+ pathargs['group_folder'] = "S%02d000-%02d999" % (scan_int, scan_int)
+ file_path = '%(base_path)s/%(detector)s/%(group_folder)s/S%(scan_number)05d/' % pathargs
+ frame_path = file_path + '%(visit)s_1_%(scan_number)05d_%(inner_number)05d_%(frame_number)05d.cbf' % pathargs
+ num_frames = len([name for name in os.listdir(file_path) if os.path.isfile(file_path + name)])
+ # check that it exists
+ if not os.path.isfile(frame_path):
+ print "File:%s does not exist." % frame_path
+ exists = False
else:
- s.positions_theory = None
-
- if scandict['spec_command'] is not None:
- # Extract scanning information from spec
- s.spec_command = scandict['spec_command']
-
- # Position counters
- motors = [] if p.motors is None else p.motors
- Nmotors = len(motors)
- if Nmotors > 0:
- verbose(3, 'Motors are : %s' % str(motors))
- if np.isscalar(p.motors_multiplier):
- mmult = [p.motors_multiplier]*Nmotors
- else:
- mmult = p.motors_multiplier
- pos_list = [mmult[i]*np.array(scandict['counters'][motors[i]]) for i in range(Nmotors)]
- s.positions = np.array(pos_list).T
- else:
- s.positions = 0.
- # Scan type and parameters
- #TODO: positions = parse_spec_command(scandict.spec_command)
+ exists = True
+ return frame_path, num_frames, exists
- else:
- s.positions = 0. + s.positions_theory
-
- # Put everything together in a DataScan object
- DS = DataScan()
-
- DS.data = data
- DS.mask = mask
- s.autoviv=False
- DS.scan_info = s
- DS.save()
- return DS
-
-class DataReaderBase(object):
- """\
- cSAXS data reading base class (contains what is in common between DataReader and DataConverter).
- """
-
- def __init__(self, base_path, spec_filename=None):
- """\
- cSAXS data reading class. The only mendatory argument for this constructor are 'base_path' and 'user'.
-
- Optional arguments:
- * file_extension [default: '*']
- * pilatus_dir [default: 'pilatus']
- * pilatus_mask (file name or actual mask)
- * spec_file
- """
-
- # Semi-smart base path detection
- if base_path is None:
- d = os.getcwd()
- while True:
- if 'analysis' in os.listdir(d):
- base_path = d
- break
- d,rest = os.path.split(d)
- if not rest:
- break
- if base_path is None:
- raise RuntimeError('Could not figure out base_path')
- verbose(1, 'base_path automatically set to "%s".' % base_path)
- self.base_path = base_path
-
-
- self.specinfo = None
- if spec_filename is None:
- spec_base_path = None
- for d in os.listdir(base_path):
- if d.startswith('spec'):
- spec_base_path = d
- break
- if spec_base_path is None:
- raise RuntimeError('Could not find spec directory')
- matches = []
- spec_base_path = os.path.join(base_path, spec_base_path,'dat-files')
- for root, dirnames, filenames in os.walk(spec_base_path):
- for filename in fnmatch.filter(filenames, '*.dat'):
- matches.append(os.path.join(root, filename))
- if not matches:
- raise RuntimeError('Could not find a spec dat-file (looked into %s)' % spec_base_path)
- if len(matches) > 1:
- print len(matches)
- spec_filename = sorted([(os.stat(x).st_mtime, x) for x in matches])[-1][1]
- verbose(1, 'Found multiple spec files fitting the default location. Using the most recent.')
- else:
- spec_filename = matches[0]
-
- if isinstance(spec_filename, spec.SpecInfo):
- self.specinfo = spec_filename
- self.spec_filename = self.specinfo.spec_filename
- else:
- self.spec_filename = spec_filename
- if spec_filename is not None:
- verbose(1, 'Using spec file: %s' % spec_filename)
- if spec.lastSpecInfo is not None and spec.lastSpecInfo.spec_filename == spec_filename:
- self.specinfo = spec.lastSpecInfo
- print 'Using already parsed spec info.'
- else:
- self.specinfo = spec.SpecInfo(spec_filename)
- else:
- verbose(1, 'Warning: could not open spec_file')
- self.specinfo = None
-
- def read(self, scan_number, dpsize=None, ctr=None, multexp=False, **kwargs):
- """\
- Read in the data
- TODO: (maybe?) MPI to avoid loading all data in a single process for large scans.
- """
- raise NotImplementedError()
-
- def test(self):
- """\
- TODO: Add simple tests here to check the existance of files and paths.
- """
- pass
-
- def summary(self):
- """\
- TODO: Pretty-print content information.
- """
- return 'Not yet implemented'
-
- def get_write_path(self, scan_number):
- return WRITE_PATH_PATTERN.format(base_path=self.base_path, scan_number=scan_number)
-
- def get_save_filename(self, scan_number, dpsize):
- write_path = self.get_write_path(scan_number)
- return SAVE_FILENAME_PATTERN.format(write_path=write_path,
- scan_number=scan_number,
- dpsize=dpsize)
-
-
-class DataConverter(DataReaderBase):
- """\
- cSAXS data conversion class.
- """
- def __init__(self, base_path, spec_filename=None):
- DataReaderBase.__init__(self, base_path, spec_filename)
-
- def read(self, scan_number, dpsize):
- """\
- Convert old prepared data into new format.
- """
-
- scaninfo = None
- if self.specinfo is not None:
- if not self.specinfo.scans.has_key(scan_number): self.specinfo.parse()
- scaninfo = self.specinfo.scans.get(scan_number, None)
- if scaninfo is None:
- raise RuntimeError('Scan #S %d could not be found in spec file!' % scan_number)
-
- # Load data
- read_path = self.get_read_path(scan_number)
- file_to_read = (read_path + '/S%05d_data_%dx%d.h5') % (scan_number, dpsize[0], dpsize[1])
- try:
- a = io.h5read(file_to_read)
- data = a['data']
- fmask = a['fmask']
- except IOError:
- file_to_read = (read_path + '/S%05d_data_%dx%d.mat') % (scan_number, dpsize[0], dpsize[1])
- a = io.loadmat(file_to_read)
- data = a['data'].transpose((2,0,1))
- fmask = a['fmask']
- if fmask.ndim==3:
- fmask=fmask.transpose((2,0,1))
-
-
- # Store additional info from spec file
- write_path = self.get_write_path(scan_number)
- data_filename = self.get_save_filename(scan_number, dpsize)
- ctr = tuple(np.asarray(dpsize)/2)
-
- scandict = {}
- scandict['write_path'] = write_path
- scandict['data_filename'] = data_filename
- scandict['read_path'] = read_path
- scandict['dpsize'] = dpsize
- scandict['ctr'] = ctr
-
- scandict['exposure_time'] = None
-
- if scaninfo is not None:
- scandict['date'] = scaninfo.date
- scandict['counters'] = scaninfo.data
- scandict['motors'] = scaninfo.motors
- scandict['spec_command'] = scaninfo.command
- else:
- scandict['date'] = None
- scandict['counters'] = None
- scandict['motors'] = None
- scandict['spec_command'] = None
-
- return data, fmask, scandict
-
- def get_read_path(self, scan_number):
- return READ_PATH_PATTERN_CONVERT.format(base_path=self.base_path, scan_number=scan_number)
-
-class DataReader(DataReaderBase):
- """\
- cSAXS data reading class.
- """
-
- def __init__(self, base_path, user=None, file_extension='*', pilatus_dir=None, pilatus_mask=None, spec_filename=None):
- """\
- cSAXS data reading class. The only mendatory argument for this constructor are 'base_path' and 'user'.
-
- Optional arguments:
- * file_extension [default: '*']
- * pilatus_dir [default: 'pilatus']
- * pilatus_mask (file name or actual mask)
- * spec_file
- """
- DataReaderBase.__init__(self, base_path, spec_filename)
- base_path = self.base_path
-
- # Semi-smart user determination
- if user is None:
- # use system login name
- import getpass
- user = getpass.getuser()
- verbose(1, 'User name automatically set to "%s".' % user)
- self.user = user
-
- self.file_extension = file_extension
-
- # Semi-smart pilatus directory determination
- if pilatus_dir is None:
- l = os.listdir(base_path)
- for x in l:
- if x.lower().startswith('pilatus'):
- pilatus_dir = x
- break
- if pilatus_dir is None:
- raise RuntimeError('Could not figure out pilatus_dir')
- verbose(1, 'pilatus_dir automatically set to "%s".' % pilatus_dir)
- self.pilatus_dir = pilatus_dir
-
- # Look for standard file location
- if pilatus_mask is None:
- candidates = glob.glob(base_path + '/matlab/ptycho/binary_mask*.mat')
- if not candidates:
- verbose(1, 'Could not find a pilatus mask in the default location')
- elif len(candidates) > 1:
- pilatus_mask = sorted([(os.stat(x).st_mtime, x) for x in candidates])[-1][1]
- verbose(1, 'Found multiple pilatus masks fitting the default location')
- verbose(1, 'Using the most recent (%s)' % pilatus_mask)
- else:
- pilatus_mask = candidates[0]
-
- self.pilatus_mask = pilatus_mask
- if pilatus_mask is not None:
- if str(pilatus_mask) == pilatus_mask:
- verbose(1, 'Using pilatus mask: %s' % str(pilatus_mask))
- else:
- verbose(1, 'Using pilatus mask: <%d x %d array>' % pilatus_mask.shape)
- self.set_pilatus_mask(pilatus_mask)
-
- # [TODO] Find a more robust way of dealing with the prefix
- self.prefix = PREFIX_PATTERN.format(user=user)
- print PREFIX_PATTERN.format(user=user)
-
- def get_pilatus_files(self, scan_number):
- """\
- Return the list of pilatus files for a given scan_number and the glob pattern.
- Returns None if no file is found.
- """
- fpattern_wildcard = self.get_read_filename(scan_number, index=None)
- file_list = glob.glob(fpattern_wildcard)
- if file_list:
- return file_list, fpattern_wildcard
- else:
- nomatch = fpattern_wildcard
- # Try again with the exposure suffix
- fpattern_wildcard = self.get_read_filename(scan_number, index=None, exposure='00000')
- file_list = glob.glob(fpattern_wildcard)
- if file_list:
- return file_list, fpattern_wildcard
- else:
- return None, (nomatch, fpattern_wildcard)
-
- def read(self, scan_number, dpsize=None, ctr=None, multexp=False, **kwargs):
- """\
- Read in the data
- TODO: (maybe?) MPI to avoid loading all data in a single process for large scans.
- """
- verbose(2, 'Processing scan number %d' % scan_number)
-
- scaninfo = None
- if self.specinfo is not None:
- if not self.specinfo.scans.has_key(scan_number): self.specinfo.parse()
- scaninfo = self.specinfo.scans.get(scan_number, None)
- if scaninfo is None:
- raise RuntimeError('Scan #S %d could not be found in spec file!' % scan_number)
-
- write_path = self.get_write_path(scan_number)
-
- read_path = self.get_read_path(scan_number)
- verbose(2, 'Will read from path: %s' % read_path)
-
- if multexp:
- raise RuntimeError('Multiple exposure scans are not yet supported')
-
- verbose(3, 'Looking for a first file...')
- file_list, fpattern_wildcard = self.get_pilatus_files(scan_number)
- if file_list is None:
- raise IOError('No file matching "%s" or "%s"' % fpattern_wildcard)
- verbose(3, 'Found %d files matching "%s"' % (len(file_list), fpattern_wildcard))
-
- multexp_stuff = """\
- # multiple exposures
- fpattern = p.filename_multexp_pattern.format(p, exp='*', index='{index}')
- this_fp = fpattern.format(index='00000')
- file_list = glob.glob(this_fp)
- p.multexp = True
- verbose(3, 'Found a file matching "%s"' % this_fp)
- if not file_list:
- raise IOError('No file match!')
- else:
- verbose(2, 'This is a multiple-exposure dataset')
- """
-
- # Read in a first dataset to evaluate the size and (eventually) the center.
- first_filename = sorted(file_list)[0]
- f,meta = io.image_read(first_filename, doglob=True)
-
- amultexp_stuff = """\
- if num_exp > 1:
-
- exp_times = np.array([float(m['time_of_frame']) for m in meta])
-
- min_exp = exp_times.min()
- low_exp_index = exp_times.argmin()
-
- all_same = False
- if min_exp == exp_times.mean():
- # all exposure times are the same
- all_same = True
- verbose(2, 'Exposure time: %f' % min_exp)
- else:
- max_exp = exp_times.max()
- high_exp_index = exp_times.argmax()
- verbose(2, 'Lowest exposure time: %f (number %d)' % (min_exp, low_exp_index))
- verbose(2, 'Highest exposure time: %f (number %d)' % (max_exp, high_exp_index))
- """
- f = f[0]
- sh = f.shape
-
- if self.pilatus_mask is None:
- fmask = np.ones_like(f)
- verbose(3, 'pilatus_mask is not defined.')
- else:
- verbose(3, 'Using provided pilatus_mask')
- slicetuple = _PILATUS_SLICES.get(sh, None)
- if slicetuple is None:
- raise RuntimeError('Array shape %s is incompatible with known shapes.' % str(sh))
- fmask = self.pilatus_mask[slicetuple]
-
- fullframe = False
- if dpsize is None:
- dpsize = sh
- verbose(2, 'Full frames (%d x %d) will be saved (so no recentering).' % (sh))
- fullframe = True
- elif np.isscalar(dpsize):
- dpsize = (dpsize,dpsize)
- dpsize = np.array(dpsize)
-
- data_filename = self.get_save_filename(scan_number, dpsize)
- verbose(2, 'Data will be saved to %s' % data_filename)
-
- if not fullframe:
- # Compute center of mass
- f0 = (f*fmask).sum(axis=0)
- f1 = (f*fmask).sum(axis=1)
- c0 = (np.arange(len(f0))*f0).sum()/f0.sum()
- c1 = (np.arange(len(f1))*f1).sum()/f1.sum()
-
- ctr_auto = (c1, c0)
-
- # Check for center position
- if ctr is None:
- ctr = ctr_auto
- verbose(2, 'Using center: (%d, %d)' % ctr)
- elif ctr == 'inter':
- import matplotlib as mpl
- fig = mpl.pyplot.figure()
- ax = fig.add_subplot(1,1,1)
- ax.imshow(np.log(f))
- ax.set_title('Select center point (hit return to finish)')
- s = U.Multiclicks(ax, True, mode='replace')
- mpl.pyplot.show()
- s.wait_until_closed()
- ctr = np.round(np.array(s.pts[0][::-1]));
- verbose(2, 'Using center: (%d, %d) - I would have guessed it is (%d, %d)' % (ctr[0], ctr[1], ctr_auto[0], ctr_auto[1]))
- else:
- verbose(2, 'Using center: (%d, %d) - I would have guessed it is (%d, %d)' % (ctr[0], ctr[1], ctr_auto[0], ctr_auto[1]))
-
- ctr = np.array(ctr)
- lim_inf = ctr - dpsize/2.
- lim_sup = ctr + dpsize/2.
- if (lim_inf < 0).any() or (lim_sup >= np.array(sh)).any():
- verbose(1, 'Warning: chosen center is too close to the edge! Changing the coordinates to make it fit.')
- out_string = 'From ' + str(ctr)
- ctr -= lim_inf * (lim_inf < 0)
- ctr -= (lim_sup - np.array(sh)) * (lim_sup >= np.array(sh))
- lim_inf = ctr - dpsize/2.
- lim_sup = ctr + dpsize/2.
- out_string += ' to ' + str(ctr)
- verbose(1, out_string)
-
- fmask = fmask[lim_inf[0]:lim_sup[0], lim_inf[1]:lim_sup[1]]
-
- # Prepare the general meta-information dictionnary
- meta_list = dict([(k, []) for k in meta[0].keys()])
-
- if fullframe:
- f,meta = io.image_read(fpattern_wildcard, doglob=True)
- else:
- f,meta = io.image_read(fpattern_wildcard, doglob=True, roi=(lim_inf[0],lim_sup[0],lim_inf[1],lim_sup[1]))
-
- for mm in meta:
- for k,v in mm.items():
- meta_list[k].append(v)
-
- npts = len(f)
- verbose(2, 'Read %d files' % npts)
-
- # Store in a 3D array
- data = np.zeros((npts, dpsize[0], dpsize[1]), dtype=np.single);
- for nn in range(npts):
- data[nn,:,:] = f[nn]
-
- del f
-
- # Store additional info from spec file
- scandict = {}
- scandict['write_path'] = write_path
- scandict['data_filename'] = data_filename
- scandict['read_path'] = read_path
- scandict['dpsize'] = dpsize
- scandict['ctr'] = ctr
-
- try:
- rawheader = meta_list['rawheader'][0].lower()
- scandict['exposure_time'] = float(rawheader[rawheader.find('exposure_time'):].split()[1])
- except KeyError:
- pass
-
- if scaninfo is not None:
- scandict['date'] = scaninfo.date
- scandict['counters'] = scaninfo.data
- scandict['motors'] = scaninfo.motors
- scandict['spec_command'] = scaninfo.command
- if not scandict.has_key('exposure_time'):
- scandict['exposure_time'] = float(scaninfo.command.split()[-1])
- else:
- # Try to extract date from first frame
- tm = re.findall("([0-9]{4}-[0-9]{2}-[0-9]{2}[^\n]*)", rawheader)
- if tm:
- t = tm[0].strip().split('.')[0] # remove the fraction of second
- scandict['date'] = time.strptime(t, "%Y-%m-%dt%H:%M:%S")
- scandict['counters'] = None
- scandict['motors'] = None
- scandict['spec_command'] = None
-
- return data, fmask, meta_list, scandict
-
-
- def get_read_path(self, scan_number):
- return READ_PATH_PATTERN.format(base_path=self.base_path,
- pilatus_dir=self.pilatus_dir,
- smin=int(1000*np.floor(scan_number/1000.)),
- smax=int(1000*np.floor(scan_number/1000.)+999),
- scan_number=scan_number)
-
- def get_read_filename(self, scan_number, index=None, exposure=None):
- read_path = self.get_read_path(scan_number)
- if index is None:
- index = INDEX_REPLACEMENT_STRING
- else:
- try:
- index = '%05d' % index
- except TypeError:
- pass
- if exposure is None:
- filename = FILENAME_PATTERN.format(read_path=read_path,
- prefix=self.prefix,
- scan_number=scan_number,
- index=index,
- file_extension=self.file_extension)
- else:
- try:
- exposure = '%05d' % exposure
- except TypeError:
- pass
- filename = FILENAME_MULTEXP_PATTERN.format(read_path=read_path,
- prefix=self.prefix,
- scan_number=scan_number,
- index=index,
- exp=exposure,
- file_extension=self.file_extension)
- return filename
-
- def set_pilatus_mask(self,mask):
- """\
- Stores a pilatus mask (or loads it if the provided input is a filename).
- """
- if type(mask)==str:
- try:
- mask,meta = io.image_read(mask)
- except IOError:
- mask = io.loadmat(mask)['mask']
- mask = np.rot90(mask,2)
- self.pilatus_mask = (mask != 0)
+def get_data_object(inargs):
+ path, numframes, exists = get_data_path(inargs)
+ if not exists:
+ raise IOError('File:%s does not exist.' % path)
+ else:
+ _data = fabio.open(path, 0)
+ _data.shape = (numframes, _data.dim1, _data.dim2)
+ _data.scan_number = inargs['scan_number']
+ return _data
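For orientation, a worked example of the path that get_data_path() assembles (base path, visit and scan number are invented). Note that get_positions() also reads inargs.motors_multiplier, which the Defaults block above does not declare, so a reconstruction script has to supply it, e.g. as a 2-tuple of metre conversion factors:

    args = {'base_path': '/sls/csaxs/e00000', 'detector': 'pilatus_1',
            'visit': 'e00000', 'scan_number': 1234,
            'inner_number': 0, 'frame_number': 0}
    scan_int = args['scan_number'] // 1000                            # -> 1
    args['group_folder'] = "S%02d000-%02d999" % (scan_int, scan_int)  # -> 'S01000-01999'
    file_path = '%(base_path)s/%(detector)s/%(group_folder)s/S%(scan_number)05d/' % args
    frame = file_path + '%(visit)s_1_%(scan_number)05d_%(inner_number)05d_%(frame_number)05d.cbf' % args
    # frame == '/sls/csaxs/e00000/pilatus_1/S01000-01999/S01234/e00000_1_01234_00000_00000.cbf'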
diff --git a/ptypy/experiment/hdf5_loader.py b/ptypy/experiment/hdf5_loader.py
new file mode 100644
index 000000000..108f9bcb7
--- /dev/null
+++ b/ptypy/experiment/hdf5_loader.py
@@ -0,0 +1,535 @@
+# -*- coding: utf-8 -*-
+"""\
+A generalised hdf5 scan loader for ptypy.
+
+This file is part of the PTYPY package.
+
+ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :license: GPLv2, see LICENSE for details.
+"""
+
+import h5py as h5
+import numpy as np
+
+from ptypy import utils as u
+from ptypy.core.data import PtyScan
+from ptypy.experiment import register
+from ptypy.utils.verbose import log
+from ptypy.utils.array_utils import _translate_to_pix
+
+
+@register()
+class Hdf5Loader(PtyScan):
+ """
+ First attempt to make a generalised hdf5 loader for data. Please raise a ticket in github if changes are required
+ so we can coordinate. There will be a Nexus and CXI subclass to this in the future.
+
+ Defaults:
+
+ [name]
+ default = 'Hdf5Loader'
+ type = str
+ help =
+
+ [intensities]
+ default =
+ type = Param
+ help = This parameter contains the diffraction data. Data shapes can be either (A, B, frame_size_m, frame_size_n) or
+ (C, frame_size_m, frame_size_n). It's assumed in this latter case that the fast axis in the scan corresponds
+      to the fast axis on disc (i.e. C-ordered layout).
+
+ [intensities.is_swmr]
+ default = False
+ type = bool
+ help = If this is set to be true, then intensities are assumed to be a swmr dataset that is being written as processing
+ is taking place
+
+ [intensities.live_key]
+ default = None
+ type = str
+ help = If intensities.is_swmr is true then we need a live_key to know where the data collection has progressed to.
+ This is the key to these live keys inside the intensities.file. They are zero at the scan start, but non-zero
+ when the position is complete.
+
+ [intensities.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the diffraction intensities.
+
+ [intensities.key]
+ default = None
+ type = str
+ help = This is the key to the intensities entry in the hdf5 file.
+
+ [positions]
+ default =
+ type = Param
+ help = This parameter contains the position information data. Shapes for each axis that are currently covered and
+ tested corresponding to the intensity shapes are:
+ axis_data.shape (A, B) for data.shape (A, B, frame_size_m, frame_size_n),
+ axis_data.shape (k,) for data.shape (k, frame_size_m, frame_size_n),
+      axis_data.shape (C, D) for data.shape (C*D, frame_size_m, frame_size_n),
+ axis_data.shape (C,) for data.shape (C, D, frame_size_m, frame_size_n) where D is the
+ size of the other axis,
+ and axis_data.shape (C,) for data.shape (C*D, frame_size_m, frame_size_n) where D is the
+ size of the other axis.
+
+ [positions.is_swmr]
+ default = False
+ type = bool
+ help = If this is set to be true, then positions are assumed to be swmr datasets that are being written as processing
+ is taking place.
+
+ [positions.live_key]
+ default = None
+ type = str
+ help = If positions.is_swmr is true then we need a live_key to know where the data collection has progressed to.
+ This is the key to these live keys inside the positions.file. If None, whilst positions.is_swmr is True,
+ then we just assume the same keys work for both positions and intensities. They are zero at the scan start,
+ but non-zero when the position is complete.
+
+ [positions.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the position information. If None then we try to find the information
+ in the "intensities.file" location.
+
+ [positions.slow_key]
+ default = None
+ type = str
+ help = This is the key to the slow-axis positions entry in the hdf5 file.
+
+ [positions.slow_multiplier]
+ default = 1.0
+ type = float
+ help = This is a scaling factor to get the motor position into metres.
+
+ [positions.fast_key]
+ default = None
+ type = str
+ help = This is the key to the fast-axis positions entry in the hdf5 file.
+
+ [positions.fast_multiplier]
+ default = 1.0
+ type = float
+ help = This is a scaling factor to get the motor position into metres.
+
+ [mask]
+ default =
+ type = Param
+ help = This parameter contains the mask data. The shape is assumed to be (frame_size_m, frame_size_n) or the same
+ shape of the full intensities data.
+
+ [mask.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the diffraction mask.
+
+ [mask.key]
+ default = None
+ type = str
+ help = This is the key to the mask entry in the hdf5 file.
+
+ [flatfield]
+ default =
+ type = Param
+ help = This parameter contains the flatfield data. The shape is assumed to be (frame_size_m, frame_size_n) or the same
+ shape of the full intensities data.
+
+ [flatfield.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the diffraction flatfield.
+
+ [flatfield.key]
+ default = None
+ type = str
+ help = This is the key to the flatfield entry in the hdf5 file.
+
+ [darkfield]
+ default =
+ type = Param
+ help = This parameter contains the darkfield data.The shape is assumed to be (frame_size_m, frame_size_n) or the same
+ shape of the full intensities data.
+
+ [darkfield.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the diffraction darkfield.
+
+ [darkfield.key]
+ default = None
+ type = str
+ help = This is the key to the darkfield entry in the hdf5 file.
+
+ [normalisation]
+ default =
+ type = Param
+ help = This parameter contains information about the per-point normalisation (i.e. ion chamber reading).
+ It is assumed to have the same dimensionality as data.shape[:-2]
+
+ [normalisation.is_swmr]
+ default = False
+ type = bool
+ help = If this is set to be true, then normalisations are assumed to be swmr datasets that are being written as processing
+ is taking place.
+
+ [normalisation.live_key]
+ default = None
+ type = str
+ help = If normalisation.is_swmr is true then we need a live_key to know where the data collection has progressed to.
+ This is the key to these live keys inside the normalisation.file. If None, whilst normalisation.is_swmr is
+ True, then we just assume the same keys work for both normalisation and intensities. They are zero at the
+ scan start, but non-zero when the position is complete.
+
+ [normalisation.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the normalisation information. If None then we try to find the information
+ in the "intensities.file" location.
+
+ [normalisation.key]
+ default = None
+ type = str
+ help = This is the key to the normalisation entry in the hdf5 file.
+
+ [recorded_energy]
+ default =
+ type = Param
+    help = This parameter contains information for using the recorded energy rather than passing it as a parameter.
+ It should be a scalar value.
+
+ [recorded_energy.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the recorded_energy.
+
+ [recorded_energy.key]
+ default = None
+ type = str
+ help = This is the key to the recorded_energy entry in the hdf5 file.
+
+ [recorded_distance]
+ default =
+ type = Param
+    help = This parameter contains information for using the recorded sample-to-detector distance rather than passing it as a parameter.
+ It should be a scalar value.
+
+ [recorded_distance.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the recorded_distance between sample and detector.
+
+ [recorded_distance.key]
+ default = None
+ type = str
+ help = This is the key to the recorded_distance entry in the hdf5 file.
+
+ [recorded_psize]
+ default =
+ type = Param
+    help = This parameter contains information for using the recorded detector pixel size rather than passing it as a parameter.
+ It should be a scalar value.
+
+ [recorded_psize.file]
+ default = None
+ type = str
+ help = This is the path to the file containing the recorded detector psize.
+
+ [recorded_psize.key]
+ default = None
+ type = str
+ help = This is the key to the recorded_psize entry in the hdf5 file.
+
+ [shape]
+ type = int, tuple
+ default = None
+ help = Shape of the region of interest cropped from the raw data.
+ doc = Cropping dimension of the diffraction frame
+ Can be None, (dimx, dimy), or dim. In the latter case shape will be (dim, dim).
+    userlevel = 1
+
+ """
+
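+    # A hypothetical configuration sketch (file paths and hdf5 keys invented):
+    #
+    #   p.scans.scan00.data.name = 'Hdf5Loader'
+    #   p.scans.scan00.data.intensities.file = '/data/scan_00001.h5'
+    #   p.scans.scan00.data.intensities.key = 'entry/detector/data'
+    #   p.scans.scan00.data.positions.file = '/data/scan_00001.h5'
+    #   p.scans.scan00.data.positions.slow_key = 'entry/motors/y'
+    #   p.scans.scan00.data.positions.fast_key = 'entry/motors/x'
+    #
+    # These five file/key entries are the essentials validated in __init__ below.
+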
+ def __init__(self, pars=None, **kwargs):
+ """
+ hdf5 data loader
+ """
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars, in_place_depth=99)
+
+ super(Hdf5Loader, self).__init__(self.p, **kwargs)
+
+ self._scantype = None
+ self._ismapped = None
+ self.intensities = None
+ self.slow_axis = None
+ self.fast_axis = None
+ self.darkfield = None
+ self.flatfield = None
+ self.mask = None
+ self.normalisation = None
+ self.normalisation_laid_out_like_positions = None
+ self.darkfield_laid_out_like_data = None
+ self.flatfield_field_laid_out_like_data = None
+ self.mask_laid_out_like_data = None
+
+        # let's raise some exceptions here for the essentials
+ if None in [self.p.intensities.file,
+ self.p.intensities.key,
+ self.p.positions.file,
+ self.p.positions.slow_key,
+ self.p.positions.fast_key]:
+ raise RuntimeError("Missing some information about either the positions or the intensity mapping!")
+
+ log(4, u.verbose.report(self.info))
+ if True in [self.p.intensities.is_swmr,
+ self.p.positions.is_swmr,
+ self.p.normalisation.is_swmr]:
+ raise NotImplementedError("Currently swmr functionality is not implemented! Coming soon...")
+
+ self.intensities = h5.File(self.p.intensities.file, 'r')[self.p.intensities.key]
+ data_shape = self.intensities.shape
+ slow_axis = h5.File(self.p.positions.file, 'r')[self.p.positions.slow_key][...]
+ self.slow_axis = np.squeeze(slow_axis) if slow_axis.ndim>2 else slow_axis
+ positions_slow_shape = self.slow_axis.shape
+ fast_axis = h5.File(self.p.positions.file, 'r')[self.p.positions.fast_key][...]
+ self.fast_axis = np.squeeze(fast_axis) if fast_axis.ndim>2 else fast_axis
+ positions_fast_shape = self.fast_axis.shape
+ log(3, "The shape of the \n\tdiffraction intensities is: {}\n\tslow axis data:{}\n\tfast axis data:{}".format(data_shape,
+ positions_slow_shape,
+ positions_fast_shape))
+ self.compute_scan_mapping_and_trajectory(data_shape, positions_fast_shape, positions_slow_shape)
+
+ if None not in [self.p.darkfield.file, self.p.darkfield.key]:
+ self.darkfield = h5.File(self.p.darkfield.file, 'r')[self.p.darkfield.key]
+ log(3, "The darkfield has shape: {}".format(self.darkfield.shape))
+ if self.darkfield.shape == data_shape:
+ log(3, "The darkfield is laid out like the data.")
+ self.darkfield_laid_out_like_data = True
+ elif self.darkfield.shape == data_shape[-2:]:
+ log(3, "The darkfield is not laid out like the data.")
+ self.darkfield_laid_out_like_data = False
+ else:
+ raise RuntimeError("I have no idea what to do with this shape of darkfield data.")
+ else:
+ log(3, "No darkfield will be applied.")
+
+ if None not in [self.p.flatfield.file, self.p.flatfield.key]:
+ self.flatfield = h5.File(self.p.flatfield.file, 'r')[self.p.flatfield.key]
+ log(3, "The flatfield has shape: {}".format(self.flatfield.shape))
+ if self.flatfield.shape == data_shape:
+ log(3, "The flatfield is laid out like the data.")
+ self.flatfield_laid_out_like_data = True
+ elif self.flatfield.shape == data_shape[-2:]:
+ log(3, "The flatfield is not laid out like the data.")
+ self.flatfield_laid_out_like_data = False
+ else:
+ raise RuntimeError("I have no idea what to do with this shape of flatfield data.")
+ else:
+ log(3, "No flatfield will be applied.")
+
+ if None not in [self.p.mask.file, self.p.mask.key]:
+ self.mask = h5.File(self.p.mask.file, 'r')[self.p.mask.key]
+ log(3, "The mask has shape: {}".format(self.mask.shape))
+ if self.mask.shape == data_shape:
+ log(3, "The mask is laid out like the data.")
+ self.mask_laid_out_like_data = True
+ elif self.mask.shape == data_shape[-2:]:
+ log(3, "The mask is not laid out like the data.")
+ self.mask_laid_out_like_data = False
+ else:
+ raise RuntimeError("I have no idea what to do with this shape of mask data.")
+ else:
+ log(3, "No mask will be applied.")
+
+
+ if None not in [self.p.normalisation.file, self.p.normalisation.key]:
+ self.normalisation = h5.File(self.p.normalisation.file, 'r')[self.p.normalisation.key]
+ if (self.normalisation.shape == self.fast_axis.shape == self.slow_axis.shape):
+ log(3, "The normalisation is the same dimensionality as the axis information.")
+ self.normalisation_laid_out_like_positions = True
+ elif self.normalisation.shape[:2] == self.fast_axis.shape == self.slow_axis.shape:
+ log(3, "The normalisation matches the axis information, but will average the other dimensions.")
+ self.normalisation_laid_out_like_positions = False
+ else:
+ raise RuntimeError("I have no idea what to do with this is shape of normalisation data.")
+ else:
+ log(3, "No normalisation will be applied.")
+
+ if None not in [self.p.recorded_energy.file, self.p.recorded_energy.key]:
+ self.p.energy = h5.File(self.p.recorded_energy.file, 'r')[self.p.recorded_energy.key][0]
+ log(3, "loading energy={} from file".format(self.p.energy))
+
+
+ if None not in [self.p.recorded_distance.file, self.p.recorded_distance.key]:
+ self.p.distance = h5.File(self.p.recorded_distance.file, 'r')[self.p.recorded_distance.key][0]
+ log(3, "loading distance={} from file".format(self.p.distance))
+
+ if None not in [self.p.recorded_psize.file, self.p.recorded_psize.key]:
+ self.p.psize = h5.File(self.p.recorded_psize.file, 'r')[self.p.recorded_psize.key][0]
+ log(3, "loading psize={} from file".format(self.p.psize))
+
+
+ # now let's figure out the cropping and centering roughly so we don't load the full data in.
+ frame_shape = np.array(data_shape[-2:])
+ center = frame_shape // 2 if self.p.center is None else u.expect2(self.p.center)
+ center = np.array([_translate_to_pix(frame_shape[ix], center[ix]) for ix in range(len(frame_shape))])
+
+ if self.p.shape is None:
+ self.frame_slices = (slice(None, None, 1), slice(None, None, 1))
+ self.p.shape = frame_shape
+ else:
+ pshape = u.expect2(self.p.shape)
+ low_pix = center - pshape // 2
+ high_pix = low_pix + pshape
+ self.frame_slices = (slice(low_pix[0], high_pix[0], 1), slice(low_pix[1], high_pix[1], 1))
+ self.p.center = pshape // 2 #the new center going forward
+ self.info.center = self.p.center
+ self.p.shape = pshape
+
+ # it's much better to have this logic here than in load!
+ if self._ismapped and (self._scantype == 'arb'):
+ # easy peasy
+ log(3, "This scan looks to be a mapped arbitrary trajectory scan.")
+ self.load = self.load_mapped_and_arbitrary_scan
+
+ if self._ismapped and (self._scantype == 'raster'):
+ log(3, "This scan looks to be a mapped raster scan.")
+ self.load = self.load_mapped_and_raster_scan
+
+ if (self._scantype == 'raster') and not self._ismapped:
+ log(3, "This scan looks to be an unmapped raster scan.")
+ self.load = self.load_unmapped_raster_scan
+
+ def load_unmapped_raster_scan(self, indices):
+ intensities = {}
+ positions = {}
+ weights = {}
+ sh = self.slow_axis.shape
+ for jj in indices:
+ weights[jj], intensities[jj] = self.get_corrected_intensities(jj)
+ positions[jj] = np.array([self.slow_axis[jj // sh[1], jj % sh[1]] * self.p.positions.slow_multiplier,
+ self.fast_axis[jj // sh[1], jj % sh[1]] * self.p.positions.fast_multiplier])
+ log(3, 'Data loaded successfully.')
+ return intensities, positions, weights
+
+ def load_mapped_and_raster_scan(self, indices):
+ intensities = {}
+ positions = {}
+ weights = {}
+ sh = self.slow_axis.shape
+ for jj in indices:
+ fast_idx = jj % sh[1]
+ slow_idx = jj // sh[1]
+ weights[jj], intensities[jj] = self.get_corrected_intensities((slow_idx, fast_idx)) # or the other way round???
+ positions[jj] = np.array([self.slow_axis[slow_idx, fast_idx] * self.p.positions.slow_multiplier,
+ self.fast_axis[slow_idx, fast_idx] * self.p.positions.fast_multiplier])
+ log(3, 'Data loaded successfully.')
+ return intensities, positions, weights
+
+ def load_mapped_and_arbitrary_scan(self, indices):
+ intensities = {}
+ positions = {}
+ weights = {}
+ for jj in indices:
+ weights[jj], intensities[jj] = self.get_corrected_intensities(jj)
+ positions[jj] = np.array([self.slow_axis[jj] * self.p.positions.slow_multiplier,
+ self.fast_axis[jj] * self.p.positions.fast_multiplier])
+
+ log(3, 'Data loaded successfully.')
+ return intensities, positions, weights
+
+ def get_corrected_intensities(self, index):
+ '''
+ Corrects the intensities for darkfield, flatfield and normalisations if they exist.
+ There is a lot of logic here; there may be a cleaner way to express it.
+ Limited a bit by the MPI, and thinking about extension to large data sizes.
+ '''
+ if isinstance(index, int):
+ index = (index,)
+ indexed_frame_slices = tuple([slice(ix, ix+1, 1) for ix in index])
+ indexed_frame_slices += self.frame_slices
+
+ intensity = self.intensities[indexed_frame_slices]
+ # TODO: Remove these logic blocks into something a bit more sensible.
+ if self.darkfield is not None:
+ if self.darkfield_laid_out_like_data:
+ intensity -= self.darkfield[indexed_frame_slices]
+ else:
+ intensity -= self.darkfield[self.frame_slices]
+
+ if self.flatfield is not None:
+ if self.flatfield_laid_out_like_data:
+ intensity /= self.flatfield[indexed_frame_slices]
+ else:
+ intensity /= self.flatfield[self.frame_slices]
+
+ if self.normalisation is not None:
+ if self.normalisation_laid_out_like_positions:
+ intensity /= self.normalisation[index]
+ else:
+ intensity /= self.normalisation[index].mean()  # average the non-position dimensions
+
+ if self.mask is not None:
+ if self.mask_laid_out_like_data:
+ mask = self.mask[indexed_frame_slices]
+ else:
+ mask = self.mask[self.frame_slices]
+ else:
+ mask = np.ones_like(intensity, dtype=np.int)
+ return mask, intensity
+
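As a plain numpy illustration of the correction order implemented in get_corrected_intensities above — dark subtraction first, then flatfield and normalisation division, with the mask returned alongside — here is a minimal sketch with made-up arrays, not part of the loader:

    import numpy as np

    frame = np.full((4, 4), 110.0)  # made-up raw detector frame
    dark = np.full((4, 4), 10.0)    # made-up darkfield
    flat = np.full((4, 4), 2.0)     # made-up flatfield
    norm = 5.0                      # made-up per-position normalisation

    corrected = (frame - dark) / flat / norm
    assert np.allclose(corrected, 10.0)  # (110 - 10) / 2 / 5 == 10
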
+ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, positions_slow_shape):
+ '''
+ This horrendous block of logic is all to do with making a semi-intelligent guess at what the data looks like.
+ '''
+ if data_shape[:-2] == positions_slow_shape == positions_fast_shape:
+ '''
+ cases covered:
+ axis_data.shape (A, B) for data.shape (A, B, frame_size_m, frame_size_n) or
+ axis_data.shape (k,) for data.shape (k, frame_size_m, frame_size_n)
+ '''
+ log(3, "Everything is wonderful, each diffraction point has a co-ordinate.")
+ self.num_frames = np.prod(positions_fast_shape)
+ self._ismapped = True
+ if len(data_shape) == 4:
+ self._scantype = "raster"
+ else:
+ self._scantype = "arb"
+
+ elif data_shape[0] == np.prod(positions_fast_shape) == np.prod(positions_slow_shape):
+ '''
+ cases covered:
+ axis_data.shape (C, D) for data.shape (C*D, frame_size_m, frame_size_n) ,
+ '''
+ log(3, "Positions are raster, but data is a list of frames. Unpacking the data to match the positions...")
+ self.num_frames = np.prod(positions_fast_shape)
+ self._ismapped = False
+ self._scantype = 'raster'
+
+ elif (len(positions_slow_shape) == 1) and (len(positions_fast_shape) == 1):
+ if data_shape[:-2] == (positions_slow_shape[0], positions_fast_shape[0]):
+ '''
+ cases covered:
+ axis_data.shape (C,) for data.shape (C, D, frame_size_m, frame_size_n) where D is the size of the other axis,
+ '''
+ log(3, "Assuming the axes are 1D and need to be meshed to match the raster style data")
+ self.num_frames = np.prod(data_shape[:-2])
+ self.fast_axis, self.slow_axis = np.meshgrid(self.fast_axis[...], self.slow_axis[...])
+ self._ismapped = True
+ self._scantype = 'raster'
+ elif data_shape[0] == (positions_slow_shape[0] * positions_fast_shape[0]):
+ '''
+ cases covered:
+ axis_data.shape (C,) for data.shape (C*D, frame_size_m, frame_size_n) where D is the size of the other axis.
+ '''
+ self.num_frames = data_shape[0]
+ self.fast_axis, self.slow_axis = np.meshgrid(self.fast_axis[...], self.slow_axis[...])
+ self._ismapped = False
+ self._scantype = 'raster'
+ else:
+ raise IOError("I don't know what to do with these positions/data shapes")
+ else:
+ raise IOError("I don't know what to do with these positions/data shapes")
+
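For orientation, a minimal sketch of how the essential Hdf5Loader parameters above might be filled in; every path and hdf5 key here is a placeholder, not a real beamline layout:

    from ptypy import utils as u

    p = u.Param()
    p.intensities = u.Param()
    p.intensities.file = '/path/to/scan.h5'        # placeholder
    p.intensities.key = 'entry/detector/data'      # placeholder
    p.positions = u.Param()
    p.positions.file = '/path/to/scan.h5'          # placeholder
    p.positions.slow_key = 'entry/positions/slow'  # placeholder
    p.positions.fast_key = 'entry/positions/fast'  # placeholder
    p.positions.slow_multiplier = 1e-6             # motor units to metres
    p.positions.fast_multiplier = 1e-6
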
diff --git a/ptypy/experiment/DLS.py b/ptypy/experiment/legacy/DLS.py
similarity index 56%
rename from ptypy/experiment/DLS.py
rename to ptypy/experiment/legacy/DLS.py
index 7e875a7b4..da5733d35 100644
--- a/ptypy/experiment/DLS.py
+++ b/ptypy/experiment/legacy/DLS.py
@@ -9,16 +9,18 @@
"""
import numpy as np
-import os
-from .. import utils as u
-from .. import io
-from ..utils import parallel
-from ..core.data import PtyScan
-from ..utils.verbose import log
-from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
import h5py as h5
+from ptypy import utils as u
+from ptypy import io
+from ptypy.experiment import register
+from ptypy.core.data import PtyScan
+from ptypy.utils.verbose import log
+from ptypy.core.paths import Paths
+from ptypy.core import Ptycho
+
+IO_par = Ptycho.DEFAULT['io']
+
logger = u.verbose.logger
# Parameters for the nexus file saved by GDA
@@ -36,63 +38,153 @@
NEXUS_PATHS.label = 'entry1/entry_identifier'
NEXUS_PATHS.experiment = 'entry1/experiment_identifier'
-# Recipe defaults
-RECIPE = u.Param()
-RECIPE.is_swmr = False
-RECIPE.israster = 0
-RECIPE.experimentID = None # Experiment identifier
-RECIPE.scan_number = None # scan number
-RECIPE.dark_number = None
-RECIPE.flat_number = None
-RECIPE.energy = None
-RECIPE.lam = None # 1.2398e-9 / RECIPE.energy
-RECIPE.z = None # Distance from object to screen
-RECIPE.detector_name = 'merlin_sw_hdf' # Name of the detector as specified in the nexus file
-RECIPE.motors = ['t1_sx', 't1_sy'] # Motor names to determine the sample translation
-# RECIPE.motors_multiplier = 1e-6 # Motor conversion factor to meters
-RECIPE.motors_multiplier = [1e-6,-1e-6] # Motor conversion factor to meters
-RECIPE.base_path = './'
-RECIPE.data_file_pattern = '%(base_path)s' + 'raw/%(scan_number)05d.nxs'
-RECIPE.dark_file_pattern = '%(base_path)s' + 'raw/%(dark_number)05d.nxs'
-RECIPE.flat_file_pattern = '%(base_path)s' + 'raw/%(flat_number)05d.nxs'
-RECIPE.mask_file = None # '%(base_path)s' + 'processing/mask.h5'
-RECIPE.NFP_correct_positions = False # Position corrections for NFP beamtime Oct 2014
-RECIPE.use_EP = False # Use flat as Empty Probe (EP) for probe sharing; needs to be set to True in the recipe of the scan that will act as EP'
-RECIPE.remove_hot_pixels = u.Param( # Apply hot pixel correction
- apply = False, # Initiate by setting to True; DEFAULT parameters will be used if not specified otherwise
- size = 3, # Size of the window on which the median filter will be applied around every data point
- tolerance = 10, # Tolerance multiplied with the standard deviation of the data array subtracted by the blurred array
- # (difference array) yields the threshold for cutoff.
- ignore_edges = False, # If True, edges of the array are ignored, which speeds up the code
-)
-
-# Generic defaults
-I13DEFAULT = PtyScan.DEFAULT.copy()
-I13DEFAULT.recipe = RECIPE
-I13DEFAULT.auto_center = False
-I13DEFAULT.orientation = (False, False, False)
-
+@register()
class DlsScan(PtyScan):
- DEFAULT = I13DEFAULT
+ """
+ I13 (Diamond Light Source) data preparation class.
+
+ Defaults:
+
+ [name]
+ default = 'DlsScan'
+ type = str
+ help =
+
+ [is_swmr]
+ default = False
+ type = bool
+ help =
+
+ [israster]
+ default = 0
+ type = int
+ help =
+
+ [experimentID]
+ default = None
+
+ [scan_number]
+ default = None
+ type = int
+ help = Scan number
+
+ [dark_number]
+ default = None
+ type = int
+ help =
+
+ [flat_number]
+ default = None
+ type = int
+ help =
+
+ [detector_name]
+ default = 'merlin_sw_hdf'
+ type = str
+ help = Name of the detector
+ doc = As specified in the nexus file.
+
+ [motors]
+ default = ['t1_sx', 't1_sy']
+ type = list
+ help = Motor names to determine the sample translation
+
+ [motors_multiplier]
+ default = [1e-6,-1e-6]
+ type = list
+ help = Motor conversion factor to meters
+
+ [base_path]
+ default = './'
+ type = str
+ help =
+
+ [data_file_pattern]
+ default = '%(base_path)sraw/%(scan_number)05d.nxs'
+ type = str
+ help =
+
+ [dark_file_pattern]
+ default = '%(base_path)sraw/%(dark_number)05d.nxs'
+ type = str
+ help =
+
+ [flat_file_pattern]
+ default = '%(base_path)sraw/%(flat_number)05d.nxs'
+ type = str
+ help =
+
+ [mask_file]
+ default = None
+ type = str
+ help =
+
+ [NFP_correct_positions]
+ default = False
+ type = bool
+ help = Position corrections for NFP beamtime Oct 2014
+
+ [use_EP]
+ default = False
+ type = bool
+ help = Use flat as Empty Probe (EP) for probe sharing
+ doc = Needs to be set to True for the scan that will act as EP.
+
+ [remove_hot_pixels]
+ default =
+ type = Param
+ help = Apply hot pixel correction
+
+ [remove_hot_pixels.apply]
+ default = False
+ type = bool
+ help =
+
+ [remove_hot_pixels.size]
+ default = 3
+ type = int
+ help = Size of the window
+ doc = The median filter will be applied around every data point.
+
+ [remove_hot_pixels.tolerance]
+ default = 10
+ type = int
+ help =
+ doc = The cutoff threshold is this tolerance multiplied by the standard deviation of the difference between the data array and its blurred version (the difference array).
+
+ [remove_hot_pixels.ignore_edges]
+ default = False
+ type = bool
+ help = Ignore edges of the array
+ doc = Enabling speeds up the code.
+
+ [auto_center]
+ default = False
+
+ [orientation]
+ default = (False, False, False)
+ type = int, tuple, list
+
+ """
def __init__(self, pars=None, **kwargs):
"""
I13 (Diamond Light Source) data preparation class.
"""
+ log(2, "The DLS loader will be deprecated in the next release. Please use the Hdf5Loader.")
# Initialise parent class
- recipe_default = RECIPE.copy()
- recipe_default.update(pars.recipe, in_place_depth=5)
- pars.recipe.update(recipe_default)
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
- super(DlsScan, self).__init__(pars, **kwargs)
- self.data_file = self.info.recipe.data_file_pattern % self.info.recipe
+ super(DlsScan, self).__init__(p, **kwargs)
+ self.data_file = self.info.data_file_pattern % self.info
# Create the ptyd file name if not specified
if self.info.dfile is None:
home = Paths(IO_par).home
- self.info.dfile = '%s/prepdata/data_%d.ptyd' % (home, self.info.recipe.scan_number)
+ self.info.dfile = '%s/prepdata/data_%d.ptyd' % (home, self.info.scan_number)
log(3, 'Save file is %s' % self.info.dfile)
log(4, u.verbose.report(self.info))
@@ -102,26 +194,26 @@ def load_weight(self):
"""
# FIXME: do something better here. (detector-dependent)
# Load mask as weight
- if self.info.recipe.mask_file is not None:
- return io.h5read(self.info.recipe.mask_file % self.info.recipe, 'mask')['mask'].astype(float)
+ if self.info.mask_file is not None:
+ return io.h5read(self.info.mask_file % self.info, 'mask')['mask'].astype(float)
def load_positions(self):
"""
Load the positions and return as an (N,2) array
"""
# Load positions from file if possible.
- if self.info.recipe.is_swmr:
- instrument = h5.File(self.data_file, 'r', libver='latest', swmr=True)[NEXUS_PATHS.instrument % self.info.recipe]
+ if self.info.is_swmr:
+ instrument = h5.File(self.data_file, 'r', libver='latest', swmr=True)[NEXUS_PATHS.instrument % self.info]
else:
- instrument = h5.File(self.data_file, 'r')[NEXUS_PATHS.instrument % self.info.recipe]
- if self.info.recipe.israster:
+ instrument = h5.File(self.data_file, 'r')[NEXUS_PATHS.instrument % self.info]
+ if self.info.israster:
self.position_shape = instrument[0].shape
motor_positions = []
i=0
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
for k in NEXUS_PATHS.motors:
- if not self.info.recipe.israster:
+ if not self.info.israster:
motor_positions.append(instrument[k]*mmult[i])
else:
motor_positions.append((instrument[k]*mmult[i]).ravel())
@@ -141,14 +233,14 @@ def check(self, frames, start):
- the number of frames available from a starting point `start`
- bool if the end of scan was reached (None if this routine doesn't know)
"""
- if not self.info.recipe.is_swmr:
+ if not self.info.is_swmr:
npos = self.num_frames
frames_accessible = min((frames, npos - start))
stop = self.frames_accessible + start
return frames_accessible, (stop >= npos)
else:
f = h5.File(self.data_file, 'r', libver='latest', swmr=True)
- dset= f[NEXUS_PATHS.live_key_pattern % self.info.recipe]
+ dset= f[NEXUS_PATHS.live_key_pattern % self.info]
dset.id.refresh()
num_avail = len(dset)-start
frames_accessible = min((frames, num_avail))
@@ -167,10 +259,10 @@ def load(self, indices):
raw = {}
pos = {}
weights = {}
- key = NEXUS_PATHS.frame_pattern % self.info.recipe
- if not self.info.recipe.israster:
+ key = NEXUS_PATHS.frame_pattern % self.info
+ if not self.info.israster:
for j in indices:
- if not self.info.recipe.is_swmr:
+ if not self.info.is_swmr:
# print "frame number "+str(j)
data = io.h5read(self.data_file, key, slice=j)[key].astype(np.float32)
raw[j] = data
@@ -183,7 +275,7 @@ def load(self, indices):
raw[j] = dset[j]
dset.file.close()
else:
- if not self.info.recipe.is_swmr:
+ if not self.info.is_swmr:
data = h5.File(self.data_file)[key]
sh = data.shape
for j in indices:
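The change in this and the following legacy loaders follows one pattern: the nested recipe sub-tree is dissolved, and its entries become top-level parameters resolved against the class's descriptor defaults. A before/after sketch with a placeholder scan number:

    # old style: parameters nested under a recipe sub-tree
    # pars.recipe.scan_number = 1234
    # pars.recipe.base_path = './'

    # new style: flat parameters merged into a deep copy of the defaults
    p = DlsScan.DEFAULT.copy(99)
    p.scan_number = 1234  # placeholder scan number
    p.base_path = './'
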
diff --git a/ptypy/experiment/DLS_mapping.py b/ptypy/experiment/legacy/DLS_mapping.py
similarity index 96%
rename from ptypy/experiment/DLS_mapping.py
rename to ptypy/experiment/legacy/DLS_mapping.py
index 56502cfb1..72212b031 100644
--- a/ptypy/experiment/DLS_mapping.py
+++ b/ptypy/experiment/legacy/DLS_mapping.py
@@ -9,16 +9,17 @@
"""
import numpy as np
-import os
-from .. import utils as u
-from .. import io
-from ..utils import parallel
-from ..core.data import PtyScan
-from ..utils.verbose import log
-from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
import h5py as h5
+from ptypy import utils as u
+from ptypy import io
+from ptypy.core.data import PtyScan
+from ptypy.utils.verbose import log
+from ptypy.core.paths import Paths
+from ptypy.core import Ptycho
+
+IO_par = Ptycho.DEFAULT['io']
+
logger = u.verbose.logger
@@ -33,8 +34,6 @@
NEXUS_PATHS.label = 'entry/entry_identifier'
NEXUS_PATHS.experiment = 'entry/experiment_identifier'
-
-
# Recipe defaults
RECIPE = u.Param()
RECIPE.is_swmr = False
@@ -79,6 +78,7 @@ def __init__(self, pars=None, **kwargs):
"""
I13 (Diamond Light Source) data preparation class.
"""
+ log(2, "The DlsScan loader will be deprecated in the next release. Please use the Hdf5Loader.")
# Initialise parent class
recipe_default = RECIPE.copy()
recipe_default.update(pars.recipe, in_place_depth=5)
diff --git a/ptypy/experiment/I08.py b/ptypy/experiment/legacy/I08.py
similarity index 62%
rename from ptypy/experiment/I08.py
rename to ptypy/experiment/legacy/I08.py
index b7b4f3d14..fc60b97b2 100644
--- a/ptypy/experiment/I08.py
+++ b/ptypy/experiment/legacy/I08.py
@@ -14,22 +14,17 @@
import time
import os
-if True: #__name__ == "__main__":
- import ptypy
- from ptypy import utils as u
- from ptypy import io
- from ptypy.utils.verbose import log
- from ptypy.core.paths import Paths
- # FIXME: Accessing the real "io" from the parent Ptycho class would be much better
- from ptypy.core import DEFAULT_io as IO_par
-else:
- from .. import utils as u
- from .. import io
- from ..core.data import PtyScan
- from ..utils.verbose import log
- from ..core.paths import Paths
- from ..core import DEFAULT_io as IO_par
-
+from ptypy import utils as u
+from ptypy import io
+from ptypy.utils.verbose import log
+from ptypy.core.paths import Paths
+# FIXME: Accessing the real "io" from the parent Ptycho class would be much better
+#from ptypy.core import DEFAULT_io as IO_par
+from ptypy.core import Ptycho
+from ptypy.core.data import PtyScan
+from ptypy.experiment import register
+
+IO_par = Ptycho.DEFAULT['io']
# Parameters for the nexus file saved by GDA
NXS_PATHS = u.Param()
@@ -41,37 +36,88 @@
STXM_PATHS.motors = 'entry1/Counter1/'
STXM_PATHS.energy = 'entry1/Counter1/'
-# I08 recipe default parameters
-RECIPE = u.Param()
-RECIPE.base_path = None
-RECIPE.scan_number = None
-RECIPE.scan_number_stxm = None
-RECIPE.dark_number = None
-RECIPE.dark_number_stxm = None
-RECIPE.dark_value = 200. # Used if dark_number is None
-RECIPE.detector_flat_file = None
-RECIPE.nxs_file_pattern = '%(base_path)s/nexus/i08-%(scan_number)s.nxs'
-RECIPE.dark_nxs_file_pattern = '%(base_path)s/nexus/i08-%(dark_number)s.nxs'
-RECIPE.date = None
-RECIPE.stxm_file_pattern = '%(base_path)s/%(date)s/discard/Sample_Image_%(date)s_%(scan_number_stxm)s.hdf5'
-
-
-RECIPE.motors = ['sample_y','sample_x'] # same orientation as I13 for now
-RECIPE.energy = None
-RECIPE.lam=None
-RECIPE.z = None
-RECIPE.motors_multiplier = 1e-6
-
-# Default generic parameter set from
-I08DEFAULT = ptypy.core.data.PtyScan.DEFAULT.copy()
-I08DEFAULT.recipe = RECIPE
-
-I08DEFAULT.auto_center = False
-
-
-class I08Scan(ptypy.core.data.PtyScan):
- DEFAULT = I08DEFAULT
+@register()
+class I08Scan(PtyScan):
+ """
+
+ I08 (Diamond Light Source) data preparation class.
+
+ Defaults:
+
+ [name]
+ default = 'I08Scan'
+ type = str
+ help =
+
+ [base_path]
+ default = None
+ type = str
+ help =
+
+ [scan_number]
+ default = None
+ type = int
+ help =
+
+ [scan_number_stxm]
+ default = None
+ type = int
+ help =
+
+ [dark_number]
+ default = None
+ type = int
+ help =
+
+ [dark_number_stxm]
+ default = None
+ type = int
+ help =
+
+ [dark_value]
+ default = 200.0
+ type = float
+ help = Used if dark_number is None
+
+ [detector_flat_file]
+ default = None
+ type = str
+ help =
+
+ [nxs_file_pattern]
+ default = '%(base_path)s/nexus/i08-%(scan_number)s.nxs'
+ type = str
+ help =
+
+ [dark_nxs_file_pattern]
+ default = '%(base_path)s/nexus/i08-%(dark_number)s.nxs'
+ type = str
+ help =
+
+ [date]
+ default = None
+ type = str
+ help = Date of the experiment in the format YYYY-MM-DD
+
+ [stxm_file_pattern]
+ default = '%(base_path)s/%(date)s/discard/Sample_Image_%(date)s_%(scan_number_stxm)s.hdf5'
+ type = str
+ help =
+
+ [motors]
+ default = ['sample_y','sample_x']
+ type = list
+ help = Same orientation as I13 for now
+
+ [motors_multiplier]
+ default = 1e-6
+ type = float
+ help = Conversion factor to meters
+
+ [auto_center]
+ default = False
+ """
def __init__(self, pars=None, **kwargs):
"""
@@ -79,14 +125,13 @@ def __init__(self, pars=None, **kwargs):
"""
# Initialize parent class. All updated parameters are now in
# self.info
- RDEFAULT = RECIPE.copy()
- RDEFAULT.update(pars.recipe)
- pars.recipe.update(RDEFAULT)
-
- super(I08Scan, self).__init__(pars, **kwargs)
+ log(2, "The I08Scan loader will be deprecated in the next release. Please use the Hdf5Loader.")
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+ super(I08Scan, self).__init__(p, **kwargs)
# Try to extract base_path to access data files
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
d = os.getcwd()
base_path = None
while True:
@@ -99,28 +144,28 @@ def __init__(self, pars=None, **kwargs):
if base_path is None:
raise RuntimeError('Could not guess base_path.')
else:
- self.info.recipe.base_path = base_path
+ self.info.base_path = base_path
# Sanity check: for now we need a date to identify the SXTM file
- if self.info.recipe.date is None:
- raise RuntimeError('recipe.date has to be specified to find the STXM file name.')
+ if self.info.date is None:
+ raise RuntimeError('date has to be specified to find the STXM file name.')
else:
try:
- time.strptime(self.info.recipe.date, '%Y-%m-%d')
+ time.strptime(self.info.date, '%Y-%m-%d')
except ValueError:
print('The date should be in format "YYYY-MM-DD"')
raise
# Construct the file names
- self.nxs_filename = self.info.recipe.nxs_file_pattern % self.info.recipe
- self.stxm_filename = self.info.recipe.stxm_file_pattern % self.info.recipe
+ self.nxs_filename = self.info.nxs_file_pattern % self.info
+ self.stxm_filename = self.info.stxm_file_pattern % self.info
log(3, 'Will read from nxs file %s' % self.nxs_filename)
log(3, 'Will read from STXM file %s' % self.stxm_filename)
# Create the ptyd file name if not specified
if self.info.dfile is None:
home = Paths(IO_par).home
- self.info.dfile = '%s/prepdata/data_%d.ptyd' % (home, self.info.recipe.scan_number)
+ self.info.dfile = '%s/prepdata/data_%d.ptyd' % (home, self.info.scan_number)
log(3, 'Save file is %s' % self.info.dfile)
def load_common(self):
@@ -132,8 +177,8 @@ def load_common(self):
"""
common = u.Param()
key = NXS_PATHS.frame_pattern
- if self.info.recipe.dark_number is not None:
- self.dark_nxs_filename = self.info.recipe.dark_nxs_file_pattern % self.info.recipe
+ if self.info.dark_number is not None:
+ self.dark_nxs_filename = self.info.dark_nxs_file_pattern % self.info
#dark = io.h5read(self.dark_nxs_filename,key)[key][0,0,:,:]# this was a problem with the dark collection. a 2x2 grid was collected.
dark = io.h5read(self.dark_nxs_filename, key)[key]
if dark.ndim == 4:
@@ -141,10 +186,10 @@ def load_common(self):
if dark.ndim == 3:
dark = np.median(dark, axis=0)
else:
- dark = self.info.recipe.dark_value
+ dark = self.info.dark_value
- if self.info.recipe.detector_flat_file is not None:
- flat = io.h5read(self.info.recipe.detector_flat_file,FLAT_PATHS.key)[FLAT_PATHS.key]
+ if self.info.detector_flat_file is not None:
+ flat = io.h5read(self.info.detector_flat_file,FLAT_PATHS.key)[FLAT_PATHS.key]
else:
flat = 1.
@@ -160,10 +205,10 @@ def load_positions(self):
"""
Load the positions and return as an (N,2) array
"""
- base_path = self.info.recipe.base_path
- mmult = u.expect2(self.info.recipe.motors_multiplier)
- keyx = STXM_PATHS.motors+str(self.info.recipe.motors[0])
- keyy=STXM_PATHS.motors+str(self.info.recipe.motors[1])
+ base_path = self.info.base_path
+ mmult = u.expect2(self.info.motors_multiplier)
+ keyx = STXM_PATHS.motors+str(self.info.motors[0])
+ keyy=STXM_PATHS.motors+str(self.info.motors[1])
print "file name is:%s" % self.stxm_filename
x1 = io.h5read(self.stxm_filename,keyx)
y1 = io.h5read(self.stxm_filename,keyy)
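The file-name patterns used throughout these loaders rely on plain Python %-interpolation against the parameter tree, which works because the tree is dict-like. A standalone sketch with placeholder values:

    pattern = '%(base_path)s/nexus/i08-%(scan_number)s.nxs'
    info = {'base_path': '/data/i08', 'scan_number': 42}  # placeholder values
    print(pattern % info)  # -> /data/i08/nexus/i08-42.nxs
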
diff --git a/ptypy/experiment/I13_ffp.py b/ptypy/experiment/legacy/I13_ffp.py
similarity index 69%
rename from ptypy/experiment/I13_ffp.py
rename to ptypy/experiment/legacy/I13_ffp.py
index d6cbc04c6..68713b063 100644
--- a/ptypy/experiment/I13_ffp.py
+++ b/ptypy/experiment/legacy/I13_ffp.py
@@ -9,11 +9,16 @@
"""
import numpy as np
import os
-from .. import utils as u
-from .. import io
-from ..core.data import PtyScan
-from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
+
+from ptypy import utils as u
+from ptypy.experiment import register
+from ptypy import io
+from ptypy.core.data import PtyScan
+from ptypy.core.paths import Paths
+from ptypy.core import Ptycho
+from ptypy.utils.verbose import log
+
+IO_par = Ptycho.DEFAULT['io']
# Parameters for the nexus file saved by GDA
NEXUS_PATHS = u.Param()
@@ -25,43 +30,90 @@
NEXUS_PATHS.label = 'entry1/entry_identifier'
NEXUS_PATHS.experiment = 'entry1/experiment_identifier'
-# Recipe defaults
-RECIPE = u.Param()
-# Experiment identifier
-RECIPE.experimentID = None
-# Scan number
-RECIPE.scan_number = None
-RECIPE.dark_number = None
-RECIPE.flat_number = None
-RECIPE.energy = None
-RECIPE.lam = None
-# Distance from object to screen
-RECIPE.z = None
-# Name of the detector as specified in the nexus file
-RECIPE.detector_name = None
-# Motor names to determine the sample translation
-RECIPE.motors = ['t1_sx', 't1_sy']
-RECIPE.theta = 'entry1/before_scan/t1_theta/t1_theta'
-# Motor conversion factor to meters
-RECIPE.motors_multiplier = 1e-6
-RECIPE.base_path = './'
-RECIPE.data_file_pattern = '%(base_path)s' + 'raw/%(scan_number)05d.nxs'
-RECIPE.dark_file_pattern = '%(base_path)s' + 'raw/%(dark_number)05d.nxs'
-RECIPE.flat_file_pattern = '%(base_path)s' + 'raw/%(flat_number)05d.nxs'
-RECIPE.mask_file = None
-
-# Generic defaults
-I13DEFAULT = PtyScan.DEFAULT.copy()
-I13DEFAULT.recipe = RECIPE
-I13DEFAULT.auto_center = False
-I13DEFAULT.orientation = (False, False, False)
-
+@register()
class I13ScanFFP(PtyScan):
"""
I13 (Diamond Light Source) data preparation class for FFP.
+
+ Defaults:
+
+ [name]
+ default = 'I13ScanFFP'
+ type = str
+ help =
+
+ [experimentID]
+ default = None
+
+ [scan_number]
+ default = None
+ type = int
+ help = Scan number
+
+ [dark_number]
+ default = None
+ type = int
+ help =
+
+ [flat_number]
+ default = None
+ type = int
+ help =
+
+ [detector_name]
+ default = None
+ type = str
+ help = Name of the detector
+ doc = As specified in the nexus file.
+
+ [motors]
+ default = ['t1_sx', 't1_sy']
+ type = list
+ help = Motor names to determine the sample translation
+
+ [motors_multiplier]
+ default = 1e-6
+ type = float
+ help = Motor conversion factor to meters
+
+ [base_path]
+ default = './'
+ type = str
+ help =
+
+ [data_file_pattern]
+ default = '%(base_path)sraw/%(scan_number)05d.nxs'
+ type = str
+ help =
+
+ [dark_file_pattern]
+ default = '%(base_path)sraw/%(dark_number)05d.nxs'
+ type = str
+ help =
+
+ [flat_file_pattern]
+ default = '%(base_path)sraw/%(flat_number)05d.nxs'
+ type = str
+ help =
+
+ [mask_file]
+ default = None
+ type = str
+ help =
+
+ [theta]
+ default = 'entry1/before_scan/t1_theta/t1_theta'
+ type = str
+ help = Path to the rotation angle within the nexus file
+
+ [auto_center]
+ default = False
+
+ [orientation]
+ default = (False, False, False)
+
"""
- DEFAULT = I13DEFAULT
def __init__(self, pars=None, **kwargs):
"""
@@ -72,14 +124,13 @@ def __init__(self, pars=None, **kwargs):
:param kwargs: key-value pair
- additional parameters.
"""
- recipe_default = RECIPE.copy()
- recipe_default.update(pars.recipe, in_place_depth=1)
- pars.recipe.update(recipe_default)
-
- super(I13ScanFFP, self).__init__(pars, **kwargs)
+ log(2, "The DLS loader will be deprecated in the next release. Please use the Hdf5Loader.")
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+ super(I13ScanFFP, self).__init__(p, **kwargs)
# Try to extract base_path to access data files
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
d = os.getcwd()
base_path = None
while True:
@@ -92,10 +143,10 @@ def __init__(self, pars=None, **kwargs):
if base_path is None:
raise RuntimeError('Could not guess base_path.')
else:
- self.info.recipe.base_path = base_path
+ self.info.base_path = base_path
# Construct file names
- self.data_file = self.info.recipe.data_file_pattern % self.info.recipe
+ self.data_file = self.info.data_file_pattern % self.info
u.log(3, 'Will read data from file %s' % self.data_file)
# Load data information
@@ -103,8 +154,8 @@ def __init__(self, pars=None, **kwargs):
NEXUS_PATHS.instrument]
# Extract detector name if not set or wrong
- if (self.info.recipe.detector_name is None
- or self.info.recipe.detector_name
+ if (self.info.detector_name is None
+ or self.info.detector_name
not in self.instrument.keys()):
detector_name = None
for k in self.instrument.keys():
@@ -116,35 +167,35 @@ def __init__(self, pars=None, **kwargs):
raise RuntimeError(
'Not possible to extract detector name. '
'Please specify in recipe instead.')
- elif (self.info.recipe.detector_name is not None
+ elif (self.info.detector_name is not None
and detector_name
- is not self.info.recipe.detector_name):
+ is not self.info.detector_name):
u.log(2, 'Detector name changed from %s to %s.'
- % (self.info.recipe.detector_name, detector_name))
+ % (self.info.detector_name, detector_name))
else:
- detector_name = self.info.recipe.detector_name
+ detector_name = self.info.detector_name
- self.info.recipe.detector_name = detector_name
+ self.info.detector_name = detector_name
# Attempt to extract experiment ID
- if self.info.recipe.experimentID is None:
+ if self.info.experimentID is None:
try:
experiment_id = io.h5read(
self.data_file, NEXUS_PATHS.experiment)[
NEXUS_PATHS.experiment][0]
except (AttributeError, KeyError):
experiment_id = os.path.split(
- self.info.recipe.base_path[:-1])[1]
+ self.info.base_path[:-1])[1]
u.logger.debug(
'Could not find experiment ID from nexus file %s. '
'Using %s instead.' % (self.data_file, experiment_id))
- self.info.recipe.experimentID = experiment_id
+ self.info.experimentID = experiment_id
# Create the ptyd file name if not specified
if self.info.dfile is None:
home = Paths(IO_par).home
self.info.dfile = ('%s/prepdata/data_%d.ptyd'
- % (home, self.info.recipe.scan_number))
+ % (home, self.info.scan_number))
u.log(3, 'Save file is %s' % self.info.dfile)
u.log(4, u.verbose.report(self.info))
@@ -163,9 +214,9 @@ def load_weight(self):
"""
# FIXME: do something better here. (detector-dependent)
# Load mask as weight
- if self.info.recipe.mask_file is not None:
+ if self.info.mask_file is not None:
return io.h5read(
- self.info.recipe.mask_file, 'mask')['mask'].astype(float)
+ self.info.mask_file, 'mask')['mask'].astype(float)
def load_positions(self):
"""
@@ -181,21 +232,21 @@ def load_positions(self):
break
# Apply motor conversion factor and create transposed position array
- if len(self.info.recipe.motors) == 3:
- self.theta = io.h5read(self.data_file, self.info.recipe.theta)[
- self.info.recipe.theta]
+ if len(self.info.motors) == 3:
+ self.theta = io.h5read(self.data_file, self.info.theta)[
+ self.info.theta]
# Convert from degree to radians
self.theta *= np.pi / 180.
- mmult = u.expect3(self.info.recipe.motors_multiplier)
+ mmult = u.expect3(self.info.motors_multiplier)
pos_list = [mmult[i] * np.array(motor_positions[motor_name])
- for i, motor_name in enumerate(self.info.recipe.motors)]
+ for i, motor_name in enumerate(self.info.motors)]
positions = 1. * np.array([np.cos(self.theta) * pos_list[0] -
np.sin(self.theta) * pos_list[2],
pos_list[1]]).T
else:
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
pos_list = [mmult[i] * np.array(motor_positions[motor_name])
- for i, motor_name in enumerate(self.info.recipe.motors)]
+ for i, motor_name in enumerate(self.info.motors)]
positions = 1. * np.array(pos_list).T
return positions
@@ -243,7 +294,7 @@ def load(self, indices):
pos = {}
weights = {}
raw = {j: self.instrument[
- self.info.recipe.detector_name]['data'][j].astype(np.float32)
+ self.info.detector_name]['data'][j].astype(np.float32)
for j in indices}
u.log(3, 'Data loaded successfully.')
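When three motors are configured, load_positions projects the rotated sample coordinates into the lab frame before building the (N, 2) position array. A minimal numpy sketch of that projection with made-up values:

    import numpy as np

    theta = np.deg2rad(30.0)    # made-up rotation angle
    sx = np.array([1.0, 2.0])   # made-up motor positions, metres
    sy = np.array([0.5, 0.6])
    sz = np.array([0.1, 0.2])

    positions = np.array([np.cos(theta) * sx - np.sin(theta) * sz,
                          sy]).T  # shape (N, 2)
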
diff --git a/ptypy/experiment/I13_nfp.py b/ptypy/experiment/legacy/I13_nfp.py
similarity index 57%
rename from ptypy/experiment/I13_nfp.py
rename to ptypy/experiment/legacy/I13_nfp.py
index 6abf56588..807e8b4d9 100644
--- a/ptypy/experiment/I13_nfp.py
+++ b/ptypy/experiment/legacy/I13_nfp.py
@@ -9,11 +9,16 @@
"""
import numpy as np
import os
-from .. import utils as u
-from .. import io
-from ..core.data import PtyScan
-from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
+
+from ptypy import utils as u
+from ptypy.experiment import register
+from ptypy import io
+from ptypy.core.data import PtyScan
+from ptypy.core.paths import Paths
+from ptypy.utils.verbose import log
+from ptypy.core import Ptycho
+
+IO_par = Ptycho.DEFAULT['io']
# Parameters for the nexus file saved by GDA
NEXUS_PATHS = u.Param()
@@ -25,92 +30,184 @@
NEXUS_PATHS.label = 'entry1/entry_identifier'
NEXUS_PATHS.experiment = 'entry1/experiment_identifier'
-# Recipe defaults
-RECIPE = u.Param()
-# Experiment identifier
-RECIPE.experimentID = None
-# Scan number
-RECIPE.scan_number = None
-RECIPE.dark_number = None
-RECIPE.flat_number = None
-RECIPE.energy = None
-RECIPE.lam = None
-# Distance from object to screen
-RECIPE.z = None
-# Name of the detector as specified in the nexus file
-RECIPE.detector_name = None
-# Motor names to determine the sample translation
-RECIPE.motors = ['t1_sx', 't1_sy']
-# Motor conversion factor to meters
-RECIPE.motors_multiplier = 1e-6
-RECIPE.base_path = './'
-RECIPE.data_file_pattern = '%(base_path)s' + 'raw/%(scan_number)05d.nxs'
-RECIPE.dark_file_pattern = '%(base_path)s' + 'raw/%(dark_number)05d.nxs'
-RECIPE.flat_file_pattern = '%(base_path)s' + 'raw/%(flat_number)05d.nxs'
-RECIPE.mask_file = None
-# Position corrections for NFP beamtime Oct 2014
-RECIPE.correct_positions_Oct14 = False
-# Use flat as Empty Probe (EP) for probe sharing;
-# needs to be set to True in the recipe of the scan that will act as EP
-RECIPE.use_EP = False
-# Maximum number of scan points to be loaded from origin
-RECIPE.max_scan_points = 100000
-# Angle of rotation (as used in NFP beamtime Jul 2015)
-RECIPE.theta = 0
-# Apply hot pixel correction
-RECIPE.remove_hot_pixels = u.Param(
- # Initiate by setting to True;
- # DEFAULT parameters will be used if not specified otherwise
- apply=False,
- # Size of the window on which the median filter will be applied
- # around every data point
- size=3,
- # Tolerance multiplied with the standard deviation of the data array
- # subtracted by the blurred array (difference array)
- # yields the threshold for cutoff.
- tolerance=3,
- # If True, edges of the array are ignored, which speeds up the code
- ignore_edges=False,
-)
-
-# Apply Richardson Lucy deconvolution
-RECIPE.rl_deconvolution = u.Param(
- # Initiate by setting to True;
- # DEFAULT parameters will be used if not specified otherwise
- apply=False,
- # Number of iterations
- numiter=5,
- # Provide MTF from file; no loading procedure present for now,
- # loading through recon script required
- dfile=None,
- # Create fake psf as a sum of gaussians if no MTF provided
- gaussians=u.Param(
- # DEFAULT list of gaussians for Richardson Lucy deconvolution
- g1=u.Param(
- # Standard deviation in x direction
- std_x=1.0,
- # Standard deviation in y direction
- std_y=1.0,
- # Offset / shift in x direction
- off_x=0.,
- # Offset / shift in y direction
- off_y=0.,
- )
- ),
-)
-
-# Generic defaults
-I13DEFAULT = PtyScan.DEFAULT.copy()
-I13DEFAULT.recipe = RECIPE
-I13DEFAULT.auto_center = False
-I13DEFAULT.orientation = (False, False, False)
-
+@register()
class I13ScanNFP(PtyScan):
"""
I13 (Diamond Light Source) data preparation class for NFP.
+
+ Defaults:
+
+ [name]
+ default = 'I13ScanNFP'
+ type = str
+ help =
+
+ [experimentID]
+ default = None
+
+ [scan_number]
+ default = None
+ type = int
+ help = Scan number
+
+ [dark_number]
+ default = None
+ type = int
+ help =
+
+ [flat_number]
+ default = None
+ type = int
+ help =
+
+ [detector_name]
+ default = None
+ type = str
+ help = Name of the detector
+ doc = As specified in the nexus file.
+
+ [motors]
+ default = ['t1_sx', 't1_sy']
+ type = list
+ help = Motor names to determine the sample translation
+
+ [motors_multiplier]
+ default = 1e-6
+ type = float
+ help = Motor conversion factor to meters
+
+ [base_path]
+ default = './'
+ type = str
+ help =
+
+ [data_file_pattern]
+ default = '%(base_path)sraw/%(scan_number)05d.nxs'
+ type = str
+ help =
+
+ [dark_file_pattern]
+ default = '%(base_path)sraw/%(dark_number)05d.nxs'
+ type = str
+ help =
+
+ [flat_file_pattern]
+ default = '%(base_path)sraw/%(flat_number)05d.nxs'
+ type = str
+ help =
+
+ [mask_file]
+ default = None
+ type = str
+ help =
+
+ [correct_positions_Oct14]
+ default = False
+ type = bool
+ help =
+
+ [use_EP]
+ default = False
+ type = bool
+ help = Use flat as Empty Probe (EP) for probe sharing
+ doc = Needs to be set to True for the scan that will act as EP.
+
+ [max_scan_points]
+ default = 100000
+ type = int
+ help = Maximum number of scan points to be loaded from origin
+
+ [theta]
+ default = 0.0
+ type = float
+ help = Angle of rotation (as used in NFP beamtime Jul 2015)
+
+ [remove_hot_pixels]
+ default =
+ type = Param
+ help = Apply hot pixel correction
+
+ [remove_hot_pixels.apply]
+ default = False
+ type = bool
+ help =
+
+ [remove_hot_pixels.size]
+ default = 3
+ type = int
+ help = Size of the window
+ doc = The median filter will be applied around every data point.
+
+ [remove_hot_pixels.tolerance]
+ default = 3
+ type = int
+ help =
+ doc = The cutoff threshold is this tolerance multiplied by the standard deviation of the difference between the data array and its blurred version (the difference array).
+
+ [remove_hot_pixels.ignore_edges]
+ default = False
+ type = bool
+ help = Ignore edges of the array
+ doc = Enabling speeds up the code.
+
+ [auto_center]
+ default = False
+
+ [orientation]
+ default = (False, False, False)
+
+ [rl_deconvolution]
+ default =
+ type = Param
+ help = Apply Richardson Lucy deconvolution
+
+ [rl_deconvolution.apply]
+ default = False
+ type = bool
+ help = Initiate by setting to True
+
+ [rl_deconvolution.numiter]
+ default = 5
+ type = int
+ help = Number of iterations
+
+ [rl_deconvolution.dfile]
+ default = None
+ type = str
+ help = Provide MTF from file; no loading procedure present for now, loading through recon script required
+
+ [rl_deconvolution.gaussians]
+ default =
+ type = Param
+ help = Create fake psf as a sum of gaussians if no MTF provided
+
+ [rl_deconvolution.gaussians.g1]
+ default =
+ type = Param
+ help = list of gaussians for Richardson Lucy deconvolution
+
+ [rl_deconvolution.gaussians.g1.std_x]
+ default = 1.0
+ type = float
+ help = Standard deviation in x direction
+
+ [rl_deconvolution.gaussians.g1.std_y]
+ default = 1.0
+ type = float
+ help = Standard deviation in y direction
+
+ [rl_deconvolution.gaussians.g1.off_x]
+ default = 0.0
+ type = float
+ help = Offset / shift in x direction
+
+ [rl_deconvolution.gaussians.g1.off_y]
+ default = 0.0
+ type = float
+ help = Offset / shift in y direction
+
"""
- DEFAULT = I13DEFAULT
def __init__(self, pars=None, **kwargs):
"""
@@ -121,14 +218,13 @@ def __init__(self, pars=None, **kwargs):
:param kwargs: key-value pair
- additional parameters.
"""
- recipe_default = RECIPE.copy()
- recipe_default.update(pars.recipe, in_place_depth=1)
- pars.recipe.update(recipe_default)
-
+ log(2, "The I13ScanNFP loader will be deprecated in the next release. Please use the Hdf5Loader.")
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
- super(I13ScanNFP, self).__init__(pars, **kwargs)
+ super(I13ScanNFP, self).__init__(p, **kwargs)
# Try to extract base_path to access data files
- if self.info.recipe.base_path is None:
+ if self.info.base_path is None:
d = os.getcwd()
base_path = None
while True:
@@ -141,26 +237,26 @@ def __init__(self, pars=None, **kwargs):
if base_path is None:
raise RuntimeError('Could not guess base_path.')
else:
- self.info.recipe.base_path = base_path
+ self.info.base_path = base_path
# Construct file names
- self.data_file = self.info.recipe.data_file_pattern % self.info.recipe
+ self.data_file = self.info.data_file_pattern % self.info
u.log(3, 'Will read data from file %s' % self.data_file)
- if self.info.recipe.dark_number is None:
+ if self.info.dark_number is None:
self.dark_file = None
u.log(3, 'No data for dark')
else:
- self.dark_file = (self.info.recipe.dark_file_pattern
- % self.info.recipe)
+ self.dark_file = (self.info.dark_file_pattern
+ % self.info)
u.log(3, 'Will read dark from file %s' % self.dark_file)
- if self.info.recipe.flat_number is None:
+ if self.info.flat_number is None:
self.flat_file = None
u.log(3, 'No data for flat')
else:
- self.flat_file = (self.info.recipe.flat_file_pattern
- % self.info.recipe)
+ self.flat_file = (self.info.flat_file_pattern
+ % self.info)
u.log(3, 'Will read flat from file %s' % self.flat_file)
# Load data information
@@ -168,8 +264,8 @@ def __init__(self, pars=None, **kwargs):
NEXUS_PATHS.instrument]
# Extract detector name if not set or wrong
- if (self.info.recipe.detector_name is None
- or self.info.recipe.detector_name
+ if (self.info.detector_name is None
+ or self.info.detector_name
not in self.instrument.keys()):
detector_name = None
for k in self.instrument.keys():
@@ -181,32 +277,32 @@ def __init__(self, pars=None, **kwargs):
raise RuntimeError(
'Not possible to extract detector name. '
'Please specify in recipe instead.')
- elif (self.info.recipe.detector_name is not None
+ elif (self.info.detector_name is not None
and detector_name
- is not self.info.recipe.detector_name):
+ is not self.info.detector_name):
u.log(2, 'Detector name changed from %s to %s.'
- % (self.info.recipe.detector_name, detector_name))
+ % (self.info.detector_name, detector_name))
else:
- detector_name = self.info.recipe.detector_name
+ detector_name = self.info.detector_name
- self.info.recipe.detector_name = detector_name
+ self.info.detector_name = detector_name
# Set up dimensions for cropping
try:
# Switch for attributes which are set to None
# Will be removed once None attributes are removed
- center = pars.center
+ center = p.center
except AttributeError:
center = 'unset'
# Check if dimension tuple is provided
if type(center) == tuple:
- offset_x = pars.center[0]
- offset_y = pars.center[1]
+ offset_x = p.center[0]
+ offset_y = p.center[1]
# If center unset, extract offset from raw data
elif center == 'unset':
raw_shape = self.instrument[
- self.info.recipe.detector_name]['data'].shape
+ self.info.detector_name]['data'].shape
offset_x = raw_shape[-1] // 2
offset_y = raw_shape[-2] // 2
else:
@@ -214,30 +310,30 @@ def __init__(self, pars=None, **kwargs):
'Center provided is not of type tuple or set to "unset". '
'Please correct input parameters.')
- xdim = (offset_x - pars.shape // 2, offset_x + pars.shape // 2)
- ydim = (offset_y - pars.shape // 2, offset_y + pars.shape // 2)
+ xdim = (offset_x - p.shape // 2, offset_x + p.shape // 2)
+ ydim = (offset_y - p.shape // 2, offset_y + p.shape // 2)
- self.info.recipe.array_dim = [xdim, ydim]
+ self.info.array_dim = [xdim, ydim]
# Attempt to extract experiment ID
- if self.info.recipe.experimentID is None:
+ if self.info.experimentID is None:
try:
experiment_id = io.h5read(
self.data_file, NEXUS_PATHS.experiment)[
NEXUS_PATHS.experiment][0]
except (AttributeError, KeyError):
experiment_id = os.path.split(
- self.info.recipe.base_path[:-1])[1]
+ self.info.base_path[:-1])[1]
u.logger.debug(
'Could not find experiment ID from nexus file %s. '
'Using %s instead.' % (self.data_file, experiment_id))
- self.info.recipe.experimentID = experiment_id
+ self.info.experimentID = experiment_id
# Create the ptyd file name if not specified
if self.info.dfile is None:
home = Paths(IO_par).home
self.info.dfile = ('%s/prepdata/data_%d.ptyd'
- % (home, self.info.recipe.scan_number))
+ % (home, self.info.scan_number))
u.log(3, 'Save file is %s' % self.info.dfile)
u.log(4, u.verbose.report(self.info))
@@ -253,9 +349,9 @@ def load_weight(self):
"""
# FIXME: do something better here. (detector-dependent)
# Load mask as weight
- if self.info.recipe.mask_file is not None:
+ if self.info.mask_file is not None:
return io.h5read(
- self.info.recipe.mask_file, 'mask')['mask'].astype(float)
+ self.info.mask_file, 'mask')['mask'].astype(float)
def load_positions(self):
"""
@@ -273,7 +369,7 @@ def load_positions(self):
# If Empty Probe sharing is enabled, assign pseudo center position to
# scan and skip the rest of the function. If no positions are found at
# all, raise error.
- if motor_positions is None and self.info.recipe.use_EP:
+ if motor_positions is None and self.info.use_EP:
positions = 1. * np.array([[0., 0.]])
return positions
elif motor_positions is None:
@@ -281,17 +377,17 @@ def load_positions(self):
% str(NEXUS_PATHS.motors))
# Apply motor conversion factor and create transposed position array
- mmult = u.expect2(self.info.recipe.motors_multiplier)
+ mmult = u.expect2(self.info.motors_multiplier)
pos_list = [mmult[i] * np.array(motor_positions[motor_name])[
- :self.info.recipe.max_scan_points]
- for i, motor_name in enumerate(self.info.recipe.motors)]
+ :self.info.max_scan_points]
+ for i, motor_name in enumerate(self.info.motors)]
positions = 1. * np.array(pos_list).T
# Correct positions for angle of rotation if necessary
- positions[:, 1] *= np.cos(np.pi * self.info.recipe.theta / 180.)
+ positions[:, 1] *= np.cos(np.pi * self.info.theta / 180.)
# Position corrections for NFP beamtime Oct 2014.
- if self.info.recipe.correct_positions_Oct14:
+ if self.info.correct_positions_Oct14:
r = np.array([[0.99987485, 0.01582042], [-0.01582042, 0.99987485]])
p0 = positions.mean(axis=0)
positions = np.dot(r, (positions - p0).T).T + p0
@@ -309,36 +405,36 @@ def load_common(self):
common = u.Param()
# Load dark.
- if self.info.recipe.dark_number is not None:
- key = NEXUS_PATHS.frame_pattern % self.info.recipe
+ if self.info.dark_number is not None:
+ key = NEXUS_PATHS.frame_pattern % self.info
dark_indices = range(len(
io.h5read(self.dark_file, NEXUS_PATHS.frame_pattern
- % self.info.recipe)[key]))
+ % self.info)[key]))
dark = [io.h5read(self.dark_file, NEXUS_PATHS.frame_pattern
- % self.info.recipe, slice=j)[key][
- self.info.recipe.array_dim[1][0]:
- self.info.recipe.array_dim[1][1],
- self.info.recipe.array_dim[0][0]:
- self.info.recipe.array_dim[0][1]].astype(np.float32)
+ % self.info, slice=j)[key][
+ self.info.array_dim[1][0]:
+ self.info.array_dim[1][1],
+ self.info.array_dim[0][0]:
+ self.info.array_dim[0][1]].astype(np.float32)
for j in dark_indices]
common.dark = np.array(dark).mean(0)
u.log(3, 'Dark loaded successfully.')
# Load flat.
- if self.info.recipe.flat_number is not None:
- key = NEXUS_PATHS.frame_pattern % self.info.recipe
+ if self.info.flat_number is not None:
+ key = NEXUS_PATHS.frame_pattern % self.info
flat_indices = range(len(
io.h5read(self.flat_file, NEXUS_PATHS.frame_pattern
- % self.info.recipe)[key]))
+ % self.info)[key]))
flat = [io.h5read(self.flat_file, NEXUS_PATHS.frame_pattern
- % self.info.recipe, slice=j)[key][
- self.info.recipe.array_dim[1][0]:
- self.info.recipe.array_dim[1][1],
- self.info.recipe.array_dim[0][0]:
- self.info.recipe.array_dim[0][1]].astype(np.float32)
+ % self.info, slice=j)[key][
+ self.info.array_dim[1][0]:
+ self.info.array_dim[1][1],
+ self.info.array_dim[0][0]:
+ self.info.array_dim[0][1]].astype(np.float32)
for j in flat_indices]
common.flat = np.array(flat).mean(0)
@@ -377,11 +473,11 @@ def load(self, indices):
"""
pos = {}
weights = {}
- raw = {j: self.instrument[self.info.recipe.detector_name]['data'][j][
- self.info.recipe.array_dim[1][0]:
- self.info.recipe.array_dim[1][1],
- self.info.recipe.array_dim[0][0]:
- self.info.recipe.array_dim[0][1]].astype(np.float32)
+ raw = {j: self.instrument[self.info.detector_name]['data'][j][
+ self.info.array_dim[1][0]:
+ self.info.array_dim[1][1],
+ self.info.array_dim[0][0]:
+ self.info.array_dim[0][1]].astype(np.float32)
for j in indices}
u.log(3, 'Data loaded successfully.')
@@ -413,43 +509,43 @@ def correct(self, raw, weights, common):
- dict: contains modified weights.
"""
# Apply hot pixel removal
- if self.info.recipe.remove_hot_pixels.apply:
+ if self.info.remove_hot_pixels.apply:
u.log(3, 'Applying hot pixel removal...')
for j in raw:
raw[j] = u.remove_hot_pixels(
raw[j],
- self.info.recipe.remove_hot_pixels.size,
- self.info.recipe.remove_hot_pixels.tolerance,
- self.info.recipe.remove_hot_pixels.ignore_edges)[0]
+ self.info.remove_hot_pixels.size,
+ self.info.remove_hot_pixels.tolerance,
+ self.info.remove_hot_pixels.ignore_edges)[0]
- if self.info.recipe.flat_number is not None:
+ if self.info.flat_number is not None:
common.dark = u.remove_hot_pixels(
common.dark,
- self.info.recipe.remove_hot_pixels.size,
- self.info.recipe.remove_hot_pixels.tolerance,
- self.info.recipe.remove_hot_pixels.ignore_edges)[0]
+ self.info.remove_hot_pixels.size,
+ self.info.remove_hot_pixels.tolerance,
+ self.info.remove_hot_pixels.ignore_edges)[0]
- if self.info.recipe.flat_number is not None:
+ if self.info.flat_number is not None:
common.flat = u.remove_hot_pixels(
common.flat,
- self.info.recipe.remove_hot_pixels.size,
- self.info.recipe.remove_hot_pixels.tolerance,
- self.info.recipe.remove_hot_pixels.ignore_edges)[0]
+ self.info.remove_hot_pixels.size,
+ self.info.remove_hot_pixels.tolerance,
+ self.info.remove_hot_pixels.ignore_edges)[0]
u.log(3, 'Hot pixel removal completed.')
# Apply deconvolution
- if self.info.recipe.rl_deconvolution.apply:
+ if self.info.rl_deconvolution.apply:
u.log(3, 'Applying deconvolution...')
# Use mtf from a file if provided in recon script
- if self.info.recipe.rl_deconvolution.dfile is not None:
+ if self.info.rl_deconvolution.dfile is not None:
mtf = self.info.rl_deconvolution.dfile
# Create fake psf as a sum of gaussians from parameters
else:
gau_sum = 0
for k in (
- self.info.recipe.rl_deconvolution.gaussians.iteritems()):
+ self.info.rl_deconvolution.gaussians.iteritems()):
gau_sum += u.gaussian2D(raw[0].shape[0],
k[1].std_x,
k[1].std_y,
@@ -463,18 +559,18 @@ def correct(self, raw, weights, common):
raw[j] = u.rl_deconvolution(
raw[j],
mtf,
- self.info.recipe.rl_deconvolution.numiter)
+ self.info.rl_deconvolution.numiter)
u.log(3, 'Deconvolution completed.')
# Apply flat and dark, only dark, or no correction
- if (self.info.recipe.flat_number is not None
- and self.info.recipe.dark_number is not None):
+ if (self.info.flat_number is not None
+ and self.info.dark_number is not None):
for j in raw:
raw[j] = (raw[j] - common.dark) / (common.flat - common.dark)
raw[j][raw[j] < 0] = 0
data = raw
- elif self.info.recipe.dark_number is not None:
+ elif self.info.dark_number is not None:
for j in raw:
raw[j] = raw[j] - common.dark
raw[j][raw[j] < 0] = 0
diff --git a/ptypy/test/engine_tests/engine_tests/__init__.py b/ptypy/experiment/legacy/__init__.py
similarity index 100%
rename from ptypy/test/engine_tests/engine_tests/__init__.py
rename to ptypy/experiment/legacy/__init__.py
diff --git a/ptypy/experiment/nanomax.py b/ptypy/experiment/nanomax.py
index a37a5acee..9afb9c48a 100644
--- a/ptypy/experiment/nanomax.py
+++ b/ptypy/experiment/nanomax.py
@@ -3,42 +3,82 @@
and user experiments rely on different classes depending on
measurement campaign. """
-
-import ptypy
-from ptypy.core.data import PtyScan
-import ptypy.utils as u
-
-import h5py
import numpy as np
-import time
+import h5py
+
+from ..core.data import PtyScan
+from .. import utils as u
+from . import register
logger = u.verbose.logger
-# new recipe for this one
-RECIPE = u.Param()
-RECIPE.dataPath = None
-RECIPE.datafile = None
-RECIPE.maskfile = None
-RECIPE.pilatusPath = None
-RECIPE.pilatusPattern = None
-RECIPE.scannr = None
+
+class NanomaxBase(PtyScan):
+ """
+ Defaults:
+
+ [dataPath]
+ default = None
+ type = str
+ help = Path to folder containing the Sardana master file
+ doc =
+
+ [datafile]
+ default = None
+ type = str
+ help = Sardana master file
+ doc =
+
+ [maskfile]
+ default = None
+ type = str
+ help = Arbitrary mask file
+ doc = Hdf5 file containing an array called 'mask' at the root level.
+
+ [pilatusPath]
+ default = None
+ type = str
+ help = Path to folder containing detector image files
+ doc =
+
+ [pilatusPattern]
+ default = None
+ type = str
+ help = Format string for detector image files
+ doc = A format string with two integer fields: the first holds the scan number, the second the image number.
+
+ [scannr]
+ default = None
+ type = int
+ help = Scan number
+ doc =
+ """
+ pass
-class NanomaxStepscanNov2016(PtyScan):
+@register()
+class NanomaxStepscanNov2016(NanomaxBase):
"""
Loads Nanomax step scan data in the format of week Nov/Dec 2016
+
+ Defaults:
+
+ [name]
+ default = NanomaxStepscanNov2016
+ type = str
+ help =
+ doc =
+
"""
def __init__(self, pars=None, **kwargs):
-
- p = PtyScan.DEFAULT.copy(depth=10)
- p.recipe = RECIPE.copy()
- p.update(pars, in_place_depth=10)
- super(NanomaxStepscanNov2016, self).__init__(p)
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars)
+ super(NanomaxStepscanNov2016, self).__init__(self.p)
def load_positions(self):
- fileName = self.info.recipe.dataPath + self.info.recipe.datafile
- entry = 'entry%d' % self.info.recipe.scannr
+ fileName = self.p.dataPath + self.p.datafile
+ entry = 'entry%d' % self.p.scannr
with h5py.File(fileName, 'r') as hf:
x = np.array(hf.get(entry + '/measurement/samx'))
@@ -50,9 +90,9 @@ def load_positions(self):
def load(self, indices):
raw, weights, positions = {}, {}, {}
- scannr = self.info.recipe.scannr
- path = self.info.recipe.pilatusPath
- filepattern = self.info.recipe.pilatusPattern
+ scannr = self.p.scannr
+ path = self.p.pilatusPath
+ filepattern = self.p.pilatusPattern
if not (path[-1] == '/'):
path += '/'
@@ -74,19 +114,19 @@ def load_weight(self):
frame.
"""
- scannr = self.info.recipe.scannr
- path = self.info.recipe.pilatusPath
- pattern = self.info.recipe.pilatusPattern
+ scannr = self.p.scannr
+ path = self.p.pilatusPath
+ pattern = self.p.pilatusPattern
if not (path[-1] == '/'):
path += '/'
- if self.info.recipe.maskfile:
- with h5py.File(self.info.recipe.maskfile, 'r') as hf:
+ if self.p.maskfile:
+ with h5py.File(self.p.maskfile, 'r') as hf:
mask = np.array(hf.get('mask'))
logger.info("loaded mask, %u x %u, sum %u" %
(mask.shape + (np.sum(mask),)))
else:
- filename = self.info.recipe.dataPath + self.info.recipe.datafile
+ filename = self.p.dataPath + self.p.datafile
with h5py.File(path + pattern % (scannr, 0), 'r') as hf:
data = hf.get('entry_0000/measurement/Pilatus/data')
shape = np.asarray(data[0]).shape
@@ -95,46 +135,65 @@ def load_weight(self):
(mask.shape + (np.sum(mask),)))
return mask
-# new recipe for this one too
-RECIPE = u.Param()
-RECIPE.dataPath = None
-RECIPE.datafile = None
-RECIPE.maskfile = None
-RECIPE.pilatusPath = None
-RECIPE.pilatusPattern = None
-RECIPE.hdfPath = 'entry_0000/measurement/Pilatus/data'
-RECIPE.scannr = None
-RECIPE.xMotorFlipped = None
-RECIPE.yMotorFlipped = None
-RECIPE.xMotorAngle = 0.0
-
-
-class NanomaxStepscanMay2017(PtyScan):
+
+@register()
+class NanomaxStepscanMay2017(NanomaxBase):
"""
Loads Nanomax step scan data in the format of May 2017.
+
+ Defaults:
+
+ [name]
+ default = NanomaxStepscanMay2017
+ type = str
+ help =
+ doc =
+
+ [hdfPath]
+ default = 'entry_0000/measurement/Pilatus/data'
+ type = str
+ help = Path to image array within detector hdf5 file
+ doc =
+
+ [xMotorFlipped]
+ default = False
+ type = bool
+ help = Flip detector x positions
+ doc =
+
+ [yMotorFlipped]
+ default = False
+ type = bool
+ help = Flip detector y positions
+ doc =
+
+ [xMotorAngle]
+ default = 0.0
+ type = float
+ help = Angle of the motor x axis relative to the lab x axis
+ doc = Use this if the stage is mounted at an angle around the y axis; the sign doesn't matter, since only a cosine factor is applied.
+
"""
def __init__(self, pars=None, **kwargs):
-
- p = PtyScan.DEFAULT.copy(depth=10)
- p.recipe = RECIPE.copy()
- p.update(pars, in_place_depth=10)
- super(NanomaxStepscanMay2017, self).__init__(p)
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars)
+ super(NanomaxStepscanMay2017, self).__init__(self.p)
def load_positions(self):
- fileName = self.info.recipe.dataPath + self.info.recipe.datafile
- entry = 'entry%d' % self.info.recipe.scannr
+ fileName = self.p.dataPath + self.p.datafile
+ entry = 'entry%d' % self.p.scannr
xFlipper, yFlipper = 1, 1
- if self.info.recipe.xMotorFlipped:
+ if self.p.xMotorFlipped:
xFlipper = -1
logger.warning("note: x motor is specified as flipped")
- if self.info.recipe.yMotorFlipped:
+ if self.p.yMotorFlipped:
yFlipper = -1
logger.warning("note: y motor is specified as flipped")
# if the x axis is tilted, take that into account.
- xCosFactor = np.cos(self.info.recipe.xMotorAngle / 180.0 * np.pi)
+ xCosFactor = np.cos(self.p.xMotorAngle / 180.0 * np.pi)
logger.info(
"x motor angle results in multiplication by %.2f" % xCosFactor)
@@ -149,16 +208,16 @@ def load_positions(self):
def load(self, indices):
raw, weights, positions = {}, {}, {}
- scannr = self.info.recipe.scannr
- path = self.info.recipe.pilatusPath
- filepattern = self.info.recipe.pilatusPattern
+ scannr = self.p.scannr
+ path = self.p.pilatusPath
+ filepattern = self.p.pilatusPattern
if not (path[-1] == '/'):
path += '/'
data = []
for im in range(self.info.positions_scan.shape[0]):
with h5py.File(path + filepattern % (scannr, im), 'r') as hf:
- dataset = hf.get(self.info.recipe.hdfPath)
+ dataset = hf.get(self.p.hdfPath)
data.append(np.array(dataset)[0])
# pick out the requested indices
@@ -173,23 +232,23 @@ def load_weight(self):
frame.
"""
- scannr = self.info.recipe.scannr
- path = self.info.recipe.pilatusPath
- pattern = self.info.recipe.pilatusPattern
+ scannr = self.p.scannr
+ path = self.p.pilatusPath
+ pattern = self.p.pilatusPattern
if not (path[-1] == '/'):
path += '/'
- filename = self.info.recipe.dataPath + self.info.recipe.datafile
+ filename = self.p.dataPath + self.p.datafile
with h5py.File(path + pattern % (scannr, 0), 'r') as hf:
- data = hf.get(self.info.recipe.hdfPath)
+ data = hf.get(self.p.hdfPath)
shape = np.asarray(data[0]).shape
mask = np.ones(shape)
mask[np.where(data[0] == -2)] = 0
logger.info("took account of the pilatus mask, %u x %u, sum %u" %
(mask.shape + (np.sum(mask),)))
- if self.info.recipe.maskfile:
- with h5py.File(self.info.recipe.maskfile, 'r') as hf:
+ if self.p.maskfile:
+ with h5py.File(self.p.maskfile, 'r') as hf:
mask2 = np.array(hf.get('mask'))
logger.info("loaded additional mask, %u x %u, sum %u" %
(mask2.shape + (np.sum(mask2),)))
@@ -199,36 +258,39 @@ def load_weight(self):
return mask
-# new recipe for this one too
-RECIPE = u.Param()
-RECIPE.dataPath = None
-RECIPE.datafile = None
-RECIPE.maskfile = None
-RECIPE.detFilePath = None
-RECIPE.detFilePattern = None
-RECIPE.detNormalizationFilePattern = None
-RECIPE.detNormalizationIndices = None
-RECIPE.hdfPath = 'entry_0000/measurement/Pilatus/data'
-RECIPE.scannr = None
-RECIPE.xMotorFlipped = None
-RECIPE.yMotorFlipped = None
-RECIPE.xMotorAngle = 0.0
-
-
-class NanomaxFlyscanJune2017(PtyScan):
+
+@register()
+class NanomaxFlyscanJune2017(NanomaxStepscanMay2017):
"""
Loads Nanomax fly scan data in the format of June 2017.
+
+ Defaults:
+
+ [name]
+ default = NanomaxFlyscanJune2017
+ type = str
+ help =
+
+ [detNormalizationFilePattern]
+ default = None
+ type = str
+ help = Format string for detector file containing data over which to normalize
+
+ [detNormalizationIndices]
+ default = None
+ type = str
+ help = Indices over which to normalize
+
"""
def __init__(self, pars=None, **kwargs):
- p = PtyScan.DEFAULT.copy(depth=10)
- p.recipe = RECIPE.copy()
- p.update(pars, in_place_depth=10)
- super(NanomaxFlyscanJune2017, self).__init__(p)
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars)
+ super(NanomaxFlyscanJune2017, self).__init__(self.p)
def load_positions(self):
- fileName = self.info.recipe.dataPath + self.info.recipe.datafile
- entry = 'entry%d' % self.info.recipe.scannr
+ fileName = self.p.dataPath + self.p.datafile
+ entry = 'entry%d' % self.p.scannr
x, y = None, None
with h5py.File(fileName, 'r') as hf:
@@ -250,15 +312,15 @@ def load_positions(self):
raise Exception('Something''s wrong with the positions')
y = np.repeat(yall, Nx)
- if self.info.recipe.xMotorFlipped:
+ if self.p.xMotorFlipped:
x *= -1
logger.warning("note: x motor is specified as flipped")
- if self.info.recipe.yMotorFlipped:
+ if self.p.yMotorFlipped:
y *= -1
logger.warning("note: y motor is specified as flipped")
# if the x axis is tilted, take that into account.
- xCosFactor = np.cos(self.info.recipe.xMotorAngle / 180.0 * np.pi)
+ xCosFactor = np.cos(self.p.xMotorAngle / 180.0 * np.pi)
x *= xCosFactor
logger.info(
"x motor angle results in multiplication by %.2f" % xCosFactor)
@@ -269,11 +331,11 @@ def load_positions(self):
def load(self, indices):
raw, weights, positions = {}, {}, {}
- scannr = self.info.recipe.scannr
- path = self.info.recipe.detFilePath
- pattern = self.info.recipe.detFilePattern
- normfile = self.info.recipe.detNormalizationFilePattern
- normind = self.info.recipe.detNormalizationIndices
+ scannr = self.p.scannr
+ path = self.p.detFilePath
+ pattern = self.p.detFilePattern
+ normfile = self.p.detNormalizationFilePattern
+ normind = self.p.detNormalizationIndices
# read the entire dataset
done = False
@@ -283,7 +345,7 @@ def load(self, indices):
try:
with h5py.File(path + pattern % (scannr, line), 'r') as hf:
logger.info('loading data: ' + pattern % (scannr, line))
- dataset = hf.get(self.info.recipe.hdfPath)
+ dataset = hf.get(self.p.hdfPath)
linedata = np.array(dataset)
if normfile:
dtype = linedata.dtype
@@ -292,7 +354,7 @@ def load(self, indices):
logger.info('loading normalization data: ' +
normfile % (scannr, line))
dataset = hf.get(
- self.info.recipe.detNormalizationHdfPath)
+ self.p.detNormalizationHdfPath)
normdata = np.array(dataset)
if not normind:
shape = linedata[0].shape
@@ -326,23 +388,23 @@ def load_weight(self):
frame.
"""
- scannr = self.info.recipe.scannr
- path = self.info.recipe.detFilePath
- pattern = self.info.recipe.detFilePattern
+ scannr = self.p.scannr
+ path = self.p.detFilePath
+ pattern = self.p.detFilePattern
if not (path[-1] == '/'):
path += '/'
- filename = self.info.recipe.dataPath + self.info.recipe.datafile
+ filename = self.p.dataPath + self.p.datafile
with h5py.File(path + pattern % (scannr, 0), 'r') as hf:
- data = hf.get(self.info.recipe.hdfPath)
+ data = hf.get(self.p.hdfPath)
shape = np.asarray(data[0]).shape
mask = np.ones(shape)
mask[np.where(data[0] == -2)] = 0
logger.info("took account of the pilatus mask, %u x %u, sum %u" %
(mask.shape + (np.sum(mask),)))
- if self.info.recipe.maskfile:
- with h5py.File(self.info.recipe.maskfile, 'r') as hf:
+ if self.p.maskfile:
+ with h5py.File(self.p.maskfile, 'r') as hf:
mask2 = np.array(hf.get('mask'))
logger.info("loaded additional mask, %u x %u, sum %u" %
(mask2.shape + (np.sum(mask2),)))
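The nanomax loaders above illustrate the core migration in this patch: the module-level RECIPE Param objects are gone, each class declares its parameters in a Defaults: block in its docstring, and access changes from self.info.recipe.* to the flat self.p / self.info namespace. A sketch of what a preparation script might look like under the new layout (the paths and scan number here are invented):

    from ptypy import utils as u

    p = u.Param()
    p.name = 'NanomaxStepscanMay2017'
    p.dataPath = '/data/visit/'   # hypothetical path
    p.datafile = 'master.h5'      # hypothetical file
    p.scannr = 42
    p.xMotorFlipped = True

    # These keys used to live under p.recipe; now they sit at the top level
    # and are validated against the Defaults: block of the chosen class.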
diff --git a/ptypy/experiment/nanomax3d.py b/ptypy/experiment/nanomax3d.py
new file mode 100644
index 000000000..0223fb6b3
--- /dev/null
+++ b/ptypy/experiment/nanomax3d.py
@@ -0,0 +1,179 @@
+"""
+This module provides loaders for 3D Bragg ptychographic data from Nanomax.
+"""
+
+import ptypy
+from ptypy.core.data import PtyScan
+import ptypy.utils as u
+from ptypy import defaults_tree
+
+import h5py
+import numpy as np
+import time
+
+logger = u.verbose.logger
+
+@defaults_tree.parse_doc('scandata.NanomaxBraggJune2017')
+class NanomaxBraggJune2017(PtyScan):
+ """
+ Reads an early Bragg 3d ptycho format from Nanomax, in which
+ multiple fly scans are run with rocking angle steps in between.
+
+ Defaults:
+
+ [name]
+ default = NanomaxBraggJune2017
+ type = str
+ help = PtyScan subclass identifier
+
+ [scans]
+ default = []
+ type = list
+ help = List of scan numbers
+
+ [theta_bragg]
+ default = 0.0
+ type = float
+ help = Bragg angle
+
+ [datapath]
+ default = None
+ type = str
+ help = Path to folder containing the Sardana master file
+ doc =
+
+ [datafile]
+ default = None
+ type = str
+ help = Sardana master file
+ doc =
+
+ [maskfile]
+ default = None
+ type = str
+ help = Arbitrary mask file
+ doc = Hdf5 file containing an array called 'mask' at the root level.
+
+ [detfilepattern]
+ default = None
+ type = str
+ help = Format string for detector image files
+ doc = A format string with two integer fields: the first holds the scan number, the second the image number.
+
+ """
+
+ def __init__(self, pars=None, **kwargs):
+ self.p = self.DEFAULT.copy(99)
+ self.p.update(pars)
+ self.p.update(kwargs)
+ super(self.__class__, self).__init__(self.p)
+
+ def load_common(self):
+ """
+ We have to communicate the number of rocking positions that the
+ model should expect; otherwise it never knows when there is data
+ for a complete POD. We also have to specify a single number for
+ the rocking step size.
+ """
+ print '*** load_common'
+ angles = []
+ for scannr in self.p.scans:
+ with h5py.File(self.p.datapath + self.p.datafile) as fp:
+ angles.append(float(fp.get('entry%d'%scannr + '/measurement/gonphi').value))
+ print angles
+ step = np.mean(np.diff(sorted(angles)))
+ print step
+ return {
+ 'rocking_step': step,
+ 'n_rocking_positions': len(angles),
+ 'theta_bragg': self.p.theta_bragg,
+ }
+
+ def load_positions(self):
+ """
+ For the 3d Bragg model, load_positions returns N-by-4 positions,
+ (angle, x, z, y). The angle can be relative or absolute (the
+ model doesn't care), but it does have to be uniformly spaced for
+ the analysis to make sense.
+
+ Let's load the positions (and images) in the order they were
+ acquired: x fastest, then y, then scan number in the order
+ provided.
+ """
+ print '*** load_positions'
+
+ # first, calculate mean x and y positions for all scans, they
+ # have to match anyway so may as well average them.
+ x, y = [], []
+ for scan in self.p.scans:
+ with h5py.File(self.p.datapath + self.p.datafile) as fp:
+ if scan == self.p.scans[0]:
+ # first pass: find out how many zeros to remove from the samx buffer
+ entry = 'entry%d' % scan
+ tmp = np.array(fp[entry + '/measurement/AdLinkAI_buff'])
+ for i in range(tmp.shape[1]):
+ if np.allclose(tmp[:, i:], 0.0):
+ cutoff = i
+ print 'using %i samx values' % cutoff
+ break
+ x.append(np.array(fp[entry + '/measurement/AdLinkAI_buff'][:, :cutoff]))
+ y.append(np.array(fp[entry + '/measurement/samy']))
+ x_mean = -np.mean(x, axis=0) * 1e-6
+ y_mean = np.mean(y, axis=0) * 1e-6
+ Nx = x_mean.shape[1]
+ Ny = x_mean.shape[0]
+ Nxy = Nx * Ny
+ assert Ny == y_mean.shape[0]
+ print 'Scan positions are Nx=%d, Ny=%d, Nxy=%d' % (Nx, Ny, Nxy)
+
+ # save these numbers for the diff image loader
+ self.Nx = Nx
+ self.Ny = Ny
+ self.Nxy = Nxy
+
+ # then, go through the scans and fill in a N-by-4 array of positions per diff pattern
+ x = np.empty(len(self.p.scans) * Nx * Ny, dtype=float)
+ y = np.copy(x)
+ theta = np.copy(x)
+ z = np.zeros_like(x)
+ for i in range(len(self.p.scans)):
+ with h5py.File(self.p.datapath + self.p.datafile) as fp:
+ entry = 'entry%d' % self.p.scans[i]
+ th = fp[entry + '/measurement/gonphi'].value[0]
+ x[i*Nxy:(i+1)*Nxy] = x_mean.flatten()
+ y[i*Nxy:(i+1)*Nxy] = np.repeat(y_mean, Nx)
+ theta[i*Nxy:(i+1)*Nxy] = th
+
+ # adapt to our geometry
+ tmp = z.copy()
+ z = x # our x motor goes toward positive z on the sample
+ y = y # our y motor goes toward more positive y on the sample
+ x = tmp # this is the unimportant direction
+
+ return np.vstack([theta, x, z, y]).T
+
+ def load(self, indices):
+ """
+ This function returns diffraction images indexed from the top left
+ as viewed along the beam, i.e. they have (-q1, q2) indexing. PtyScan
+ can always flip/rotate images.
+ """
+ print '*** load'
+ raw, positions, weights = {}, {}, {}
+
+ for ind in indices:
+ scan = self.p.scans[ind // self.Nxy] # which scan to look in
+ file = (ind % self.Nxy) // self.Nx # which line of this scan
+ frame = (ind % self.Nxy) % self.Nx # which frame of this line
+ with h5py.File(self.p.datapath + self.p.detfilepattern % (scan, file)) as fp:
+ data = np.array(fp['entry_0000/measurement/Merlin/data'][frame])
+ raw[ind] = data
+
+ return raw, positions, weights
+
+ def load_weight(self):
+ print '*** load_weight'
+ with h5py.File(self.p.maskfile) as fp:
+ mask = np.array(fp['mask'])
+ return mask
+
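The index arithmetic in NanomaxBraggJune2017.load() decomposes a flat frame index into (scan, line, frame) using the Nx and Nxy counters saved by load_positions(). A small worked example, with invented sizes:

    # Assume Nx = 10 frames per line and Nxy = 50 frames per scan (Ny = 5).
    Nx, Nxy = 10, 50
    ind = 137
    scan_idx = ind // Nxy       # 2 -> third scan in self.p.scans
    line = (ind % Nxy) // Nx    # 3 -> fourth line of that scan
    frame = (ind % Nxy) % Nx    # 7 -> eighth frame of that line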
diff --git a/ptypy/experiment/optiklabor.py b/ptypy/experiment/optiklabor.py
index 14f56156a..2e77a1fe6 100644
--- a/ptypy/experiment/optiklabor.py
+++ b/ptypy/experiment/optiklabor.py
@@ -4,53 +4,111 @@
 @author: Bjoern Enders
"""
-from scipy import ndimage as ndi
import numpy as np
-import os, time
+import time
import glob
-from .. import io
-import spec
-import sys
+from . import spec
from .. import utils as u
-#from pyE17 import io as io17
from ..core.data import PtyScan
+from . import register
logger = u.verbose.logger
-DEFAULT = u.Param()
-DEFAULT.base_path = '/data/CDI/opticslab_sxdm_2013/'
-DEFAULT.scan_number = 74 #35 # scan number
-DEFAULT.dark_number = 72
-#DEFAULT.scan_label = 'S%05d' % p.scan_number
-DEFAULT.exp_string='exp_time'
-DEFAULT.hdr_thresholds = [500,50000]
-DEFAULT.lam = 650e-9
-
-DEFAULT.energy = 1.2398e-9 /DEFAULT.lam
-DEFAULT.z = 0.158 # Distance from object to screen
-DEFAULT.psize_det = 24e-6 # Camera pixel size
-DEFAULT.center = 'auto'
-DEFAULT.orientation = (True,True,False)
-# IO
-DEFAULT.base_path = '/data/CDI/opticslab_sxdm_2013/'
-#DEFAULT.base_path = './'
-DEFAULT.scan_dir = 'ccdfli/S00000-00999/'
-#DEFAULT.scan_path = DEFAULT.base_path + 'raw/'
-#DEFAULT.log_file_pattern = '%(base_path)s' + '/spec/dat-files/spec_started_2014_07_28_2158.dat' # log file
-DEFAULT.log_file_pattern = '%(base_path)s' + 'spec/dat-files/spec_started_2013_11_21_1659.dat' # log file
-DEFAULT.data_dir_pattern = '%(base_path)s'+'%(scan_dir)s'+ 'S%(scan_number)05d/'
-DEFAULT.dark_dir_pattern = '%(base_path)s'+'%(scan_dir)s'+ 'S%(dark_number)05d/'
-
pp = u.Param()
pp.filename = './foo.ptyd'
pp.roi =None
pp.num_frames = 50
pp.save = 'extlink'
+
+@register()
class FliSpecScanMultexp(PtyScan):
+ """
+ Defaults:
+
+ [name]
+ default = FliSpecScanMultexp
+ type = str
+ help =
+
+ [base_path]
+ default = '/data/CDI/opticslab_sxdm_2013/'
+ type = str
+ help =
+
+ [scan_number]
+ default = 74
+ type = int
+ help =
+
+ [dark_number]
+ default = 72
+ type = int
+ help =
+
+ [exp_string]
+ default = 'exp_time'
+ type = str
+ help =
+
+ [hdr_thresholds]
+ default = [500,50000]
+ type = list
+ help =
+
+ [lam]
+ default = 650e-9
+ type = float
+ help =
+
+ [energy]
+ default = None
+
+ [z]
+ default = 0.158
+ type = float
+ help =
+
+ [psize_det]
+ default = 24e-6
+ type = float
+ help =
+
+ [center]
+ default = 'auto'
+
+ [orientation]
+ default = (True,True,False)
+
+ [scan_dir]
+ default = 'ccdfli/S00000-00999/'
+ type = str
+ help =
+
+ [log_file_pattern]
+ default = '%(base_path)sspec/dat-files/spec_started_2013_11_21_1659.dat'
+ type = str
+ help =
+
+ [data_dir_pattern]
+ default = '%(base_path)s%(scan_dir)sS%(scan_number)05d/'
+ type = str
+ help =
+
+ [dark_dir_pattern]
+ default = '%(base_path)s%(scan_dir)sS%(dark_number)05d/'
+ type = str
+ help =
+
+ """
def __init__(self,pars=None,**kwargs):
- p= DEFAULT.copy()
+ p = self.DEFAULT.copy()
if pars is not None:
p.update(pars)
#self.p = pars
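The *_pattern defaults above are plain Python %-style mapping patterns, expanded against the parameter set. A quick sketch of the expansion, using the default values from the Defaults: block:

    p = {'base_path': '/data/CDI/opticslab_sxdm_2013/',
         'scan_dir': 'ccdfli/S00000-00999/',
         'scan_number': 74}
    print '%(base_path)s%(scan_dir)sS%(scan_number)05d/' % p
    # -> /data/CDI/opticslab_sxdm_2013/ccdfli/S00000-00999/S00074/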
diff --git a/ptypy/experiment/savu.py b/ptypy/experiment/savu.py
index e8bce1f6e..edcd5b27b 100644
--- a/ptypy/experiment/savu.py
+++ b/ptypy/experiment/savu.py
@@ -8,50 +8,60 @@
:license: GPLv2, see LICENSE for details.
"""
-import numpy as np
-import os
from .. import utils as u
-from .. import io
-from ..utils import parallel
from ..core.data import PtyScan
from ..utils.verbose import log
-from ..core.paths import Paths
-from ..core import DEFAULT_io as IO_par
-import h5py as h5
+from . import register
logger = u.verbose.logger
-# Recipe defaults
-SAVU = PtyScan.DEFAULT.copy()
-SAVU.mask = None
-SAVU.data = None
-SAVU.positions = None
+@register()
+class Savu(PtyScan):
+ """
+ Defaults:
+
+ [name]
+ default = 'Savu'
+ type = str
+ help =
-class Savu(PtyScan):
- DEFAULT = SAVU
+ [mask]
+ default = None
+ type = array
+ help =
+
+ [data]
+ default = None
+ type = array
+ help =
+
+ [positions]
+ default = None
+ type = array
+ help =
+
+ """
def __init__(self, pars=None, **kwargs):
"""
savu data preparation class.
"""
# Initialise parent class
- recipe_default = SAVU.copy()
- recipe_default.update(pars.recipe, in_place_depth=99)
- pars.recipe.update(recipe_default)
- super(Savu, self).__init__(pars, **kwargs)
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+ super(Savu, self).__init__(p, **kwargs)
log(4, u.verbose.report(self.info))
def load_weight(self):
if self.info.mask is not None:
- return self.info.recipe.mask.astype(float)
+ return self.info.mask.astype(float)
else:
log(2,'The mask was a None')
def load_positions(self):
- if self.info.recipe.positions is not None:
- return self.info.recipe.positions
+ if self.info.positions is not None:
+ return self.info.positions
else:
log(2,'The positions were None')
@@ -65,9 +75,9 @@ def load(self, indices):
raw = {}
pos = {}
weights = {}
- if self.info.recipe.data is not None:
+ if self.info.data is not None:
for j in indices:
- raw[j] = self.info.recipe.data[j]
+ raw[j] = self.info.data[j]
else:
log(2,'The data had None')
return raw, pos, weights
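With the recipe indirection removed, Savu now takes its mask, data and positions arrays directly from the top-level parameter set. A minimal sketch, with illustrative array shapes:

    import numpy as np
    from ptypy import utils as u

    p = u.Param()
    p.name = 'Savu'
    p.data = np.zeros((10, 128, 128))  # diffraction stack, shapes invented
    p.positions = np.zeros((10, 2))    # scan positions
    p.mask = np.ones((128, 128))
    # s = Savu(p)  # frames, positions and mask are then served from memory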
diff --git a/ptypy/experiment/spec.py b/ptypy/experiment/spec.py
index fee5c3933..615975cb7 100644
--- a/ptypy/experiment/spec.py
+++ b/ptypy/experiment/spec.py
@@ -14,6 +14,7 @@
global lastSpecInfo
lastSpecInfo = None
+
def verbose(n,s):
"""
This function should be replaced by the real verbose class after import.
diff --git a/ptypy/io/__init__.py b/ptypy/io/__init__.py
index 003b26186..f2cee42d7 100644
--- a/ptypy/io/__init__.py
+++ b/ptypy/io/__init__.py
@@ -7,12 +7,12 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-#from plotting import *
-from h5rw import *
-from json_rw import *
-from image_read import image_read
-from edfIO import edfread
+from .h5rw import *
+from .json_rw import *
+from .image_read import image_read
+from .edfIO import edfread
from .. import __has_zmq__ as hzmq
-if hzmq: import interaction
+if hzmq:
+ import interaction
del hzmq
diff --git a/ptypy/io/edfIO.py b/ptypy/io/edfIO.py
index 71987df0e..ebbe586ef 100644
--- a/ptypy/io/edfIO.py
+++ b/ptypy/io/edfIO.py
@@ -10,8 +10,8 @@
writeData WHICH IS NOT YET IMPLEMENTED
"""
import numpy as np
-import os
import glob
+
from .. import utils
logger = utils.verbose.logger
diff --git a/ptypy/io/h5rw.py b/ptypy/io/h5rw.py
index 3856601ca..d7d6620c7 100644
--- a/ptypy/io/h5rw.py
+++ b/ptypy/io/h5rw.py
@@ -13,40 +13,48 @@
import time
import os
import glob
+from collections import OrderedDict
-# ok now also Param class native support
from ..utils import Param
from ..utils.verbose import logger
__all__ = ['h5write', 'h5append', 'h5read', 'h5info', 'h5options']
-h5options=dict(
-H5RW_VERSION = '0.1',
-H5PY_VERSION = h5py.version.version,
-#UNSUPPORTED = 'pickle'
-#UNSUPPORTED = 'ignore'
-UNSUPPORTED = 'fail',
-SLASH_ESCAPE = '_SLASH_')
+h5options = dict(
+ H5RW_VERSION='0.1',
+ H5PY_VERSION=h5py.version.version,
+ # UNSUPPORTED = 'pickle'
+ # UNSUPPORTED = 'ignore'
+ UNSUPPORTED='fail',
+ SLASH_ESCAPE='_SLASH_')
STR_CONVERT = [type]
+
def sdebug(f):
"""
debugging decorator for _store functions
"""
+
def newf(*args, **kwds):
print '{0:20} {1:20}'.format(f.func_name, args[2])
return f(*args, **kwds)
+
newf.__doc__ = f.__doc__
return newf
+
# Helper functions to load slices
class Str_to_Slice(object):
def __getitem__(self, x):
return x
+
def __call__(self, s):
return eval('self' + s)
+
+
str_to_slice = Str_to_Slice()
+
def _h5write(filename, mode, *args, **kwargs):
"""\
_h5write(filename, mode, {'var1'=..., 'var2'=..., ...})
@@ -79,7 +87,7 @@ def _h5write(filename, mode, *args, **kwargs):
# Update input dictionnary
if args:
- d = args[0].copy() # shallow copy
+ d = args[0].copy() # shallow copy
else:
d = {}
d.update(kwargs)
@@ -97,9 +105,9 @@ def check_id(id):
ids.append(id)
def pop_id(id):
- ids[:] = [x for x in ids if x!=id]
+ ids[:] = [x for x in ids if x != id]
- #@sdebug
+ # @sdebug
def _store_numpy(group, a, name, compress=True):
if compress:
dset = group.create_dataset(name, data=a, compression='gzip')
@@ -108,20 +116,19 @@ def _store_numpy(group, a, name, compress=True):
dset.attrs['type'] = 'array'
return dset
- #@sdebug
+ # @sdebug
def _store_string(group, s, name):
dset = group.create_dataset(name, data=np.asarray(s), dtype=dt)
dset.attrs['type'] = 'string'
return dset
- #@sdebug
+ # @sdebug
def _store_unicode(group, s, name):
dset = group.create_dataset(name, data=np.asarray(s.encode('utf8')), dtype=dt)
dset.attrs['type'] = 'unicode'
return dset
-
- #@sdebug
+ # @sdebug
def _store_list(group, l, name):
check_id(id(l))
arrayOK = len(set([type(x) for x in l])) == 1
@@ -132,36 +139,54 @@ def _store_list(group, l, name):
if la.dtype.type is np.string_:
arrayOK = False
else:
- dset = _store_numpy(group,la,name)
+ dset = _store_numpy(group, la, name)
dset.attrs['type'] = 'arraylist'
except:
arrayOK = False
if not arrayOK:
# inhomogenous list. Store all elements individually
dset = group.create_group(name)
- for i,v in enumerate(l):
+ for i, v in enumerate(l):
_store(dset, v, '%05d' % i)
dset.attrs['type'] = 'list'
pop_id(id(l))
return dset
- #@sdebug
+ # @sdebug
def _store_tuple(group, t, name):
dset = _store_list(group, list(t), name)
dset_type = dset.attrs['type']
dset.attrs['type'] = 'arraytuple' if dset_type == 'arraylist' else 'tuple'
return dset
- #@sdebug
+ # @sdebug
def _store_dict(group, d, name):
check_id(id(d))
if any([type(k) not in [str, unicode] for k in d.keys()]):
raise RuntimeError('Only dictionaries with string keys are supported.')
dset = group.create_group(name)
dset.attrs['type'] = 'dict'
- for k,v in d.iteritems():
+ for k, v in d.iteritems():
+ if k.find('/') > -1:
+ k = k.replace('/', h5options['SLASH_ESCAPE'])
+ ndset = _store(dset, v, k)
+ if ndset is not None:
+ ndset.attrs['escaped'] = '1'
+ else:
+ _store(dset, v, k)
+ pop_id(id(d))
+ return dset
+
+ # @sdebug
+ def _store_ordered_dict(group, d, name):
+ check_id(id(d))
+ if any([type(k) not in [str, unicode] for k in d.keys()]):
+ raise RuntimeError('Only dictionaries with string keys are supported.')
+ dset = group.create_group(name)
+ dset.attrs['type'] = 'ordered_dict'
+ for k, v in d.iteritems():
if k.find('/') > -1:
- k = k.replace('/',h5options['SLASH_ESCAPE'])
+ k = k.replace('/', h5options['SLASH_ESCAPE'])
ndset = _store(dset, v, k)
if ndset is not None:
ndset.attrs['escaped'] = '1'
@@ -170,10 +195,10 @@ def _store_dict(group, d, name):
pop_id(id(d))
return dset
- #@sdebug
+ # @sdebug
def _store_param(group, d, name):
# call _to_dict method
- dset = _store_dict(group,d._to_dict(),name)
+ dset = _store_dict(group, d._to_dict(), name)
dset.attrs['type'] = 'param'
return dset
@@ -181,66 +206,77 @@ def _store_dict_new(group, d, name):
check_id(id(d))
dset = group.create_group(name)
dset.attrs['type'] = 'dict'
- for i,kv in enumerate(d.iteritems()):
+ for i, kv in enumerate(d.iteritems()):
_store(dset, kv, '%05d' % i)
pop_id(id(d))
return dset
- #@sdebug
+ # @sdebug
def _store_None(group, a, name):
- dset = group.create_dataset(name, data = np.zeros((1,)))
+ dset = group.create_dataset(name, data=np.zeros((1,)))
dset.attrs['type'] = 'None'
return dset
- #@sdebug
+ # @sdebug
def _store_pickle(group, a, name):
apic = cPickle.dumps(a)
dset = group.create_dataset(name, data=np.asarray(apic), dtype=dt)
dset.attrs['type'] = 'pickle'
return dset
- #@sdebug
+ # @sdebug
+ def _store_numpy_record_array(group, a, name):
+ apic = cPickle.dumps(a)
+ dset = group.create_dataset(name, data=np.asarray(apic), dtype=h5py.special_dtype(vlen=unicode))
+ dset.attrs['type'] = 'record_array'
+ return dset
+
+ # @sdebug
def _store(group, a, name):
if type(a) is str:
- dset = _store_string(group,a,name)
+ dset = _store_string(group, a, name)
elif type(a) is unicode:
- dset = _store_unicode(group,a,name)
+ dset = _store_unicode(group, a, name)
elif type(a) is dict:
- dset = _store_dict(group,a,name)
+ dset = _store_dict(group, a, name)
+ elif type(a) is OrderedDict:
+ dset = _store_ordered_dict(group, a, name)
elif type(a) is Param:
- dset = _store_param(group,a,name)
+ dset = _store_param(group, a, name)
elif type(a) is list:
- dset = _store_list(group,a,name)
+ dset = _store_list(group, a, name)
elif type(a) is tuple:
- dset = _store_tuple(group,a,name)
+ dset = _store_tuple(group, a, name)
elif type(a) is np.ndarray:
- dset = _store_numpy(group,a,name)
+ dset = _store_numpy(group, a, name)
+ elif isinstance(a, (np.record, np.recarray)): # h5py can't handle this.
+ dset = _store_numpy_record_array(group, a, name)
elif np.isscalar(a):
- dset = _store_numpy(group,np.asarray(a),name, compress=False)
+ dset = _store_numpy(group, np.asarray(a), name, compress=False)
dset.attrs['type'] = 'scalar'
elif a is None:
dset = _store_None(group, a, name)
elif type(a) in STR_CONVERT:
- dset = _store_string(group,str(a),name)
+ dset = _store_string(group, str(a), name)
else:
- if h5options['UNSUPPORTED']=='fail':
+ if h5options['UNSUPPORTED'] == 'fail':
raise RuntimeError('Unsupported data type : %s' % type(a))
- elif h5options['UNSUPPORTED']=='pickle':
- dset = _store_pickle(group,a,name)
+ elif h5options['UNSUPPORTED'] == 'pickle':
+ dset = _store_pickle(group, a, name)
else:
dset = None
return dset
# generate all parent directories
- base= os.path.split(filename)[0]
+ base = os.path.split(filename)[0]
if not os.path.exists(base):
os.makedirs(base)
# Open the file and save everything
- with h5py.File(filename,mode) as f:
+ with h5py.File(filename, mode) as f:
f.attrs['h5rw_version'] = h5options['H5RW_VERSION']
f.attrs['ctime'] = ctime
f.attrs['mtime'] = mtime
- for k,v in d.iteritems():
+ for k, v in d.iteritems():
# if the first group key exists, make an overwrite, i.e. delete group `k`
# Otherwise it was not possible in this framework to write
# into an existing file, where a key is already occupied,
@@ -248,9 +284,10 @@ def _store(group, a, name):
# the pure 'appending' nature of h5append
if k in f.keys():
del f[k]
- _store(f,v,k)
+ _store(f, v, k)
return
+
def h5write(filename, *args, **kwargs):
"""\
h5write(filename, {'var1'=..., 'var2'=..., ...})
@@ -278,6 +315,7 @@ def h5write(filename, *args, **kwargs):
_h5write(filename, 'w', *args, **kwargs)
return
+
def h5append(filename, *args, **kwargs):
"""\
h5append(filename, {'var1'=..., 'var2'=..., ...})
@@ -305,6 +343,7 @@ def h5append(filename, *args, **kwargs):
_h5write(filename, 'a', *args, **kwargs)
return
+
def h5read(filename, *args, **kwargs):
"""\
h5read(filename)
@@ -334,8 +373,8 @@ def h5read(filename, *args, **kwargs):
"""
doglob = kwargs.pop('doglob', None)
- depth = kwargs.pop('depth',None)
- depth = 99 if depth is None else depth+1
+ depth = kwargs.pop('depth', None)
+ depth = 99 if depth is None else depth + 1
# Used if we read a list of files
fnames = []
@@ -374,25 +413,25 @@ def _load_dict_new(dset):
d[dk] = dv
return d
- def _load_dict(dset,depth):
+ def _load_dict(dset, depth):
d = {}
- if depth>0:
- for k,v in dset.items():
+ if depth > 0:
+ for k, v in dset.items():
if v.attrs.get('escaped', None) is not None:
k = k.replace(h5options['SLASH_ESCAPE'], '/')
- d[k] = _load(v,depth-1)
+ d[str(k)] = _load(v, depth - 1)
return d
- def _load_list(dset,depth):
+ def _load_list(dset, depth):
l = []
- if depth>0:
+ if depth > 0:
keys = dset.keys()
keys.sort()
for k in keys:
- l.append(_load(dset[k],depth-1))
+ l.append(_load(dset[k], depth - 1))
return l
- def _load_numpy(dset,sl=None):
+ def _load_numpy(dset, sl=None):
if sl is not None:
return dset[sl]
else:
@@ -404,40 +443,56 @@ def _load_scalar(dset):
except:
return dset[...]
+ def _load_ordered_dict(dset, depth):
+ d = OrderedDict()
+ if depth > 0:
+ for k, v in dset.items():
+ if v.attrs.get('escaped', None) is not None:
+ k = k.replace(h5options['SLASH_ESCAPE'], '/')
+ d[k] = _load(v, depth - 1)
+ return d
+
+ def _load_numpy_record_array(dset):
+ return cPickle.loads(dset.value.encode('utf-8'))
+
def _load_str(dset):
- return dset.value
+ return str(dset.value)
def _load_unicode(dset):
- return dset.value.decode('utf8')
+ return dset.value.decode('utf-8')
def _load_pickle(dset):
- return cPickle.loads(dset[...])
+ return cPickle.loads(dset.value)
- def _load(dset, depth,sl=None):
- dset_type = dset.attrs.get('type',None)
+ def _load(dset, depth, sl=None):
+ dset_type = dset.attrs.get('type', None)
# Treat groups as dicts
if (dset_type is None) and (type(dset) is h5py.Group):
dset_type = 'dict'
- if dset_type == 'dict' or dset_type =='param':
+ if dset_type == 'dict' or dset_type == 'param':
if sl is not None:
raise RuntimeError('Dictionaries or ptypy.Param do not support slicing')
- val = _load_dict(dset,depth)
- if dset_type =='param':
+ val = _load_dict(dset, depth)
+ if dset_type == 'param':
val = Param(val)
elif dset_type == 'list':
- val = _load_list(dset,depth)
+ val = _load_list(dset, depth)
if sl is not None:
val = val[sl]
+ elif dset_type == 'ordered_dict':
+ val = _load_ordered_dict(dset, depth)
+ elif dset_type == 'record_array':
+ val = _load_numpy_record_array(dset)
elif dset_type == 'array':
- val = _load_numpy(dset,sl)
+ val = _load_numpy(dset, sl)
elif dset_type == 'arraylist':
val = [x for x in _load_numpy(dset)]
if sl is not None:
val = val[sl]
elif dset_type == 'tuple':
- val = tuple(_load_list(dset,depth))
+ val = tuple(_load_list(dset, depth))
if sl is not None:
val = val[sl]
elif dset_type == 'arraytuple':
@@ -449,22 +504,22 @@ def _load(dset, depth,sl=None):
if sl is not None:
val = val[sl]
elif dset_type == 'unicode':
- val = _load_str(dset)
+ val = _load_unicode(dset)
if sl is not None:
val = val[sl]
elif dset_type == 'scalar':
val = _load_scalar(dset)
elif dset_type == 'None':
# 24.4.13 : B.E. commented due to hr5read not being able to return None type
- #try:
+ # try:
# val = _load_numpy(dset)
- #except:
+ # except:
# val = None
val = None
elif dset_type == 'pickle':
val = _load_pickle(dset)
elif dset_type is None:
- val = _load_numpy(dset,sl)
+ val = _load_numpy(dset, sl)
else:
raise RuntimeError('Unsupported data type : %s' % dset_type)
return val
@@ -475,7 +530,7 @@ def _load(dset, depth,sl=None):
slice = kwargs.get('slice', None)
try:
- f = h5py.File(filename,'r')
+ f = h5py.File(filename, 'r')
except:
print 'Error when opening file %s.' % filename
raise
@@ -507,7 +562,7 @@ def _load(dset, depth,sl=None):
else:
# detect slicing
if '[' in k:
- k,slice_string = k.split('[')
+ k, slice_string = k.split('[')
slice_string = slice_string.split(']')[0]
sl = str_to_slice('[' + slice_string + ']')
else:
@@ -520,133 +575,135 @@ def _load(dset, depth,sl=None):
gr = f[glist[0]]
for gname in glist[1:-1]:
gr = gr[gname]
- outdict[k] = _load(gr[k],depth,sl=sl)
+ outdict[k] = _load(gr[k], depth, sl=sl)
else:
- outdict[k] = _load(f[k],depth,sl=sl)
+ outdict[k] = _load(f[k], depth, sl=sl)
return outdict
-
def h5info(filename, path='', output=None, depth=8):
"""\
h5info(filename)
Prints out a tree structure of given h5 file.
"""
- depth=8 if depth is None else depth
+ depth = 8 if depth is None else depth
indent = 4
filename = os.path.abspath(os.path.expanduser(filename))
- def _format_dict(d,key, dset, isParam=False):
- ss='Param' if isParam else 'dict'
- stringout = ' '*key[0] + ' * %s [%s %d]:\n' % (key[1],ss,len(dset))
- if d>0:
- for k,v in dset.items():
+ def _format_dict(d, key, dset, isParam=False):
+ ss = 'Param' if isParam else 'dict'
+ stringout = ' ' * key[0] + ' * %s [%s %d]:\n' % (key[1], ss, len(dset))
+ if d > 0:
+ for k, v in dset.items():
if v is not None and v.attrs.get('escaped', None) is not None:
k = k.replace(h5options['SLASH_ESCAPE'], '/')
- stringout += _format(d-1,(key[0]+indent, k), v)
+ stringout += _format(d - 1, (key[0] + indent, k), v)
return stringout
- def _format_list(d,key, dset):
- stringout = ' '*key[0] + ' * %s [list %d]:\n' % (key[1],len(dset))
- if d>0:
+ def _format_list(d, key, dset):
+ stringout = ' ' * key[0] + ' * %s [list %d]:\n' % (key[1], len(dset))
+ if d > 0:
keys = dset.keys()
keys.sort()
for k in keys:
- stringout += _format(d-1,(key[0]+indent, ''), dset[k])
+ stringout += _format(d - 1, (key[0] + indent, ''), dset[k])
return stringout
def _format_tuple(key, dset):
- stringout = ' '*key[0] + ' * %s [tuple]:\n' % key[1]
- if d>0:
+ stringout = ' ' * key[0] + ' * %s [tuple]:\n' % key[1]
+ if d > 0:
keys = dset.keys()
keys.sort()
for k in keys:
- stringout += _format(d-1,(key[0]+indent, ''), dset[k])
+ stringout += _format(d - 1, (key[0] + indent, ''), dset[k])
return stringout
def _format_arraytuple(key, dset):
a = dset[...]
if len(a) < 5:
- stringout = ' '*key[0] + ' * ' + key[1] + ' [tuple = ' + str(tuple(a.ravel())) + ']\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [tuple = ' + str(tuple(a.ravel())) + ']\n'
else:
try:
float(a.ravel()[0])
- stringout = ' '*key[0] + ' * ' + key[1] + ' [tuple = (' + (('%f, '*4) % tuple(a.ravel()[:4]) ) + ' ...)]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [tuple = (' + (
+ ('%f, ' * 4) % tuple(a.ravel()[:4])) + ' ...)]\n'
except ValueError:
- stringout = ' '*key[0] + ' * ' + key[1] + ' [tuple = (%d x %s objects)]\n' % (a.size, str(a.dtype))
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [tuple = (%d x %s objects)]\n' % (a.size, str(a.dtype))
return stringout
def _format_arraylist(key, dset):
a = dset[...]
if len(a) < 5:
- stringout = ' '*key[0] + ' * ' + key[1] + ' [list = ' + str(a.tolist()) + ']\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [list = ' + str(a.tolist()) + ']\n'
else:
try:
float(a.ravel()[0])
- stringout = ' '*key[0] + ' * ' + key[1] + ' [list = [' + (('%f, '*4) % tuple(a.ravel()[:4]) ) + ' ...]]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [list = [' + (
+ ('%f, ' * 4) % tuple(a.ravel()[:4])) + ' ...]]\n'
except ValueError:
- stringout = ' '*key[0] + ' * ' + key[1] + ' [list = [%d x %s objects]]\n' % (a.size, str(a.dtype))
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [list = [%d x %s objects]]\n' % (a.size, str(a.dtype))
return stringout
def _format_numpy(key, dset):
a = dset[...]
if len(a) < 5 and a.ndim == 1:
- stringout = ' '*key[0] + ' * ' + key[1] + ' [array = ' + str(a.ravel()) + ']\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [array = ' + str(a.ravel()) + ']\n'
else:
- stringout = ' '*key[0] + ' * ' + key[1] + ' [' + (('%dx'*(a.ndim-1) + '%d') % a.shape) + ' ' + str(a.dtype) + ' array]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [' + (('%dx' * (a.ndim - 1) + '%d') % a.shape) + ' ' + str(
+ a.dtype) + ' array]\n'
return stringout
def _format_scalar(key, dset):
- stringout = ' '*key[0] + ' * ' + key[1] + ' [scalar = ' + str(dset[...]) + ']\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [scalar = ' + str(dset[...]) + ']\n'
return stringout
def _format_str(key, dset):
s = str(dset[...])
if len(s) > 40:
s = s[:40] + '...'
- stringout = ' '*key[0] + ' * ' + key[1] + ' [string = "' + s + '"]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [string = "' + s + '"]\n'
return stringout
def _format_unicode(key, dset):
s = str(dset[...]).decode('utf8')
if len(s) > 40:
s = s[:40] + '...'
- stringout = ' '*key[0] + ' * ' + key[1] + ' [unicode = "' + s + '"]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [unicode = "' + s + '"]\n'
return stringout
def _format_pickle(key, dset):
- stringout = ' '*key[0] + ' * ' + key[1] + ' [pickled object]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [pickled object]\n'
return stringout
def _format_None(key, dset):
- stringout = ' '*key[0] + ' * ' + key[1] + ' [None]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [None]\n'
return stringout
def _format_unknown(key, dset):
- stringout = ' '*key[0] + ' * ' + key[1] + ' [unknown]\n'
+ stringout = ' ' * key[0] + ' * ' + key[1] + ' [unknown]\n'
return stringout
- def _format(d,key, dset):
- dset_type = 'None' if dset is None else dset.attrs.get('type',None)
+ def _format(d, key, dset):
+ dset_type = 'None' if dset is None else dset.attrs.get('type', None)
# Treat groups as dicts
if (dset_type is None) and (type(dset) is h5py.Group):
dset_type = 'dict'
if dset_type == 'dict':
- stringout = _format_dict(d,key, dset,False)
+ stringout = _format_dict(d, key, dset, False)
elif dset_type == 'param':
- stringout = _format_dict(d,key, dset,True)
+ stringout = _format_dict(d, key, dset, True)
elif dset_type == 'list':
- stringout = _format_list(d,key, dset)
+ stringout = _format_list(d, key, dset)
elif dset_type == 'array':
stringout = _format_numpy(key, dset)
elif dset_type == 'arraylist':
stringout = _format_arraylist(key, dset)
elif dset_type == 'tuple':
- stringout = _format_tuple(d,key, dset)
+ stringout = _format_tuple(d, key, dset)
elif dset_type == 'arraytuple':
stringout = _format_arraytuple(key, dset)
elif dset_type == 'string':
@@ -665,22 +722,21 @@ def _format(d,key, dset):
stringout = _format_unknown(key, dset)
return stringout
- with h5py.File(filename,'r') as f:
+ with h5py.File(filename, 'r') as f:
# h5rw_version = f.attrs.get('h5rw_version',None)
# if h5rw_version is None:
# print('Warning: this file does not seem to follow h5read format.')
ctime = f.attrs.get('ctime', None)
if ctime is not None:
print('File created : ' + ctime)
- if not path.endswith('/'): path+='/'
+ if not path.endswith('/'): path += '/'
key_list = f[path].keys()
outstring = ''
for k in key_list:
- outstring += _format(depth,(0,k),f[path+k])
+ outstring += _format(depth, (0, k), f[path + k])
print outstring
# return string if output variable passed as option
if output != None:
return outstring
-
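The h5rw changes add round-trip support for OrderedDict (stored with type='ordered_dict') and for numpy record arrays (pickled into a variable-length string dataset), alongside the existing string-slicing reader. A short sketch of the API as exposed by this module (the file path is arbitrary):

    import numpy as np
    from collections import OrderedDict
    from ptypy.io import h5write, h5read

    d = OrderedDict([('a', 1), ('b', 2)])              # -> type='ordered_dict'
    rec = np.rec.fromrecords([(1, 2.0)], names='x,y')  # -> pickled 'record_array'
    h5write('/tmp/demo.h5', od=d, rec=rec, data=np.arange(10))

    out = h5read('/tmp/demo.h5', 'data[2:5]')  # slice string parsed by str_to_slice
    print out['data']                          # -> [2 3 4]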
diff --git a/ptypy/io/imageIO.py b/ptypy/io/imageIO.py
index 256ba59b2..014abb8c6 100644
--- a/ptypy/io/imageIO.py
+++ b/ptypy/io/imageIO.py
@@ -3,13 +3,11 @@
Wrapper over Python Imaging Library to read images (+ metadata) and write images.
"""
import numpy as np
-import os
import glob
-import PIL.Image as PIL
-from .. import utils
-from .. import verbose
+
# Tentative support for 12-bit tiff. Requires libtiff library
+# This workaround may become unnecessary when using Pillow
try:
import libtiff12bit
except:
@@ -17,6 +15,7 @@
__all__ = ['imread']
+
def imread(filename, doglob=None, roi=None):
"""\
d,meta = imread(filename)
@@ -37,6 +36,7 @@ def imread(filename, doglob=None, roi=None):
returns a region of interest (applied on all files if gobbing or if filename is a list)
"""
+ import PIL.Image as PIL
if not isinstance(filename, str):
# We have a list
fnames = filename
@@ -53,31 +53,32 @@ def imread(filename, doglob=None, roi=None):
ldat = []
lmeta = []
for f in fnames:
- verbose(3, 'Reading "%s"' % f)
try:
im = PIL.open(f)
meta = readHeader(im)
dat = np.array(im)
except IOError:
# Maybe we have an unsupported tiff...
- if libtiff12bit is None: raise
+ if libtiff12bit is None:
+ raise
im = libtiff12bit.TIFF.open(f)
c = im.GetField('Compression')
b = im.GetField('BitsPerSample')
compname = libtiff12bit.define_to_name_map['Compression'][c]
- meta = {'compression': compname[12:].lower(), 'format':'TIFF', 'mode': ('I;%d' % b)}
+ meta = {'compression': compname[12:].lower(), 'format': 'TIFF', 'mode': ('I;%d' % b)}
dat = im.read_image()
meta['filename'] = f
lmeta.append(meta)
if roi is not None:
- ldat.append(dat[roi[0]:roi[1],roi[2]:roi[3],...].copy())
+ ldat.append(dat[roi[0]:roi[1], roi[2]:roi[3], ...].copy())
else:
ldat.append(dat)
if doglob:
dat = ldat
meta = lmeta
- return dat,meta
+ return dat, meta
+
def readHeader(im):
"""\
diff --git a/ptypy/io/image_read.py b/ptypy/io/image_read.py
index ac9bf1716..ecf8d40ae 100644
--- a/ptypy/io/image_read.py
+++ b/ptypy/io/image_read.py
@@ -1,17 +1,27 @@
import numpy as np
-from imageIO import imread
-from edfIO import edfread
-from rawIO import rawread
+import os
+import glob
+
+from .imageIO import imread
+from .edfIO import edfread
+from .rawIO import rawread
+from .h5rw import h5read
__all__ = ['image_read']
def image_read(filename, *args, **kwargs):
- """\
- Attempts to import image data from any file.
"""
- import os
- import glob
+ Try to load image data from any file.
+
+ Parameters
+ ----------
+ filename: str
+ Name of the file. If filename contains a wildcard, load all files that match the pattern.
+
+ Returns
+ -------
+ (image, metadict) or (list of images, list of metadicts)
+ """
use_imread = True
special_format = ['.raw', '.edf', '.h5']
@@ -37,13 +47,15 @@ def image_read(filename, *args, **kwargs):
elif ext == '.h5':
h5_image = h5read(filename, *args, **kwargs)
def look_for_ndarray(d):
- for k,v in d.iteritems():
- if type(v) is np.ndarray:
- return k,v
- elif type(v) is type({}):
+ for k, v in d.iteritems():
+ if isinstance(v, np.ndarray):
+ return k, v
+ elif isinstance(v, dict):
out = look_for_ndarray(v)
- if out is not None: return (k,) +out
- else: pass
+ if out is not None:
+ return (k,) + out
+ else:
+ pass
return None
if isinstance(h5_image, list):
h5_arrays = []
@@ -51,10 +63,10 @@ def look_for_ndarray(d):
for h5s in h5_image:
h5a = look_for_ndarray(h5s)
h5_arrays.append(h5a[-1])
- h5_metas.append({'filename':filename, 'path': '/'.join(h5a[0:-1])})
+ h5_metas.append({'filename': filename, 'path': '/'.join(h5a[0:-1])})
return h5_arrays, h5_metas
else:
h5_array = look_for_ndarray(h5_image)
- return h5_array[-1], {'filename':filename, 'path': '/'.join(h5_array[0:-1])}
+ return h5_array[-1], {'filename': filename, 'path': '/'.join(h5_array[0:-1])}
else:
- raise RuntimeError('Unkown file type')
+ raise RuntimeError('Unknown file type')
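As the rewritten docstring states, image_read dispatches on the file extension and returns either a single (image, metadict) pair or lists of both when the name contains a wildcard. A usage sketch (file names invented):

    from ptypy.io import image_read

    imgs, metas = image_read('scan_*.edf')   # lists, one entry per matching file
    img, meta = image_read('scan_0001.edf')  # single pair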
diff --git a/ptypy/io/interaction.py b/ptypy/io/interaction.py
index 6ce36260c..e2b19a436 100644
--- a/ptypy/io/interaction.py
+++ b/ptypy/io/interaction.py
@@ -21,36 +21,17 @@
import numpy as np
import re
import json
-from .. import utils as u
+
from ..utils.verbose import logger
+from .. import defaults_tree
__all__ = ['Server', 'Client']
DEBUG = lambda x: None
-#DEBUG = print
-
-#: Default parameters for networking
-network_DEFAULT = u.Param(
- address = "tcp://127.0.0.1", # Default address for primary connection
- port = 5560, # Default port for primary connection
- connections = 10 # Port range for secondary connections
-)
-
-#: Default parameters for the server
-Server_DEFAULT = u.Param(network_DEFAULT,
- poll_timeout=10, # Network polling interval (in milliseconds!)
- pinginterval=2, # Interval to check pings (in seconds)
- pingtimeout=10 # Ping time out: client disconnected after this period (in seconds)
-)
-
-#: Default parameters for the client
-Client_DEFAULT = u.Param(network_DEFAULT,
- poll_timeout=100, # Network polling interval (in milliseconds!)
- pinginterval=1, # Interval to check pings (in seconds)
- connection_timeout=3600000., # Timeout for dead server (in milliseconds!)
-)
+# DEBUG = print
+
def ID_generator(size=6, chars=string.ascii_uppercase + string.digits):
"""\
Generate a random ID string made of capital letters and digits.
@@ -76,6 +57,7 @@ class NumpyEncoder(json.JSONEncoder):
Custom JSON Encoder class that take out numpy arrays from a structure
and replace them with a code string.
"""
+
def encode(self, obj):
# Prepare the array list
self.npy_arrays = []
@@ -98,6 +80,7 @@ def default(self, obj):
return json.JSONEncoder.default(self, obj)
+
NE = NumpyEncoder()
# This is the string to match against when decoding
@@ -116,7 +99,7 @@ def numpy_replace(obj, arraylist):
return obj
elif isinstance(obj, dict):
newobj = {}
- for k,v in obj.iteritems():
+ for k, v in obj.iteritems():
newobj[k] = numpy_replace(v, arraylist)
return newobj
elif isinstance(obj, list):
@@ -177,13 +160,60 @@ def numpy_zmq_recv(in_socket):
return message
+@defaults_tree.parse_doc('io.interaction.server')
class Server(object):
- """\
- Main server class.
"""
+ Main ZMQ server class.
+
+ Defaults:
+
+ [active]
+ default = True
+ help = Activation switch
+ doc = Set to ``False`` for no ZeroMQ interaction server
+ type = bool
+
+ [address]
+ default = 'tcp://127.0.0.1'
+ help = The address the server is listening to.
+ doc = When running ptypy on a remote server, this is the server's network address.
+ type = str
+ userlevel = 2
+
+ [port]
+ default = 5560
+ help = The port the server is listening to.
+ doc = Make sure to pick an unused port with a few unused ports close to it.
+ type = int
+ userlevel = 2
+
+ [connections]
+ default = 10
+ help = Number of concurrent connections on the server
+ doc = A range ``[port : port+connections]`` of ports adjacent to :py:data:`~.io.interaction.port`
+ will be opened on demand for connecting clients.
+ type = int
+ userlevel = 2
+
+ [poll_timeout]
+ default = 10.0
+ type = float
+ help = Network polling interval
+ doc = Network polling interval, in milliseconds.
+
+ [pinginterval]
+ default = 2
+ type = float
+ help = Interval to check pings
+ doc = Interval with which to check pings, in seconds.
+
+ [pingtimeout]
+ default = 10
+ type = float
+ help = Ping time out
+ doc = Ping time out: client disconnected after this period, in seconds.
- #: Default parameters, see also :py:data:`.io.interaction`
- DEFAULT = Server_DEFAULT
+ """
def __init__(self, pars=None, **kwargs):
"""
@@ -218,14 +248,14 @@ def __init__(self, pars=None, **kwargs):
#################################
# Initialize all parameters
#################################
- p = u.Param(self.DEFAULT)
+
+ p = self.DEFAULT.copy(99)
p.update(pars)
p.update(kwargs)
self.p = p
-
# sanity check for port range:
- #if str(p.port_range)==p.port_range:
+ # if str(p.port_range)==p.port_range:
# from ptypy.utils import str2range
# p.port_range = str2range(p.port_range)
@@ -260,7 +290,7 @@ def __init__(self, pars=None, **kwargs):
self._activated = False
# Bind command names to methods
- self.cmds = {'CONNECT':self._cmd_connect, # Initial connection from client
+ self.cmds = {'CONNECT': self._cmd_connect, # Initial connection from client
'DISCONNECT': self._cmd_disconnect, # Disconnect from client
'DO': self._cmd_queue_do, # Execute a command (synchronous)
'GET': self._cmd_queue_get, # Send an object to the client (synchronous)
@@ -312,7 +342,7 @@ def send_warning(self, warning_message):
for ID in self.names.keys():
self.queue.put({'ID': ID, 'cmd': 'WARN', 'ticket': 'WARN', 'str': warning_message})
self._need_process = True
- return {'status':'ok'}
+ return {'status': 'ok'}
def send_error(self, error_message):
"""
@@ -334,8 +364,8 @@ def _run(self):
self.context = zmq.Context()
self.in_socket = self.context.socket(zmq.REP)
success = False
- for pp in range(0,200,20):
- port = self.port+pp
+ for pp in range(0, 200, 20):
+ port = self.port + pp
fulladdress = self.address + ':' + str(port)
try:
self.in_socket.bind(fulladdress)
@@ -472,7 +502,7 @@ def _cmd_disconnect(self, ID, args):
# Place this back in the pool
self.ID_pool.append(IDtuple)
- return{'status': 'ok'}
+ return {'status': 'ok'}
def _cmd_shutdown(self, ID, args):
"""
@@ -604,7 +634,7 @@ def _process(self):
out = None
elif q['cmd'] == 'DO':
try:
- exec(q['str'], {}, self.objects)
+ exec (q['str'], {}, self.objects)
out = None
except:
status = sys.exc_info()[0]
@@ -659,13 +689,47 @@ def stop(self):
self._thread.join(3)
+@defaults_tree.parse_doc('io.interaction.client')
class Client(object):
"""
Basic but complete client to interact with the server.
- """
- #: Default parameters, see also :py:data:`.io.interaction`
- DEFAULT = Client_DEFAULT
+
+ Defaults:
+
+ [address]
+ default = 'tcp://127.0.0.1'
+ help = The address the server is listening to.
+ doc = When running ptypy on a remote server, this is the server's network address.
+ type = str
+ userlevel = 2
+
+ [port]
+ default = 5560
+ help = The port the server is listening to.
+ doc = Make sure to pick an unused port with a few unused ports close to it.
+ type = int
+ userlevel = 2
+
+ [poll_timeout]
+ default = 100.0
+ type = float
+ help = Network polling interval
+ doc = Network polling interval, in milliseconds.
+
+ [pinginterval]
+ default = 1
+ type = float
+ help = Interval to check pings
+ doc = Interval with which to check pings, in seconds.
+
+ [connection_timeout]
+ default = 3600000.
+ type = float
+ help = Timeout for dead server
+ doc = Timeout for dead server, in milliseconds.
+
+ """
def __init__(self, pars=None, **kwargs):
"""
@@ -690,7 +754,7 @@ def __init__(self, pars=None, **kwargs):
"""
- p = u.Param(self.DEFAULT)
+ p = self.DEFAULT.copy()
p.update(pars)
p.update(kwargs)
self.p = p
@@ -766,7 +830,7 @@ def _run(self):
self.req_socket.connect(fulladdress)
# Establish connection with the interactor by sending a "CONNECT" command
- self._send(self.req_socket, {'ID': None, 'cmd': 'CONNECT', 'args': {'name':self.name}})
+ self._send(self.req_socket, {'ID': None, 'cmd': 'CONNECT', 'args': {'name': self.name}})
reply = self._recv(self.req_socket)
if reply['status'] != 'ok':
raise RuntimeError('Connection failed! (answer: %s)' % reply['status'])
@@ -1040,6 +1104,3 @@ def get_now(self, evalstr):
self.getnow_flag.wait()
return self.last_reply['out']
-
-
-
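Server and Client now draw their defaults from the parsed Defaults: blocks instead of the removed module-level Param objects; keyword arguments still override them via p.update(kwargs). A minimal construction sketch (port value arbitrary):

    from ptypy.io.interaction import Server

    # kwargs update the parsed defaults, see p.update(kwargs) in __init__
    s = Server(port=5570, poll_timeout=10.0)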
diff --git a/ptypy/io/json_rw.py b/ptypy/io/json_rw.py
index b4a2fbf1d..0185bdeb3 100644
--- a/ptypy/io/json_rw.py
+++ b/ptypy/io/json_rw.py
@@ -13,13 +13,13 @@
import time
import os
import glob
-import re
import base64
__all__ = ['jwrite', 'jread']
VERSION = 0.1
+
class NumpyEncoder(json.JSONEncoder):
"""
This class is adapted from http://stackoverflow.com/questions/3488934/simplejson-and-numpy-array.
diff --git a/ptypy/io/rawIO.py b/ptypy/io/rawIO.py
index 8ffbf155e..8cdca3891 100644
--- a/ptypy/io/rawIO.py
+++ b/ptypy/io/rawIO.py
@@ -5,12 +5,8 @@
TODO: add masking function to mask out hot/dead pixels of detector
@author: Bjoern Enders
"""
-import os
import glob
-#from pyE17 import utils
-#from pyE17 import verbose
import numpy as np
-#import cbf_uncompress as c
from ptypy.utils.verbose import log
__all__ = ['rawread']
diff --git a/ptypy/resources/__init__.py b/ptypy/resources/__init__.py
index 5e8abb3af..f0872bb1c 100644
--- a/ptypy/resources/__init__.py
+++ b/ptypy/resources/__init__.py
@@ -8,7 +8,7 @@ def flower_obj(shape=None):
from ptypy import utils as u
import numpy as np
from matplotlib.image import imread
-
+
im = u.rgb2complex(imread(flowerfile))
if shape is not None:
sh = u.expect2(shape)
@@ -36,7 +36,7 @@ def moon_pr(shape=None):
from ptypy import utils as u
import numpy as np
from matplotlib.image import imread
-
+
im = u.rgb2complex(imread(moonfile))
if shape is not None:
sh = u.expect2(shape)
diff --git a/ptypy/resources/default_parameters_configparser.txt b/ptypy/resources/default_parameters_configparser.txt
new file mode 100644
index 000000000..2199ceb9b
--- /dev/null
+++ b/ptypy/resources/default_parameters_configparser.txt
@@ -0,0 +1,1187 @@
+[engine]
+help = Reconstruction engine parameters
+default = None
+doc =
+userlevel = None
+type = Param
+
+[engine.ML]
+help = Maximum Likelihood parameters
+default = None
+doc =
+userlevel = None
+type = Param
+
+[engine.ML.floating_intensities]
+help = If True, allow for adaptive rescaling of the diffraction pattern intensities (to correct
+ for incident beam intensity fluctuations).
+default = FALSE
+doc =
+userlevel = 2
+type = bool
+
+[engine.ML.reg_del2_amplitude]
+help = Amplitude of the Gaussian prior if used.
+default = 0.01
+doc =
+userlevel = 0
+type = float
+
+[engine.ML.probe_update_start]
+help = Number of iterations before probe update starts
+default = 0
+doc =
+userlevel = 1
+type = int
+
+[engine.ML.smooth_gradient]
+help = Smoothing preconditioner. If 0, not used; if > 0, a Gaussian filter; if < 0, a Hann window.
+default = 0
+doc =
+userlevel = 1
+type = float
+
+[engine.ML.intensity_renormalization]
+help = A rescaling of the intensity so they can be interpreted as Poisson counts.
+default = 1
+doc =
+userlevel = 0
+type = float
+
+[engine.ML.scale_probe_object]
+help = Relative scale of probe to object.
+default = 1
+doc =
+userlevel = 2
+type = float
+
+[engine.ML.reg_del2]
+help = Whether to use a Gaussian prior (smoothing) regularizer.
+default = TRUE
+doc =
+userlevel = 0
+type = bool
+
+[engine.ML.type]
+help = Likelihood model. One of 'gaussian', 'poisson' or 'euclid'
+default = gaussian
+doc = [only 'gaussian' is implemented for now]
+userlevel = 2
+type = str
+
+[engine.ML.scale_precond]
+help = Whether to use the object/probe scaling preconditioner.
+default = FALSE
+doc = This parameter can give faster convergence for weakly scattering samples.
+userlevel = 2
+type = bool
+
+[engine.DM]
+help = Parameters for Difference map engine
+default = None
+doc =
+userlevel = None
+type = Param
+
+[engine.DM.fourier_relax_factor]
+help = If the RMS error between model and diffraction data is smaller than this fraction, the
+       Fourier constraint is met
+default = 0.01
+doc = Set this value higher for noisy data
+userlevel = 1
+type = float
+
+[engine.DM.overlap_converge_factor]
+help = Threshold for interruption of the inner overlap loop
+default = 0.05
+doc = The inner overlap loop refines the probe and the object simultaneously. This loop is
+ escaped as soon as the overall change in probe, relative to the first iteration, is less
+ than this value.
+userlevel = 2
+type = float
+
+[engine.DM.probe_update_start]
+help = Number of iterations before probe update starts
+default = 2
+doc =
+userlevel = 1
+type = int
+
+[engine.DM.obj_smooth_std]
+help = Gaussian smoothing (in pixels) of the current object prior to update
+default = 20
+doc = If None, smoothing is deactivated. This smoothing can be used to reduce the amplitude of
+ spurious pixels in the outer, least constrained areas of the object.
+userlevel = 2
+type = int
+
+[engine.DM.update_object_first]
+help = If True, update object before probe
+default = TRUE
+doc =
+userlevel = 2
+type = bool
+
+[engine.DM.probe_inertia]
+help = Weight of the current probe estimate in the update
+default = 0.001
+doc =
+userlevel = 2
+type = float
+
+[engine.DM.alpha]
+help = Difference map parameter
+default = 1
+doc =
+userlevel = 1
+type = int
+
+[engine.DM.overlap_max_iterations]
+help = Maximum of iterations for the overlap constraint inner loop
+default = 10
+doc =
+userlevel = 2
+type = int
+
+[engine.DM.object_inertia]
+help = Weight of the current object in the update
+default = 0.1
+doc =
+userlevel = 2
+type = float
+
+[engine.common]
+help = Parameters common to all engines
+default = None
+doc =
+userlevel = None
+type = Param
+
+[engine.common.clip_object]
+help = Clip object amplitude into this interval
+default = None
+doc =
+userlevel = 1
+type = tuple
+
+[engine.common.probe_support]
+help = Fraction of valid probe area (circular) in probe frame
+default = 0.7
+doc =
+userlevel = 0
+type = float
+
+[engine.common.numiter_contiguous]
+help = Number of iterations without interruption
+default = 1
+doc = The engine will not return control to the caller until this number of iterations is
+ completed (not processing server requests, I/O operations, ...)
+userlevel = 2
+type = int
+
+[engine.common.name]
+help = Name of engine.
+default = DM
+doc = Depending on the name given here, the default parameter set will be a superset of `common`
+ and of the parameters in the entry of the same name.
+userlevel = None
+type = str
+
+[engine.common.numiter]
+help = Total number of iterations
+default = 20
+doc =
+userlevel = 0
+type = int
+
+[engines]
+help = Container for instances of "engine" parameters
+default = None
+doc = All engines registered in this structure will be executed sequentially.
+userlevel = None
+type = Param
+
+[engines.engine_00]
+help = Default first engines entry
+default = None
+doc = Default first engine is difference map (DM)
+userlevel = 0
+type = engine
+
+[run]
+help = Reconstruction identifier
+default = None
+doc = Reconstruction run identifier. If ``None``, the run name will be constructed at run time
+ from other information.
+userlevel = 0
+type = str
+
+[data_type]
+help = Reconstruction floating number precision
+default = single
+doc = Reconstruction floating number precision (``'single'`` or ``'double'``)
+userlevel = 1
+type = str
+
+[dry_run]
+help = Dry run switch
+default = FALSE
+doc = Run everything, skipping all memory- and CPU-heavy steps (and file saving).
+ **NOT IMPLEMENTED**
+userlevel = 2
+type = bool
+
+[scan]
+help = Scan parameters
+default = None
+doc = This category specifies defaults for all scans. Scan-specific parameters are stored in
+ scans.scan_%%
+userlevel = None
+type = Param
+
+[scan.sharing]
+help = Scan sharing options
+default = None
+doc =
+userlevel = None
+type = Param
+
+[scan.sharing.probe_share_with]
+help = Label or index of scan to share probe with.
+default = None
+doc = Possible values:
+ - ``None``: Do not share
+ - *(string)*: Label of the scan to share with
+ - *(int)*: Index of scan to share with
+userlevel = 1
+type = str
+
+[scan.sharing.probe_share_power]
+help = Relative power for probe sharing
+default = 1
+doc =
+userlevel = 1
+type = float
+
+[scan.sharing.object_share_with]
+help = Label or index of scan to share object with.
+default = None
+doc = Possible values:
+ - ``None``: Do not share
+ - *(string)*: Label of the scan to share with
+ - *(int)*: Index of scan to share with
+userlevel = 1
+type = str
+
+[scan.sharing.object_share_power]
+help = Relative power for object sharing
+default = 1
+doc =
+userlevel = 1
+type = float
+
+[scan.tags]
+help = Comma-separated string tags describing the data input
+default = None
+doc = [deprecated?]
+userlevel = 2
+type = str
+
+[scan.geometry]
+help = Physical parameters
+default = None
+doc = All distances are in meters. Other units are specified in the documentation strings.
+ These parameters have very low priority in the :any:`Ptycho` construction process and can
+ usually be left out in script if either :py:data:`.scan.data` or the invoked preparation
+ subclass provide enough geometric information. You can change this behavior with the
+ `precedence` parameter.
+userlevel = None
+type = Param
+
+[scan.geometry.lam]
+help = Wavelength
+default = None
+doc = Used only if `energy` is ``None``
+userlevel = 0
+type = float
+
+[scan.geometry.precedence]
+help = Where geometry parameters take precedence over others
+default = None
+doc = Possible options if parameters are not None:
+ - ``None``: Fill only missing parameters (default) at the very last moment making meta
+ data from :any:`PtyScan` the default source of geometric information.
+ - ``'meta'``: Overwrite meta after data was loaded, does not affect data preparation.
+ - ``'data'``: Overwrite entries in :py:data:`.scan.data`. This affects data preparation
+ too.
+userlevel = 2
+type = str
+
+[scan.geometry.distance]
+help = Distance from object to detector
+default = 7.19
+doc =
+userlevel = 0
+type = float
+
+[scan.geometry.energy]
+help = Energy (in keV)
+default = 6.2
+doc = If ``None``, uses `lam` instead.
+userlevel = 0
+type = float
+
+[scan.geometry.psize]
+help = Pixel size in Detector plane
+default = 0.000172
+doc =
+userlevel = 1
+type = float
+
+[scan.geometry.propagation]
+help = Propagation type
+default = farfield
+doc = Either "farfield" or "nearfield"
+userlevel = 1
+type = str
+
+[scan.geometry.resolution]
+help = Pixel size in Sample plane
+default = None
+doc = This parameter is used only for simulations
+userlevel = 2
+type = float
+
+[scan.coherence]
+help = Coherence parameters
+default = None
+doc =
+userlevel = None
+type = Param
+
+[scan.coherence.object_dispersion]
+help = Energy dispersive response of the object
+default = None
+doc = One of:
+ - ``None`` or ``'achromatic'``: no dispersion
+ - ``'linear'``: linear response model
+ - ``'irregular'``: no assumption
+ **[not implemented]**
+userlevel = 2
+type = str
+
+[scan.coherence.num_object_modes]
+help = Number of object modes
+default = 1
+doc =
+userlevel = 0
+type = int
+
+[scan.coherence.num_probe_modes]
+help = Number of probe modes
+default = 1
+doc =
+userlevel = 0
+type = int
+
+[scan.coherence.probe_dispersion]
+help = Energy dispersive response of the probe
+default = None
+doc = One of:
+ - ``None`` or ``'achromatic'``: no dispersion
+ - ``'linear'``: linear response model
+ - ``'irregular'``: no assumption
+ **[not implemented]**
+userlevel = 2
+type = str
+
+[scan.coherence.spectrum]
+help = Amplitudes of the relative energy bins if the probe modes have different energies
+default = None
+doc =
+userlevel = 2
+type = list
+
+[scan.sample]
+help = Initial object modelization parameters
+default = None
+doc = In script, you may pass a numpy.array here directly as the model. This array will be
+ passed to the storage instance with no checking whatsoever. Used in `~ptypy.core.sample`
+userlevel = None
+type = Param
+
+[scan.sample.diversity]
+help = Probe mode(s) diversity parameters
+default = None
+doc = Can be ``None`` i.e. no diversity
+userlevel = None
+type = Param
+
+[scan.sample.diversity.shift]
+help = Lateral shift of modes relative to main mode
+default = None
+doc = **[not implemented]**
+userlevel = 2
+type = float
+
+[scan.sample.diversity.noise]
+help = Noise in the generated modes of the illumination
+default = None
+doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+userlevel = 1
+type = tuple
+
+[scan.sample.diversity.power]
+help = Power of modes relative to main mode (zero-layer)
+default = 0.1
+doc =
+userlevel = 1
+type = tuple, float
+
+[scan.sample.recon]
+help = Parameters to load from previous reconstruction
+default = None
+doc =
+userlevel = None
+type = Param
+
+[scan.sample.recon.layer]
+help = Layer (mode) of storage data to load
+default = None
+doc = ``None`` means all layers, choose ``0`` for main mode
+userlevel = 1
+type = float
+
+[scan.sample.recon.rfile]
+help = Path to a ``.ptyr`` compatible file
+default = \*.ptyr
+doc =
+userlevel = 0
+type = str
+
+[scan.sample.recon.ID]
+help = ID (label) of storage data to load
+default = None
+doc = ``None`` means any ID
+userlevel = 1
+type = NoneType
+
+[scan.sample.process]
+help = Model processing parameters
+default = None
+doc = Can be ``None``, i.e. no processing
+userlevel = None
+type = Param
+
+[scan.sample.process.density]
+help = Density in [g/ccm]
+default = 1
+doc = Only used if `formula` is not None
+userlevel = 2
+type = float
+
+[scan.sample.process.offset]
+help = Offset between center of object array and scan pattern
+default = (0,0)
+doc =
+userlevel = 2
+type = tuple
+
+[scan.sample.process.zoom]
+help = Zoom value for object simulation.
+default = None
+doc = If ``None``, leave the array untouched. Otherwise the modeled or loaded image will be
+ resized using :py:func:`zoom`.
+userlevel = 2
+type = tuple
+
+[scan.sample.process.thickness]
+help = Maximum thickness of sample
+default = 1.00E-06
+doc = If ``None``, the absolute values of loaded source array will be used
+userlevel = 2
+type = float
+
+[scan.sample.process.ref_index]
+default = (0.5, 0.0)
+help = Assigned refractive index, tuple of format (real, complex)
+doc = If ``None``, treat source array as projection of refractive index a+bj for (a, b). If a
+ refractive index is provided the array's absolute value will be used to scale the refractive index.
+userlevel = 2
+type = complex
+
+[scan.sample.process.formula]
+help = Chemical formula
+default = None
+doc = A formula compatible with a CXRO database query, e.g. ``'Au'`` or ``'NaCl'`` or ``'H2O'``
+userlevel = 2
+type = str
+
+[scan.sample.process.smoothing]
+help = Smoothing scale
+default = 2
+doc = Smooth the projection with a Gaussian kernel of width given by `smoothing_mfs`
+userlevel = 2
+type = int
+
+[scan.sample.stxm]
+help = STXM analysis parameters
+default = None
+doc =
+userlevel = 1
+type = Param
+
+[scan.sample.stxm.label]
+help = Scan label of diffraction that is to be used for probe estimate
+default = None
+doc = If ``None``, the scan's own label is used
+userlevel = 1
+type = str
+
+[scan.sample.model]
+help = Type of initial object model
+default = None
+doc = One of:
+ - ``None`` : model initialization defaults to flat array filled with `fill`
+ - ``'recon'`` : load model from a previous reconstruction, see the `recon` parameters
+ - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+ - ** : one of ptypy's internal model resource strings
+ - ** : one of the templates in the sample module
+ In script, you may pass a numpy.array here directly as the model. This array will be
+ processed according to `process` in order to *simulate* a sample from e.g. a thickness
+ profile.
+userlevel = 0
+type = str
+
+[scan.sample.fill]
+help = Default fill value
+default = 1
+doc =
+userlevel = None
+type = float, complex
+
+[scan.xy]
+help = Parameters for scan patterns
+default = None
+doc = These parameters are useful in two cases:
+ - When the experimental positions are not known (no encoders)
+ - When using the package to simulate data.
+ In script an array of shape *(N,2)* may be passed here instead of a Param or dictionary as
+ an **override**
+userlevel = None
+type = Param
+
+[scan.xy.count]
+help = Number of scan points
+default = None
+doc = Only return positions up to the number given by `count`.
+userlevel = 1
+type = int
+
+[scan.xy.jitter]
+help = RMS of jitter on sample position
+default = 0
+doc = **Only use in simulation**. Adds a random jitter to positions.
+userlevel = 2
+type = float, tuple
+
+[scan.xy.spacing]
+help = Pattern spacing
+default = 1.50E-06
+doc = Spacing between scan positions. If the model supports asymmetric scans, a tuple passed
+ here will be interpreted as *(dy,dx)* with *dx* as horizontal spacing and *dy* as vertical
+ spacing. If ``None`` the value is calculated from `extent` and `steps`
+userlevel = 0
+type = float, tuple
+
+[scan.xy.steps]
+help = Pattern step count
+default = 10
+doc = Number of steps with length *spacing* in the grid. A tuple *(ny,nx)* provided here can be
+ used for a different step in vertical ( *ny* ) and horizontal direction ( *nx* ). If
+ ``None``, the step count is calculated from `extent` and `spacing`
+userlevel = 0
+type = int, tuple
+
+[scan.xy.extent]
+help = Rectangular extent of pattern
+default = 1.50E-05
+doc = Defines the absolute maximum extent. If a tuple *(ly,lx)* is provided the extent may be
+ rectangular rather than square. All positions outside of `extent` will be discarded. If
+ ``None`` the extent is `spacing` times `steps`
+userlevel = 0
+type = float, tuple
+
+[scan.xy.offset]
+help = Offset of scan pattern relative to origin
+default = 0
+doc = If tuple, the offset may differ in *x* and *y*. Please note that the offset will be
+ included when removing scan points outside of `extent`.
+userlevel = 2
+type = float, tuple
+
+[scan.xy.model]
+help = Scan pattern type
+default = None
+doc = The type must be one of the following:
+ - ``None``: positions are read from data file.
+ - ``'raster'``: raster grid pattern
+ - ``'round'``: concentric circles pattern
+ - ``'spiral'``: spiral pattern
+ In script an array of shape *(N,2)* may be passed here instead
+userlevel = 0
+type = str
+
+[scan.if_conflict_use_meta]
+help = Give priority to metadata relative to input parameters
+default = TRUE
+doc = [deprecated, use :py:data:`.scan.geometry.precedence` instead]
+userlevel = 2
+type = bool
+
+[scan.illumination]
+help = Illumination model (probe)
+default = None
+doc = In script, you may directly pass a three-dimensional numpy.ndarray here instead of a
+ `Param`. This array will be copied to the storage instance with no checking whatsoever.
+ Used in `~ptypy.core.illumination`
+userlevel = None
+type = Param
+
+[scan.illumination.diversity]
+help = Probe mode(s) diversity parameters
+default = None
+doc = Can be ``None`` i.e. no diversity
+userlevel = None
+type = Param
+
+[scan.illumination.diversity.shift]
+help = Lateral shift of modes relative to main mode
+default = None
+doc = **[not implemented]**
+userlevel = 2
+type = float
+
+[scan.illumination.diversity.noise]
+help = Noise in the generated modes of the illumination
+default = None
+doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+userlevel = 1
+type = tuple
+
+[scan.illumination.diversity.power]
+help = Power of modes relative to main mode (zero-layer)
+default = 0.1
+doc =
+userlevel = 1
+type = tuple, float
+
+[scan.illumination.recon]
+help = Parameters to load from previous reconstruction
+default = None
+doc =
+userlevel = None
+type = Param
+
+[scan.illumination.recon.layer]
+help = Layer (mode) of storage data to load
+default = None
+doc = ``None`` means all layers, choose ``0`` for main mode
+userlevel = 0
+type = float
+
+[scan.illumination.recon.rfile]
+help = Path to a ``.ptyr`` compatible file
+default = \*.ptyr
+doc =
+userlevel = 0
+type = str
+
+[scan.illumination.recon.ID]
+help = ID (label) of storage data to load
+default = None
+doc = ``None`` means any ID
+userlevel = 0
+type = NoneType
+
+[scan.illumination.photons]
+help = Number of photons in the incident illumination
+default = None
+doc = A value specified here will take precedence over calculated statistics from the loaded
+ data.
+userlevel = 2
+type = int
+
+[scan.illumination.propagation]
+help = Parameters for propagation after aperture plane
+default = None
+doc = Propagation to focus takes precedence over parallel propagation if `focussed` is not
+ ``None``
+userlevel = None
+type = Param
+
+[scan.illumination.propagation.focussed]
+help = Propagation distance from aperture to focus
+default = None
+doc = If ``None`` or ``0`` : No focus propagation
+userlevel = 0
+type = float
+
+[scan.illumination.propagation.antialiasing]
+help = Antialiasing factor
+default = 1
+doc = Antialiasing factor used when generating the probe. (numbers larger than 2 or 3 are memory
+ hungry)
+ **[Untested]**
+userlevel = 2
+type = float
+
+[scan.illumination.propagation.parallel]
+help = Parallel propagation distance
+default = None
+doc = If ``None`` or ``0`` : No parallel propagation
+userlevel = 0
+type = float
+
+[scan.illumination.propagation.spot_size]
+help = Focal spot diameter
+default = None
+doc = If not ``None``, this parameter is used to generate the appropriate aperture size instead
+ of :py:data:`size`
+userlevel = 1
+type = float
+
+[scan.illumination.stxm]
+help = Parameters to initialize illumination from diffraction data
+default = None
+doc =
+userlevel = 1
+type = Param
+
+[scan.illumination.stxm.label]
+help = Scan label of diffraction that is to be used for probe estimate
+default = None
+doc = If ``None``, the scan's own label is used
+userlevel = 1
+type = str
+
+[scan.illumination.aperture]
+help = Beam aperture parameters
+default = None
+doc =
+userlevel = None
+type = Param
+
+[scan.illumination.aperture.form]
+help = One of None, 'rect' or 'circ'
+default = circ
+doc = One of:
+ - ``None`` : no aperture, this may be useful for nearfield
+ - ``'rect'`` : rectangular aperture
+ - ``'circ'`` : circular aperture
+userlevel = 0
+type = str
+
+[scan.illumination.aperture.central_stop]
+help = Size of central stop as a fraction of aperture.size
+default = None
+doc = If not None: places a central beam stop in aperture. The value given here is the fraction
+ of the beam stop compared to `size`
+uplim = 1.0
+userlevel = 1
+type = float
+
+[scan.illumination.aperture.edge]
+help = Edge width of aperture (in pixels!)
+default = 2
+doc =
+userlevel = 2
+type = int
+
+[scan.illumination.aperture.diffuser]
+help = Noise in the transparent part of the aperture
+default = None
+doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+userlevel = 2
+type = float
+
+[scan.illumination.aperture.offset]
+help = Offset between center of aperture and optical axes
+default = 0
+doc = May also be a tuple *(vertical,horizontal)* in case of an asymmetric offset
+userlevel = 2
+type = float, tuple
+
+[scan.illumination.aperture.size]
+help = Aperture width or diameter
+default = None
+doc = May also be a tuple *(vertical,horizontal)* in case of an asymmetric aperture
+userlevel = 0
+type = float
+
+[scan.illumination.model]
+help = Type of illumination model
+default = None
+doc = One of:
+ - ``None`` : model initialization defaults to flat array filled with the specified
+ number of photons
+ - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
+ - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+ - ** : one of ptypy's internal image resource strings
+ - ** : one of the templates in the illumination module
+ In script, you may pass a numpy.ndarray here directly as the model. It is considered as
+ incoming wavefront and will be propagated according to `propagation` with an optional
+ `aperture` applied before
+userlevel = 0
+type = str
+
+[scan.data]
+help = Data preparation parameters
+default = None
+doc =
+userlevel = None
+type = Param
+
+[scan.data.auto_center]
+help = Determines whether the center in the data is calculated automatically
+default = None
+doc = - ``False``, no automatic centering
+ - ``None``, only if :py:data:`center` is ``None``
+ - ``True``, it will be enforced
+userlevel = 0
+type = bool
+
+[scan.data.distance]
+help = Sample-to-detector distance
+default = None
+doc = In meters.
+userlevel = 0
+type = float
+
+[scan.data.chunk_format]
+help = Appendix to saved files if save == 'link'
+default = .chunk%02d
+doc =
+userlevel = 2
+type = str
+
+[scan.data.dfile]
+help = Prepared data file path
+default = None
+doc = If source was ``None`` or ``'file'``, data will be loaded from this file, and both
+ processing and saving are deactivated. If source is the name of an experiment recipe or
+ path to a file, data will be saved to this file
+userlevel = 0
+type = str
+
+[scan.data.center]
+help = Center (pixel) of the optical axes in raw data
+default = None
+doc = If ``None``, this parameter will be set by :py:data:`~.scan.data.auto_center` or elsewhere
+userlevel = 1
+type = tuple
+
+[scan.data.num_frames]
+help = Maximum number of frames to be prepared
+default = None
+doc = If `positions_theory` are provided, num_frames will be overridden with the number of
+ positions available
+userlevel = 1
+type = int
+
+[scan.data.energy]
+help = Photon energy of the incident radiation
+default = None
+doc =
+userlevel = 0
+type = float
+
+[scan.data.recipe]
+help = Data preparation recipe container
+default = None
+doc =
+userlevel = None
+type = ext
+
+[scan.data.positions_theory]
+help = Theoretical positions for this scan
+default = None
+doc = If provided, experimental positions from :any:`PtyScan` subclass will be ignored. If data
+ preparation is called from Ptycho instance, the calculated positions from the
+ :py:func:`ptypy.core.xy.from_pars` dict will be inserted here
+userlevel = 2
+type = ndarray
+
+[scan.data.psize]
+help = Detector pixel size
+default = None
+doc = Dimensions of the detector pixels (in meters)
+userlevel = 0
+type = float, tuple
+
+[scan.data.label]
+help = The scan label
+default = None
+doc = Unique string identifying the scan
+userlevel = 1
+type = str
+
+[scan.data.load_parallel]
+help = Determines what will be loaded in parallel
+default = data
+doc = Choose from ``None``, ``'data'``, ``'common'``, ``'all'``
+userlevel = None
+type = str
+
+[scan.data.source]
+help = Describes where to get the data from.
+default = None
+doc = Accepted values are:
+ - ``'file'``: data will be read from a .ptyd file.
+ - any valid recipe name: data will be prepared using the recipe.
+ - ``'sim'`` : data will be simulated according to parameters in simulation
+userlevel = 0
+type = str
+
+[scan.data.shape]
+help = Shape of the region of interest cropped from the raw data.
+default = None
+doc = Cropping dimension of the diffraction frame
+ Can be None, (dimx, dimy), or dim. In the latter case shape will be (dim, dim).
+userlevel = 1
+type = int, tuple
+
+[scan.data.rebin]
+help = Rebinning factor
+default = None
+doc = Rebinning factor for the raw data frames. ``None`` or ``1`` both mean *no binning*
+lowlim = 1
+uplim = 8
+userlevel = 1
+type = int
+
+[scan.data.experimentID]
+help = Name of the experiment
+default = None
+doc = If None, a default value will be provided by the recipe. **unused**
+userlevel = 2
+type = str
+
+[scan.data.min_frames]
+help = Minimum number of frames loaded by each node
+default = 1
+doc =
+userlevel = 2
+type = int
+
+[scan.data.save]
+help = Saving mode
+default = None
+doc = Mode to use to save data to file.
+ - ``None``: No saving
+ - ``'merge'``: attempts to merge data into a single chunk **[not implemented]**
+ - ``'append'``: appends each chunk in master \*.ptyd file
+ - ``'link'``: appends external links in master \*.ptyd file and stores chunks separately
+ in the path given by the link. Links file paths are relative to master file.
+userlevel = 1
+type = str
+
+[scan.data.orientation]
+help = Data frame orientation
+default = None
+doc = - ``None`` or ``0``: correct orientation
+ - ``1``: invert columns (numpy.fliplr)
+ - ``2``: invert columns, invert rows
+ - ``3``: invert rows (numpy.flipud)
+ - ``4``: transpose (numpy.transpose)
+ - ``4+i``: transpose + other operations from above
+ Alternatively, a 3-tuple of booleans may be provided ``(do_transpose, do_flipud,
+ do_fliplr)``
+userlevel = 1
+type = int, tuple
+
+[io]
+help = Global parameters for I/O
+default = None
+doc =
+userlevel = None
+type = Param
+
+[io.autoplot]
+help = Plotting client parameters
+default = None
+doc = In script you may set this parameter to ``None`` or ``False`` for no automatic plotting.
+userlevel = None
+type = Param
+
+[io.autoplot.threaded]
+help = Live plotting switch
+default = TRUE
+doc = If ``True``, a plotting client will be spawned in a new thread and connected at
+ initialization. If ``False``, the master node will carry out the plotting, pausing the
+ reconstruction. This option should be set to ``True`` when ptypy is run on an isolated
+ workstation.
+userlevel = 1
+type = bool
+
+[io.autoplot.layout]
+help = Options for default plotter or template name
+default = None
+doc = Flexible layout for default plotter is not implemented yet. Please choose one of the
+ templates ``'default'``,``'black_and_white'``,``'nearfield'``, ``'minimal'`` or ``'weak'``
+userlevel = 2
+type = str, Param
+
+[io.autoplot.dump]
+help = Switch to dump plots as image files
+default = FALSE
+doc =
+userlevel = 1
+type = bool
+
+[io.autoplot.imfile]
+help = Plot images file name (or format string)
+default = plots/%(run)s/%(run)s_%(engine)s_%(iterations)04d.png
+doc =
+userlevel = 1
+type = str
+
+[io.autoplot.interval]
+help = Number of iterations between plot updates
+default = 1
+doc = Requests to the server will happen at this iteration interval. Note that this will work
+ only if interaction.polling_interval is smaller than or equal to this number. If
+ ``interval=0``, plotting is disabled; this should be used when ptypy is run on a cluster.
+lowlim = -1
+userlevel = 1
+type = int
+
+[io.autoplot.make_movie]
+help = Produce reconstruction movie after the reconstruction.
+default = FALSE
+doc = Switch to request the production of a movie from the dumped plots at the end of the
+ reconstruction.
+userlevel = 1
+type = bool
+
+[io.home]
+help = Base directory for all I/O
+default = ./
+doc = home is the root directory for all input/output operations. All other path parameters that
+ are relative paths will be relative to this directory.
+userlevel = 1
+type = dir
+
+[io.interaction]
+help = Server / Client parameters
+default = None
+doc = If ``None`` or ``False`` is passed here in script instead of a Param, it translates to
+ ``active=False`` i.e. no ZeroMQ interaction server.
+userlevel = None
+type = Param
+
+[io.interaction.active]
+help = Activation switch
+default = TRUE
+doc = Set to ``False`` for no ZeroMQ interaction server
+userlevel = None
+type = bool
+
+[io.interaction.connections]
+help = Number of concurrent connections on the server
+default = 10
+doc = A range ``[port : port+connections]`` of ports adjacent to :py:data:`~.io.interaction.port`
+ will be opened on demand for connecting clients.
+userlevel = 2
+type = int
+
+[io.interaction.port]
+help = The port the server is listening to.
+default = 5560
+doc = Make sure to pick an unused port with a few unused ports close to it.
+userlevel = 2
+type = int
+
+[io.interaction.address]
+help = The address the server is listening to.
+default = tcp://127.0.0.1
+doc = When running ptypy on a remote server, this is the server's network address.
+userlevel = 2
+type = str
+
+[io.rfile]
+help = Reconstruction file name (or format string)
+default = recons/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr
+doc = Reconstruction file name or format string (constructed against runtime dictionary)
+userlevel = 1
+type = str
+
+[io.autosave]
+help = Auto-save options
+default = None
+doc =
+userlevel = None
+type = Param
+
+[io.autosave.active]
+help = Activation switch
+default = TRUE
+doc = If ``True`` the current reconstruction will be saved at regular intervals. **unused**
+userlevel = 1
+type = bool
+
+[io.autosave.interval]
+help = Auto-save interval
+default = 10
+doc = If ``>0`` the current reconstruction will be saved at regular intervals according to the
+ pattern in :py:data:`paths.autosave`. If ``<=0``, no automatic saving
+lowlim = -1
+userlevel = 1
+type = int
+
+[io.autosave.rfile]
+help = Auto-save file name (or format string)
+default = dumps/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr
+doc = Auto-save file name or format string (constructed against runtime dictionary)
+userlevel = 1
+type = str
+
+[verbose_level]
+help = Verbosity level
+default = 1
+doc = Verbosity level for information logging.
+ - ``0``: Only errors
+ - ``1``: Warning
+ - ``2``: Process Information
+ - ``3``: Object Information
+ - ``4``: Debug
+uplim = 4
+userlevel = 0
+type = int
+
+[scans]
+help = Param container for instances of `scan` parameters
+default = None
+doc = If not specified otherwise, entries in *scans* will use parameter defaults from
+ :py:data:`.scan`
+userlevel = 0
+type = Param
+
+[scans.scan_00]
+help = Default first scans entry
+default = None
+doc = If only a single scan is used in the reconstruction, this entry may be left unchanged. If
+ more than one scan is used, please make an entry for each scan. The name *scan_00* is an
+ arbitrary choice and may be set to any other string.
+userlevel = 0
+type = scan
+
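
Reviewer note (illustrative, not part of the changeset): the parameter description files added in this diff are plain INI text, with indented continuation lines carrying the multi-line ``doc`` values, so they can be inspected with the standard library alone. A minimal sketch, assuming the repository root as working directory:

    # Sketch only: read the parameter tree description added above.
    try:
        from configparser import ConfigParser                       # Python 3
    except ImportError:
        from ConfigParser import SafeConfigParser as ConfigParser   # Python 2.7

    parser = ConfigParser()
    parser.read('ptypy/resources/default_parameters_configparser.txt')

    # raw=True is needed: defaults such as 'recons/%(run)s/...' and
    # '.chunk%02d' contain percent signs that interpolation would try to expand.
    print(parser.get('engine.DM.alpha', 'default', raw=True))
    print(parser.get('scan.geometry', 'doc', raw=True))
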
diff --git a/ptypy/resources/parameter_descriptions.configparser b/ptypy/resources/parameter_descriptions.configparser
new file mode 100644
index 000000000..40980781e
--- /dev/null
+++ b/ptypy/resources/parameter_descriptions.configparser
@@ -0,0 +1,1175 @@
+[verbose_level]
+default = 1
+help = Verbosity level
+doc = Verbosity level for information logging.
+ - ``0``: Only errors
+ - ``1``: Warning
+ - ``2``: Process Information
+ - ``3``: Object Information
+ - ``4``: Debug
+type = int
+userlevel = 0
+lowlim = 0
+uplim = 4
+
+[data_type]
+default = single
+help = Reconstruction floating number precision
+doc = Reconstruction floating number precision (``'single'`` or ``'double'``)
+type = str
+userlevel = 1
+
+[run]
+default = None
+help = Reconstruction identifier
+doc = Reconstruction run identifier. If ``None``, the run name will be constructed at run time
+ from other information.
+type = str
+userlevel = 0
+
+[dry_run]
+default = FALSE
+help = Dry run switch
+doc = Run everything, skipping all memory- and CPU-heavy steps (and file saving).
+ **NOT IMPLEMENTED**
+type = bool
+userlevel = 2
+
+[io]
+default =
+help = Global parameters for I/O
+doc =
+type = Param
+userlevel =
+
+[io.home]
+default = ./
+help = Base directory for all I/O
+doc = home is the root directory for all input/output operations. All other path parameters that
+ are relative paths will be relative to this directory.
+type = dir
+userlevel = 1
+
+[io.rfile]
+default = recons/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr
+help = Reconstruction file name (or format string)
+doc = Reconstruction file name or format string (constructed against runtime dictionary)
+type = str
+userlevel = 1
+
+[io.autosave]
+default =
+help = Auto-save options
+doc =
+type = Param
+userlevel =
+
+[io.autosave.active]
+default = TRUE
+help = Activation switch
+doc = If ``True`` the current reconstruction will be saved at regular intervals. **unused**
+type = bool
+userlevel = 1
+
+[io.autosave.interval]
+default = 10
+help = Auto-save interval
+doc = If ``>0`` the current reconstruction will be saved at regular intervals according to the
+ pattern in :py:data:`paths.autosave`. If ``<=0``, no automatic saving
+type = int
+userlevel = 1
+lowlim = -1
+
+[io.autosave.rfile]
+default = dumps/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr
+help = Auto-save file name (or format string)
+doc = Auto-save file name or format string (constructed against runtime dictionary)
+type = str
+userlevel = 1
+
+[io.interaction]
+default =
+help = Server / Client parameters
+doc = If ``None`` or ``False`` is passed here in script instead of a Param, it translates to
+ ``active=False`` i.e. no ZeroMQ interaction server.
+type = Param
+userlevel =
+
+[io.interaction.active]
+default = TRUE
+help = Activation switch
+doc = Set to ``False`` for no ZeroMQ interaction server
+type = bool
+userlevel =
+
+[io.interaction.address]
+default = tcp://127.0.0.1
+help = The address the server is listening to.
+doc = When running ptypy on a remote server, this is the server's network address.
+type = str
+userlevel = 2
+
+[io.interaction.port]
+default = 5560
+help = The port the server is listening to.
+doc = Make sure to pick an unused port with a few unused ports close to it.
+type = int
+userlevel = 2
+
+[io.interaction.connections]
+default = 10
+help = Number of concurrent connections on the server
+doc = A range ``[port : port+connections]`` of ports adjacent to :py:data:`~.io.interaction.port`
+ will be opened on demand for connecting clients.
+type = int
+userlevel = 2
+
+[io.autoplot]
+default =
+help = Plotting client parameters
+doc = In script you may set this parameter to ``None`` or ``False`` for no automatic plotting.
+type = Param
+userlevel =
+
+[io.autoplot.imfile]
+default = plots/%(run)s/%(run)s_%(engine)s_%(iterations)04d.png
+help = Plot images file name (or format string)
+doc =
+type = str
+userlevel = 1
+
+[io.autoplot.interval]
+default = 1
+help = Number of iterations between plot updates
+doc = Requests to the server will happen at this iteration interval. Note that this will work
+ only if interaction.polling_interval is smaller than or equal to this number. If
+ ``interval=0``, plotting is disabled; this should be used when ptypy is run on a cluster.
+type = int
+userlevel = 1
+lowlim = -1
+
+[io.autoplot.threaded]
+default = TRUE
+help = Live plotting switch
+doc = If ``True``, a plotting client will be spawned in a new thread and connected at
+ initialization. If ``False``, the master node will carry out the plotting, pausing the
+ reconstruction. This option should be set to ``True`` when ptypy is run on an isolated
+ workstation.
+type = bool
+userlevel = 1
+
+[io.autoplot.layout]
+default = None
+help = Options for default plotter or template name
+doc = Flexible layout for default plotter is not implemented yet. Please choose one of the
+ templates ``'default'``,``'black_and_white'``,``'nearfield'``, ``'minimal'`` or ``'weak'``
+type = str, Param
+userlevel = 2
+
+[io.autoplot.dump]
+default = FALSE
+help = Switch to dump plots as image files
+doc =
+type = bool
+userlevel = 1
+
+[io.autoplot.make_movie]
+default = FALSE
+help = Produce reconstruction movie after the reconstruction.
+doc = Switch to request the production of a movie from the dumped plots at the end of the
+ reconstruction.
+type = bool
+userlevel = 1
+
+[scan]
+default =
+help = Scan parameters
+doc = This category specifies defaults for all scans. Scan-specific parameters are stored in
+ scans.scan_%%
+type = Param
+userlevel =
+
+[scan.tags]
+default = None
+help = Comma-separated string tags describing the data input
+doc = [deprecated?]
+type = str
+userlevel = 2
+
+[scan.if_conflict_use_meta]
+default = TRUE
+help = Give priority to metadata relative to input parameters
+doc = [deprecated, use :py:data:`.scan.geometry.precedence` instead]
+type = bool
+userlevel = 2
+
+[scan.data]
+default =
+help = Data preparation parameters
+doc =
+type = Param
+userlevel =
+
+[scan.data.recipe]
+default =
+help = Data preparation recipe container
+doc =
+type = ext
+userlevel =
+
+[scan.data.source]
+default = None
+help = Describes where to get the data from.
+doc = Accepted values are:
+ - ``'file'``: data will be read from a .ptyd file.
+ - any valid recipe name: data will be prepared using the recipe.
+ - ``'sim'`` : data will be simulated according to parameters in simulation
+type = file
+userlevel = 0
+
+[scan.data.dfile]
+default = None
+help = Prepared data file path
+doc = If source was ``None`` or ``'file'``, data will be loaded from this file, and both
+ processing and saving are deactivated. If source is the name of an experiment recipe or
+ path to a file, data will be saved to this file
+type = file
+userlevel = 0
+
+[scan.data.label]
+default = None
+help = The scan label
+doc = Unique string identifying the scan
+type = str
+userlevel = 1
+
+[scan.data.shape]
+default = None
+help = Shape of the region of interest cropped from the raw data.
+doc = Cropping dimension of the diffraction frame
+ Can be None, (dimx, dimy), or dim. In the latter case shape will be (dim, dim).
+type = int, tuple
+userlevel = 1
+
+[scan.data.save]
+default = None
+help = Saving mode
+doc = Mode to use to save data to file.
+ - ``None``: No saving
+ - ``'merge'``: attempts to merge data into a single chunk **[not implemented]**
+ - ``'append'``: appends each chunk in master \*.ptyd file
+ - ``'link'``: appends external links in master \*.ptyd file and stores chunks separately
+ in the path given by the link. Links file paths are relative to master file.
+type = str
+userlevel = 1
+
+[scan.data.center]
+default = None
+help = Center (pixel) of the optical axes in raw data
+doc = If ``None``, this parameter will be set by :py:data:`~.scan.data.auto_center` or elsewhere
+type = tuple
+userlevel = 1
+
+[scan.data.psize]
+default = None
+help = Detector pixel size
+doc = Dimensions of the detector pixels (in meters)
+type = float, tuple
+userlevel = 0
+lowlim = 0
+
+[scan.data.distance]
+default = None
+help = Sample-to-detector distance
+doc = In meters.
+type = float
+userlevel = 0
+lowlim = 0
+
+[scan.data.rebin]
+default = None
+help = Rebinning factor
+doc = Rebinning factor for the raw data frames. ``None`` or ``1`` both mean *no binning*
+type = int
+userlevel = 1
+lowlim = 1
+uplim = 8
+
+[scan.data.orientation]
+default = None
+help = Data frame orientation
+doc = - ``None`` or ``0``: correct orientation
+ - ``1``: invert columns (numpy.fliplr)
+ - ``2``: invert columns, invert rows
+ - ``3``: invert rows (numpy.flipud)
+ - ``4``: transpose (numpy.transpose)
+ - ``4+i``: transpose + other operations from above
+ Alternatively, a 3-tuple of booleans may be provided ``(do_transpose, do_flipud,
+ do_fliplr)``
+type = int, tuple
+userlevel = 1
+
+[scan.data.energy]
+default = None
+help = Photon energy of the incident radiation
+doc =
+type = float
+userlevel = 0
+lowlim = 0
+
+[scan.data.min_frames]
+default = 1
+help = Minimum number of frames loaded by each node
+doc =
+type = int
+userlevel = 2
+
+[scan.data.num_frames]
+default = None
+help = Maximum number of frames to be prepared
+doc = If `positions_theory` are provided, num_frames will be overridden with the number of
+ positions available
+type = int
+userlevel = 1
+
+[scan.data.chunk_format]
+default = .chunk%02d
+help = Appendix to saved files if save == 'link'
+doc =
+type = str
+userlevel = 2
+
+[scan.data.auto_center]
+default = None
+help = Determines whether the center in the data is calculated automatically
+doc = - ``False``, no automatic centering
+ - ``None``, only if :py:data:`center` is ``None``
+ - ``True``, it will be enforced
+type = bool
+userlevel = 0
+
+[scan.data.load_parallel]
+default = data
+help = Determines what will be loaded in parallel
+doc = Choose from ``None``, ``'data'``, ``'common'``, ``'all'``
+type = str
+userlevel =
+
+[scan.data.positions_theory]
+default = None
+help = Theoretical positions for this scan
+doc = If provided, experimental positions from :any:`PtyScan` subclass will be ignored. If data
+ preparation is called from Ptycho instance, the calculated positions from the
+ :py:func:`ptypy.core.xy.from_pars` dict will be inserted here
+type = ndarray
+userlevel = 2
+
+[scan.data.experimentID]
+default = None
+help = Name of the experiment
+doc = If None, a default value will be provided by the recipe. **unused**
+type = str
+userlevel = 2
+
+[scan.sharing]
+default =
+help = Scan sharing options
+doc =
+type = Param
+userlevel =
+
+[scan.sharing.object_share_with]
+default = None
+help = Label or index of scan to share object with.
+doc = Possible values:
+ - ``None``: Do not share
+ - *(string)*: Label of the scan to share with
+ - *(int)*: Index of scan to share with
+type = str
+userlevel = 1
+
+[scan.sharing.object_share_power]
+default = 1
+help = Relative power for object sharing
+doc =
+type = float
+userlevel = 1
+lowlim = 0
+
+[scan.sharing.probe_share_with]
+default = None
+help = Label or index of scan to share probe with.
+doc = Possible values:
+ - ``None``: Do not share
+ - *(string)*: Label of the scan to share with
+ - *(int)*: Index of scan to share with
+type = str
+userlevel = 1
+
+[scan.sharing.probe_share_power]
+default = 1
+help = Relative power for probe sharing
+doc =
+type = float
+userlevel = 1
+lowlim = 0
+
+[scan.geometry]
+default =
+help = Physical parameters
+doc = All distances are in meters. Other units are specified in the documentation strings.
+ These parameters have very low priority in the :any:`Ptycho` construction process and can
+ usually be left out in script if either :py:data:`.scan.data` or the invoked preparation
+ subclass provide enough geometric information. You can change this behavior with the
+ `precedence` parameter.
+type = Param
+userlevel =
+
+[scan.geometry.precedence]
+default = None
+help = Where geometry parameters take precedence over others
+doc = Possible options if parameters are not None:
+ - ``None``: Fill only missing parameters (default) at the very last moment making meta
+ data from :any:`PtyScan` the default source of geometric information.
+ - ``'meta'``: Overwrite meta after data was loaded, does not affect data preparation.
+ - ``'data'``: Overwrite entries in :py:data:`.scan.data`. This affects data preparation
+ too.
+type = str
+userlevel = 2
+
+[scan.geometry.energy]
+default = 6.2
+help = Energy (in keV)
+doc = If ``None``, uses `lam` instead.
+type = float
+userlevel = 0
+lowlim = 0
+
+[scan.geometry.lam]
+default = None
+help = Wavelength
+doc = Used only if `energy` is ``None``
+type = float
+userlevel = 0
+lowlim = 0
+
+[scan.geometry.distance]
+default = 7.19
+help = Distance from object to detector
+doc =
+type = float
+userlevel = 0
+lowlim = 0
+
+[scan.geometry.psize]
+default = 0.000172
+help = Pixel size in Detector plane
+doc =
+type = float
+userlevel = 1
+lowlim = 0
+
+[scan.geometry.resolution]
+default = None
+help = Pixel size in Sample plane
+doc = This parameter is used only for simulations
+type = float
+userlevel = 2
+lowlim = 0
+
+[scan.geometry.propagation]
+default = farfield
+help = Propagation type
+doc = Either "farfield" or "nearfield"
+type = str
+userlevel = 1
+
+[scan.xy]
+default =
+help = Parameters for scan patterns
+doc = These parameters are useful in two cases:
+ - When the experimental positions are not known (no encoders)
+ - When using the package to simulate data.
+ In script an array of shape *(N,2)* may be passed here instead of a Param or dictionary as
+ an **override**
+type = Param
+userlevel =
+
+[scan.xy.spacing]
+default = 1.50E-06
+help = Pattern spacing
+doc = Spacing between scan positions. If the model supports asymmetric scans, a tuple passed
+ here will be interpreted as *(dy,dx)* with *dx* as horizontal spacing and *dy* as vertical
+ spacing. If ``None`` the value is calculated from `extent` and `steps`
+type = float, tuple
+userlevel = 0
+lowlim = 0
+
+[scan.xy.steps]
+default = 10
+help = Pattern step count
+doc = Number of steps with length *spacing* in the grid. A tuple *(ny,nx)* provided here can be
+ used for a different step in vertical ( *ny* ) and horizontal direction ( *nx* ). If
+ ``None``, the step count is calculated from `extent` and `spacing`
+type = int, tuple
+userlevel = 0
+lowlim = 0
+
+[scan.xy.extent]
+default = 1.50E-05
+help = Rectangular extent of pattern
+doc = Defines the absolute maximum extent. If a tuple *(ly,lx)* is provided the extent may be
+ rectangular rather than square. All positions outside of `extent` will be discarded. If
+ ``None`` the extent is `spacing` times `steps`
+type = float, tuple
+userlevel = 0
+lowlim = 0
+
+[scan.xy.offset]
+default = 0
+help = Offset of scan pattern relative to origin
+doc = If tuple, the offset may differ in *x* and *y*. Please note that the offset will be
+ included when removing scan points outside of `extent`.
+type = float, tuple
+userlevel = 2
+
+[scan.xy.jitter]
+default = 0
+help = RMS of jitter on sample position
+doc = **Only use in simulation**. Adds a random jitter to positions.
+type = float, tuple
+userlevel = 2
+
+[scan.xy.count]
+default = None
+help = Number of scan points
+doc = Only return positions up to the number given by `count`.
+type = int
+userlevel = 1
+
+[scan.illumination]
+default =
+help = Illumination model (probe)
+doc = In script, you may directly pass a three-dimensional numpy.ndarray here instead of a
+ `Param`. This array will be copied to the storage instance with no checking whatsoever.
+ Used in `~ptypy.core.illumination`
+type = Param
+userlevel =
+lowlim = 0
+
+[scan.illumination.model]
+default = None
+help = Type of illumination model
+doc = One of:
+ - ``None`` : model initialization defaults to flat array filled with the specified
+ number of photons
+ - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
+ - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+ - ** : one of ptypy's internal image resource strings
+ - ** : one of the templates in the illumination module
+ In script, you may pass a numpy.ndarray here directly as the model. It is considered as
+ incoming wavefront and will be propagated according to `propagation` with an optional
+ `aperture` applied before
+type = str
+userlevel = 0
+
+[scan.illumination.photons]
+default = None
+help = Number of photons in the incident illumination
+doc = A value specified here will take precedence over calculated statistics from the loaded
+ data.
+type = int
+userlevel = 2
+lowlim = 0
+
+[scan.illumination.recon]
+default =
+help = Parameters to load from previous reconstruction
+doc =
+type = Param
+userlevel =
+
+[scan.illumination.recon.rfile]
+default = \*.ptyr
+help = Path to a ``.ptyr`` compatible file
+doc =
+type = file
+userlevel = 0
+
+[scan.illumination.recon.label]
+default = None
+help = Scan label of diffraction that is to be used for probe estimate
+doc = If ``None``, the scan's own label is used
+type = str
+userlevel = 1
+
+[scan.illumination.aperture]
+default =
+help = Beam aperture parameters
+doc =
+type = Param
+userlevel =
+
+[scan.illumination.aperture.form]
+default = circ
+help = One of None, 'rect' or 'circ'
+doc = One of:
+ - ``None`` : no aperture, this may be useful for nearfield
+ - ``'rect'`` : rectangular aperture
+ - ``'circ'`` : circular aperture
+type = str
+userlevel = 0
+lowlim = 0
+
+[scan.illumination.aperture.diffuser]
+default = None
+help = Noise in the transparent part of the aperture
+doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+type = float
+userlevel = 2
+lowlim = 0
+
+[scan.illumination.aperture.size]
+default = None
+help = Aperture width or diameter
+doc = May also be a tuple *(vertical,horizontal)* in case of an asymmetric aperture
+type = float
+userlevel = 0
+lowlim = 0
+
+[scan.illumination.aperture.edge]
+default = 2
+help = Edge width of aperture (in pixels!)
+doc =
+type = int
+userlevel = 2
+lowlim = 0
+
+[scan.illumination.aperture.central_stop]
+default = None
+help = Size of central stop as a fraction of aperture.size
+doc = If not None: places a central beam stop in aperture. The value given here is the fraction
+ of the beam stop compared to `size`
+type = float
+userlevel = 1
+lowlim = 0
+uplim = 1
+
+[scan.illumination.aperture.offset]
+default = 0
+help = Offset between center of aperture and optical axes
+doc = May also be a tuple *(vertical,horizontal)* in case of an asymmetric offset
+type = float, tuple
+userlevel = 2
+
+[scan.illumination.propagation]
+default =
+help = Parameters for propagation after aperture plane
+doc = Propagation to focus takes precedence over parallel propagation if `focussed` is not
+ ``None``
+type = Param
+userlevel =
+
+[scan.illumination.propagation.parallel]
+default = None
+help = Parallel propagation distance
+doc = If ``None`` or ``0`` : No parallel propagation
+type = float
+userlevel = 0
+
+[scan.illumination.propagation.focussed]
+default = None
+help = Propagation distance from aperture to focus
+doc = If ``None`` or ``0`` : No focus propagation
+type = float
+userlevel = 0
+
+[scan.illumination.propagation.antialiasing]
+default = 1
+help = Antialiasing factor
+doc = Antialiasing factor used when generating the probe. (numbers larger than 2 or 3 are memory
+ hungry)
+ **[Untested]**
+type = float
+userlevel = 2
+
+[scan.illumination.propagation.spot_size]
+default = None
+help = Focal spot diameter
+doc = If not ``None``, this parameter is used to generate the appropriate aperture size instead
+ of :py:data:`size`
+type = float
+userlevel = 1
+lowlim = 0
+
+[scan.illumination.diversity]
+default =
+help = Probe mode(s) diversity parameters
+doc = Can be ``None`` i.e. no diversity
+type = Param
+userlevel =
+
+[scan.illumination.diversity.noise]
+default = None
+help = Noise in the generated modes of the illumination
+doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+type = tuple
+userlevel = 1
+
+[scan.illumination.diversity.power]
+default = 0.1
+help = Power of modes relative to main mode (zero-layer)
+doc =
+type = tuple, float
+userlevel = 1
+
+[scan.illumination.diversity.shift]
+default = None
+help = Lateral shift of modes relative to main mode
+doc = **[not implemented]**
+type = float
+userlevel = 2
+
+[scan.sample]
+default =
+help = Initial object modelization parameters
+doc = In script, you may pass a numpy.array here directly as the model. This array will be
+ passed to the storage instance with no checking whatsoever. Used in `~ptypy.core.sample`
+type = Param
+userlevel =
+lowlim = 0
+
+[scan.sample.model]
+default = None
+help = Type of initial object model
+doc = One of:
+ - ``None`` : model initialization defaults to flat array filled with `fill`
+ - ``'recon'`` : load model from a previous reconstruction, see the `recon` parameters
+ - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+ - ** : one of ptypy's internal model resource strings
+ - ** : one of the templates in the sample module
+ In script, you may pass a numpy.array here directly as the model. This array will be
+ processed according to `process` in order to *simulate* a sample from e.g. a thickness
+ profile.
+type = str
+userlevel = 0
+
+[scan.sample.fill]
+default = 1
+help = Default fill value
+doc =
+type = float, complex
+userlevel =
+
+[scan.sample.recon]
+default =
+help = Parameters to load from previous reconstruction
+doc =
+type = Param
+userlevel =
+
+[scan.sample.recon.rfile]
+default = \*.ptyr
+help = Path to a ``.ptyr`` compatible file
+doc =
+type = file
+userlevel = 0
+
+[scan.sample.stxm]
+default =
+help = STXM analysis parameters
+doc =
+type = Param
+userlevel = 1
+
+[scan.sample.stxm.label]
+default = None
+help = Scan label of diffraction that is to be used for probe estimate
+doc = If ``None``, the scan's own label is used
+type = str
+userlevel = 1
+
+[scan.sample.process]
+default = None
+help = Model processing parameters
+doc = Can be ``None``, i.e. no processing
+type = Param
+userlevel =
+
+[scan.sample.process.offset]
+default = (0,0)
+help = Offset between center of object array and scan pattern
+doc =
+type = tuple
+userlevel = 2
+lowlim = 0
+
+[scan.sample.process.zoom]
+default = None
+help = Zoom value for object simulation.
+doc = If ``None``, leave the array untouched. Otherwise the modeled or loaded image will be
+ resized using :py:func:`zoom`.
+type = tuple
+userlevel = 2
+lowlim = 0
+
+[scan.sample.process.formula]
+default = None
+help = Chemical formula
+doc = A formula compatible with a CXRO database query, e.g. ``'Au'`` or ``'NaCl'`` or ``'H2O'``
+type = str
+userlevel = 2
+
+[scan.sample.process.density]
+default = 1
+help = Density in [g/ccm]
+doc = Only used if `formula` is not None
+type = float
+userlevel = 2
+
+[scan.sample.process.thickness]
+default = 1.00E-06
+help = Maximum thickness of sample
+doc = If ``None``, the absolute values of loaded source array will be used
+type = float
+userlevel = 2
+
+[scan.sample.process.ref_index]
+default = 0.5+0.j
+help = Assigned refractive index
+doc = If ``None``, treat source array as projection of refractive index. If a refractive index
+ is provided the array's absolute value will be used to scale the refractive index.
+type = complex
+userlevel = 2
+lowlim = 0
+
+[scan.sample.process.smoothing]
+default = 2
+help = Smoothing scale
+doc = Smooth the projection with a Gaussian kernel of width given by `smoothing_mfs`
+type = int
+userlevel = 2
+lowlim = 0
+
+[scan.sample.diversity]
+default =
+help = Probe mode(s) diversity parameters
+doc = Can be ``None`` i.e. no diversity
+type = Param
+userlevel =
+
+[scan.sample.diversity.noise]
+default = None
+help = Noise in the generated modes of the illumination
+doc = Can be either:
+ - ``None`` : no noise
+ - ``2-tuple`` : noise in phase (amplitude (rms), minimum feature size)
+ - ``4-tuple`` : noise in phase & modulus (rms, mfs, rms_mod, mfs_mod)
+type = tuple
+userlevel = 1
+
+[scan.sample.diversity.power]
+default = 0.1
+help = Power of modes relative to main mode (zero-layer)
+doc =
+type = tuple, float
+userlevel = 1
+
+[scan.sample.diversity.shift]
+default = None
+help = Lateral shift of modes relative to main mode
+doc = **[not implemented]**
+type = float
+userlevel = 2
+
+[scan.coherence]
+default =
+help = Coherence parameters
+doc =
+type = Param
+userlevel =
+lowlim = 0
+
+[scan.coherence.num_probe_modes]
+default = 1
+help = Number of probe modes
+doc =
+type = int
+userlevel = 0
+lowlim = 0
+
+[scan.coherence.num_object_modes]
+default = 1
+help = Number of object modes
+doc =
+type = int
+userlevel = 0
+lowlim = 0
+
+[scan.coherence.spectrum]
+default = None
+help = Amplitudes of the relative energy bins if the probe modes have different energies
+doc =
+type = list
+userlevel = 2
+lowlim = 0
+
+[scan.coherence.object_dispersion]
+default = None
+help = Energy dispersive response of the object
+doc = One of:
+ - ``None`` or ``'achromatic'``: no dispersion
+ - ``'linear'``: linear response model
+ - ``'irregular'``: no assumption
+ **[not implemented]**
+type = str
+userlevel = 2
+
+[scan.coherence.probe_dispersion]
+default = None
+help = Energy dispersive response of the probe
+doc = One of:
+ - ``None`` or ``'achromatic'``: no dispersion
+ - ``'linear'``: linear response model
+ - ``'irregular'``: no assumption
+ **[not implemented]**
+type = str
+userlevel = 2
+
+[scans]
+default =
+help = Param container for instances of `scan` parameters
+doc = If not specified otherwise, entries in *scans* will use parameter defaults from
+ :py:data:`.scan`
+type = Param
+userlevel = 0
+
+[engine]
+default =
+help = Reconstruction engine parameters
+doc =
+type = Param
+userlevel =
+
+[engine.common]
+default =
+help = Parameters common to all engines
+doc =
+type = Param
+userlevel =
+
+[engine.common.name]
+default = DM
+help = Name of engine.
+doc = Depending on the name given here, the default parameter set will be a superset of `common`
+ and of the parameters in the entry of the same name.
+type = str
+userlevel =
+
+[engine.common.numiter]
+default = 20
+help = Total number of iterations
+doc =
+type = int
+userlevel = 0
+lowlim = 0
+
+[engine.common.numiter_contiguous]
+default = 1
+help = Number of iterations without interruption
+doc = The engine will not return control to the caller until this number of iterations is
+ completed (not processing server requests, I/O operations, ...)
+type = int
+userlevel = 2
+lowlim = 0
+
+[engine.common.probe_support]
+default = 0.7
+help = Fraction of valid probe area (circular) in probe frame
+doc =
+type = float
+userlevel = 0
+lowlim = 0
+
+[engine.common.clip_object]
+default = None
+help = Clip object amplitude into this interval
+doc =
+type = tuple
+userlevel = 1
+lowlim = 0
+
+[engine.DM]
+default =
+help = Parameters for Difference map engine
+doc =
+type = Param
+userlevel =
+
+[engine.DM.alpha]
+default = 1
+help = Difference map parameter
+doc =
+type = int
+userlevel = 1
+lowlim = 0
+
+[engine.DM.probe_update_start]
+default = 2
+help = Number of iterations before probe update starts
+doc =
+type = int
+userlevel = 1
+lowlim = 0
+
+[engine.DM.update_object_first]
+default = TRUE
+help = If True, update object before probe
+doc =
+type = bool
+userlevel = 2
+lowlim = 0
+
+[engine.DM.overlap_converge_factor]
+default = 0.05
+help = Threshold for interruption of the inner overlap loop
+doc = The inner overlap loop refines the probe and the object simultaneously. This loop is
+ escaped as soon as the overall change in probe, relative to the first iteration, is less
+ than this value.
+type = float
+userlevel = 2
+lowlim = 0
+
+[engine.DM.overlap_max_iterations]
+default = 10
+help = Maximum of iterations for the overlap constraint inner loop
+doc =
+type = int
+userlevel = 2
+lowlim = 0
+
+[engine.DM.probe_inertia]
+default = 0.001
+help = Weight of the current probe estimate in the update
+doc =
+type = float
+userlevel = 2
+lowlim = 0
+
+[engine.DM.object_inertia]
+default = 0.1
+help = Weight of the current object in the update
+doc =
+type = float
+userlevel = 2
+lowlim = 0
+
+[engine.DM.fourier_relax_factor]
+default = 0.01
+help = If rms error of model vs diffraction data is smaller than this fraction, Fourier
+ constraint is met
+doc = Set this value higher for noisy data
+type = float
+userlevel = 1
+lowlim = 0
+
+[engine.DM.obj_smooth_std]
+default = 20
+help = Gaussian smoothing (pixel) of the current object prior to update
+doc = If None, smoothing is deactivated. This smoothing can be used to reduce the amplitude of
+ spurious pixels in the outer, least constrained areas of the object.
+type = int
+userlevel = 2
+lowlim = 0
+
+[engine.ML]
+default =
+help = Maximum Likelihood parameters
+doc =
+type = Param
+userlevel =
+
+[engine.ML.type]
+default = gaussian
+help = Likelihood model. One of 'gaussian', 'poisson' or 'euclid'
+doc = [only 'gaussian' is implemented for now]
+type = str
+userlevel = 2
+lowlim = 0
+
+[engine.ML.floating_intensities]
+default = False
+help = If True, allow for adaptive rescaling of the diffraction pattern intensities (to correct
+ for incident beam intensity fluctuations).
+doc =
+type = bool
+userlevel = 2
+
+[engine.ML.intensity_renormalization]
+default = 1
+help = A rescaling of the intensities so that they can be interpreted as Poisson counts.
+doc =
+type = float
+userlevel = 0
+lowlim = 0
+
+[engine.ML.reg_del2]
+default = True
+help = Whether to use a Gaussian prior (smoothing) regularizer.
+doc =
+type = bool
+userlevel = 0
+lowlim = 0
+
+[engine.ML.reg_del2_amplitude]
+default = 0.01
+help = Amplitude of the Gaussian prior if used.
+doc =
+type = float
+userlevel = 0
+lowlim = 0
+
+[engine.ML.smooth_gradient]
+default = 0
+help = Smoothing preconditioner. If 0, not used; if > 0, a Gaussian filter; if < 0, a Hann window.
+doc =
+type = float
+userlevel = 1
+lowlim = 0
+
+[engine.ML.scale_precond]
+default = False
+help = Whether to use the object/probe scaling preconditioner.
+doc = This parameter can give faster convergence for weakly scattering samples.
+type = bool
+userlevel = 2
+lowlim = 0
+
+[engine.ML.scale_probe_object]
+default = 1
+help = Relative scale of probe to object.
+doc =
+type = float
+userlevel = 2
+lowlim = 0
+
+[engine.ML.probe_update_start]
+default = 0
+help = Number of iterations before probe update starts
+doc =
+type = int
+userlevel = 1
+
+[engines]
+default =
+help = Container for instances of "engine" parameters
+doc = All engines registered in this structure will be executed sequentially.
+type = Param
+dynamic = True
+
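The [engines] container above is what a user populates at runtime: each child is one engine block, and the blocks are executed in order. A minimal sketch, assuming only the parameter names documented above (the engine00/engine01 labels are arbitrary keys, not fixed names):

    from ptypy import utils as u

    p = u.Param()
    p.engines = u.Param()
    # first engine block: difference map
    p.engines.engine00 = u.Param()
    p.engines.engine00.name = 'DM'
    p.engines.engine00.numiter = 30
    # second engine block: maximum likelihood refinement, run after DM finishes
    p.engines.engine01 = u.Param()
    p.engines.engine01.name = 'ML'
    p.engines.engine01.numiter = 20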
diff --git a/ptypy/simulations/detector.py b/ptypy/simulations/detector.py
index 7e6769724..1d13357c6 100644
--- a/ptypy/simulations/detector.py
+++ b/ptypy/simulations/detector.py
@@ -17,7 +17,7 @@
__all__=['shot','Detector','conv','fill2D']
-DEFAULTS= dict(
+DEFAULT= dict(
sci_psf = None, # (None or float, 2-tuple, array) Parameters for gaussian convolution or convolution kernel after exposure of scintillator
sci_qe = 1, # (float) how many optical photons per x-ray photon
psf = None, # (None or float, 2-tuple, array) Parameters for gaussian convolution or convolution kernel after exposure
@@ -39,9 +39,9 @@
)
TEMPLATES = {}
-TEMPLATES['GenericCCD16bit'] = DEFAULTS.copy()
+TEMPLATES['GenericCCD16bit'] = DEFAULT.copy()
-TEMPLATES['GenericCCD32bit'] = DEFAULTS.copy()
+TEMPLATES['GenericCCD32bit'] = DEFAULT.copy()
TEMPLATES['GenericCCD32bit']['full_well']= 2**32-1
TEMPLATES['GenericCCD32bit']['dtype']=np.uint32
@@ -91,7 +91,7 @@
class Detector(object):
def __init__(self,pars=None):
- self._update(DEFAULTS)
+ self._update(DEFAULT)
if str(pars)==pars:
t = TEMPLATES.get(pars,{})
self._update(t)
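The DEFAULTS-to-DEFAULT rename keeps the template mechanism intact: a string passed to Detector selects a TEMPLATES entry (itself a patched copy of DEFAULT), anything else updates DEFAULT directly. A short sketch of the interface that SimScan relies on below, where filter returns a data/mask pair and dtype is the suggested storage type:

    import numpy as np
    from ptypy.simulations.detector import Detector

    det = Detector('GenericCCD32bit')            # template lookup by name
    photons = np.random.poisson(1000., size=(256, 256)).astype(float)
    counts, mask = det.filter(photons)           # simulated detector response
    counts = counts.astype(det.dtype)            # np.uint32 for the 32-bit template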
diff --git a/ptypy/simulations/simscan.py b/ptypy/simulations/simscan.py
index 0724e7523..6f4fa0328 100644
--- a/ptypy/simulations/simscan.py
+++ b/ptypy/simulations/simscan.py
@@ -16,101 +16,138 @@
from detector import Detector, conv
from ptypy.core.data import PtyScan
from ptypy.core.ptycho import Ptycho
- from ptypy.core.manager import DEFAULT as scan_DEFAULT
+ from ptypy.core.manager import Full as ScanModel
+ from ptypy.core.sample import sample_desc
+ from ptypy.core.illumination import illumination_desc
+ from ptypy.core import xy
+ from ptypy import defaults_tree
else:
from .. import utils as u
from detector import Detector, conv
from ..core.data import PtyScan
from ..core.ptycho import Ptycho
- from ..core.manager import DEFAULT as scan_DEFAULT
+ from ..core.manager import Full as ScanModel
+ from .. import defaults_tree
+ from ..core.sample import sample_desc
+ from ..core.illumination import illumination_desc
+ from ..core import xy
logger = u.verbose.logger
-DEFAULT = u.Param(
- pos_noise = 1e-10, # (float) unformly distributed noise in xy experimental positions
- pos_scale = 0, # (float, list) amplifier for noise. Will be extended to match number of positions. Maybe used to only put nois on individual points
- pos_drift = 0, # (float, list) drift or offset paramter. Noise independent drift. Will be extended like pos_scale.
- detector = 'PILATUS_300K',
- frame_size = None , # (None, or float, 2-tuple) final frame size when saving if None, no cropping/padding happens
- psf = None, # (None or float, 2-tuple, array) Parameters for gaussian convolution or convolution kernel after propagation
- # use it for simulating partial coherence
- verbose_level = 1, # verbose level when simulating
- plot = True,
-)
-
__all__ = ['SimScan']
+defaults_tree['scandata'].add_child(u.descriptor.EvalDescriptor('SimScan'))
+defaults_tree['scandata.SimScan'].add_child(illumination_desc, copy=True)
+defaults_tree['scandata.SimScan'].add_child(sample_desc, copy=True)
+defaults_tree['scandata.SimScan'].add_child(xy.xy_desc, copy=True)
+@defaults_tree.parse_doc('scandata.SimScan')
class SimScan(PtyScan):
"""
Simulates a ptychographic scan and acts as Virtual data source.
- The simulation is carried out in the following way.
- TO BE FILLED WITH CONTENT
+ Defaults:
+
+ [name]
+ default = 'SimScan'
+ type = str
+ help =
+
+ [pos_noise]
+ default = 1e-10
+ type = float
+ help = Uniformly distributed noise in xy experimental positions
+
+ [pos_scale]
+ default = 0.
+ type = float, list
+ help = Amplifier for noise.
+ doc = Will be extended to match the number of positions. May be used to put noise only on individual points.
+
+ [pos_drift]
+ default = 0.
+ type = float, list
+ help = Drift or offset parameter
+ doc = Noise independent drift. Will be extended like pos_scale.
+
+ [detector]
+ default = 'PILATUS_300K'
+ type = str, Param
+ help =
+
+ [frame_size]
+ default =
+ type = float, tuple
+ help = Final frame size when saving
+ doc = If None, no cropping/padding happens.
+
+ [psf]
+ default =
+ type = float, tuple, array
+ help = Parameters for gaussian convolution or convolution kernel after propagation
+ doc = Use it for simulating partial coherence.
+
+ [verbose_level]
+ default = 1
+ type = int
+ help = Verbose level when simulating
+
+ [plot]
+ default = True
+ type = bool
+ help =
+
+ [propagation]
+ default = farfield
+ type = str
+ help = farfield or nearfield
+
"""
- RECIPE = DEFAULT.copy(depth=4)
- def __init__(self, pars = None,scan_pars=None,recipe_pars=None,**kwargs):
+ def __init__(self, pars=None, **kwargs):
"""
Parameters
----------
pars : Param
- PtyScan parameters including a *recipe*. See :any:`data.GENERIC`.
+ PtyScan parameters. See :py:data:`scandata.SimScan`.
- scan_pars : Param or dict
- Parameters for a single scan. Use this especially to set
- positions, illumination and sample.
-
- recipe_pars : Param or dict
- Equivalent alternative to *pars['recipe']*.
"""
- # Initialize parent class
- super(SimScan, self).__init__(pars, **kwargs)
+ p = self.DEFAULT.copy(99)
+ p.update(pars)
+
+ # Initialize parent class
+ super(SimScan, self).__init__(p, **kwargs)
# we will use ptypy to figure out everything
pp = u.Param()
# we don't want a server
- pp.interaction = None
-
- # get scan parameters
- if scan_pars is None:
- pp.scan = scan_DEFAULT.copy(depth =4)
- else:
- pp.scan = scan_pars.copy(depth =4)
-
- # note that shape cannot be None
- if self.info.shape is None:
- self.info.shape = pp.scan.geometry.shape
-
- rinfo = self.RECIPE.copy()
- rinfo.update(self.info.recipe, in_place_depth = 4)
- if recipe_pars is not None:
- rinfo.update(recipe_pars, in_place_depth = 4)
- rinfo.update(kwargs, in_place_depth = 4)
-
- self.rinfo = rinfo
- self.info.recipe = rinfo
+ pp.io = u.Param()
+ pp.io.interaction = None
# be as silent as possible
self.verbose_level = u.verbose.get_level()
- pp.verbose_level = rinfo.verbose_level
-
- # update changes specified in recipe
- pp.scan.update(rinfo, in_place_depth = 4)
+ pp.verbose_level = p.verbose_level
# Create a Scan that will deliver empty diffraction patterns
# FIXME: This may be obsolete if the dry_run switch works.
pp.scans=u.Param()
pp.scans.sim = u.Param()
+ pp.scans.sim.name = 'Full'
+ pp.scans.sim.propagation = self.info.propagation
pp.scans.sim.data=u.Param()
- pp.scans.sim.data.source = 'empty'
- pp.scans.sim.data.shape = pp.scan.geometry.shape
+ pp.scans.sim.data.positions_theory = xy.from_pars(self.info.xy)
+ pp.scans.sim.data.name = 'PtyScan'
+ pp.scans.sim.data.shape = self.info.shape
+ pp.scans.sim.data.psize = self.info.psize
+ pp.scans.sim.data.energy = self.info.energy
+ pp.scans.sim.data.distance = self.info.distance
+ pp.scans.sim.sample = self.info.sample
+ pp.scans.sim.illumination = self.info.illumination
+
pp.scans.sim.data.auto_center = False
- # deactivate sharing since we create a seperate Ptycho instance fro each scan
- pp.scans.sim.sharing = None
# Now we let Ptycho sort out things
logger.info('Generating simulating Ptycho instance for scan `%s`.' % str(self.info.get('label')))
@@ -131,11 +168,11 @@ def __init__(self, pars = None,scan_pars=None,recipe_pars=None,**kwargs):
logger.info('Propagating exit waves.')
for name,pod in P.pods.iteritems():
if not pod.active: continue
- pod.diff += conv(u.abs2(pod.fw(pod.exit)),rinfo.psf)
+ pod.diff += conv(u.abs2(pod.fw(pod.exit)), self.info.psf)
# Simulate detector reponse
- if rinfo.detector is not None:
- Det = Detector(rinfo.detector)
+ if self.info.detector is not None:
+ Det = Detector(self.info.detector)
save_dtype = Det.dtype
acquire = Det.filter
else:
@@ -149,7 +186,7 @@ def __init__(self, pars = None,scan_pars=None,recipe_pars=None,**kwargs):
ID,Sdiff = P.diff.S.items()[0]
- logger.info('Collectiong simulated `raw` data.')
+ logger.info('Collecting simulated `raw` data.')
for view in Sdiff.views:
ind = view.layer
dat, mask = acquire(view.data)
@@ -162,7 +199,7 @@ def __init__(self, pars = None,scan_pars=None,recipe_pars=None,**kwargs):
self.pos[ind] = pos
# plot overview
- if self.rinfo.plot and u.parallel.master:
+ if self.info.plot and u.parallel.master:
logger.info('Plotting simulation overview')
P.plot_overview(200)
u.pause(5.)
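With the recipe layer removed, SimScan now takes one flat parameter set validated against scandata.SimScan. A hedged sketch of the new call pattern; only pos_noise, detector and plot come from the Defaults block above, the rest inherit from the generic PtyScan options, and the exact minimal settings may differ:

    from ptypy import utils as u
    from ptypy.simulations.simscan import SimScan

    p = u.Param()
    p.shape = 64                    # frame shape (generic PtyScan option)
    p.pos_noise = 1e-9              # position jitter, see Defaults above
    p.detector = 'GenericCCD16bit'  # detector template by name
    p.plot = False

    sim = SimScan(p)                # builds a throwaway Ptycho instance and simulates at init
    sim.initialize()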
diff --git a/ptypy/test/core_tests/bragg_scanmodel_test.py b/ptypy/test/core_tests/bragg_scanmodel_test.py
new file mode 100644
index 000000000..aa8f13e71
--- /dev/null
+++ b/ptypy/test/core_tests/bragg_scanmodel_test.py
@@ -0,0 +1,50 @@
+"""
+Tests that the assembly of frames into 3d pods gives the original
+3d diffraction patterns.
+"""
+
+import unittest
+from ptypy.core import Ptycho
+from ptypy import utils as u
+import numpy as np
+import os
+import tempfile
+
+class Bragg3dModelTest(unittest.TestCase):
+ def test_frame_assembly(self):
+ from ptypy.experiment.Bragg3dSim import Bragg3dSimScan
+ # parameter tree
+ outpath = tempfile.mkdtemp(suffix="Bragg3dModelTest")
+ p = u.Param()
+ p.scans = u.Param()
+ p.scans.scan01 = u.Param()
+ p.scans.scan01.name = 'Bragg3dModel'
+ p.scans.scan01.data = u.Param()
+ p.scans.scan01.data.name = 'Bragg3dSimScan'
+ p.scans.scan01.data.dump = os.path.join(outpath, 'tmp.npz')
+ p.scans.scan01.data.shuffle = True
+
+ # simulate and then load data
+ P = Ptycho(p, level=2)
+
+ # load raw simulation data
+ diff = np.load(p.scans.scan01.data.dump)
+
+ # check that the pods reflect the raw data
+ assert len(diff.keys()) == len(P.pods)
+ checked_pods = []
+ for i in range(len(diff.keys())):
+ diff_raw = diff['diff%02d'%i]
+ ok = False
+ for j in range(len(P.pods)):
+ if j in checked_pods:
+ continue
+ diff_pod = P.pods['P%04d'%j].diff
+ if np.allclose(diff_raw, diff_pod):
+ checked_pods.append(j)
+ ok = True
+ break
+ assert ok
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ptypy/test/core_tests/test_classes.py b/ptypy/test/core_tests/classes_test.py
similarity index 98%
rename from ptypy/test/core_tests/test_classes.py
rename to ptypy/test/core_tests/classes_test.py
index 370214980..56ac708fc 100644
--- a/ptypy/test/core_tests/test_classes.py
+++ b/ptypy/test/core_tests/classes_test.py
@@ -181,9 +181,9 @@ def test_post_dict_import(self):
def test_to_dict(self):
"""Extract information from container object and store in a dict"""
- self.assertDictEqual(
- self.basic_base._to_dict(),
- self.basic_base.__dict__,
+ self.assertListEqual(
+ self.basic_base._to_dict().keys(),
+ self.basic_base.__slots__,
'Converting container object information to dictionary failed.'
)
@@ -340,7 +340,7 @@ def setUp(self):
self.basic_container_nst = c.Container()
# Maybe test empty container and also real container
-
+
def test_container_default_constants(self):
"""Default container constants unaltered"""
self.assertEqual(
@@ -555,6 +555,7 @@ def test_report(self):
'all storages in container failed.'
)
+ @unittest.skip('Testing string output is a bit greedy.')
def test_formatted_report(self):
"""Return formatted string and a dict containing the information"""
# Note: can probably be done more gracefully
@@ -621,8 +622,10 @@ def test__setitem__(self):
newdata='more_random_input'
)
+ @unittest.skip('String comparison not useful')
def test_info(self):
"""Return container's total buffer space in bytes and storage info"""
+
self.assertTupleEqual(
self.basic_container_dpt.info(),
(16,
@@ -862,6 +865,7 @@ def test_init(self):
'Assigning of instance attribute model_initialized failed.'
)
+ @unittest.skip("Doesn't work with slotted classes.")
def test_to_dict(self):
"""Extract information from storage object and store in a dict"""
# ToDo: specify Exception --> KeyError
@@ -1158,6 +1162,7 @@ def test_report(self):
msg='Returning storage report as formatted string failed'
)
+ @unittest.skip('Testing string output is a bit greedy.')
def test_formatted_report(self):
"""Return formatted string and a dict containing the information"""
# Test first part of report
@@ -1217,6 +1222,7 @@ def test__setitem__(self):
newdata='more_random_input'
)
+ @unittest.skip('Testing string output is a bit greedy.')
def test__str__(self):
"""Return __str__ of storage"""
self.assertEqual(
@@ -1267,10 +1273,9 @@ def test_init(self):
'View is not Base instance.'
)
- self.assertIsInstance(
- self.basic_view_dpt.pods,
- wr.WeakValueDictionary,
- 'Assigning of attribute pods failed.'
+ self.assertIsNone(
+ self.basic_view_dpt._pods,
+ 'Assigning of instance attribute _pods failed.'
)
self.assertIsNone(
@@ -1296,26 +1301,6 @@ def test_init(self):
'Assigning of instance attribute storageID failed.'
)
- # This is a bit messy as _arint gets updated with _set
- self.assertTrue(
- np.array_equal(
- self.basic_view_dpt._arint,
- np.array([[1, 1], [0, 0], [1, 1], [0, 0]], dtype=np.int)
- ),
- 'Assigning of instance attribute _arint failed.'
- )
-
- # This is a bit messy as _arfloat gets updated with _set
- self.assertTrue(
- np.array_equal(
- self.basic_view_dpt._arfloat,
- np.array(
- [[1, 1], [0., 0.], [0., 0.], [0, 0]], dtype=np.float
- )
- ),
- 'Assigning of instance attribute _arfloat failed.'
- )
-
self.assertEqual(
self.basic_view_dpt.dlayer,
0,
@@ -1348,6 +1333,7 @@ def test_init(self):
'Assigning of instance attribute psize failed.'
)
+ @unittest.skip('Testing string output is a bit greedy.')
def test__str__(self):
"""Return __str__ of view"""
self.assertEqual(
diff --git a/ptypy/test/core_tests/container_test.py b/ptypy/test/core_tests/container_test.py
index 3436bfab6..91c8887b5 100644
--- a/ptypy/test/core_tests/container_test.py
+++ b/ptypy/test/core_tests/container_test.py
@@ -13,7 +13,7 @@ def test_container(self):
def test_container_copy(self):
# make a container with a storage and two views
B = Base()
- C = Container(data_type=float, ptycho=B)
+ C = Container(B,data_type=float)
V1 = View(container=C, shape=10, coord=(0, 0), storageID='S0')
V2 = View(container=C, shape=10, coord=(3, 3), storageID='S0')
C.reformat()
diff --git a/ptypy/test/core_tests/core_test.py b/ptypy/test/core_tests/core_test.py
index 6641283c8..f17f78889 100644
--- a/ptypy/test/core_tests/core_test.py
+++ b/ptypy/test/core_tests/core_test.py
@@ -29,6 +29,7 @@ class CoreTest(unittest.TestCase):
data = S1[V1]
V1.coord = (0.28, 0.28)
S1.update_views(V1)
+ print V1.storageID
mn = S1[V1].mean()
S1.fill_value = mn
S1.reformat()
@@ -36,6 +37,7 @@ class CoreTest(unittest.TestCase):
ar2 = ar.copy()
ar2.coord = (-0.82, -0.82)
V2 = View(C1, ID=None, accessrule=ar2)
+
S1.fill_value = 0.
S1.reformat()
@@ -55,4 +57,4 @@ class CoreTest(unittest.TestCase):
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()
diff --git a/ptypy/test/core_tests/geometry_bragg_test.py b/ptypy/test/core_tests/geometry_bragg_test.py
new file mode 100644
index 000000000..73fcab3c5
--- /dev/null
+++ b/ptypy/test/core_tests/geometry_bragg_test.py
@@ -0,0 +1,44 @@
+'''
+Tests for the Bragg geometry and for views on 3D containers.
+'''
+
+import unittest
+import ptypy.utils as u
+import numpy as np
+from ptypy.core.geometry_bragg import Geo_Bragg
+from ptypy.core import Container, Storage, View
+
+class BraggGeometryTest(unittest.TestCase):
+
+ def testSetup(self):
+ g = Geo_Bragg(
+ psize=(0.005, 13e-6, 13e-6),
+ shape=(9, 128, 128),
+ energy=8.5,
+ distance=2.0,
+ theta_bragg=22.32)
+ # some numerical checks
+ assert np.round(g.dq1) == 279992
+ assert np.round(g.dq2) == 279992
+ assert np.round(g.dq3) == 2855229
+ assert np.allclose(
+ g.resolution,
+ [2.64312974e-07, 1.89516090e-07, 1.75317015e-07], # (dr3, dr1, dr2)
+ atol = 0, rtol = .001)
+
+ def testViews(self):
+ C = Container(data_dims=3)
+ positions = np.array([np.arange(10), np.arange(10), np.arange(10)]).T
+ for pos_ in positions:
+ View(C, storageID='S0', psize=.2, coord=pos_, shape=12)
+ S = C.storages.values()[0]
+ S.reformat()
+ cov = np.array(np.real(S.get_view_coverage()), dtype=int)
+ # some numerical checks
+ assert S.shape == (1, 57, 57, 57)
+ assert cov.max() == 3
+ assert cov.min() == 0
+ assert cov.sum() == 17280
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/core_tests/geometry_test.py b/ptypy/test/core_tests/geometry_test.py
index aa5674f65..445505c0c 100644
--- a/ptypy/test/core_tests/geometry_test.py
+++ b/ptypy/test/core_tests/geometry_test.py
@@ -5,7 +5,11 @@
import unittest
import ptypy.utils as u
import numpy as np
-from ptypy.core import geometry, Base
+from ptypy.core import geometry
+from ptypy.core import Base as theBase
+
+# subclass for dictionary access
+Base = type('Base',(theBase,),{})
class GeometryTest(unittest.TestCase):
@@ -56,4 +60,4 @@ def test_geometry_nearfield_resolution(self):
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()
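The "subclass for dictionary access" one-liner above (repeated in probe_test.py below) compensates for Base becoming a slotted class, which the classes_test.py changes also reflect: a dynamically created subclass without __slots__ regains a __dict__, so tests can attach ad-hoc attributes again. The same trick in isolation:

    class Slotted(object):
        __slots__ = ('ID',)

    # type() builds a subclass with an ordinary __dict__
    Dynamic = type('Dynamic', (Slotted,), {})

    d = Dynamic()
    d.scratch = 42   # fine; would raise AttributeError on a plain Slotted instance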
diff --git a/ptypy/test/core_tests/probe_test.py b/ptypy/test/core_tests/probe_test.py
index 1b222671e..60e9085f2 100644
--- a/ptypy/test/core_tests/probe_test.py
+++ b/ptypy/test/core_tests/probe_test.py
@@ -5,8 +5,12 @@
import unittest
import ptypy.utils as u
import numpy as np
-from ptypy.core import geometry, Base, Container
+from ptypy.core import geometry, Container
from ptypy.resources import moon_pr
+from ptypy.core import Base as theBase
+
+# subclass for dictionary access
+Base = type('Base',(theBase,),{})
def get_P():
P = Base()
@@ -31,13 +35,17 @@ def set_up_probes(P,G):
P.probe = Container(get_P(), 'Cprobe', data_type='complex')
y, x = G.propagator.grids_sam
apert = u.smooth_step(fsize[0]/5-np.sqrt(x**2+y**2), 1e-6)
- pr = P.probe.new_storage(data=-moon_pr(G.shape), psize=G.resolution)
- pr2 = P.probe.new_storage(data=apert, psize=G.resolution)
- pr3 = P.probe.new_storage(shape=G.shape, psize=G.resolution)
+ sh = (1,) + tuple(G.shape)
+ #pr = P.probe.new_storage(shape=sh, psize=G.resolution)
+ #pr.fill(-moon_pr(G.shape))
+ pr2 = P.probe.new_storage(shape=sh, psize=G.resolution)
+ pr2.fill(apert)
+ pr3 = P.probe.new_storage(shape=sh, psize=G.resolution)
y, x = pr3.grids()
apert = u.smooth_step(fsize[0]/5-np.abs(x), 3e-5)*u.smooth_step(fsize[1]/5-np.abs(y), 3e-5)
pr3.fill(apert)
- return [pr, pr2, pr3]
+ #return [pr, pr2, pr3]
+ return [pr2, pr3]
def propagate_probes(G, probes):
propagated_ill = []
@@ -54,7 +62,8 @@ def test_probe(self):
probes = set_up_probes(P,G)
prop_probes = propagate_probes(G,probes)
av = [np.mean(foo) for foo in prop_probes]
- res = np.array([0.006+26.9j, 0.023+23.638j,-0.027+26.908j])
+ #res = np.array([0.006+26.9j, 0.023+23.638j,-0.027+26.908j])
+ res = np.array([0.023+23.638j,-0.027+26.908j])
assert (np.round(np.array(av),3)==res).all(), "Probes are not equal"
if __name__ == '__main__':
diff --git a/ptypy/test/core_tests/xy_test.py b/ptypy/test/core_tests/xy_test.py
index a9a2242b6..38f3f9089 100644
--- a/ptypy/test/core_tests/xy_test.py
+++ b/ptypy/test/core_tests/xy_test.py
@@ -4,9 +4,8 @@
import unittest
-from ptypy.core.xy import from_pars, augment_to_coordlist,spiral_scan,spiral_scan_legacy, round_scan, round_scan_legacy, round_scan_roi_legacy, spiral_scan_roi_legacy,\
- raster_scan, raster_scan_legacy
-
+import numpy as np
+from ptypy.core.xy import from_pars, augment_to_coordlist,spiral_scan, round_scan, raster_scan
class XyTest(unittest.TestCase):
def test_from_pars(self):
@@ -22,6 +21,46 @@ def test_spiral_scan(self):
def test_raster_scan(self):
a = raster_scan()
+ def test_raster_scan_regression(self):
+ NY = 5
+ NX = 6
+ DX = 2.0e-6
+ DY = 1.0e-6
+ expected_result = np.array([[0.0, 0.0],
+ [0.0, 1.e-6],
+ [0.0,2.e-6],
+ [0.0, 3.e-6],
+ [0.0, 4.e-6],
+ [2.e-6, 0.0],
+ [2.e-6, 1.e-6],
+ [2.e-6, 2.e-6],
+ [2.e-6, 3.e-6],
+ [2.e-6, 4.e-6],
+ [4.e-6, 0.0],
+ [4.e-6, 1.e-6],
+ [4.e-6, 2.e-6],
+ [4.e-6, 3.e-6],
+ [4.e-6, 4.e-6],
+ [6.e-6, 0.0],
+ [6.e-6, 1.e-6],
+ [6.e-6, 2.e-6],
+ [6.e-6, 3.e-6],
+ [6.e-6, 4.e-6],
+ [8.e-6, 0.0],
+ [8.e-6, 1.e-6],
+ [8.e-6, 2.e-6],
+ [8.e-6, 3.e-6],
+ [8.e-6, 4.e-6],
+ [1.e-05, 0.0],
+ [1.e-05, 1.e-6],
+ [1.e-05, 2.e-6],
+ [1.e-05, 3.e-6],
+ [1.e-05, 4.e-6]], dtype=np.float64)
+ a = raster_scan(dy=DY, dx=DX, ny=NY, nx=NX)
+ np.testing.assert_equal(a.shape, (NX*NY, 2))
+ np.testing.assert_allclose(a, expected_result)
+
+
def test_round_scan(self):
a = round_scan()
diff --git a/ptypy/test/engine_tests/DM_simple_test.py b/ptypy/test/engine_tests/DM_simple_test.py
index 1bf4b4b43..eb896de33 100644
--- a/ptypy/test/engine_tests/DM_simple_test.py
+++ b/ptypy/test/engine_tests/DM_simple_test.py
@@ -7,7 +7,7 @@
"""
import unittest
-from ptypy.test import test_utils as tu
+from ptypy.test import utils as tu
from ptypy import utils as u
@@ -17,7 +17,6 @@ class DMSimpleTest(unittest.TestCase):
def test_DM_simple(self):
engine_params = u.Param()
engine_params.name = 'DM_simple'
- engine_params.fourier_relax_factor = 0.01
engine_params.alpha = 1.0
tu.EngineTestRunner(engine_params)
if __name__ == "__main__":
diff --git a/ptypy/test/engine_tests/DM_test.py b/ptypy/test/engine_tests/DM_test.py
index 460ddd77d..5c10359f0 100644
--- a/ptypy/test/engine_tests/DM_test.py
+++ b/ptypy/test/engine_tests/DM_test.py
@@ -7,7 +7,7 @@
"""
import unittest
-from ptypy.test import test_utils as tu
+from ptypy.test import utils as tu
from ptypy import utils as u
class DMTest(unittest.TestCase):
diff --git a/ptypy/test/engine_tests/ML_new_test.py b/ptypy/test/engine_tests/ML_old_test.py
similarity index 97%
rename from ptypy/test/engine_tests/ML_new_test.py
rename to ptypy/test/engine_tests/ML_old_test.py
index 1f09f51a8..5c9607bec 100644
--- a/ptypy/test/engine_tests/ML_new_test.py
+++ b/ptypy/test/engine_tests/ML_old_test.py
@@ -7,7 +7,7 @@
"""
import unittest
-from ptypy.test import test_utils as tu
+from ptypy.test import utils as tu
from ptypy import utils as u
class MLNewTest(unittest.TestCase):
diff --git a/ptypy/test/engine_tests/ML_test.py b/ptypy/test/engine_tests/ML_test.py
index 8d592622d..113b59852 100644
--- a/ptypy/test/engine_tests/ML_test.py
+++ b/ptypy/test/engine_tests/ML_test.py
@@ -7,7 +7,7 @@
"""
import unittest
-from ptypy.test import test_utils as tu
+from ptypy.test import utils as tu
from ptypy import utils as u
class MLTest(unittest.TestCase):
@@ -39,7 +39,7 @@ def test_ML_nearfield(self):
engine_params.scale_precond =False
engine_params.probe_update_start = 0
- tu.EngineTestRunner(engine_params,propagator='nearfield')
+ tu.EngineTestRunner(engine_params, propagator='nearfield')
if __name__ == "__main__":
unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/engine_tests/dummy_test.py b/ptypy/test/engine_tests/dummy_test.py
index e0dc41506..7588ac67e 100644
--- a/ptypy/test/engine_tests/dummy_test.py
+++ b/ptypy/test/engine_tests/dummy_test.py
@@ -7,7 +7,7 @@
"""
import unittest
-from ptypy.test import test_utils as tu
+from ptypy.test import utils as tu
from ptypy import utils as u
class DummyTest(unittest.TestCase):
diff --git a/ptypy/test/engine_tests/engine_tests/DM_simple_test.py b/ptypy/test/engine_tests/engine_tests/DM_simple_test.py
deleted file mode 100644
index 0784bab7a..000000000
--- a/ptypy/test/engine_tests/engine_tests/DM_simple_test.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-Test for the DM_simple engine.
-
-This file is part of the PTYPY package.
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-
-import unittest
-from ptypy.test import test_utils as tu
-from ptypy import utils as u
-
-
-
-class DMSimpleTest(unittest.TestCase):
- def test_DM_simple(self):
- engine_params = u.Param()
- engine_params.name = 'DM_simple'
- engine_params.fourier_relax_factor = 0.01
- engine_params.alpha = 1.0
- engine_params.numiter = 5
- tu.EngineTestRunner(engine_params)
-if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/engine_tests/engine_tests/DM_test.py b/ptypy/test/engine_tests/engine_tests/DM_test.py
deleted file mode 100644
index 0515968bd..000000000
--- a/ptypy/test/engine_tests/engine_tests/DM_test.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Test for the DM engine.
-
-This file is part of the PTYPY package.
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-
-import unittest
-from ptypy.test import test_utils as tu
-from ptypy import utils as u
-
-class DMTest(unittest.TestCase):
- def test_DM(self):
- engine_params = u.Param()
- engine_params.name = 'DM'
- engine_params.numiter = 5
- engine_params.alpha =1
- engine_params.probe_update_start = 2
- engine_params.overlap_converge_factor = 0.05
- engine_params.overlap_max_iterations = 10
- engine_params.probe_inertia = 1e-3
- engine_params.object_inertia = 0.1
- engine_params.fourier_relax_factor = 0.01
- engine_params.obj_smooth_std = 20
- tu.EngineTestRunner(engine_params)
-
-
-if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/engine_tests/engine_tests/ML_old_test.py b/ptypy/test/engine_tests/engine_tests/ML_old_test.py
deleted file mode 100644
index bed8ce91d..000000000
--- a/ptypy/test/engine_tests/engine_tests/ML_old_test.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Test for the ML_old engine.
-
-This file is part of the PTYPY package.
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-
-import unittest
-from ptypy.test import test_utils as tu
-from ptypy import utils as u
-
-class MLOldTest(unittest.TestCase):
- @unittest.skip('skip this because it is not supported')
- def test_ML_old(self):
- engine_params = u.Param()
- engine_params.name = 'ML_old'
- engine_params.numiter = 5
- engine_params.ML_type = 'gaussian'
- engine_params.floating_intensities = False
- engine_params.intensity_renormalization = 1.
- engine_params.reg_del2 = False
- engine_params.reg_del2_amplitude = .01
- engine_params.smooth_gradient = 0
- engine_params.scale_precond = False
- engine_params.scale_probe_object = 1.
- tu.EngineTestRunner(engine_params)
-
-if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/engine_tests/engine_tests/ML_test.py b/ptypy/test/engine_tests/engine_tests/ML_test.py
deleted file mode 100644
index c3a40212d..000000000
--- a/ptypy/test/engine_tests/engine_tests/ML_test.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Test for the ML engine.
-
-This file is part of the PTYPY package.
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-
-import unittest
-from ptypy.test import test_utils as tu
-from ptypy import utils as u
-
-class MLTest(unittest.TestCase):
- def test_ML(self):
- engine_params = u.Param()
- engine_params.name = 'ML'
- engine_params.numiter = 5
- engine_params.probe_update_start = 2
- engine_params.floating_intensities = False
- engine_params.intensity_renormalization = 1.0
- engine_params.reg_del2 =True
- engine_params.reg_del2_amplitude = 0.01
- engine_params.smooth_gradient = 0.0
- engine_params.scale_precond =False
- engine_params.probe_update_start = 0
- tu.EngineTestRunner(engine_params)
-
-
-if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/engine_tests/engine_tests/dummy_test.py b/ptypy/test/engine_tests/engine_tests/dummy_test.py
deleted file mode 100644
index 37a3311a4..000000000
--- a/ptypy/test/engine_tests/engine_tests/dummy_test.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-Test for the Dummy engine.
-
-This file is part of the PTYPY package.
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-
-import unittest
-from ptypy.test import test_utils as tu
-from ptypy import utils as u
-
-class DummyTest(unittest.TestCase):
- def test_dummy(self):
- engine_params = u.Param()
- engine_params.name = 'Dummy'
- engine_params.itertime = 2.0
- engine_params.numiter = 5
- tu.EngineTestRunner(engine_params)
-
-if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
diff --git a/ptypy/test/io_tests/__init__.py b/ptypy/test/io_tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ptypy/test/io_tests/file_saving_test.py b/ptypy/test/io_tests/file_saving_test.py
new file mode 100644
index 000000000..aaaf3d358
--- /dev/null
+++ b/ptypy/test/io_tests/file_saving_test.py
@@ -0,0 +1,182 @@
+'''
+Tests whether the framework saves the output files
+'''
+
+import unittest
+import tempfile
+import h5py as h5
+
+from ptypy.test import utils as tu
+import ptypy.utils as u
+
+class FileSavingTest(unittest.TestCase):
+ def test_output_file_saving_rfile_is_None(self):
+ engine_params = u.Param()
+ engine_params.name = 'DM'
+ engine_params.numiter = 5
+ engine_params.alpha =1
+ engine_params.probe_update_start = 2
+ engine_params.overlap_converge_factor = 0.05
+ engine_params.overlap_max_iterations = 10
+ engine_params.probe_inertia = 1e-3
+ engine_params.object_inertia = 0.1
+ engine_params.fourier_relax_factor = 0.01
+ engine_params.obj_smooth_std = 20
+ outpath = tempfile.mkdtemp(prefix='something')
+ PtychoOutput = tu.EngineTestRunner(engine_params,propagator='farfield',output_path=outpath, output_file=None)
+ # TODO: assert that no output file was written; see the directory-scan sketch after this file.
+
+
+ def test_output_file_saving_rfile_set(self):
+ engine_params = u.Param()
+ engine_params.name = 'DM'
+ engine_params.numiter = 5
+ engine_params.alpha =1
+ engine_params.probe_update_start = 2
+ engine_params.overlap_converge_factor = 0.05
+ engine_params.overlap_max_iterations = 10
+ engine_params.probe_inertia = 1e-3
+ engine_params.object_inertia = 0.1
+ engine_params.fourier_relax_factor = 0.01
+ engine_params.obj_smooth_std = 20
+ outpath = tempfile.mkdtemp(prefix='something')
+ PtychoOutput = tu.EngineTestRunner(engine_params,propagator='farfield',output_path=outpath, output_file=outpath+'reconstruction')
+
+ file_path = outpath + 'reconstruction.ptyr'
+
+ expected_nodes = ['/content',
+ '/content/obj',
+ '/content/obj/SMFG00',
+ '/content/obj/SMFG00/data',
+ '/content',
+ '/content/probe',
+ '/content/probe/SMFG00',
+ '/content/probe/SMFG00/data']
+
+
+
+ try:
+ f = h5.File(file_path, 'r')
+ except IOError:
+ self.fail(msg="File does not exist")
+
+ for node in expected_nodes:
+ self.assertTrue(node in f, msg='No entry for %s in this file' % node)
+
+ # and now, explicitly: can we load the data?
+ try:
+ obj = f['/content/obj/SMFG00/data'][0]
+ except KeyError:
+ self.fail(msg="Couldn't load the object data")
+ try:
+ probe = f['/content/probe/SMFG00/data'][0]
+ except KeyError:
+ self.fail(msg="Couldn't load the probe data")
+
+
+ def test_output_file_saving_rfile_set_kind_minimal(self):
+ '''
+ kind can be 'minimal', 'fullflat' or 'dump'.
+ '''
+ engine_params = u.Param()
+ engine_params.name = 'DM'
+ engine_params.numiter = 5
+ engine_params.alpha =1
+ engine_params.probe_update_start = 2
+ engine_params.overlap_converge_factor = 0.05
+ engine_params.overlap_max_iterations = 10
+ engine_params.probe_inertia = 1e-3
+ engine_params.object_inertia = 0.1
+ engine_params.fourier_relax_factor = 0.01
+ engine_params.obj_smooth_std = 20
+ outpath = tempfile.mkdtemp(prefix='something')
+ PtychoOutput = tu.EngineTestRunner(engine_params,propagator='farfield', output_path=outpath, output_file=outpath+'reconstruction')
+
+ file_path = outpath + 'reconstruction.ptyr'
+
+ expected_nodes = ['/content',
+ '/content/obj',
+ '/content/obj/SMFG00',
+ '/content/obj/SMFG00/data',
+ '/content',
+ '/content/probe',
+ '/content/probe/SMFG00',
+ '/content/probe/SMFG00/data']
+
+
+
+ try:
+ f = h5.File(file_path, 'r')
+ except IOError:
+ self.fail(msg="File does not exist")
+
+ for node in expected_nodes:
+ self.assertTrue(node in f, msg='No entry for %s in this file' % node)
+
+ # and now, explicitly: can we load the data?
+ try:
+ obj = f['/content/obj/SMFG00/data'][0]
+ except KeyError:
+ self.fail(msg="Couldn't load the object data")
+ try:
+ probe = f['/content/probe/SMFG00/data'][0]
+ except KeyError:
+ self.fail(msg="Couldn't load the probe data")
+
+
+ def test_output_file_saving_separate_save_run_kind_minimal(self):
+ '''
+ kind can be 'minimal', 'fullflat' or 'dump'.
+ '''
+ engine_params = u.Param()
+ engine_params.name = 'DM'
+ engine_params.numiter = 5
+ engine_params.alpha =1
+ engine_params.probe_update_start = 2
+ engine_params.overlap_converge_factor = 0.05
+ engine_params.overlap_max_iterations = 10
+ engine_params.probe_inertia = 1e-3
+ engine_params.object_inertia = 0.1
+ engine_params.fourier_relax_factor = 0.01
+ engine_params.obj_smooth_std = 20
+ outpath = tempfile.mkdtemp(prefix='something')
+ PtychoOutput = tu.EngineTestRunner(engine_params,propagator='farfield', output_path=outpath, output_file=None)
+ file_path = outpath + 'reconstruction.h5'
+
+ print "now I am saving with save_run"
+ PtychoOutput.save_run(file_path, kind='minimal')
+
+
+
+ # object_string = '/content/obj'
+ # probe_string = '/content/probe'
+ expected_nodes = ['/content',
+ '/content/obj',
+ '/content/obj/SMFG00',
+ '/content/obj/SMFG00/data',
+ '/content',
+ '/content/probe',
+ '/content/probe/SMFG00',
+ '/content/probe/SMFG00/data']
+
+
+
+ try:
+ f = h5.File(file_path, 'r')
+ except IOError:
+ self.fail(msg="File does not exist")
+
+ for node in expected_nodes:
+ self.assertTrue(node in f, msg='No entry for %s in this file' % node)
+
+ # and now, explicitly: can we load the data?
+ try:
+ obj = f['/content/obj/SMFG00/data'][0]
+ except KeyError:
+ self.fail(msg="Couldn't load the object data")
+ try:
+ probe = f['/content/probe/SMFG00/data'][0]
+ except KeyError:
+ self.fail(msg="Couldn't load the probe data")
diff --git a/ptypy/test/io_tests/h5rw_load_test.py b/ptypy/test/io_tests/h5rw_load_test.py
new file mode 100644
index 000000000..bca0ce680
--- /dev/null
+++ b/ptypy/test/io_tests/h5rw_load_test.py
@@ -0,0 +1,140 @@
+'''
+
+This tests that h5rw can read its own data back in; extend these tests when new types are added.
+Since its primary purpose is to read back its own types, we create the test files using h5write. Lazy, but it covers the use case.
+'''
+
+import unittest
+import tempfile
+import shutil
+import h5py as h5
+import ptypy.io as io
+import ptypy.utils as u
+import numpy as np
+import cPickle
+import collections
+
+
+class H5rwLoadTest(unittest.TestCase):
+
+ def setUp(self):
+ io.h5options['UNSUPPORTED'] = 'fail'
+ self.folder = tempfile.mkdtemp(suffix="H5rwTest")
+ self.filepath = self.folder + '/%s.h5'
+
+ def tearDown(self):
+ shutil.rmtree(self.folder)
+
+ # @unittest.skip("Not yet implemented")
+ def test_load_str(self):
+ data = 'test_string'
+ content = {'string data': data}
+ io.h5write(self.filepath % "load_string_test", content=content)
+ # and now read it back in...
+ out = io.h5read(self.filepath % "load_string_test", "content")["content"]
+ np.testing.assert_equal(content["string data"], out["string data"], err_msg="Can't read back in a string that we saved.")
+
+ def test_load_unicode(self):
+ data = u'test_string \xe1'
+ content = {'unicode data': data}
+ io.h5write(self.filepath % "load_unicode_test", content=content)
+ out = io.h5read(self.filepath % "load_unicode_test", "content")["content"]
+ np.testing.assert_equal(content["unicode data"], out["unicode data"], err_msg="Can't read back in a unicode string that we saved.")
+
+ def test_load_dict(self):
+ data = {'Monkey': "apple", "Moon" : 1, "flower": 2.0, 'an array' : np.ones((3,3))}
+ content = {'dict data': data}
+ io.h5write(self.filepath % "load_dict_test", content=content)
+ out = io.h5read(self.filepath % "load_dict_test", "content")["content"]
+ np.testing.assert_equal(content["dict data"], out["dict data"], err_msg="Can't read back in a dict that we saved.")
+
+ def test_load_ordered_dict(self):
+ data = collections.OrderedDict()
+ data['Monkey'] = "apple"
+ data["Moon"] = "1"
+ data["flower"] = 2.0
+ data['an array'] = np.ones((3,3))
+ content = {'ordered dict data': data}
+ io.h5write(self.filepath % "load_ordered_dict_test", content=content)
+ out = io.h5read(self.filepath % "load_ordered_dict_test", "content")["content"]
+ np.testing.assert_equal(content["ordered dict data"], out["ordered dict data"],
+ err_msg="Can't read back in an ordered dict that we saved.")
+
+ def test_load_param(self):
+ data = u.Param()
+ data.monkey = "apple"
+ data.moon = u.Param()
+ data.moon.flower = 2.0
+
+ content = {'param data': data}
+ io.h5write(self.filepath % "load_param_test", content=content)
+ out = io.h5read(self.filepath % "load_param_test", "content")["content"]
+ np.testing.assert_equal(content["param data"], out["param data"],
+ err_msg="Can't read back in a param that we saved.")
+
+ def test_load_list(self):
+ data = ['monkey', 'apple', 1.0, 'moon']
+ content = {'list data': data}
+ io.h5write(self.filepath % "load_list_test", content=content)
+ out = io.h5read(self.filepath % "load_list_test", "content")["content"]
+ np.testing.assert_equal(content["list data"], out["list data"],
+ err_msg="Can't read back in a list that we saved.")
+
+ def test_load_tuple(self):
+ data = ('monkey', 'apple', (1.0, u'moon'))
+ content = {'tuple data': data}
+ io.h5write(self.filepath % "load_tuple_test", content=content)
+ out = io.h5read(self.filepath % "load_tuple_test", "content")["content"]
+ np.testing.assert_equal(content["tuple data"], out["tuple data"],
+ err_msg="Can't read back in a tuple that we saved.")
+
+ def test_load_ndarray(self):
+ data = np.array([[1 ,2 ,3],[4, 5, 6], [7, 8, 9]])
+ content = {'array data': data}
+ io.h5write(self.filepath % "load_array_test", content=content)
+ out = io.h5read(self.filepath % "load_array_test", "content")["content"]
+ np.testing.assert_array_equal(content["array data"], out["array data"],
+ err_msg="Can't read back in an array that we saved.")
+
+ def test_load_numpy_record_array(self):
+ data = np.recarray((8,), dtype=[('ID', '<i8')])
+ content = {'recarray data': data}
+ io.h5write(self.filepath % "load_recarray_test", content=content)
+ out = io.h5read(self.filepath % "load_recarray_test", "content")["content"]
+ np.testing.assert_array_equal(content["recarray data"], out["recarray data"],
+ err_msg="Can't read back in a record array that we saved.")
diff --git a/ptypy/test/test_utils.py b/ptypy/test/utils.py
--- a/ptypy/test/test_utils.py
+++ b/ptypy/test/utils.py
-"""
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
- :platform: Unix
- :synopsis: utilities for the test framework
-"""
-import inspect
-import shutil
-import os
-import tempfile
-from .. import utils as u
-from ptypy.core import Ptycho
-
-
-def get_test_data_path(name):
- path = inspect.stack()[0][1]
- return '/'.join(os.path.split(path)[0].split(os.sep)[:-2] +
- ['test_data/', name,'/'])
-
-
-def PtyscanTestRunner(ptyscan_instance,r=u.Param(),data=u.Param(),save_type='append', auto_frames=20, ncalls=1, cleanup=True):
- u.verbose.set_level(3)
- out_dict = {}
- outdir = tempfile.mkdtemp()
- data.recipe = r
- data.dfile = '%s/prep.ptyd' % outdir
- out_dict['output_file'] = data.dfile
- data.save = save_type
- a = ptyscan_instance(data)
- a.initialize()
- out_dict['msgs'] = []
- i=0
- while i<ncalls:
- out_dict['msgs'].append(a.auto(auto_frames))
- i+=1
- if cleanup:
- shutil.rmtree(outdir)
- return out_dict
+"""
+ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :license: GPLv2, see LICENSE for details.
+ :platform: Unix
+ :synopsis: utilities for the test framework
+"""
+import inspect
+import shutil
+import os
+import tempfile
+from .. import utils as u
+from ..core import Ptycho
+
+
+def get_test_data_path(name):
+ path = inspect.stack()[0][1]
+ return '/'.join(os.path.split(path)[0].split(os.sep)[:-2] +
+ ['test_data/', name,'/'])
+
+
+def PtyscanTestRunner(ptyscan_instance, data_params, save_type='append', auto_frames=20, ncalls=1, cleanup=True):
+ u.verbose.set_level(3)
+ out_dict = {}
+ outdir = tempfile.mkdtemp()
+ data_params.dfile = '%s/prep.h5' % outdir
+ out_dict['output_file'] = data_params.dfile
+ data_params.save = save_type
+ a = ptyscan_instance(data_params)
+ a.initialize()
+ out_dict['msgs'] = []
+ i=0
+ while i<ncalls:
+ out_dict['msgs'].append(a.auto(auto_frames))
+ i+=1
+ if cleanup:
+ shutil.rmtree(outdir)
+ return out_dict
diff --git a/ptypy/utils/array_utils.py b/ptypy/utils/array_utils.py
--- a/ptypy/utils/array_utils.py
+++ b/ptypy/utils/array_utils.py
+ It wraps the `scipy.ndimage.zoom <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html>`_ function and shares
+ the same input arguments.
+
+ It performs a complex overload if the input is complex.
+
+ Parameters
+ ----------
+ c : numpy.ndarray
+ Array to shiftzoom. Can be float or complex
- See also
- --------
- c_zoom
+ Returns
+ -------
+ numpy.ndarray
+ Zoomed array
"""
+
+ from scipy.ndimage import zoom as _zoom
+
if np.iscomplexobj(c):
- return c_zoom(c,*arg,**kwargs)
+ return complex_overload(_zoom)(c,*arg,**kwargs)
else:
- return ndi.zoom(c,*arg,**kwargs)
+ return _zoom(c,*arg,**kwargs)
-c_zoom = complex_overload(ndi.zoom)
-c_zoom.__doc__='*complex input*\n\n'+c_zoom.__doc__
+c_zoom = zoom
+c_zoom.__doc__='*Deprecated*, kept for backward compatibility only.\n\n' + zoom.__doc__
+"""
c_affine_transform=complex_overload(ndi.affine_transform)
c_affine_transform.__doc__='*complex input*\n\n'+c_affine_transform.__doc__
+"""
def shift_zoom(c,zoom,cen_old,cen_new,**kwargs):
"""\
Move array from center `cen_old` to `cen_new` and perform a zoom `zoom`.
+
+ This function wraps `scipy.ndimage.affine_transform <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html>`_ and
+ uses the same keyword arguments.
- This function wraps scipy.ndimage.affine_transfrom or the complex
- overloaded version :any:`c_affine_transform`, which the user is
- refered to for keyword args.
+ Additionally, it allows for complex input and output by complex overloading, see
+ :any:`complex_overload`\ .
Parameters
----------
- c : ndarray
+ c : numpy.ndarray
Array to shiftzoom. Can be float or complex
zoom : float
@@ -317,20 +353,19 @@ def shift_zoom(c,zoom,cen_old,cen_new,**kwargs):
cen_new : array_like
Desired new center position in shiftzoomed array
- See also
- --------
- c_affine_transform
-
Returns
-------
- Shifted and zoomed array
+ numpy.ndarray
+ Shifted and zoomed array
"""
+
+ from scipy.ndimage import affine_transform as at
zoom = np.diag(zoom)
offset=np.asarray(cen_old)-np.asarray(cen_new).dot(zoom)
if np.iscomplexobj(c):
- return c_affine_transform(c,zoom,offset,**kwargs)
+ return complex_overload(at)(c,zoom,offset,**kwargs)
else:
- return ndi.affine_transform(c,zoom,offset,**kwargs)
+ return at(c,zoom,offset,**kwargs)
def fill3D(A,B,offset=[0,0,0]):
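The rewrite above makes zoom handle complex arrays transparently through complex_overload instead of requiring the separate c_zoom entry point. A quick check, assuming zoom is re-exported through ptypy.utils like the other array helpers:

    import numpy as np
    from ptypy.utils import zoom

    a = np.exp(1j * np.linspace(0, np.pi, 16)).reshape(4, 4)
    b = zoom(a, 2.0)          # complex input takes the complex_overload path
    assert b.shape == (8, 8)
    assert np.iscomplexobj(b)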
diff --git a/ptypy/utils/citations.py b/ptypy/utils/citations.py
new file mode 100644
index 000000000..fe4638624
--- /dev/null
+++ b/ptypy/utils/citations.py
@@ -0,0 +1,49 @@
+"""
+This module provides basic bibliography management.
+
+This file is part of the PTYPY package.
+
+ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :license: GPLv2, see LICENSE for details.
+"""
+
+class JournalArticle(object):
+ def __init__(self, title, author, journal, volume, year, page=None, doi=None):
+ self.title = title
+ self.author = author
+ self.journal = journal
+ self.volume = volume
+ self.year = year
+ self.page = page
+ self.doi = doi
+
+ def __str__(self):
+ out = ('%s, "%s" %s %s (%s)'
+ % (self.author, self.title, self.journal, self.volume, str(self.year)))
+ if self.page is not None:
+ out += ' %s' % str(self.page)
+ if self.doi is not None:
+ out += ', doi: %s.' % self.doi
+ else:
+ out += '.'
+ return out
+
+class Bibliography(object):
+ def __init__(self):
+ self.entries = []
+
+ def add_article(self, comment, *args, **kwargs):
+ # avoid double entries
+ for entry in self.entries:
+ if entry['comment'] == comment: return None
+
+ self.entries.append({
+ 'comment': comment,
+ 'citation': JournalArticle(*args, **kwargs)
+ })
+
+ def __str__(self):
+ out = []
+ for entry in self.entries:
+ out.append(entry['comment'] + ':\n ' + str(entry['citation']))
+ return '\n'.join(out)
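Usage of the new bibliography helper is straightforward; note that the comment string doubles as the deduplication key in add_article. For example, using a standard ptychography reference:

    from ptypy.utils.citations import Bibliography

    bib = Bibliography()
    bib.add_article(
        'The difference map reconstruction algorithm',
        title='Probe retrieval in ptychographic coherent diffractive imaging',
        author='P. Thibault et al.',
        journal='Ultramicroscopy',
        volume=109,
        year=2009,
        page=338,
        doi='10.1016/j.ultramic.2008.12.011')
    print(bib)   # comment on one line, formatted citation indented on the next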
diff --git a/ptypy/utils/descriptor.py b/ptypy/utils/descriptor.py
new file mode 100644
index 000000000..37aad58b4
--- /dev/null
+++ b/ptypy/utils/descriptor.py
@@ -0,0 +1,1221 @@
+# -*- coding: utf-8 -*-
+"""\
+Parameter description and validation. This module defines the
+:py:class:`Descriptor` classes that store parameter defaults,
+documentation and validation rules for |ptypy| in a tree structure,
+together with loaders and savers for CSV and ConfigParser-style
+representations (JSON is not implemented yet).
+
+This file is part of the PTYPY package.
+
+ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
+ :license: GPLv2, see LICENSE for details.
+"""
+
+import ast
+from collections import OrderedDict
+import textwrap
+from copy import deepcopy
+
+from .parameters import Param
+
+
+__all__ = ['Descriptor', 'ArgParseDescriptor', 'EvalDescriptor']
+
+
+class CODES:
+ PASS = 1
+ FAIL = 0
+ UNKNOWN = 2
+ MISSING = 3
+ INVALID = 4
+
+# Inverse lookup for message codes
+CODE_LABEL = dict((v, k) for k, v in CODES.__dict__.items())
+
+
+class Descriptor(object):
+ """
+ Base class for parameter descriptions and validation. This class is used to
+ hold both command line arguments and Param-type parameter descriptions.
+
+ Attributes
+ ----------
+
+ OPTIONS_DEF :
+ A dictionary whose keys are attribute names and values are descriptions
+ of the attribute. If this description contains the text "required" or
+ "mandatory", the attribute is registered as required.
+
+ """
+
+ OPTIONS_DEF = None
+
+ def __init__(self, name, parent=None, separator='.'):
+ """
+
+ Parameters
+ ----------
+
+ name : str
+ The name of the parameter represented by this instance
+
+ parent : Descriptor or None
+ Parent parameter or None if no parent parameter.
+
+ separator : str
+ Subtree separator character. Defaults to '.'.
+
+ """
+
+ #: Name of parameter
+ self.name = name
+
+ #: Parent parameter (:py:class:`Descriptor` type) if it has one.
+ self.parent = parent
+
+ #: Hierarchical tree of sub-Parameters.
+ self.children = OrderedDict()
+
+ self.separator = separator
+
+ # Required and optional attributes
+ self.required = []
+ self.optional = []
+ self.options_def = OrderedDict()
+ self._parse_options_def()
+
+ self.num_id = 0
+
+ #: Attributes to the parameters.
+ self.options = dict.fromkeys(self.required, '')
+
+ self._all_options = {}
+
+ self.implicit = False
+
+ @property
+ def option_keys(self):
+ return self._all_options.keys()
+
+ @property
+ def is_child(self):
+ """
+ Type check
+ """
+ return type(self.parent) is self.__class__
+
+ def _parse_options_def(self):
+ """
+ Parse and store options definitions.
+ """
+ if self.OPTIONS_DEF is not None:
+ r = []
+ o = []
+
+ for option, text in self.OPTIONS_DEF.items():
+ if 'required' in text or 'mandatory' in text:
+ r += [option]
+ else:
+ o += [option]
+ self.required = r
+ self.optional = o
+
+ def new_child(self, name, options=None, implicit=False):
+ """
+ Create a new descendant and pass new options.
+
+ If name contains separators, intermediate children are created.
+
+ If name already exists, update options and return existing child.
+
+ If name already exists and had been created implicitly to create
+ a child further down, the order in self.children is corrected to
+ honor the order of explicitly created children.
+ This behaviour can be deactivated by setting implicit=True.
+ """
+ if options is None:
+ options = self.options
+
+ if self.separator in name:
+ # Creating a sub-level
+ name, next_name = name.split(self.separator, 1)
+ subparent = self.children.get(name, None)
+ if subparent is None:
+ # Create subparent
+ subparent = self.__class__(name=name, parent=self, separator=self.separator)
+
+ # Remember that creation was implicit
+ subparent.implicit = True
+
+ # Insert in children dict
+ self.children[name] = subparent
+ child = subparent.new_child(next_name, options, implicit)
+ self._all_options.update(subparent.options)
+ else:
+ if name in self.children.keys():
+ # The child already exists
+ child = self.children[name]
+ if child.implicit and not implicit:
+ # Tricky bit: this child had already been created implicitly, but now is being created
+ # explicitly. We use this to enforce a proper order in self.children
+ self.children.pop(name)
+ child.implicit = False
+
+ explicit = [(k, v) for k, v in self.children.items() if not v.implicit]
+ implicit = [(k, v) for k, v in self.children.items() if v.implicit]
+ self.children = OrderedDict(explicit + [(name, child)] + implicit)
+ else:
+ child = self.__class__(name=name, parent=self, separator=self.separator)
+ self.children[name] = child
+ child.implicit = implicit
+ child._store_options(options)
+ self._all_options.update(child.options)
+
+ return child
+
+ def _store_options(self, dct):
+ """
+ Read and store options and check that the minimum selections
+ of options is present.
+ """
+
+ if self.required is not None and type(self.required) is list:
+ missing = [r for r in self.required if r not in dct.keys()]
+ if missing:
+ raise ValueError('Missing required option(s) <%s> for parameter %s.' % (', '.join(missing), self.name))
+
+ self.options.update(dct)
+
+ def _find(self, name):
+ """
+ Walk the tree and return the first encountered element called "name", None if none is found.
+ :param name:
+ :return:
+ """
+ root = None
+ for k, d in self.descendants:
+ if k.endswith(name):
+ root = d.parent
+ break
+ return root
+
+ def __getitem__(self, name):
+ """
+ Get a descendant
+ """
+ if self.separator in name:
+ root, name = name.split(self.separator, 1)
+ # dot-prefix as a way to search through the tree deactivated for now.
+ # if not root:
+ # parent = self._find(name.split(self.separator)[0])
+ # else:
+ # parent = self.children[root]
+ # return parent[name]
+ return self.children[root][name]
+ else:
+ return self.children[name]
+
+ def __setitem__(self, name, desc):
+ """
+ Insert a descendant
+ """
+ if self.separator not in name:
+ if name != desc.name:
+ raise RuntimeError("Descendant '%s' being inserted in '%s' as '%s'." % (desc.name, self.path, name))
+ self.children[name] = desc
+ desc.parent = self
+ self._all_options.update(desc.options)
+ else:
+ root, name = name.split(self.separator, 1)
+ # dot-prefix as a way to search the tree is deactivated for now.
+ # if not root:
+ # subparent = self._find(name.split(self.separator)[0])
+ # if subparent is None:
+ # raise RuntimeError('No attachment point for .%s found.' % name)
+ # else:
+ # subparent = self.children.get(root, None)
+ # if not subparent:
+ # subparent = self.new_child(root)
+ # subparent[name] = desc
+ subparent = self.children.get(root, None)
+ if not subparent:
+ subparent = self.new_child(root)
+ subparent[name] = desc
+
+ def get(self, path):
+ """
+ return self.root[path] if it exists, None otherwise.
+ """
+ try:
+ link = self.root[path]
+ return link
+ except (KeyError, TypeError) as e:
+ return None
+
+ def add_child(self, desc, copy=False):
+ if copy:
+ desc = deepcopy(desc)
+ self[desc.name] = desc
+
+ def prune_child(self, name):
+ """
+ Remove and return the parameter "name" and all its children.
+ :param name: The descendant name
+ :return: The Parameter object.
+ """
+ # Use __getitem__ to take care of all naming syntaxes
+ desc = self[name]
+
+ # Pop out from parent
+ desc.parent.children.pop(desc.name)
+
+ # Make standalone
+ desc.parent = None
+
+ return desc
+
+ @property
+ def root(self):
+ """
+ Return root of parameter tree.
+ """
+ if self.parent is None:
+ return self
+ else:
+ return self.parent.root
+
+ @property
+ def path(self):
+ """
+ Return complete path from root of parameter tree.
+ (self.root[self.path] == self should always be True unless self.root is root)
+ """
+ if self.parent is None:
+ # The base node has no path
+ return None
+ elif self.parent.parent is None:
+ return self.name
+ else:
+ return self.parent.path + self.separator + self.name
+
+ @property
+ def descendants(self):
+ """
+ Iterator over all descendants as a pair (path name, object).
+ """
+ for k, v in self.children.items():
+ yield (k, v)
+ for d, v1 in v.descendants:
+ yield (k + self.separator + d, v1)
+
+ def load_csv(self, fbuffer, **kwargs):
+ """
+ Load from csv as a fielded array. Keyword arguments are passed
+ on to csv.DictReader
+ """
+ from csv import DictReader
+ CD = DictReader(fbuffer, **kwargs)
+
+ if 'level' in CD.fieldnames:
+ chain = []
+
+ # old style CSV, name + level sets the path
+ for num, dct in enumerate(list(CD)):
+
+ # Get parameter name and level in the hierarchy
+ level = int(dct.pop('level'))
+ name = dct.pop('name')
+
+ # translations
+ dct['help'] = dct.pop('shortdoc')
+ dct['doc'] = dct.pop('longdoc')
+ if dct.pop('static').lower() != 'yes':
+ continue
+
+ if level == 0:
+ chain = [name]
+ else:
+ chain = chain[:level] + [name]
+
+ name = self.separator.join(chain)
+ desc = self.new_child(name, options=dct)
+ else:
+ # new style csv, name and path are synonymous
+ for dct in list(CD):
+ name = dct['path']
+ desc = self.new_child(name, options=dct)
+
+ def save_csv(self, fbuffer, **kwargs):
+ """
+ Save to fbuffer. Keyword arguments are passed
+ on to csv.DictWriter
+ """
+ from csv import DictWriter
+
+ fieldnames = self.required + self.optional
+ fieldnames += [k for k in self._all_options.keys() if k not in fieldnames]
+
+ DW = DictWriter(fbuffer, ['path'] + fieldnames)
+ DW.writeheader()
+ for key, desc in self.descendants:
+ dct = {'path': key}
+ dct.update(desc.options)
+ DW.writerow(dct)
+
+ def load_json(self, fbuffer):
+
+ raise NotImplementedError
+
+ def save_json(self, fbuffer):
+
+ raise NotImplementedError
+
+ def load_conf_parser(self, fbuffer, **kwargs):
+ """
+ Load Parameter defaults using Python's ConfigParser
+
+ Each parameter occupies its own section.
+ Separator characters in sections names map to a tree-hierarchy.
+
+ Keyword arguments are forwarded to `ConfigParser.RawConfigParser`
+ """
+ from ConfigParser import RawConfigParser as Parser
+ #kwargs['empty_lines_in_values'] = True # This will only work in Python3
+ parser = Parser(**kwargs)
+ parser.readfp(fbuffer)
+ for num, sec in enumerate(parser.sections()):
+ desc = self.new_child(name=sec, options=dict(parser.items(sec)))
+
+ return parser
+
+ def from_string(self, s, **kwargs):
+ """
+ Load Parameter from string using Python's ConfigParser
+
+ Each parameter occupies its own section.
+ Separator characters in sections names map to a tree-hierarchy.
+
+ Keyword arguments are forwarded to `ConfigParser.RawConfigParser`
+ """
+ from StringIO import StringIO
+ s = textwrap.dedent(s)
+ return self.load_conf_parser(StringIO(s), **kwargs)
+
+ def save_conf_parser(self, fbuffer, print_optional=True):
+ """
+ Save Parameter defaults using Pythons ConfigParser
+
+ Each parameter occupies its own section.
+ Separator characters in sections names map to a tree-hierarchy.
+ """
+ from ConfigParser import RawConfigParser as Parser
+ parser = Parser()
+ for name, desc in self.descendants:
+ parser.add_section(name)
+ for k, v in desc.options.items():
+ if (v or print_optional) or (k in self.required):
+ parser.set(name, k, v)
+
+ parser.write(fbuffer)
+ return parser
+
+ def to_string(self):
+ """
+        Return the full content of the descriptor as a string in ConfigParser format.
+ """
+ import StringIO
+ s = StringIO.StringIO()
+ self.save_conf_parser(s)
+ return s.getvalue().strip()
+
+ def __str__(self):
+ """
+ Pretty-print the Parameter options in ConfigParser format.
+ """
+ from ConfigParser import RawConfigParser as Parser
+ import StringIO
+ parser = Parser()
+ parser.add_section(self.name)
+ for k, v in self.options.items():
+ parser.set(self.name, k, v)
+ s = StringIO.StringIO()
+ parser.write(s)
+ return s.getvalue().strip()
+
+
+class ArgParseDescriptor(Descriptor):
+ OPTIONS_DEF = OrderedDict([
+ ('default', 'Default value for parameter.'),
+ ('help', 'A small docstring for command line parsing (required).'),
+ ('choices', 'If parameter is list of choices, these are listed here.')
+ ])
+
+ @property
+ def help(self):
+ """
+ Short descriptive explanation of parameter
+ """
+ return self.options.get('help', '')
+
+ @property
+ def default(self):
+ """
+ Returns default as a Python type
+ """
+ default = str(self.options.get('default', ''))
+
+ if not default:
+ return None
+ else:
+ return self.eval(default)
+
+ @default.setter
+ def default(self, val):
+ """
+        Set default, ensuring that it is stored as a string.
+ """
+ if val is None:
+ self.options['default'] = ''
+ elif str(val) == val:
+ self.options['default'] = "'%s'" % val
+ else:
+ self.options['default'] = str(val)
+
+ def eval(self, val):
+ """
+ A more verbose wrapper around `ast.literal_eval`
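+
+        Example: ``self.eval('(1, 2)')`` returns the tuple ``(1, 2)``.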
+ """
+ try:
+ return ast.literal_eval(val)
+        # note: `except ValueError or SyntaxError` would only ever catch
+        # ValueError, so the two exception types are handled separately
+        except ValueError as e:
+            msg = e.args[0] + ". Could not read %s for parameter %s" % (val, self.path)
+            raise ValueError(msg)
+        except SyntaxError as e:
+            msg = e.args[0] + ". Could not read %s for parameter %s" % (val, self.path)
+            raise SyntaxError(msg)
+
+ @property
+ def choices(self):
+ """
+ If parameter is a list of choices, these are listed here.
+ """
+ # choices is an evaluable list
+ c = self.options.get('choices', '')
+ if str(c) == '':
+ c = None
+ else:
+ try:
+ c = ast.literal_eval(c.strip())
+            # an exception *instance* in an except clause never matches the
+            # raised type; catch the exception classes instead
+            except (ValueError, SyntaxError):
+ c = None
+
+ return c
+
+ def _get_type_argparse(self):
+ """
+        Returns the type or callable that argparse uses for
+        reading command-line arguments.
+ """
+ return type(self.default)
+
+ def add2argparser(self, parser=None, prefix='', excludes=('scans', 'engines'), mode='add'):
+ """
+        Add parameter tree to an argparse.ArgumentParser instance (or create
+        and return one if `parser` is None). `prefix` is prepended to the
+        titles of argument groups.
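+
+        Example (hypothetical sketch; the flag name assumes a descendant
+        called ``verbose_level``)::
+
+            parser = desc.add2argparser()
+            args = parser.parse_args(['--verbose-level', '5'])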
+ """
+ import argparse
+
+ # Command line arguments will use '-'
+ sep = self.separator
+ argsep = '-'
+
+ # Create parser if none has been provided
+ if parser is None:
+ description = """
+ Parser for %s
+ Doc: %s
+ """ % (self.name, self.help)
+ parser = argparse.ArgumentParser(description=description)
+
+ # Create the list of descendants with properly formatted command-line options
+ ndesc = dict((k.replace(sep, argsep).replace('_', argsep), desc) for k, desc in self.descendants)
+
+ # Identify argument groups (first level children)
+ groups = {}
+ for argname, desc in ndesc.items():
+ if desc.name in excludes:
+ continue
+ if desc.children:
+ groups[argname] = parser.add_argument_group(title=prefix + argname, description=desc.help.replace('%', '%%'))
+
+ # Factory function that creates custom actions for argparse. This is needed to
+ # update the defaults
+ def mk_custom_action(ParentCls, inner_desc):
+
+ # Custom action to change defaults
+ class CustomAction(ParentCls):
+
+ # Store parent descriptor and descendant name as class attributes
+ desc = inner_desc
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ # Store new default
+ self.desc.options['default'] = str(values)
+
+ # Usual stuff - though useless here.
+ setattr(namespace, self.dest, values)
+
+ return CustomAction
+
+ # Add all arguments
+ for argname, desc in ndesc.iteritems():
+
+ if desc.name in excludes or argname in groups:
+ continue
+
+ # Attempt to retrieve the group
+ up = argsep.join(argname.split(argsep)[:-1])
+ parse = groups.get(up, parser)
+
+ # Manage boolean parameters as switches
+ typ = desc._get_type_argparse()
+ if typ is bool:
+ # Command line switches have no arguments, so treated differently
+ if desc.default:
+ # Default is true, so option is to turn it off
+ flag = '--no-' + argname
+ action = mk_custom_action(argparse._StoreFalseAction, desc)
+ else:
+ flag = '--' + argname
+ action = mk_custom_action(argparse._StoreTrueAction, desc)
+
+ parse.add_argument(flag, dest=argname, action=action, help=desc.help.replace('%', '%%'))
+ else:
+ d = desc.default
+ defstr = d.replace('%(', '%%(') if str(d) == d else str(d)
+ action = mk_custom_action(argparse.Action, desc)
+ parse.add_argument('--' + argname, dest=argname, type=typ, default=desc.default,
+ choices=desc.choices, action=action,
+ help=desc.help.replace('%', '%%') + ' (default=%s)' % defstr.replace('%', '%%'))
+
+ return parser
+
+
+class EvalDescriptor(ArgParseDescriptor):
+ """
+ Parameter class to store metadata for all ptypy parameters (default, limits, documentation, etc.)
+ """
+ _typemap = {'int': 'int',
+ 'float': 'float',
+ 'complex': 'complex',
+ 'str': 'str',
+ 'bool': 'bool',
+ 'tuple': 'tuple',
+ 'list': 'list',
+ 'array': 'ndarray',
+ 'Param': 'Param',
+ 'None': 'NoneType',
+ 'file': 'str',
+ '': 'NoneType'}
+
+ _evaltypes = ['int', 'float', 'tuple', 'list', 'complex']
+ _copytypes = ['str', 'file']
+ _limtypes = ['int', 'float']
+
+ OPTIONS_DEF = OrderedDict([
+ ('default', 'Default value for parameter (required).'),
+ ('help', 'A small docstring for command line parsing (required).'),
+ ('doc', 'A longer explanation for the online docs.'),
+ ('type', 'Comma separated list of acceptable types.'),
+ ('userlevel', """User level, a higher level means a parameter that is
+ less likely to vary or harder to understand."""),
+ ('choices', 'If parameter is list of choices, these are listed here.'),
+ ('uplim', 'Upper limit for scalar / integer values'),
+ ('lowlim', 'Lower limit for scalar / integer values'),
+ ])
+
+ def __init__(self, name, parent=None, separator='.'):
+ """
+ Parameter class to store metadata for all ptypy parameters (default, limits, documentation, etc.)
+ """
+ super(EvalDescriptor, self).__init__(name, parent=parent, separator=separator)
+ self.options['type'] = 'Param'
+
+ @property
+ def default(self):
+ """
+ Default value as a Python type
+ """
+ default = str(self.options.get('default', ''))
+ types = str(self.options.get('type', []))
+
+ # this destroys empty strings
+ default = default if default else None
+
+ if 'Param' in types or 'dict' in types:
+ out = Param()
+ elif default is None:
+ out = None
+ # should be only strings now
+ elif default.lower() == 'none':
+ out = None
+ elif default.lower() == 'true':
+ out = True
+ elif default.lower() == 'false':
+ out = False
+ elif default.startswith('@'):
+ out = self.get(default[1:])
+ elif self.is_evaluable:
+ out = ast.literal_eval(default)
+ else:
+ out = default
+
+ # strip any extra quotation marks
+ if type(out) == str:
+ out = out.strip('"').strip("'")
+
+ return out
+
+ @default.setter
+ def default(self, val):
+ """
+ Set default.
+ """
+ if val is None:
+ self.options['default'] = ''
+ elif str(val) == val:
+ self.options['default'] = "'%s'" % val
+ else:
+ self.options['default'] = str(val)
+
+ @property
+    def is_evaluable(self):
+        """
+        True if one of the declared types can be parsed with `ast.literal_eval`.
+        """
+        # the original loop broke out after the first iteration, so only the
+        # first declared type was ever checked
+        return any(t in self._evaltypes for t in self.type)
+
+ @property
+ def is_symlink(self):
+ """
+ True if type/default are symlinks.
+ """
+ types = self.options.get('type', '')
+ return '@' in types
+
+ @property
+ def is_target(self):
+ """
+        True if this node is a parent of symlink targets.
+ """
+ if self.parent is not self.root:
+ return False
+ for n, d in self.root.descendants:
+ if d.is_symlink and d.type[0].path.startswith(self.name):
+ return True
+ return False
+
+ @property
+ def type(self):
+ """
+ List of possible data types.
+ """
+ types = self.options.get('type', None)
+ tm = self._typemap
+ if types is not None:
+ types = [tm[x.strip()] if x.strip() in tm else x.strip() for x in types.split(',')]
+ # symlinks
+ if types[0].startswith('@'):
+ # wildcard in symlink: needed to grab dynamically added entries
+ if types[0].endswith('.*'):
+ parent = self.get(types[0][1:-2])
+ types = [c for n, c in parent.children.items()]
+ else:
+ types = [self.get(t[1:]) for t in types]
+ return types
+
+ @property
+ def limits(self):
+ """
+ (lower, upper) limits if applicable. (None, None) otherwise
+ """
+ if self.type is None:
+ return None, None
+
+ ll = self.options.get('lowlim', None)
+ ul = self.options.get('uplim', None)
+ if 'int' in self.type:
+ lowlim = int(ll) if ll else None
+ uplim = int(ul) if ul else None
+ else:
+ lowlim = float(ll) if ll else None
+ uplim = float(ul) if ul else None
+
+ return lowlim, uplim
+
+ @property
+ def doc(self):
+ """
+ Long documentation, may contain *sphinx* inline markup.
+ """
+ return self.options.get('doc', '')
+
+ @property
+ def userlevel(self):
+ """
+ User level, a higher level means a parameter that is less
+ likely to vary or harder to understand.
+ """
+ # User level (for gui stuff) is an int
+ ul = self.options.get('userlevel', 1)
+ if ul == 'None':
+ ul = None
+ return int(ul) if ul else None
+
+ def _walk(self, depth=0, pars=None, ignore_symlinks=False, ignore_wildcards=False, path=None):
+ """
+        Generator that traverses the complete tree up to the given depth, either
+        on its own or following the parameter structure in pars, resolving
+        symlinks and honouring wildcards.
+
+ Parameters
+ ----------
+ depth: How many levels to traverse in the tree.
+ pars: optional parameter tree to match with descriptor tree
+        ignore_symlinks: If True, do not follow symlinks. Defaults to False.
+        ignore_wildcards: If True, do not interpret wildcards. Defaults to False.
+ path: Used internally for recursion.
+
+ Returns
+ -------
+        A generator. Yields a dict with structure
+        {'d': <descriptor>,
+         'path': <relative path>,
+         'status': <status string, e.g. 'ok' or 'wrongtype'>,
+         'info': <additional info, e.g. an offending key>}
+ """
+
+ if path is None:
+ # This happens only at top level: ensure proper construction of relative paths.
+ path = ''
+
+ # Resolve symlinks
+ if self.is_symlink and not ignore_symlinks:
+ if len(self.type) == 1:
+ # No name needed
+ s = self.type[0]
+ else:
+ if pars is not None:
+ # Look for name in pars
+ name = pars.get('name', None)
+ # Is this the intended behaviour? Instead maybe s = self.default
+ if name is None:
+ s = None
+ yield {'d': self, 'path': path, 'status': 'noname', 'info': ''}
+ else:
+ s = dict((link.name, link) for link in self.type).get(name, None)
+ if not s:
+ yield {'d': self, 'path': path, 'status': 'nolink', 'info': name}
+ else:
+ # No pars, resolve default
+ s = self.default
+ # Follow links
+ if s:
+ for x in s._walk(depth=depth, pars=pars, ignore_symlinks=ignore_symlinks,
+ ignore_wildcards=ignore_wildcards, path=path):
+ yield x
+ return
+
+ # Detect wildcard
+ wildcard = (self.children.keys() == ['*'])
+
+ # Grab or check children
+ if wildcard:
+ if ignore_wildcards:
+ yield {'d': self, 'path': path, 'status': 'wildcard', 'info': ''}
+ return
+ else:
+ if pars is None:
+ # Generate default name for single entry
+ children = {self.name[:-1] + '_00': self.children['*']}
+ else:
+ # Grab all names from pars
+ children = {k: self.children['*'] for k in pars.keys()}
+ else:
+ children = self.children
+
+ # Main yield: check type here.
+ if pars is None or \
+ (type(pars).__name__ in self.type) or \
+ (hasattr(pars, 'items') and 'Param' in self.type) or \
+ (type(pars).__name__ == 'int' and 'float' in self.type) or \
+ (type(pars).__name__[:5] == 'float' and 'float' in self.type):
+ yield {'d': self, 'path': path, 'status': 'ok', 'info': ''}
+ else:
+ yield {'d': self, 'path': path, 'status': 'wrongtype', 'info': type(pars).__name__}
+ return
+
+ if (depth == 0) or \
+ (not children) or \
+ (not hasattr(pars, 'items') and (pars is not None)):
+ # Nothing else to do
+ return
+
+ # Look for unrecognised entries in pars
+ if pars:
+ for k, v in pars.items():
+ if k not in children:
+ yield {'d': self, 'path': path, 'status': 'nochild', 'info': k}
+
+ # Loop through children
+ for cname, c in children.items():
+ new_path = '.'.join([path, cname]) if path else cname
+ if pars:
+ if cname not in pars or pars[cname] is None:
+ yield {'d': c, 'path': path, 'status': 'nopar', 'info': cname}
+ else:
+ for x in c._walk(depth=depth-1, pars=pars[cname], ignore_symlinks=ignore_symlinks,
+ ignore_wildcards=ignore_wildcards, path=new_path):
+ yield x
+ else:
+ for x in c._walk(depth=depth-1, ignore_symlinks=ignore_symlinks,
+ ignore_wildcards=ignore_wildcards, path=new_path):
+ yield x
+ return
+
+ def check(self, pars, depth=99):
+ """
+ Check that input parameter pars is consistent with parameter description, up to given depth.
+
+ Parameters
+ ----------
+ pars: The input parameter or parameter tree
+        depth: The level at which verification is done.
+
+ Returns
+ -------
+ A dictionary report using CODES values.
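+
+        Example (hypothetical parameter set)::
+
+            report = desc.check({'num_frames': 50})
+            # each visited path maps to e.g. {'type': CODES.PASS}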
+
+ """
+ out = OrderedDict()
+ for res in self._walk(depth=depth, pars=pars):
+ path = res['path']
+            if path not in out:
+ out[path] = {}
+ # Switch through all possible statuses
+ if res['status'] == 'ok':
+ # Check limits
+ d = res['d']
+ out[path]['type'] = CODES.PASS
+ if any([i in d._limtypes for i in d.type]):
+ lowlim, uplim = d.limits
+ if (lowlim is None) or (path not in pars) or (pars[path] is None):
+ out[path]['lowlim'] = CODES.PASS
+ else:
+ out[path]['lowlim'] = CODES.PASS if (pars[path] >= lowlim) else CODES.FAIL
+ if uplim is None or pars[path] is None:
+ out[path]['uplim'] = CODES.PASS
+ else:
+ out[path]['uplim'] = CODES.PASS if (pars[path] <= uplim) else CODES.FAIL
+ elif res['status'] == 'wrongtype':
+ # Wrong type
+ out[path]['type'] = CODES.INVALID
+ elif res['status'] == 'noname':
+ # Symlink name could not be found
+ out[path]['symlink'] = CODES.INVALID
+ out[path]['name'] = CODES.MISSING
+ elif res['status'] == 'nolink':
+ # Link was not resolved
+ out[path]['symlink'] = CODES.INVALID
+ out[path]['name'] = CODES.UNKNOWN
+ elif res['status'] == 'nochild':
+ # Parameter entry without corresponding Descriptor
+ out[path][res['info']] = CODES.INVALID
+ elif res['status'] == 'nopar':
+ # Missing parameter entry
+ out[path][res['info']] = CODES.MISSING
+ return out
+
+ def validate(self, pars, raisecodes=(CODES.FAIL, CODES.INVALID)):
+ """
+ Check that the parameter structure `pars` matches the documented
+ constraints for this node / parameter.
+
+        The function raises a RuntimeError if one of the codes in the list
+        `raisecodes` has been found. If `raisecodes` is empty, the function
+        always returns successfully, but problems are logged using the logger.
+
+ Parameters
+ ----------
+ pars : Param, dict
+ A parameter set to validate
+
+ raisecodes: list
+ List of codes that will raise a RuntimeError.
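+
+        Example::
+
+            desc.validate(pars)  # raises RuntimeError on FAIL or INVALID codes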
+ """
+ from ptypy.utils.verbose import logger
+ import logging
+
+ _logging_levels = dict(
+ PASS=logging.DEBUG,
+ FAIL=logging.CRITICAL,
+ UNKNOWN=logging.WARN,
+ MISSING=logging.DEBUG,
+ INVALID=logging.ERROR
+ )
+
+ d = self.check(pars)
+ do_raise = False
+ raise_reasons = []
+ for ep, v in d.items():
+ for tocheck, outcome in v.items():
+ logger.log(_logging_levels[CODE_LABEL[outcome]], '%-50s %-20s %7s' % (ep, tocheck, CODE_LABEL[outcome]))
+ if outcome in raisecodes:
+ do_raise = True
+ reason = str(ep)
+ if tocheck == 'symlink':
+ reason += ' - make sure to specify the .name field'
+ else:
+ reason += ' - %s' % tocheck
+ raise_reasons.append(reason)
+ if do_raise:
+ raise RuntimeError('Parameter validation failed:\n ' + '\n '.join(raise_reasons))
+
+ def sanity_check(self, depth=10):
+ """
+ Checks if default parameters from configuration are
+ self-constistent with limits and choices.
+ """
+ self.validate(self.make_default(depth=depth))
+
+ def make_default(self, depth=0):
+ """
+ Creates a default parameter structure.
+
+ Parameters
+ ----------
+ depth : int
+            The depth in the structure to which all sub-nodes are expanded.
+            All nodes beyond depth will be ignored.
+
+ Returns
+ -------
+ pars : Param
+ A parameter branch as Param.
+
+ Examples
+ --------
+ >>> from ptypy.utils.descriptor import defaults_tree
+ >>> print(defaults_tree['io'].make_default(depth=5))
+ """
+ out = Param()
+ for ret in self._walk(depth=depth, ignore_symlinks=False, ignore_wildcards=True):
+ path = ret['path']
+ if path == '': continue
+ out[path] = ret['d'].default
+ return out
+
+ def make_doc_rst(self, prst, use_root=True):
+ """
+        Pretty-print the whole parameter structure in RST format.
+ """
+ Header = '.. _parameters:\n\n'
+ Header += '************************\n'
+ Header += 'Parameter tree structure\n'
+ Header += '************************\n\n'
+ prst.write(Header)
+
+ root = self.root
+
+ for name, desc in root.descendants:
+ if name == '':
+ continue
+ if hasattr(desc, 'children') and desc.parent is root:
+ prst.write('\n' + name + '\n')
+ prst.write('=' * len(name) + '\n\n')
+ if hasattr(desc, 'children') and desc.parent.parent is root:
+ prst.write('\n' + name + '\n')
+ prst.write('-' * len(name) + '\n\n')
+
+ prst.write('.. py:data:: ' + name)
+ # prst.write('('+', '.join([t for t in opt['type']])+')')
+ prst.write('(' + ', '.join(desc.type) + ')')
+ prst.write('\n\n')
+ # num = str(desc.num_id)
+ # prst.write(' *(' + num + ')* ' + desc.help + '\n\n')
+ prst.write(' ' + desc.doc.replace('\n', '\n ') + '\n\n')
+ prst.write(' *default* = ``' + str(desc.default))
+ lowlim, uplim = desc.limits
+ if lowlim is not None and uplim is not None:
+ prst.write(' (>' + str(lowlim) + ', <' + str(uplim) + ')``\n')
+ elif lowlim is not None and uplim is None:
+ prst.write(' (>' + str(lowlim) + ')``\n')
+ elif lowlim is None and uplim is not None:
+ prst.write(' (<' + str(uplim) + ')``\n')
+ else:
+ prst.write('``\n')
+
+ prst.write('\n')
+ prst.close()
+
+ def parse_doc(self, name=None, recursive=True):
+ """
+ Decorator to parse docstring and automatically attach new parameters.
+        The parameter section is identified by a line starting with "Defaults:".
+
+ Parameters
+ ----------
+ name: str
+ The descendant name under which all parameters will be held. If None, use self.
+ recursive: bool
+            Whether or not to traverse the docstrings of base classes. *Is there a use case for this?*
+
+ Returns
+ -------
+ The decorator function.
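+
+        Example (hypothetical class; `defaults_tree` is the module-level
+        EvalDescriptor instance)::
+
+            @defaults_tree.parse_doc('scan.MyScan')
+            class MyScan(object):
+                '''
+                Docs ...
+
+                Defaults:
+
+                [energy]
+                default = 9.0
+                type = float
+                help = Photon energy
+                '''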
+ """
+ return lambda cls: self._parse_doc_decorator(name, cls, recursive)
+
+ def _parse_doc_decorator(self, name, cls, recursive):
+ """
+ Actual decorator returned by parse_doc.
+
+ Parameters
+ ----------
+ name: str
+ Descendant name.
+ cls:
+ Class to decorate.
+ recursive:
+ If false do not parse base class doc.
+
+ Returns
+ -------
+ Decorated class.
+ """
+ # Find or create insertion point
+ if name is None:
+ desc = self
+ else:
+ try:
+ desc = self[name]
+ except KeyError:
+ desc = self.new_child(name, implicit=True)
+
+ # Get the parameter section, including from base class(es) if recursive.
+ parameter_string = self._extract_doc_from_class(cls, recursive)
+
+ # Maybe check here if a non-Param descendant is being overwritten?
+ desc.options['type'] = 'Param'
+
+ # PT: I don't understand this.
+ if not recursive and cls.__base__ != object:
+ desc_base = getattr(cls.__base__, '_descriptor')
+ typ = desc_base().path if desc_base is not None else None
+ desc.default = typ
+
+ # Parse parameter section and store in desc
+ desc.from_string(parameter_string)
+
+ # Attach the Parameter group to cls
+ from weakref import ref
+ cls._descriptor = ref(desc)
+
+ # Render the defaults
+ cls.DEFAULT = desc.make_default(depth=99)
+
+ return cls
+
+ def _extract_doc_from_class(self, cls, recursive=True):
+ """
+ Utility method used recursively by _parse_doc_decorator to extract doc strings
+        from all base classes and cobble together the parameter section following "Defaults:".
+ """
+ if cls == object:
+ # Reached "object" base class. No doc string here.
+ return ''
+
+ # Get doc from base
+ base_parameters = ''
+
+ if recursive:
+ for bcls in cls.__bases__:
+ base_parameters += self._extract_doc_from_class(bcls)
+
+ # Append doc from class
+ docstring = cls.__doc__ if cls.__doc__ is not None else ' '
+
+ # Because of indentation it is safer to work line by line
+ doclines = docstring.splitlines()
+ for n, line in enumerate(doclines):
+ if line.strip().startswith('Defaults:'):
+ break
+ parameter_string = textwrap.dedent('\n'.join(doclines[n + 1:]))
+
+ return base_parameters + parameter_string
+
+ def create_template(self, filename=None, start_at_root=True, user_level=0, doc_level=2):
+ """
+ Creates templates for ptypy scripts from an EvalDescriptor instance.
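+
+        Example::
+
+            desc.create_template('my_template.py', user_level=1)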
+ """
+ desc = self
+ if start_at_root:
+ desc = self.root
+
+ base = 'p'
+
+ # open file
+ filename = 'ptypy_template.py' if filename is None else filename
+ with open(filename, 'w') as fp:
+
+ # write header
+ h = '"""\nThis template was autogenerated using an EvalDescriptor instance.\n'
+ h += 'It is only a template and not a working reconstruction script.\n"""\n\n'
+ h += "import numpy as np\n"
+ h += "import ptypy\n"
+ h += "from ptypy.core import Ptycho\n"
+ h += "from ptypy import utils as u\n\n"
+ h += '### Ptypy parameter tree ###' + '\n\n'
+ fp.write(h)
+
+ # write the parameter defaults
+ fp.write(base + ' = Param()\n\n')
+ for ret in self._walk(depth=99, ignore_wildcards=False):
+ d = ret['d']
+ # user level
+ if d.userlevel > user_level: continue
+ # skip the root, handled above
+ if d.root is d: continue
+ # handle line breaks already in the help/doc strings
+ hlp = '# ' + d.help.replace('\n', '\n# ')
+ doc = '# ' + d.doc.replace('\n', '\n# ')
+ # doclevel 2: help and doc before parameter
+ if doc_level == 2:
+ fp.write('\n')
+ fp.write(hlp + '\n')
+ fp.write(doc + '\n')
+ # Container defaults come as Params. It would be more elegant
+ # to check 'if d.children' here but not sure that is safe
+ if isinstance(d.default, Param):
+ if doc_level < 2:
+ fp.write('\n')
+ line = base + '.' + ret['path'] + ' = u.Param()'
+ fp.write(line)
+ # not Param: actual default value
+ else:
+ val = str(d.default)
+                    if 'str' in d.type and d.default is not None:
+ val = "'" + val + "'"
+ line = base + '.' + ret['path'] + ' = ' + val
+ fp.write(line)
+ # doclevel 1: inline help comments
+ if doc_level == 1:
+ fp.write(' ' * max(1, 50 - len(line)) + hlp + '\n')
+ else:
+ fp.write('\n')
+
+ # write the Ptycho instantiation
+ fp.write('\n\n### Reconstruction ###\n\n')
+ fp.write('Ptycho(%s,level=5)\n'%base)
diff --git a/ptypy/utils/math_utils.py b/ptypy/utils/math_utils.py
index 1f51cc8d4..068bcd4b9 100644
--- a/ptypy/utils/math_utils.py
+++ b/ptypy/utils/math_utils.py
@@ -7,18 +7,18 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-
-
+import numpy as np
from scipy.special import erf
from scipy.linalg import eig
-import numpy as np
-from misc import *
from scipy import ndimage as ndi
+from .misc import *
+
__all__ = ['smooth_step', 'abs2', 'norm2', 'norm', 'delxb', 'delxc', 'delxf',
'ortho', 'gauss_fwhm', 'gaussian', 'gf', 'cabs2', 'gf_2d', 'c_gf',
'gaussian2D', 'rl_deconvolution']
+
def cabs2(A):
"""
Squared absolute value for an array `A`.
diff --git a/ptypy/utils/misc.py b/ptypy/utils/misc.py
index aa0e7c256..765fcdf7a 100644
--- a/ptypy/utils/misc.py
+++ b/ptypy/utils/misc.py
@@ -10,11 +10,140 @@
import os
import numpy as np
from functools import wraps
+from collections import OrderedDict
+from collections import namedtuple
-__all__ = ['str2int','str2range',\
- 'complex_overload','expect2','expect3',\
- 'keV2m','keV2nm','nm2keV', 'clean_path','unique_path']
+__all__ = ['str2int', 'str2range', 'complex_overload', 'expect2',
+ 'expect3', 'keV2m', 'keV2nm', 'nm2keV', 'clean_path',
+ 'unique_path', 'Table', 'all_subclasses', 'expectN', 'isstr']
+
+def all_subclasses(cls, names=False):
+ """
+ Helper function for finding all subclasses of a base class.
+ If names is True, returns the names of the classes rather than
+ their object handles.
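+
+    >>> class A(object): pass
+    >>> class B(A): pass
+    >>> all_subclasses(A, names=True)
+    ['B']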
+ """
+ subs = cls.__subclasses__() + [g for s in cls.__subclasses__()
+ for g in all_subclasses(s)]
+ if names:
+ return [c.__name__ for c in subs]
+ else:
+ return subs
+
+class Table(object):
+ """
+    Basic table implemented on top of a numpy structured array.
+    Ideally subclassed to be used with faster or more
+ flexible databases.
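+
+    Hypothetical usage sketch::
+
+        t = Table({'x': 0.0, 'label': 'none'})
+        t.new_table()
+        ids = t.add_records([(1.0, 'a'), (2.0, 'b')])
+        recs = t.pull_records(ids)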
+ """
+ def __init__(self,dct,name='pods'):
+ self._table_name = name
+ self._record_factory_from_dict(dct)
+
+ def _record_factory_from_dict(self,dct,suffix='_record'):
+ self._record_factory = namedtuple(self._table_name+suffix,dct.keys())
+ self._record_default = self._record_factory._make(dct.values())
+ self._record_dtype = [np.array(v).dtype for v in self._record_default]
+
+ def new_table(self, records = 0):
+ r = self._record_default
+ dtype = zip(r._fields,self._record_dtype)
+ self._table = np.array([tuple(self._record_default)] * records,dtype)
+
+ def new_fields(self,**kwargs):
+ """
+ Add fields (columns) to the table. This is probably slow.
+ """
+ # save old stuff
+ addition = OrderedDict(**kwargs)
+ t = self._table
+ records = self.pull_records()
+ new = self._record_factory._asdict(self._record_default)
+ new.update(addition)
+ self._record_factory_from_dict(new)
+ self.new_table()
+ a = tuple(addition.values())
+ self.add_records( [r + a for r in records] )
+
+
+ def pull_records(self,record_ids=None):
+ if record_ids is None:
+ return map(self._record_factory._make, self._table)
+ else:
+ return map(self._record_factory._make, self._table[record_ids])
+
+ def add_records(self,records):
+ """ Add records at the end of the table. """
+ start = len(self._table)
+ stop = len(records)+start
+ record_ids = range(start,stop)
+ self._table.resize((len(self._table)+len(records),))
+ self._table[start:stop]=records
+
+ return record_ids
+
+ def insert_records(self,records, record_ids):
+ """ Insert records and overwrite existing content. """
+ self._table[record_ids]=records
+
+ def select_func(self,func,fields=None):
+ """
+ Find all records where search function `func` evaluates True.
+ Arguments to the function are selected by `fields`.
+ The search function will always receive the record_id as first argument.
+ """
+ a = range(len(self._table))
+ if fields is None:
+            # pass each record id to func (not the whole id range)
+            res = [n for n in a if func(n)]
+ else:
+ t = self._table[fields].T # pretty inefficient. Is like a dual transpose
+ res =[n for n,rec in zip(a,t) if func(n, *rec)]
+
+ return np.array(res)
+
+ def select_range(self,field,low,high):
+ """
+ Find all records whose values are in the range [`low`,`high`] for
+ the field entry `field`. Should be a numerical value.
+ """
+ t = self._table
+        # `and` is ambiguous for arrays; use the element-wise `&` operator
+        record_ids = np.argwhere((t[field] >= low) & (t[field] <= high)).squeeze(-1)
+ return record_ids
+
+ def select_match(self,field,match):
+ """
+        Find all records whose value equals `match` for the field
+        entry `field`.
+ """
+ t = self._table
+ record_ids = np.argwhere(t[field] == match).squeeze(-1)
+ return record_ids
+
+ def modify_add(self,record_ids,**kwargs):
+ """
+        Pull the selected records, replace fields with the values in
+        `**kwargs`, and append the modified records to the end of the table.
+ """
+ old_records = self.pull_records(record_ids)
+ recs = [r._replace(**kwargs) for r in old_records]
+ self.add_records(recs)
+
+
+def isstr(s):
+ """
+    Check whether an object is of string type on both Python 2 and 3.
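+
+    >>> isstr(u'abc')
+    True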
+ """
+ import sys
+
+ if sys.version_info[0] == 3:
+ string_types = str,
+ else:
+ string_types = basestring,
+
+ return isinstance(s, string_types)
+
+
def str2index(s):
"""
Converts a str that is supposed to represent a numpy index expression
@@ -152,6 +281,14 @@ def expect3(a):
b=np.array([a.flat[0],a.flat[1],a.flat[2]])
return b
+def expectN(a, N):
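+    """
+    Expand `a` to an N-component array by dispatching to expect2 or expect3.
+    """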
+ if N==2:
+ return expect2(a)
+ elif N==3:
+ return expect3(a)
+ else:
+ raise ValueError('N must be 2 or 3')
+
def complex_overload(func):
"""\
Overloads function specified only for floats in the following manner
@@ -162,7 +299,7 @@ def complex_overload(func):
"""
@wraps(func)
def overloaded(c,*args,**kwargs):
- return func(np.real(c),*args,**kwargs) +1j *func(np.imag(c),*args,**kwargs)
+ return func(np.real(c),*args,**kwargs).astype(c.dtype) +1j *func(np.imag(c),*args,**kwargs)
return overloaded
diff --git a/ptypy/utils/parallel.py b/ptypy/utils/parallel.py
index 6cf21d9ac..af6400d3d 100644
--- a/ptypy/utils/parallel.py
+++ b/ptypy/utils/parallel.py
@@ -7,17 +7,14 @@
:copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
:license: GPLv2, see LICENSE for details.
"""
-__all__ = ['MPIenabled', 'comm', 'MPI', 'master','barrier',
- 'LoadManager', 'loadmanager','allreduce','send','receive','bcast',
- 'bcast_dict', 'gather_dict','MPIrand_normal', 'MPIrand_uniform','MPInoise2d']
-
import numpy as np
+
+from .. import __has_mpi4py__ as hmpi
+
size = 1
rank = 0
MPI = None
comm = None
-
-from .. import __has_mpi4py__ as hmpi
if hmpi:
from mpi4py import MPI
comm = MPI.COMM_WORLD
@@ -28,10 +25,14 @@
MPIenabled = not (size == 1)
master = (rank == 0)
+__all__ = ['MPIenabled', 'comm', 'MPI', 'master','barrier',
+ 'LoadManager', 'loadmanager','allreduce','send','receive','bcast',
+ 'bcast_dict', 'gather_dict','MPIrand_normal', 'MPIrand_uniform','MPInoise2d']
+
def useMPI(do=None):
"""\
- Toggle using MPI or not. Is this useful?
+ Toggle using MPI or not. Is this useful? YES!
"""
global MPIenabled
if do is None:
@@ -40,6 +41,15 @@ def useMPI(do=None):
MPIenabled = False
else:
MPIenabled = do
+ if do is False:
+ global size
+ global master
+ global loadmanager
+ size = 1
+ master = True
+ loadmanager = LoadManager()
+
+
###################################
@@ -92,7 +102,6 @@ def assign(self, idlist=None):
A nested list, (a list of lists) such that ``R[rank]=list``
of elements of `idlist` managed by process of given `rank`.
"""
-
# Simplest case
if idlist is None:
r = size - 1 - self.load[::-1].argmin()
@@ -124,6 +133,7 @@ def assign(self, idlist=None):
# Update the loads
part = np.zeros_like(self.load)
part[li] = ipart
+
self.load += part
# Cumulative sum give the index boundaries between the ranks
@@ -131,6 +141,7 @@ def assign(self, idlist=None):
# Now assign the rank
rlist = np.arange(size)
+
out = [[] for x in range(size)]
for i, k in enumerate(idlist):
r = rlist[i < cumpart][0]
@@ -204,11 +215,12 @@ def _MPIop(a, op, axis=None):
Apply operation op on accross a list of arrays distributed between
processes. Supported operations are SUM, MAX, MIN, and PROD.
"""
+
+ if MPIenabled:
+ MPIop = {'SUM': MPI.SUM, 'MAX': MPI.MAX, 'MIN': MPI.MIN, 'PROD': MPI.PROD}[op.upper()]
- MPIop, npop = \
- {'SUM': (MPI.SUM, np.sum), 'MAX': (MPI.MAX, np.max), 'MIN': (MPI.MIN, np.min), 'PROD': (MPI.PROD, np.prod)}[
- op.upper()]
-
+ npop = {'SUM': np.sum, 'MAX': np.max, 'MIN': np.min, 'PROD': np.prod}[op.upper()]
+
# Total op
if axis is None:
# Very special case: calling with an empty object might make sense in a few situations.
@@ -405,8 +417,8 @@ def bcast(data, source=0):
msg = comm.bcast('pickle', source)
else:
msg = comm.bcast(None, source)
-
- if str(msg)=='array':
+
+ if str(msg) == 'array':
# Communicate size before sending array
if rank == source:
shape, dtypestr = comm.bcast((data.shape, data.dtype.str), source)
@@ -433,7 +445,7 @@ def bcast(data, source=0):
thing = comm.bcast(data, source)
else:
thing = comm.bcast(None, source)
-
+
return thing
def bcast_dict(dct, keys='all', source=0):
diff --git a/ptypy/utils/parameters.py b/ptypy/utils/parameters.py
index 969762043..d40efb0c0 100644
--- a/ptypy/utils/parameters.py
+++ b/ptypy/utils/parameters.py
@@ -9,10 +9,11 @@
"""
import os
-__all__ = ['Param', 'asParam'] # 'load',]
+__all__ = ['Param', 'asParam'] # 'load',]
PARAM_PREFIX = 'pars'
+
class Param(dict):
"""
Convenience class: a dictionary that gives access to its keys
@@ -23,8 +24,8 @@ class Param(dict):
>>> p = Param()
>>> p.x = {}
>>> p
- Param({})
-
+ Param({'x': {}})
+
While dict(p) returns a dictionary, it is not recursive, so it is better in this case
to use p.todict(). However, p.todict does not check for infinite recursion. So please
don't store a dictionary (or a Param) inside itself.
@@ -33,16 +34,17 @@ class Param(dict):
new references. This will lead inconsistency if other objects refer to dicts or Params
in the updated Param instance.
"""
- _display_items_as_attributes=True
+ _display_items_as_attributes = True
_PREFIX = PARAM_PREFIX
- def __init__(self,__d__=None,**kwargs):
+ def __init__(self, __d__=None, **kwargs):
"""
A Dictionary that enables access to its keys as attributes.
Same constructor as dict.
"""
dict.__init__(self)
- if __d__ is not None: self.update(__d__)
+ if __d__ is not None:
+ self.update(__d__)
self.update(kwargs)
def __getstate__(self):
@@ -57,17 +59,35 @@ def __repr__(self):
def __str__(self):
from .verbose import report
- return report(self,depth=7,noheader=True)
+ return report(self, depth=7, noheader=True)
def __setitem__(self, key, value):
# BE: original behavior modified as implicit conversion may destroy references
# Use update(value,Convert=True) instead
- #return super(Param, self).__setitem__(key, Param(value) if type(value) == dict else value)
- return super(Param, self).__setitem__(key, value)
+ # return super(Param, self).__setitem__(key, Param(value) if type(value) == dict else value)
+
+ s = self
+ # Parse dots in key
+ if type(key) == str and '.' in key:
+ keys = key.split('.')
+ # Extract or generate subtree
+ for k in keys[:-1]:
+ try:
+ s = s.__getitem__(k)
+ except KeyError:
+ super(Param, s).__setitem__(k, Param())
+ s = super(Param, s).__getitem__(k)
+ key = keys[-1]
+ return super(Param, s).__setitem__(key, value)
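+
+    # Sketch of the dot-key behaviour added above (hypothetical keys):
+    #   p = Param(); p['a.b.c'] = 1   creates nested Params, so that
+    #   p.a.b.c == 1 and p['a.b.c'] == 1.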
def __getitem__(self, name):
- #item = super(Param, self).__getitem__(name)
- #return Param(item) if type(item) == dict else item
+ # Parse dots in key
+ if type(name) == str and '.' in name:
+ name_, name__ = name.split('.', 1)
+ s = self[name_]
+ if type(s) is not type(self):
+ raise KeyError(name)
+ return s[name__]
return super(Param, self).__getitem__(name)
def __delitem__(self, name):
@@ -76,7 +96,7 @@ def __delitem__(self, name):
def __delattr__(self, name):
return super(Param, self).__delitem__(name)
- #__getattr__ = __getitem__
+ # __getattr__ = __getitem__
def __getattr__(self, name):
try:
return self.__getitem__(name)
@@ -85,17 +105,16 @@ def __getattr__(self, name):
__setattr__ = __setitem__
- def copy(self,depth=0):
+ def copy(self, depth=0):
"""
:returns Param: A (recursive) copy of P with depth `depth`
"""
d = Param(self)
- if depth>0:
- for k,v in d.iteritems():
- if isinstance(v,self.__class__): d[k] = v.copy(depth-1)
+ if depth > 0:
+ for k, v in d.iteritems():
+ if isinstance(v, self.__class__): d[k] = v.copy(depth - 1)
return d
-
def __dir__(self):
"""
Defined to include the keys when using dir(). Useful for
@@ -107,10 +126,26 @@ def __dir__(self):
"""
if self._display_items_as_attributes:
return self.keys()
- #return [item.__dict__.get('name',str(key)) for key,item in self.iteritems()]
+ # return [item.__dict__.get('name',str(key)) for key,item in self.iteritems()]
else:
return []
+ def __contains__(self, key):
+ """
+ Redefined to support dot keys.
+ """
+ if '.' in key:
+ name_, name__ = key.split('.', 1)
+ if name_ in self:
+ s = self[name_]
+ if type(s) is type(self):
+ return name__ in self[name_]
+ else:
+ return False
+ else:
+ return False
+ return super(Param, self).__contains__(key)
+
def update(self, __d__=None, in_place_depth=0, Convert=False, **kwargs):
"""
Update Param - almost same behavior as dict.update, except
@@ -129,17 +164,21 @@ def update(self, __d__=None, in_place_depth=0, Convert=False, **kwargs):
If the counter reaches zero, the Param to a key is
replaced instead of updated
"""
- def _k_v_update(k,v):
+
+ def _k_v_update(k, v):
# If an element is itself a dict, convert it to Param
- if Convert and hasattr(v, 'keys'):
- #print 'converting'
+ if Convert and hasattr(v, 'keys') and not isinstance(v, self.__class__):
v = Param(v)
+ v.update(v.items(), in_place_depth - 1, Convert)
+
# new key
- if not self.has_key(k):
+ if k not in self:
self[k] = v
+
# If this key already exists and is already dict-like, update it
- elif in_place_depth > 0 and hasattr(v,'keys') and isinstance(self[k],self.__class__):
- self[k].update(v, in_place_depth - 1)
+ elif in_place_depth > 0 and hasattr(v, 'keys') and isinstance(self[k], self.__class__):
+ self[k].update(v, in_place_depth - 1, Convert)
+
"""
if isinstance(self[k],self.__class__):
# Param gets recursive in_place updates
@@ -148,6 +187,7 @@ def _k_v_update(k,v):
# dicts are only updated in-place once
self[k].update(v)
"""
+
# Otherwise just replace it
else:
self[k] = v
@@ -155,20 +195,20 @@ def _k_v_update(k,v):
if __d__ is not None:
if hasattr(__d__, 'keys'):
# Iterate through dict-like argument
- for k,v in __d__.iteritems():
- _k_v_update(k,v)
+ for k, v in __d__.iteritems():
+ _k_v_update(k, v)
else:
# here we assume a (key,value) list.
- for (k,v) in __d__:
- _k_v_update(k,v)
+ for (k, v) in __d__:
+ _k_v_update(k, v)
- for k,v in kwargs.iteritems():
- _k_v_update(k,v)
+ for k, v in kwargs.iteritems():
+ _k_v_update(k, v)
return None
- def _to_dict(self,Recursive=False):
+ def _to_dict(self, Recursive=False):
"""
Convert to dictionary (recursively if needed).
"""
@@ -176,20 +216,21 @@ def _to_dict(self,Recursive=False):
return dict(self)
else:
d = dict(self)
- for k,v in d.iteritems():
- if isinstance(v,self.__class__): d[k] = v._to_dict(Recursive)
+ for k, v in d.iteritems():
+ if isinstance(v, self.__class__): d[k] = v._to_dict(Recursive)
return d
@classmethod
- def _from_dict(cls,dct):
+ def _from_dict(cls, dct):
"""
Make Param from dict. This is similar to the __init__ call
but kept here for coherent usage among ptypy.core.Base children
"""
- #p=Param()
- #p.update(dct.copy())
+ # p=Param()
+ # p.update(dct.copy())
return Param(dct.copy())
+
def validate_standard_param(sp, p=None, prefix=None):
"""\
validate_standard_param(sp) checks if sp follows the standard parameter convention.
@@ -199,7 +240,7 @@ def validate_standard_param(sp, p=None, prefix=None):
"""
if p is None:
good = True
- for k,v in sp.iteritems():
+ for k, v in sp.iteritems():
if k.startswith('_'): continue
if type(v) == type(sp):
pref = k if prefix is None else '.'.join([prefix, k])
@@ -207,7 +248,7 @@ def validate_standard_param(sp, p=None, prefix=None):
continue
else:
try:
- a,b,c = v
+ a, b, c = v
if prefix is not None:
print ' %s.%s = %s' % (prefix, k, str(v))
else:
@@ -223,6 +264,7 @@ def validate_standard_param(sp, p=None, prefix=None):
else:
raise NotimplementedError('Checking if a param fits with a standard is not yet implemented')
+
def format_standard_param(p):
"""\
Pretty-print a Standard Param class.
@@ -230,13 +272,13 @@ def format_standard_param(p):
lines = []
if not validate_standard_param(p):
print 'Standard parameter does not'
- for k,v in p.iteritems():
+ for k, v in p.iteritems():
if k.startswith('_'): continue
if type(v) == type(p):
sublines = format_standard_param(v)
lines += [k + '.' + s for s in sublines]
else:
- lines += ['%s = %s #[%s] %s' % (k, str(v[1]),v[0],v[2])]
+ lines += ['%s = %s #[%s] %s' % (k, str(v[1]), v[0], v[2])]
return lines
@@ -256,6 +298,7 @@ def asParam(obj):
"""
return obj if isinstance(obj, Param) else Param(obj)
+
def load(filename):
"""\
Helper function to load a parameter file
@@ -269,6 +312,7 @@ def load(filename):
param.autoviv = False
return param
+
def make_default(default_dict_or_file):
"""
convert description dict to a module dict using a possibly verbose Q & A game
diff --git a/ptypy/utils/plot_client.py b/ptypy/utils/plot_client.py
index 14a47698f..bd2e5fa01 100644
--- a/ptypy/utils/plot_client.py
+++ b/ptypy/utils/plot_client.py
@@ -10,26 +10,16 @@
import time
import numpy as np
from threading import Thread, Lock
-#import matplotlib as mpl
-#mpl.rcParams['backend'] = 'Qt4Agg'
-#from matplotlib import gridspec
-#from matplotlib import pyplot as plt
import os
-__all__=['MPLClient','MPLplotter','PlotClient','spawn_MPLClient', 'TEMPLATES', 'DEFAULT']
-
-if __name__ == "__main__":
- from ptypy.utils.verbose import logger, report, log
- from ptypy.utils.parameters import Param
- from ptypy.utils.array_utils import crop_pad, clean_path
- from ptypy.utils.plot_utils import plt # pyplot import with all the specialized settings
- from ptypy.utils.plot_utils import PtyAxis, imsave, pause, rmphaseramp
-else:
- from .verbose import logger, report, log
- from .parameters import Param
- from .array_utils import crop_pad, clean_path
- from .plot_utils import plt # pyplot import with all the specialized settings
- from .plot_utils import PtyAxis, imsave, pause, rmphaseramp
+from .verbose import logger, report, log
+from .parameters import Param
+from .array_utils import crop_pad, clean_path
+from .plot_utils import plt # pyplot import with all the specialized settings
+from .plot_utils import PtyAxis, imsave, pause, rmphaseramp
+
+__all__ = ['MPLClient', 'MPLplotter', 'PlotClient', 'spawn_MPLClient', 'TEMPLATES', 'DEFAULT']
+
Storage_DEFAULT = Param(
# maybe we would want this container specific
@@ -124,7 +114,7 @@ def __init__(self, client_pars=None, in_thread=False):
self.cmd_dct = {}
# Initialize data containers. Here we use our own "Param" class, which adds attribute access
- # On top of dictionary.
+ # on top of dictionary.
self.pr = Param() # Probe
self.ob = Param() # Object
self.runtime = Param() # Runtime information (contains reconstruction metrics)
@@ -540,7 +530,6 @@ def plot_storage(self, storage, plot_pars, title="", typ='obj'):
if np.isscalar(pp.mask):
x, y = np.indices(sh)-np.reshape(np.array(sh)//2, (len(sh),)+len(sh)*(1,))
mask = (np.sqrt(x**2+y**2) < pp.mask*min(sh)/2.)
- pp.mask = mask
# cropping
crop = np.array(sh)*np.array(pp.crop)//2
@@ -568,6 +557,7 @@ def plot_storage(self, storage, plot_pars, title="", typ='obj'):
ptya._resize_cid = self.plot_fig.canvas.mpl_connect('resize_event', ptya._after_resize_event)
pty_axes.append(ptya)
# get the layer
+ ptya.set_mask(mask, False)
ptya.set_data(data[layer])
ptya.ax.set_ylim(crop[0],sh[0]-crop[0])
ptya.ax.set_xlim(crop[1],sh[1]-crop[1])
@@ -622,14 +612,14 @@ class MPLClient(MPLplotter):
DEFAULT = DEFAULT
- def __init__(self, client_pars=None, autoplot_pars=None,\
+ def __init__(self, client_pars=None, autoplot_pars=None, home=None,\
layout_pars=None, in_thread=False, is_slave=False):
-
- from ptypy.core.ptycho import DEFAULT_autoplot
- self.config = DEFAULT_autoplot.copy(depth=3)
+
+ from ptypy.core.ptycho import Ptycho
+ self.config = Ptycho.DEFAULT.io.autoplot.copy(depth=3)
self.config.update(autoplot_pars)
# set a home directory
- self.config.home = self.config.get('home',self.DEFAULT.get('home'))
+ self.config.home = home if home is not None else self.DEFAULT.get('home')
layout = self.config.get('layout',layout_pars)
@@ -676,11 +666,177 @@ def loop_plot(self):
u.png2mpg(self._framefile, RemoveImages=True)
-def spawn_MPLClient(client_pars, autoplot_pars):
+class Bragg3dClient(object):
+ """
+ MPLClient analog for 3d Bragg data, which needs to be reduced to 2d
+ before plotting.
+ """
+
+ def __init__(self, client_pars=None, autoplot_pars=None, home=None,
+ in_thread=False, is_slave=False):
+
+ from ptypy.core.ptycho import Ptycho
+ self.p = Ptycho.DEFAULT.io.autoplot.copy(depth=3)
+ self.p.update(autoplot_pars)
+ # need a home directory
+ self.p.home = home if home is not None else DEFAULT.get('home')
+
+ self.runtime = Param()
+ self.ob = Param()
+ self.pr = Param()
+
+ self.log_level = 5 if in_thread else 3
+
+ self.pc = PlotClient(client_pars, in_thread=in_thread)
+ self.pc.start()
+
+ # set up axes
+ self.plotted = False
+ import matplotlib.pyplot as plt
+ self.plt = plt
+ plt.ion()
+ fig, self.ax = plt.subplots(nrows=2, ncols=2)
+ self.plot_fig = fig
+ self.ax_err = self.ax[1,1]
+ self.ax_obj = (self.ax[0,0], self.ax[0,1], self.ax[1,0])
+
+ def loop_plot(self):
+ """
+ Plot forever.
+ """
+ count = 0
+ initialized = False
+ while True:
+ status = self.pc.status
+ if status == self.pc.DATA:
+ self.pr, self.ob, runtime = self.pc.get_data()
+ self.runtime.update(runtime)
+ self.plot_all()
+ count+=1
+ if self.p.dump:
+ self.save(self.p.home + self.p.imfile, count)
+ #plot_file = clean_path(runtime['plot_file'])
+
+ elif status == self.pc.STOPPED:
+ break
+ self.plt.pause(.1)
+
+ if self.p.get('make_movie'):
+ from ptypy import utils as u
+ u.png2mpg(self._framefile, RemoveImages=True)
+
+ def plot_all(self):
+ self.plot_error()
+ self.plot_object()
+ self.plot_probe()
+
+ if 'shrinkwrap' in self.runtime.iter_info[-1].keys():
+ self.plot_shrinkwrap()
+
+ def plot_shrinkwrap(self):
+ try:
+ self.ax_shrinkwrap
+        except AttributeError:
+ _, self.ax_shrinkwrap = plt.subplots()
+ sx, sprofile, low, high = self.runtime.iter_info[-1]['shrinkwrap']
+ iteration = self.runtime.iter_info[-1]['iteration']
+ self.ax_shrinkwrap.clear()
+ self.ax_shrinkwrap.plot(sx, sprofile, 'b')
+ self.ax_shrinkwrap.axvline(low, color='red')
+ self.ax_shrinkwrap.axvline(high, color='red')
+ self.ax_shrinkwrap.set_title('iter: %d, interval: %.3e'
+ %(iteration, (high-low)))
+
+ def plot_object(self):
+
+ data = self.ob.values()[0]['data'][0]
+ center = self.ob.values()[0]['center']
+ psize = self.ob.values()[0]['psize']
+ lims_r3 = (-center[0] * psize[0], (data.shape[0] - center[0]) * psize[0])
+ lims_r1 = (-center[1] * psize[1], (data.shape[1] - center[1]) * psize[1])
+ lims_r2 = (-center[2] * psize[2], (data.shape[2] - center[2]) * psize[2])
+
+ if self.plotted:
+ for ax_ in self.ax_obj:
+ ax_.old_xlim = ax_.get_xlim()
+ ax_.old_ylim = ax_.get_ylim()
+ ax_.clear()
+
+ arr = np.mean(np.abs(data), axis=2).T # (r1, r3) from top left
+ arr = np.flipud(arr) # (r1, r3) from bottom left
+ self.ax_obj[0].imshow(arr, interpolation='none',
+ extent=lims_r3+lims_r1) # extent changes limits, not image orientation
+ self.plt.setp(self.ax_obj[0], ylabel='r1', xlabel='r3', title='side view')
+
+ arr = np.mean(np.abs(data), axis=1).T # (r2, r3) from top left
+ self.ax_obj[1].imshow(arr, interpolation='none',
+ extent=lims_r3+lims_r2[::-1])
+ self.plt.setp(self.ax_obj[1], ylabel='r2', xlabel='r3', title='top view')
+
+ arr = np.mean(np.abs(data), axis=0) # (r1, r2) from top left
+ arr = np.flipud(arr) # (r1, r2) from bottom left
+ self.ax_obj[2].imshow(arr, interpolation='none',
+ extent=lims_r2+lims_r1)
+ self.plt.setp(self.ax_obj[2], ylabel='r1', xlabel='r2', title='front view')
+
+ for ax_ in self.ax_obj:
+ ax_.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
+
+ if self.plotted:
+ for ax_ in self.ax_obj:
+ ax_.set_xlim(ax_.old_xlim)
+ ax_.set_ylim(ax_.old_ylim)
+
+ self.plotted = True
+
+ def plot_probe(self):
+ pass
+
+ def plot_error(self):
+ # error
+ error = np.array([info['error'] for info in self.runtime.iter_info])
+ err_fmag = error[:, 0] / np.max(error[:, 0])
+ err_phot = error[:, 1] / np.max(error[:, 1])
+ err_exit = error[:, 2] / np.max(error[:, 2])
+
+ self.ax_err.clear()
+ self.ax_err.plot(err_fmag, label='err_fmag')
+ self.ax_err.plot(err_phot, label='err_phot')
+ self.ax_err.plot(err_exit, label='err_exit')
+ self.ax_err.legend(loc='upper right')
+
+ def save(self, pattern, count=0):
+ try:
+ r = self.runtime.copy(depth=1)
+ r.update(r.iter_info[-1])
+ plot_file = clean_path(pattern % r)
+ except BaseException:
+ log(self.log_level,'Could not auto generate image dump file from runtime.')
+ plot_file = 'ptypy_%05d.png' % count
+
+ log(self.log_level,'Dumping plot to %s' % plot_file)
+ self.plot_fig.savefig(plot_file,dpi=300)
+ folder,fname = os.path.split(plot_file)
+ mode ='w' if count==1 else 'a'
+ self._framefile = folder+os.path.sep+'frames.txt'
+ with open(self._framefile,mode) as f:
+ f.write(plot_file+'\n')
+ f.close()
+
+
+def spawn_MPLClient(client_pars, autoplot_pars, home=None):
"""
A function that creates and runs a silent instance of MPLClient.
"""
- mplc = MPLClient(client_pars,autoplot_pars, in_thread=True, is_slave=True)
+ cls = MPLClient
+ # 3d Bragg is handled by a MPLClient subclass and is identified by the layout
+ try:
+ if autoplot_pars.layout == 'bragg3d':
+ cls = Bragg3dClient
+ except:
+ pass
+
+ mplc = cls(client_pars, autoplot_pars, home, in_thread=True, is_slave=True)
try:
mplc.loop_plot()
except KeyboardInterrupt:
diff --git a/ptypy/utils/plot_utils.py b/ptypy/utils/plot_utils.py
index 41b07ae68..308e95328 100644
--- a/ptypy/utils/plot_utils.py
+++ b/ptypy/utils/plot_utils.py
@@ -13,20 +13,19 @@
from PIL import Image
import matplotlib as mpl
import matplotlib.cm
-from .verbose import logger
-from .array_utils import grids
-
-__all__ = ['pause', 'rmphaseramp', 'plot_storage', 'imsave', 'imload',
- 'complex2hsv', 'complex2rgb', 'hsv2rgb', 'rgb2complex', 'rgb2hsv',
- 'hsv2complex', 'franzmap']
# importing pyplot may fail when no display is available.
NODISPLAY = (os.getenv("DISPLAY") is None)
if NODISPLAY:
matplotlib.use('agg')
+import matplotlib.pyplot as plt
+from .verbose import logger
+from .array_utils import grids
-import matplotlib.pyplot as plt
+__all__ = ['pause', 'rmphaseramp', 'plot_storage', 'imsave', 'imload',
+ 'complex2hsv', 'complex2rgb', 'hsv2rgb', 'rgb2complex', 'rgb2hsv',
+ 'hsv2complex', 'franzmap','PtyAxis']
# Improved interactive behavior for old versions of matplotlib
try:
@@ -473,7 +472,7 @@ def plot_storage(S, fignum=100, modulus='linear', slices=(slice(1), slice(None),
"""\
Quickly display the data buffer of a :any:`Storage` instance.
- Keyword arguments are those of :any:`PtyAxis`
+ Keyword arguments are the same as :any:`PtyAxis`
Parameters
----------
@@ -567,8 +566,50 @@ def plot_storage(S, fignum=100, modulus='linear', slices=(slice(1), slice(None),
class PtyAxis(object):
+ """
+    Plot environment for matplotlib: an image plot with a color axis,
+    potentially of a complex two-dimensional array.
+
+ Please note that this class may undergo changes or become obsolete altogether.
+ """
def __init__(self, ax=None, data=None, channel='r', cmap=None, fontsize=8, **kwargs):
-
+ """
+
+ Parameters
+ ----------
+
+ ax : pyplot.axis
+ An axis in matplotlib figure. If ``None`` a figure with a single
+ axis will be created.
+
+ data : numpy.ndarray
+        The (complex) two-dimensional data to be displayed.
+
+ channel : str
+ Choose
+
+            - ``'a'`` to plot the absolute/modulus value of the data,
+            - ``'p'`` to plot the phase of the data,
+            - ``'r'`` to plot the real part of the data,
+            - ``'i'`` to plot the imaginary part of the data,
+            - ``'c'`` to plot a composite image where the phase channel maps to
+              hue and the modulus channel maps to the brightness of the color.
+
+ cmap : str
+            String representation of one of matplotlib's colormaps.
+
+ fontsize : int
+ Base font size of labels, etc.
+
+ Keyword Arguments
+ -----------------
+ vmin, vmax : float
+ Minimum and maximum value for colormap scaling
+
+ rmramp : bool
+ Remove phase ramp if ``True``, default is ``False``
+
+ """
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
@@ -636,8 +677,6 @@ def set_mask(self, mask, update=True):
self.mask = mask
if self.shape is None:
self.shape = self.mask.shape
- else:
- assert self.shape == self.mask.shape
else:
self.mask = None
if update:
@@ -703,10 +742,10 @@ def _update(self,renew_image=False):
plt.setp(self.ax.get_yticklabels(), fontsize=self.fontsize)
# determine number of points.
v, h = self.shape
- steps = [1, 2, 5, 10, 20, 50, 100, 200, 500]
- Nindex = steps[max([v/s <= 4 for s in steps].index(True)-1, 0)]
+ steps = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 1500, 2000]
+ Nindex = steps[max([v / s <= 4 for s in steps].index(True) - 1, 0)]
self.ax.yaxis.set_major_locator(mpl.ticker.IndexLocator(Nindex, 0.5))
- Nindex = steps[max([h/s <= 4 for s in steps].index(True)-1, 0)]
+ Nindex = steps[max([h / s <= 4 for s in steps].index(True) - 1, 0)]
self.ax.xaxis.set_major_locator(mpl.ticker.IndexLocator(Nindex, 0.5))
else:
self.ax.images[0].set_data(pilim)
diff --git a/ptypy/utils/scripts.py b/ptypy/utils/scripts.py
index 319d81c2a..f17f22092 100644
--- a/ptypy/utils/scripts.py
+++ b/ptypy/utils/scripts.py
@@ -8,16 +8,17 @@
"""
import numpy as np
import parallel
-import urllib
import urllib2 # TODO: make compatible with python 3.5
-import array_utils as au
from scipy import ndimage as ndi
-from misc import expect2
+
+from . import array_utils as au
+from .misc import expect2
# from .. import io # FIXME: SC: when moved here, io fails
__all__ = ['hdr_image', 'diversify', 'cxro_iref', 'xradia_star', 'png2mpg',
'mass_center', 'phase_from_dpc', 'radial_distribution',
- 'stxm_analysis','stxm_init', 'load_from_ptyr', 'remove_hot_pixels']
+ 'stxm_analysis', 'stxm_init', 'load_from_ptyr', 'remove_hot_pixels']
+
def diversify(A, noise=None, shift=None, power=1.0):
"""
@@ -44,6 +45,10 @@ def diversify(A, noise=None, shift=None, power=1.0):
--------
ptypy.utils.parallel.MPInoise2d
"""
+ # Nothing to do if only one mode
+ if A.shape[0] == 1:
+ return
+
if noise is not None:
noise = parallel.MPInoise2d(A.shape, *noise)
# No noise where the main mode is
diff --git a/ptypy/utils/validator.py b/ptypy/utils/validator.py
deleted file mode 100644
index d9bcfad68..000000000
--- a/ptypy/utils/validator.py
+++ /dev/null
@@ -1,573 +0,0 @@
-# -*- coding: utf-8 -*-
-"""\
-Parameter validation. This module parses the file
-``resources/parameters_descriptions.csv`` to extract the parameter
-defaults for |ptypy|. It saves all parameters in the form of
-a :py:class:`PDesc` object, which are flat listed in
-`parameter_descriptions` or in `entry_points_dct`, which only contains
-parameters with subparameters (children).
-
-This file is part of the PTYPY package.
-
- :copyright: Copyright 2014 by the PTYPY team, see AUTHORS.
- :license: GPLv2, see LICENSE for details.
-"""
-import pkg_resources
-import csv
-# Load all documentation on import
-_csvfile = pkg_resources.resource_filename('ptypy', 'resources/parameter_descriptions.csv')
-_desc_list = list(csv.DictReader(file(_csvfile, 'r')))
-del csv
-del pkg_resources
-
-if __name__=='__main__':
- from ptypy.utils.parameters import Param
-else:
- from .parameters import Param
-
-__all__= ['create_default_template','make_sub_default','validate',\
- 'entry_points_dct', 'parameter_descriptions', 'PDesc']
-
-#! Validator message codes
-CODES = Param(
- PASS=1,
- FAIL=0,
- UNKNOWN=2,
- MISSING=3,
- INVALID=4)
-
-#! Inverse message codes
-CODE_LABEL = dict((v, k) for k, v in CODES.items())
-
-# Populate the dictionary of all entry points.
-from collections import OrderedDict
-
-# All ptypy parameters in an ordered dictionary as (entry_point, PDesc) pairs.
-parameter_descriptions = OrderedDict()
-del OrderedDict
-
-# All parameter containers in a dictionary as (entry_point, PDesc) pairs.
-# Subset of :py:data:`parameter_descriptions`
-entry_points_dct = {}
-
-# Logging levels
-import logging
-
-_logging_levels = Param(
- PASS=logging.INFO,
- FAIL=logging.CRITICAL,
- UNKNOWN=logging.WARN,
- MISSING=logging.WARN,
- INVALID=logging.ERROR)
-
-del logging
-
-_typemap = {'int': 'int',
- 'float': 'float',
- 'complex': 'complex',
- 'str': 'str',
- 'bool': 'bool',
- 'tuple': 'tuple',
- 'list': 'list',
- 'array': 'ndarray',
- 'Param': 'Param',
- 'None': 'NoneType',
- 'file': 'str',
- '': 'NoneType'}
-
-_evaltypes = ['int','float','tuple','list','complex']
-_copytypes = ['str','file']
-
-
-
-class PDesc(object):
- """
- Small class to store all attributes of a ptypy parameter
-
- """
-
- def __init__(self, description, parent=None):
- """
- Stores description list for validation and documentation.
- """
- #: Name of parameter
- self.name = description.get('name', '')
-
- #: Parent parameter (:py:class:`PDesc` type) if it has one.
- self.parent = parent
-
- if parent is not None:
- parent.children[self.name] = self
-
- #: Type can be a comma-separated list of types
- self.type = description.get('type', None)
-
- if 'param' in self.type.lower() or 'dict' in self.type.lower():
- self.children = {}
- entry_points_dct[self.entry_point] = self
- else:
- self.children = None
-
- if self.type is not None:
- self.type = [_typemap[x.strip()] if x.strip() in _typemap else x.strip() for x in self.type.split(',')]
-
-
- self.default = None
- """ Default value can be any type. None if unknown. """
-
- self.set_default(description.get('default', ''))
-
- # Static is 'TRUE' or 'FALSE'
- self.static = (description.get('static', 'TRUE') == 'TRUE')
-
- #: Lower limit of parameter, None if unknown
- self.lowlim = None
- #: Upper limit of parameter, None if unknown
- self.uplim = None
-
- ll = description.get('lowlim', None)
- ul = description.get('uplim', None)
- if 'int' in self.type:
- self.lowlim = int(ll) if ll else None
- self.uplim = int(ul) if ul else None
- else:
- self.lowlim = float(ll) if ll else None
- self.uplim = float(ul) if ul else None
-
- # choices is an evaluable list
- c = description.get('choices', '')
- #print c, self.name
- if str(c)=='':
- c=None
- else:
- try:
- c = eval(c.strip())
- except SyntaxError('Evaluating `choices` %s for parameter %s failed' %(str(c),self.name)):
- c = None
-
- #: If parameter is a list of choices, these are listed here.
- self.choices = c
-
-
- # Docs are strings
-
- #: Short descriptive string of parameter
- self.shortdoc = description.get('shortdoc', '')
-
- #: Longer documentation, may contain *sphinx* inline markup.
- self.longdoc = description.get('longdoc', '')
-
- # User level (for gui stuff) is an int
- ul = description.get('userlevel', 1)
-
- self.userlevel = int(ul) if ul else None
- """User level, a higher level means a parameter that is less
- likely to vary or harder to understand.
- """
-
- # Validity is a string (the name of another parameter)
- # FIXME: this is not used currently
- self.validity = description.get('validity', '')
-
- parameter_descriptions[self.entry_point] = self
-
- @property
- def entry_point(self):
- if self.parent is None:
- return ''
- else:
- return '.'.join([self.parent.entry_point, self.name])
-
- @property
- def is_evaluable(self):
- for t in self.type:
- if t in _evaltypes:
- return True
- break
- return False
-
- def set_default(self,default='', check=False):
- """
- Sets default (str) and derives value (python type)
- """
- default = str(default)
-
- # this destroys empty strings
- self.default = default if default else None
-
- if self.default is None:
- out = None
- # should be only strings now
- elif self.default.lower()=='none':
- out = None
- elif self.default.lower()=='true':
- out = True
- elif self.default.lower()=='false':
- out = False
- elif self.is_evaluable:
- out = eval(self.default)
- else:
- out = self.default
-
- self.value = out
- return out
- """
- @property
- def value(self):
-
- if self.default is None:
- out = None
- # should be only strings now
- elif self.default.lower()=='none':
- out = None
- elif self.default.lower()=='true':
- out = True
- elif self.default.lower()=='false':
- out = False
- elif self.is_evaluable:
- out = eval(self.default)
- else:
- out = self.default
-
- return out
- """
-
- def check(self, pars, walk):
- """
- Check that input parameter pars is consistent with parameter description.
- If walk is True and pars is a Param object, checks are also conducted for all
- sub-parameters.
- """
- ep = self.entry_point
- out = {}
- val = {}
-
- # 1. Data type
- if self.type is None:
- # Unconclusive
- val['type'] = CODES.UNKNOWN
- else:
- val['type'] = CODES.PASS if (type(pars).__name__ in self.type) else CODES.FAIL
-
- # 2. limits
- if self.lowlim is None:
- val['lowlim'] = CODES.UNKNOWN
- else:
- val['lowlim'] = CODES.PASS if (pars >= self.lowlim) else CODES.FAIL
- if self.uplim is None:
- val['uplim'] = CODES.UNKNOWN
- else:
- val['uplim'] = CODES.PASS if (pars <= self.uplim) else CODES.FAIL
-
- # 3. Extra work for parameter entries
- if 'Param' in self.type:
- # Check for missing entries
- for k, v in self.children.items():
- if k not in pars:
- val[k] = CODES.MISSING
-
- # Check for excess entries
- for k, v in pars.items():
- if k not in self.children:
- val[k] = CODES.INVALID
- elif walk:
- # Validate child
- out.update(self.children[k].check(v, walk))
-
- out[ep] = val
- return out
-
- def __str__(self):
- return ''
-
- def make_doc(self):
- """
- Create documentation.
- """
- return '{self.entry_point}\n\n{self.shortdoc}\n{self.longdoc}'.format(self=self)
-
-
-
-## maybe this should be a function in the end.
-# Create the root
-pdroot = PDesc(description={'name': '', 'type': 'Param', 'shortdoc': 'Parameter root'}, parent=None)
-entry_pts = ['']
-entry_dcts = [pdroot] # [{}]
-entry_level = 0
-
-for num, desc in enumerate(_desc_list):
- # Get parameter name and level in the hierarchy
- level = int(desc.pop('level'))
- name = desc['name']
-
- # Manage end of branches
- if level < entry_level:
- # End of a branch
- entry_pts = entry_pts[:(level + 1)]
- entry_dcts = entry_dcts[:(level + 1)]
- entry_level = level
- elif level > entry_level:
- raise RuntimeError('Problem parsing csv file %s, entry %d, name %s' % (_csvfile, num,name))
-
- # Create Parameter description object
- pd = PDesc(desc, parent=entry_dcts[level])
-
- # save a number
- pd.ID = num
- # Manage new branches
- if 'param' in desc['type'].lower() or 'dict' in desc['type'].lower():
- # A new node
- entry_pt = pd.entry_point
- entry_dcts.append(pd) # entry_dcts.append(new_desc)
- entry_level = level + 1
-
-#cleanup
-del level, name, entry_level, pd, entry_pt, entry_dcts
-
-def make_sub_default(entry_point, depth=1):
- """
- Creates a default parameter structure, from the loaded parameter
- descriptions in this module
-
- Parameters
- ----------
- entry_point : str
- The node in the default parameter file
-
- depth : int
- The depth in the structure to which all sub nodes are expanded
- All nodes beyond depth will be returned as empty :any:`Param`
- structure.
-
- Returns
- -------
- pars : Param
- A parameter branch.
-
- Examples
- --------
- >>> from ptypy.utils import validator
- >>> print validator.make_sub_default('.io')
- """
- pd = entry_points_dct[entry_point]
- out = Param()
- if depth<=0:
- return out
- for name,child in pd.children.iteritems():
- if child.children is not None and child.value is None:
- out[name] = make_sub_default(child.entry_point, depth=depth-1)
- else:
- out[name] = child.value
- return out
-
-def validate(pars, entry_point, walk=True, raisecodes=[CODES.FAIL, CODES.INVALID]):
- """
- Check that the parameter structure `pars` matches the documented
- constraints at the given entry_point.
-
- The function raises a RuntimeError if one of the code in the list
- `raisecodes` has been found. If raisecode is empty, the function will
- always return successfully but problems will be logged using logger.
-
- Parameters
- ----------
- pars : Param
- A parameter set to validate
-
- entry_point : str
- The node in the parameter structure to match to.
-
- walk : bool
- If ``True`` (*default*), navigate sub-parameters.
-
- raisecodes: list
- List of codes that will raise a RuntimeError.
- """
- from ptypy.utils.verbose import logger
-
- pdesc = parameter_descriptions[entry_point]
- d = pdesc.check(pars, walk=walk)
- do_raise = False
- for ep, v in d.items():
- for tocheck, outcome in v.items():
- logger.log(_logging_levels[CODE_LABEL[outcome]], '%-50s %-20s %7s' % (ep, tocheck, CODE_LABEL[outcome]))
- do_raise |= (outcome in raisecode)
- if do_raise:
- raise RuntimeError('Parameter validation failed.')
-
-def create_default_template(filename=None,user_level=0,doc_level=2):
- """
- Creates a (descriptive) template for ptypy.
-
- Parameters
- ----------
- filename : str
- python file (.py) to generate, will be overriden if it exists
-
- user_level : int
- Filter parameters to display on those with less/equal user level
-
- doc_level : int
- - if ``0``, no comments.
- - if ``1``, *short_doc* as comment in script
- - if ``>2``, *long_doc* and *short_doc* as comment in script
- """
- def _format_longdoc(doc):
- ld = doc.strip().split('\n')
- out = []
- for line in ld:
- if len(line)==0:
- continue
- if len(line)>75:
- words = line.split(' ')
- nline = ''
- count = 0
- for word in words:
- nline+=word+' '
- count+=len(word)
- if count > 70:
- count = 0
- out.append(nline[:-1])
- nline=""
- out.append(nline[:-1])
- else:
- out.append(line)
- if out:
- return '# '+'\n# '.join(out)+'\n'
- else:
- return ''
-
- if filename is None:
- f = open('ptypy_template.py','w')
- else:
- f = open(filename,'w')
- h = '"""\nThis Script was autogenerated using\n'
- h+= '``u.create_default_template("%s",%d,%d)``\n' %(str(filename),user_level,doc_level)
- h+= 'It is only a TEMPLATE and not a working reconstruction script.\n"""\n\n'
- h+= "import numpy as np\n"
- h+= "import ptypy\n"
- h+= "from ptypy.core import Ptycho\n"
- h+= "from ptypy import utils as u\n\n"
- try:
- from ptypy.utils.verbose import headerline
- h+= headerline('Ptypy Parameter Tree','l','#')+'\n'
- except ImportError:
- h+= '### Ptypy Parameter Tree ###\n\n'
- #h+= "p = u.Param()\n"
- f.write(h)
- for entry, pd in parameter_descriptions.iteritems():
- if user_level < pd.userlevel:
- continue
- if pd.children is not None:
- value = "u.Param()"
- else:
- val = pd.value
- if str(val)== val :
- value = '"%s"' % str(val)
- else:
- value = str(val)
- ID ="%02d" % pd.ID if hasattr(pd,'ID') else 'NA'
- if doc_level > 0:
- f.write('\n'+"## (%s) " % ID +pd.shortdoc.strip()+'\n')
- if doc_level > 1:
- f.write(_format_longdoc(pd.longdoc))
- f.write('p'+entry+ ' = ' + value+'\n')
-
- f.write('\n\nPtycho(p,level=5)\n')
- f.close()
-
-def _add2argparser(parser=None, entry_point='',root=None,\
- excludes=('scans','engines'), mode = 'add',group=None):
-
- sep = '.'
-
- pd = parameter_descriptions[entry_point]
-
- has_children = hasattr(pd,'children') and pd.children is not None
-
- if root is None:
- root = pd.entry_point if has_children else pd.parent.entry_point
-
- assert root in entry_points_dct.keys()
-
- if excludes is not None:
- # remove leading '.'
- lexcludes = [e.strip()[1:] for e in excludes if e.strip().startswith(sep) ]
- loc_exclude = [e.split(sep)[0] for e in lexcludes if len(e.split())==1 ]
- else:
- excludes =['baguette'] #:)
-
- if pd.name in excludes: return
-
- if parser is None:
- from argparse import ArgumentParser
- description = """
- Parser for PDesc %s
- Doc: %s
- """ % (pd.name, pd.shortdoc)
- parser = ArgumentParser(description=description)
-
- # overload the parser
- if not hasattr(parser,'_ptypy_translator'):
- parser._ptypy_translator={}
-
-
- # convert to parser variable
- name = pd.entry_point.replace(root,'',1).partition(sep)[2].replace('.','-')
-
- if has_children:
- entry = pd.entry_point
- if name !='':
- ngroup=parser.add_argument_group(title=name, description=None)
- else:
- ngroup=None
- # recursive behavior here
- new_excludes = [e[len(entry):] for e in excludes if e.startswith(entry)]
- for key,child in pd.children.iteritems():
- _add2argparser(parser, entry_point=child.entry_point,root=root,\
- excludes=excludes,mode = 'add',group=ngroup)
-
- if name=='': return parser
-
- parse = parser if group is None else group
-
- # this should be part of PDesc I guess.
- typ = None
- for t in pd.type:
- try:
- typ= eval(t)
- except BaseException:
- continue
- if typ is not None:
- break
- if typ is None:
- u.verbose.logger.debug('Failed evaluate type strings %s of parameter %s in python' % (str(pd.type),name))
- return parser
-
- if type(typ) is not type:
- u.verbose.logger.debug('Type %s of parameter %s is not python type' % (str(typ),name))
- return parser
-
- elif typ is bool:
- flag = '--no-'+name if pd.value else '--'+name
- action='store_false' if pd.value else 'store_true'
- parse.add_argument(flag, dest=name, action=action,
- help=pd.shortdoc )
- else:
- d = pd.default
- defstr = d.replace('%(','%%(') if str(d)==d else str(d)
- parse.add_argument('--'+name, dest=name, type=typ, default = pd.value, choices=pd.choices,
- help=pd.shortdoc +' (default=%s)' % defstr)
-
- parser._ptypy_translator[name] = pd
-
- return parser
-
-if __name__ =='__main__':
- from ptypy import utils as u
-
-
-
- parser = _add2argparser(entry_point='.scan.illumination')
- parser.parse_args()
-
diff --git a/release_notes.md b/release_notes.md
new file mode 100644
index 000000000..bac602b98
--- /dev/null
+++ b/release_notes.md
@@ -0,0 +1,121 @@
+# Ptypy 0.3 release notes
+
+We are happy to announce that ptypy 0.3 is now out. If you have been using ptypy 0.2 (from the master branch), the transition should be manageable but is far from automatic - see below. The essence of this new release is
+ 1. a redesign of ptypy's internal structure, especially the introduction of an extendable [`ScanModel`](https://github.com/ptycho/ptypy/blob/master/ptypy/core/manager.py), which should make new ideas and new algorithms easier to implement (a big collective effort involving A. Björling, A. Parsons, B. Enders and P. Thibault),
+ 2. support for 3D Bragg ptychography, which uses the new `ScanModel` structure (all thanks to A. Björling),
+ 3. extensive testing of most components of the code, and Travis CI integration (huge work by A. Parsons and important contributions by S. Chalkidis),
+ 4. more dimensions for `Storage` classes, reduced memory footprint and reduced object count, as `Views` are now slotted and don't hold other objects (B. Enders and A. Björling), and
+ 5. the introduction of the [`descriptor`](https://github.com/ptycho/ptypy/blob/master/ptypy/utils/descriptor.py) submodule, which manages the whole parameter tree, including validation, defaults, and documentation (collective effort led by B. Enders and P. Thibault).
+
+
+## Breaking changes
+
+The streamlining of the input parameters means that *all reconstruction scripts for version 0.2 will now fail*. We had no choice.
+
+The changes were required in order to solve the following problems:
+ 1. Parameter definitions, documentation and defaults were kept in different locations, making them hard to track and maintain.
+ 2. The meaning of a branch set to `None` was ambiguous.
+ 3. Basic experiment geometry (distances, radiation energy, etc.) could be specified in two different locations.
+ 4. In general, the standards were not clear.
+
+The solution to all these problems came with the `descriptor` submodule. For a user, what matters most is that `ptypy.defaults_tree` now contains the description of the full set of parameters known to ptypy.
+
+### `defaults_tree`
+
+Here's a short example of how `defaults_tree` is used internally, and how you can use it in your scripts or on the command line to inspect `ptypy`'s parameter structure.
+
+```python
+import ptypy
+
+# Extract one branch
+desc_DM_simple = ptypy.defaults_tree['engine.DM_simple']
+
+# Print out the description of all sub-parameters
+print desc_DM_simple.to_string()
+```
+```
+[numiter]
+lowlim = 1
+help = Total number of iterations
+default = 123
+type = int
+
+[numiter_contiguous]
+lowlim = 1
+help = Number of iterations without interruption
+default = 1
+doc = The engine will not return control to the caller until this number of iterations is completed (not processing server requests, I/O operations, ...).
+type = int
+
+[probe_support]
+lowlim = 0.0
+help = Valid probe area as fraction of the probe frame
+default = 0.7
+doc = Defines a circular area centered on the probe frame, in which the probe is allowed to be nonzero.
+type = float
+
+[name]
+help =
+default = DM_simple
+doc =
+type = str
+
+[alpha]
+lowlim = 0.0
+help = Difference map parameter
+default = 1
+type = float
+
+[overlap_converge_factor]
+lowlim = 0.0
+help = Threshold for interruption of the inner overlap loop
+default = 0.05
+doc = The inner overlap loop refines the probe and the object simultaneously. This loop is escaped as soon as the overall change in probe, relative to the first iteration, is less than this value.
+type = float
+
+[overlap_max_iterations]
+lowlim = 1
+help = Maximum of iterations for the overlap constraint inner loop
+default = 10
+type = int
+```
+```python
+# Generate defaults
+p = desc_DM_simple.make_default(depth=1)
+
+# Validate
+# (try with ptypy.utils.verbose.set_level(5) to get detailed DEBUG output)
+desc_DM_simple.validate(p)
+
+# Here's what happens if a parameter is wrong:
+p.numiter = 'a'
+desc_DM_simple.validate(p)
+```
+```
+ERROR root - numiter type INVALID
+(...)
+```
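+
+For quick inspection without writing a script, the same call also works as a shell one-liner (substitute any branch path for `engine.DM_simple`):
+
+```
+python -c "import ptypy; print ptypy.defaults_tree['engine.DM_simple'].to_string()"
+```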
+
+### How reconstruction scripts have to be changed
+
+1. All `scans` sub-entries have a `name`. This name refers to one of the `ScanModel` classes, for now only `Vanilla`, `Full`, and `Bragg3dModel`. Most users will want to use `Full`. Other models will come as we implement engines that require fundamental changes to `pod` creation.
+
+2. Data preparation: the sub-tree `recipe` does not exist anymore, and all parameters associated with a `PtyScan` subclass are specified directly in the `scan.???.data` sub-tree. The `geometry` sub-tree is also gone, with all relevant parameters now in the `scan.???.data` sub-tree.
+
+3. There is no longer an `.engine` sub-tree. This used to be present to change the default parameters of specific engines (or all of them using `engine.common`) before engines were declared in the `engines.*` sub-tree. We have found that this duplication was a source of confusion. Now the single place where engine parameters are set is the `engines.*` sub-tree.
+
+4. A sub-tree cannot be set to `None`. To deactivate a feature associated with a sub-tree, one has to set `active=False`. For instance `io.autoplot = None` is not valid anymore, and `io.autoplot.active = False` has to be used instead (see the sketch after this list).
+
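+Putting these four changes together, a minimal 0.3-style script looks roughly like this (a sketch assembled from the templates shipped with this release, using the simulated `MoonFlowerScan` data source):
+
+```python
+from ptypy.core import Ptycho
+from ptypy import utils as u
+
+p = u.Param()
+p.scans = u.Param()
+p.scans.MF = u.Param()
+p.scans.MF.name = 'Full'                  # 1. every scan names its ScanModel
+p.scans.MF.data = u.Param()
+p.scans.MF.data.name = 'MoonFlowerScan'   # 2. PtyScan parameters sit in .data, no recipe
+p.io = u.Param()
+p.io.autoplot = u.Param(active=False)     # 4. sub-trees are deactivated with active=False
+p.engines = u.Param()
+p.engines.engine00 = u.Param()            # 3. engine parameters live only under engines.*
+p.engines.engine00.name = 'DM'
+p.engines.engine00.numiter = 80
+
+P = Ptycho(p, level=5)
+```
+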
+## Other contributions
+
+ * Option to use pyfftw (thanks to L. Bloch, ESRF)
+ * Scalability tests (thanks to C. Kewish, Australian Synchrotron)
+ * A first draft of a jupyter-based plot client (B. Daurer, now National University of Singapore)
+ * Bug fixes and tests (many people)
+
+## Roadmap
+
+The next release will focus on optimisation and speed. We will also soon switch to python 3.
diff --git a/scripts/ptypy.new b/scripts/ptypy.new
index b0ef4bed3..7bfc6d25d 100644
--- a/scripts/ptypy.new
+++ b/scripts/ptypy.new
@@ -3,45 +3,32 @@
import sys
import argparse
-parser = argparse.ArgumentParser(description='Creates a new ptypy script from defaults.')
+class MoreGentleParser(argparse.ArgumentParser):
+ def error(self, message):
+ sys.stderr.write('error: %s\n\n' % message)
+ self.print_help()
+ sys.exit(2)
+
+parser = MoreGentleParser(description='Creates a new ptypy reconstruction script from defaults.')
parser.add_argument('pyfile', type=str,
- help='file path of target .py scriptfile, existing file will be overriden')
+ help='target .py script file, existing files will be overwritten')
parser.add_argument('-u','--user-level', dest='ulevel', type=int, default=0,
help='use parameters up to this complexity level (0--2)')
-parser.add_argument('-d','--doc-level', dest='dlevel', type=int, default=2,
- help='unused')
parser.add_argument('--short-doc', dest='sdoc', action='store_true',
- help='include short doc as comment')
+ help='include help info as inline comments')
parser.add_argument('--long-doc', dest='ldoc', action='store_true',
- help='include extended doc as comment')
+ help='include both help and doc info')
pars = parser.parse_args()
-usage = """\
-Usage:
-%s [script_name.py] [doc_level=2] [user_level=0]
-
-Creates a new ptypy script from defaults of name 'ptypy_script.py'
-doc_level = 0 : no docs
-doc_level = 1 : short docstrings
-doc_level = 2 : long docstrings & short doc_strings
-user_level = 0 : basic parameters
-user_level = 1 : advanced parameters
-user_level = 2 : all parameters
-""" % sys.argv[0]
-"""
-if len(sys.argv) not in [2,3,4]:
- print usage
- sys.exit(0)
-filename = sys.argv[1] if len(sys.argv)>1 else 'ptypy_template.py'
-doc_level = int(sys.argv[2]) if len(sys.argv)>2 else 2
-user_level = int(sys.argv[3]) if len(sys.argv)>3 else 0
-"""
from ptypy import utils as u
if pars.sdoc:
doc_level = 1
elif pars.ldoc:
doc_level = 2
else:
- doc_level = 0
-u.validator.create_default_template(pars.pyfile, pars.ulevel,doc_level)
+ doc_level = 0
+
+from ptypy import defaults_tree
+defaults_tree.create_template(filename=pars.pyfile,
+ user_level=pars.ulevel, doc_level=doc_level, start_at_root=True)
diff --git a/scripts/ptypy.plot b/scripts/ptypy.plot
index 2a7ef8319..6f2ca4cc9 100644
--- a/scripts/ptypy.plot
+++ b/scripts/ptypy.plot
@@ -57,7 +57,7 @@ Plotter.plot_fig.canvas.mpl_connect('close_event', handle_close)
#Plotter.plot_fig.canvas.mpl_connect('resize_event', handle_resize)
if save is not None:
- Plotter.plot_fig.savefig(save, dpi = 300)
+ Plotter.plot_fig.savefig(save, dpi=300)
else:
while Plotter:
#Plotter.plot_all()
diff --git a/scripts/ptypy.plotclient b/scripts/ptypy.plotclient
index 0b2577cfd..e4945ddf0 100644
--- a/scripts/ptypy.plotclient
+++ b/scripts/ptypy.plotclient
@@ -2,39 +2,25 @@
import sys
import argparse
-from ptypy import utils as u
+from ptypy import defaults_tree
+from ptypy.utils.plot_client import MPLClient
parser = argparse.ArgumentParser(description='Create a ZeroMQ plotting client.')
-excludes =['active','threaded','connections','interval']
-u.validator._add2argparser(parser, entry_point = '.io.autoplot',excludes= excludes)
-parser = u.validator._add2argparser(parser, entry_point = '.io.interaction',excludes= excludes)
+plot_desc = defaults_tree['io.autoplot']
+excludes = ['active']
+parser = plot_desc.add2argparser(parser, excludes=excludes)
-parser.add_argument('-d','--dir', dest='directory', type=str,default='./' ,
- help='image dump directory' )
+excludes = ['active', 'threaded', 'connections', 'interval']
+client_desc = defaults_tree['io.interaction']
+parser = client_desc.add2argparser(parser, excludes=excludes)
+parser.add_argument('-d', '--dir', dest='directory', type=str, default='./',
+ help='image dump directory')
+
+# Parse command line arguments. This will update defaults in the defaults_tree
args = parser.parse_args()
-for key,pd in parser._ptypy_translator.items():
- pd.set_default(getattr(args,key,pd.default))
-plot_pars = u.validator.make_sub_default('.io.autoplot')
-plot_pars['home'] = getattr(args,'directory',pd.value)
-client_pars = u.validator.make_sub_default('.io.interaction')
-"""
-parser = argparse.ArgumentParser(description='Create a ZeroMQ plotting client.')
-parser.add_argument('-l', dest='layout', type=str,
- help='layout of the plotter')
-parser.add_argument('--dump', dest='dump', action='store_true',
- help='dump .png images')
-parser.add_argument('--movie', dest='dump', action='store_false',
- help='create movie when reconstruction is finished')
-parser.add_argument('-i', dest='interval', type=int,
- help='minimum plotting interval')
-parser.add_argument('-d', dest='directory', type=str,
- help='image dump directory')
-parser.add_argument('-a', dest='address', type=str,
- help='address of reconstruction server')
-"""
-#args = parser.parse_args()
-from ptypy import io
-u.verbose.set_level(3)
-plotter=u.MPLClient(client_pars=client_pars, autoplot_pars=plot_pars)
+plot_pars = plot_desc.make_default(depth=5)
+client_pars = client_desc.make_default(depth=5)
+
+plotter = MPLClient(client_pars=client_pars, autoplot_pars=plot_pars)
plotter.loop_plot()
diff --git a/scripts/ptypy.run b/scripts/ptypy.run
new file mode 100644
index 000000000..f2fd78eaf
--- /dev/null
+++ b/scripts/ptypy.run
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+import argparse
+import json
+from ptypy.core import Ptycho
+from ptypy.utils.verbose import log
+from ptypy import utils as u
+
+u.verbose.set_level(3)
+parser = argparse.ArgumentParser(description='Runs a ptychography from the command line.')
+
+parser.add_argument("json_file",
+ help="The path to the json configuration file.",
+ type=str)
+
+parser.add_argument('--output-folder', '-O',
+ dest="output_folder",
+ help="The path we want the outputs to exist in (will get created).",
+ type=str)
+
+parser.add_argument('--ptypy-level', '-L',
+ dest="ptypy_level",
+ help="The level we want to run to ptypy to.",
+ default=5,
+ type=str)
+
+parser.add_argument('--identifier', '-I',
+ dest="identifier",
+ help="This is the same as p.run.",
+ default=None,
+ type=str)
+
+parser.add_argument('--plot', '-P',
+ dest="plotting",
+ help="A switch for the plotting. 1: on, 0:off",
+ default=1,
+ type=int)
+
+args = parser.parse_args()
+
+def _byteify(data, ignore_dicts = False):
+ # if this is a unicode string, return its string representation
+ if isinstance(data, unicode):
+ return data.encode('utf-8')
+ # if this is a list of values, return list of byteified values
+ if isinstance(data, list):
+ return [ _byteify(item, ignore_dicts=True) for item in data ]
+ # if this is a dictionary, return dictionary of byteified keys and values
+ # but only if we haven't already byteified it
+ if isinstance(data, dict) and not ignore_dicts:
+ return {
+ _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
+ for key, value in data.iteritems()
+ }
+ # if it's anything else, return it in its original form
+ return data
+
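+# The json configuration file read below is expected to contain two top-level
+# keys, matching what get_parameters() uses (a sketch; values are examples):
+#   {
+#       "base_file": null,          # or path to a previous .ptyr file
+#       "parameter_tree": { ... }   # nested ptypy parameters
+#   }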
+def get_parameters(args):
+ in_dict = json.load(open(args.json_file), object_hook=_byteify)
+ parameters_to_run = u.Param()
+ if in_dict['base_file'] is not None:
+ log(3, "Basing this scan off of the scan in {}".format(in_dict['base_file']))
+ previous_scan = Ptycho.load_run(in_dict['base_file'], False) # load in the run but without the data
+ previous_parameters = previous_scan.p
+ parameters_to_run.update(previous_parameters)
+ if in_dict['parameter_tree'] is not None:
+ parameters_to_run.update(in_dict['parameter_tree'], Convert=True)
+ return parameters_to_run
+
+def get_file_name(args):
+ from datetime import datetime
+ now = datetime.now()
+ if args.identifier is not None:
+ output_path = "{}scan_{}_{}".format(args.output_folder, args.identifier, now.strftime("%Y%m%d%H%M%S"))
+ else:
+ output_path = "{}scan_{}".format(args.output_folder, now.strftime("%Y%m%d%H%M%S"))
+ log(3, "Output is going in: {}".format(output_path))
+ return output_path
+
+
+parameters = get_parameters(args)
+parameters.run = args.identifier
+if args.plotting:
+ log(3, "Turning the plotting on.")
+ parameters.io.autoplot = u.Param(active=True)
+else:
+ log(3, "Turning the plotting off.")
+ parameters.io.autoplot = u.Param(active=False)
+# make sure we aren't randomly writing somewhere if this isn't set.
+if args.output_folder is not None:
+ output_path = get_file_name(args) # call once so home and rfile share the same timestamp
+ parameters.io.home = output_path
+ parameters.io.rfile = "%s.ptyr" % output_path
+ parameters.io.autosave = u.Param(active=True)
+ log(3, "Autosave is on, with io going in {}, and the final reconstruction into {}".format(parameters.io.home,
+ parameters.io.rfile))
+else:
+ parameters.io.rfile = None
+ parameters.io.autosave = u.Param(active=False)
+ log(3, "Autosave is off. No output will be saved.")
+
+
+P = Ptycho(parameters, level=args.ptypy_level)
+
diff --git a/setup.py b/setup.py
index 954b16dc5..930f3111c 100644
--- a/setup.py
+++ b/setup.py
@@ -21,6 +21,7 @@
#import os
#if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+
def write_version_py(filename='ptypy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM ptypy/setup.py
@@ -47,43 +48,46 @@ def write_version_py(filename='ptypy/version.py'):
finally:
a.close()
-if __name__=='__main__':
+if __name__ == '__main__':
write_version_py()
try:
execfile('ptypy/version.py')
- vers= version
+ vers = version
except:
vers = VERSION
setup(
- name = 'Python Ptychography toolbox',
- version = VERSION,
- author = 'Pierre Thibault, Bjoern Enders, Martin Dierolf and others',
- description = 'Ptychographic reconstruction toolbox',
- long_description = file('README.rst','r').read(),
+ name='Python Ptychography toolbox',
+ version=VERSION,
+ author='Pierre Thibault, Bjoern Enders, Martin Dierolf and others',
+ description='Ptychographic reconstruction toolbox',
+ long_description=file('README.rst', 'r').read(),
#install_requires = ['numpy>=1.8',\
#'h5py>=2.2',\
#'matplotlib>=1.3',\
#'pyzmq>=14.0',\
#'scipy>=0.13',\
#'mpi4py>=1.3'],
- package_dir = {'ptypy':'ptypy'},
- packages = ['ptypy',
- 'ptypy.core',\
- 'ptypy.debug',\
- 'ptypy.utils',\
- 'ptypy.simulations',\
- 'ptypy.engines',\
- 'ptypy.io',\
- 'ptypy.resources',\
- 'ptypy.experiment'],
- package_data={'ptypy':['resources/*',]},
+ package_dir={'ptypy': 'ptypy'},
+ packages=['ptypy',
+ 'ptypy.core',
+ 'ptypy.debug',
+ 'ptypy.utils',
+ 'ptypy.simulations',
+ 'ptypy.engines',
+ 'ptypy.io',
+ 'ptypy.resources',
+ 'ptypy.experiment',
+ 'ptypy.experiment.legacy',
+ 'ptypy.test'],
+ package_data={'ptypy': ['resources/*', ]},
#include_package_data=True
- scripts = [
+ scripts=[
'scripts/ptypy.plot',
'scripts/ptypy.inspect',
'scripts/ptypy.plotclient',
'scripts/ptypy.new',
- 'scripts/ptypy.csv2cp'
+ 'scripts/ptypy.csv2cp',
+ 'scripts/ptypy.run'
],
)
diff --git a/templates/bragg_field_of_view.py b/templates/bragg_field_of_view.py
new file mode 100644
index 000000000..16d726458
--- /dev/null
+++ b/templates/bragg_field_of_view.py
@@ -0,0 +1,94 @@
+from ptypy.core import Ptycho
+from ptypy import utils as u
+import matplotlib.pyplot as plt
+import numpy as np
+
+# Set up a parameter tree
+p = u.Param()
+
+p.verbose_level = 3
+
+# illumination for data simulation and pods
+illumination = u.Param()
+illumination.aperture = u.Param()
+illumination.aperture.size = (1e-6, 3e-6)
+illumination.aperture.form = 'rect'
+
+# these parameters determine the whole geometry (rocking steps, theta, energy, ...)
+p.scans = u.Param()
+p.scans.scan01 = u.Param()
+p.scans.scan01.name = 'Bragg3dModel' # 3d Bragg
+p.scans.scan01.illumination = illumination
+p.scans.scan01.data= u.Param()
+p.scans.scan01.data.name = 'Bragg3dSimScan' # PtyScan which provides simulated data
+p.scans.scan01.data.illumination = illumination
+p.scans.scan01.data.theta_bragg = 20.0 # the central Bragg angle
+p.scans.scan01.data.shape = 512
+p.scans.scan01.data.psize = 40e-6
+p.scans.scan01.data.n_rocking_positions = 40 # 40 rocking positions per scanning position
+p.scans.scan01.data.dry_run = True # Don't actually calculate diff patterns
+
+# Create a Ptycho instance, this creates a numerical sample and simulates
+# the diffraction experiment
+P = Ptycho(p,level=2)
+
+# This particular PtyScan also exports the object used for simulation as an attribute
+S_true = P.modelm.scans['scan01'].ptyscan.simulated_object
+
+# We can grab the object storage from the Ptycho instance
+S = P.obj.storages.values()[0]
+
+# Similarly, we can find a view of the probe
+probeView = P.probe.views.values()[0]
+
+# Let's define an object view to study
+objView = S.views[1]
+
+# In order to visualize the field of view, we'll create an empty copy of
+# the object and set its value to 1 where covered by the chosen view.
+S_display = S.copy(owner=S.owner, ID='Sdisplay')
+S_display.fill(0.0)
+S_display[objView] = 1
+
+# Then, to see how the probe is contained by this field of view, we add
+# the probe and the numerical sample itself to the above view.
+S_display[objView][np.where(np.abs(probeView.data) > .1)] = 2
+S_display.data[np.where(S_true.data)] = 3
+
+# Until now, we've been operating in the non-orthogonal 'natural'
+# coordinate system, which is good but hard to understand. We can
+# convert to orthogonal (z, x, y) space by using a method on the
+# geometry object, found from any of the pods.
+geo = P.pods.values()[0].geometry
+S_display_cart = geo.coordinate_shift(S_display, input_system='natural', input_space='real', keep_dims=True)
+
+# Plot some slices
+fig, ax = plt.subplots(nrows=1, ncols=3)
+x, z, y = S_display_cart.grids()
+
+# all Bragg storages are (r3, r1, r2) or (x, z, y), so...
+
+cmap = plt.get_cmap('viridis', lut=4)
+
+arr = np.abs(S_display_cart.data[0][:,:,objView.dcoord[2]]).T # (z, x) from top left
+arr = np.flipud(arr) # (z, x) from bottom left
+ax[0].imshow(arr, extent=[x.min(), x.max(), z.min(), z.max()],
+ interpolation='none', vmin=0, vmax=3, cmap=cmap)
+plt.setp(ax[0], ylabel='z', xlabel='x', title='side view')
+
+arr = np.abs(S_display_cart.data[0][:,objView.dcoord[1],:]).T # (y, x) from top left
+ax[1].imshow(arr, extent=[x.min(), x.max(), y.max(), y.min()],
+ interpolation='none', vmin=0, vmax=3, cmap=cmap)
+plt.setp(ax[1], ylabel='y', xlabel='x', title='top view')
+
+arr = np.abs(S_display_cart.data[0][objView.dcoord[0],:,:]) # (z, y) from top left
+arr = np.flipud(arr) # (z, y) from bottom left
+im = ax[2].imshow(arr, extent=[y.min(), y.max(), z.min(), z.max()],
+ interpolation='none', vmin=0, vmax=3, cmap=cmap)
+plt.setp(ax[2], ylabel='z', xlabel='y', title='front view')
+
+pc = plt.colorbar(im, ax=list(ax))
+pc.set_ticks([1, 2, 3])
+pc.set_ticklabels(['field of view', 'probe', 'object'])
+
+plt.show()
diff --git a/templates/bragg_prep_and_run.py b/templates/bragg_prep_and_run.py
new file mode 100644
index 000000000..24ed35f15
--- /dev/null
+++ b/templates/bragg_prep_and_run.py
@@ -0,0 +1,49 @@
+from ptypy.core import Ptycho
+from ptypy import utils as u
+
+p = u.Param()
+p.run = 'Si110_stripes'
+
+# for verbose output
+p.verbose_level = 3
+
+# use special plot layout for 3d data
+p.io = u.Param()
+p.io.home = '/tmp/ptypy/'
+p.io.autoplot = u.Param()
+p.io.autoplot.layout = 'bragg3d'
+p.io.autoplot.dump = True
+
+# illumination for simulation and reconstruction
+illumination = u.Param()
+illumination.aperture = u.Param()
+illumination.aperture.size = 3e-6
+illumination.aperture.form = 'circ'
+
+p.scans = u.Param()
+p.scans.scan01 = u.Param()
+p.scans.scan01.name = 'Bragg3dModel'
+p.scans.scan01.illumination = illumination
+p.scans.scan01.data= u.Param()
+p.scans.scan01.data.name = 'Bragg3dSimScan'
+p.scans.scan01.data.illumination = illumination
+p.scans.scan01.sample = u.Param()
+p.scans.scan01.sample.fill = 1e-3
+
+p.engines = u.Param()
+p.engines.engine00 = u.Param()
+p.engines.engine00.name = 'DM_3dBragg'
+p.engines.engine00.numiter = 100
+p.engines.engine00.probe_update_start = 100000
+p.engines.engine00.probe_support = None
+p.engines.engine00.sample_support = u.Param()
+p.engines.engine00.sample_support.coefficient = 0.0
+p.engines.engine00.sample_support.type = 'thinlayer'
+p.engines.engine00.sample_support.shrinkwrap = u.Param()
+p.engines.engine00.sample_support.shrinkwrap.cutoff = .3
+p.engines.engine00.sample_support.shrinkwrap.smooth = None
+p.engines.engine00.sample_support.shrinkwrap.start = 15
+p.engines.engine00.sample_support.shrinkwrap.plot = True
+
+# prepare and run
+P = Ptycho(p,level=5)
diff --git a/templates/make_sample_ptyd.py b/templates/make_sample_ptyd.py
index a1c98b36a..09153fe05 100644
--- a/templates/make_sample_ptyd.py
+++ b/templates/make_sample_ptyd.py
@@ -6,31 +6,27 @@
import time
import ptypy
from ptypy import utils as u
-
+from ptypy.core.data import MoonFlowerScan
# for verbose output
u.verbose.set_level(3)
# create data parameter branch
data = u.Param()
data.dfile = 'sample.ptyd'
-data.shape = 128
-data.num_frames = 100
+data.num_frames = 200
data.save = 'append'
-data.label=None
-data.psize=172e-6
-data.energy=6.2
-data.center='fftshift'
-data.distance =7
+data.label = None
data.auto_center = None
data.rebin = None
data.orientation = None
# create PtyScan instance
-MF = ptypy.core.data.MoonFlowerScan(data)
+MF = MoonFlowerScan(data)
+
MF.initialize()
for i in range(2):
# autoprocess data
- msg = MF.auto(100)
+ msg = MF.auto(200)
time.sleep(2)
# logs the out put of .auto() to terminal prompt
u.verbose.logger.info(u.verbose.report(msg), extra={'allprocesses': True})
diff --git a/templates/minimal_load_and_run.py b/templates/minimal_load_and_run.py
index eac8977bf..3705d42e4 100644
--- a/templates/minimal_load_and_run.py
+++ b/templates/minimal_load_and_run.py
@@ -1,7 +1,8 @@
"""
This script is a test for ptychographic reconstruction after an
experiment has been carried out and the data is available in ptypy's
-data file format in "/tmp/ptypy/sample.ptyd"
+data file format in the current directory as "sample.ptyd". Use together
+with `make_sample_ptyd.py`.
"""
import ptypy
from ptypy.core import Ptycho
@@ -11,56 +12,22 @@
p.verbose_level = 3
p.io = u.Param()
p.io.home = "/tmp/ptypy/"
-p.autosave = None
p.scans = u.Param()
p.scans.MF = u.Param()
p.scans.MF.data= u.Param()
-p.scans.MF.data.source = 'sample.ptyd'#'file'
-p.scans.MF.data.dfile = None#'sample.ptyd'
-
-p.engine = u.Param()
-
-## Common defaults for all engines
-p.engine.common = u.Param()
-# Total number of iterations
-p.engine.common.numiter = 100
-# Number of iterations to be executed in one go
-p.engine.common.numiter_contiguous = 1
-# Fraction of valid probe area (circular) in probe frame
-p.engine.common.probe_support = None
-# Number of iterations before probe update starts
-p.engine.common.probe_update_start = 2
-# Clip object amplitude into this intrervall
-p.engine.common.clip_object = None # [0,1]
-
-## DM default parameters
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-# HIO parameter
-p.engine.DM.alpha = 1
-# Probe fraction kept from iteration to iteration
-p.engine.DM.probe_inertia = 0.01
-# Object fraction kept from iteration to iteration
-p.engine.DM.object_inertia = 0.1
-# If False: update object before probe
-p.engine.DM.update_object_first = True
-# Gaussian smoothing (FWHM, pixel units) of object
-p.engine.DM.obj_smooth_std = 10
-# Loop the overlap constraint until probe changes lesser than this fraction
-p.engine.DM.overlap_converge_factor = 0.5
-# Maximum iterations to be spent inoverlap constraint
-p.engine.DM.overlap_max_iterations = 100
-# If rms of model vs diffraction data is smaller than this fraction,
-# Fourier constraint is considered fullfilled
-p.engine.DM.fourier_relax_factor = 0.05
+p.scans.MF.name = 'Vanilla'
+p.scans.MF.data.name = 'PtydScan'
+p.scans.MF.data.source = 'file'
+p.scans.MF.data.dfile = 'sample.ptyd'
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
-p.engines.engine00.numiter = 30
+p.engines.engine00.numiter = 80
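+# the ML engine below is disabled by wrapping it in a bare string literal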
+"""
p.engines.engine01 = u.Param()
p.engines.engine01.name = 'ML'
p.engines.engine01.numiter = 20
-
+"""
P = Ptycho(p,level=5)
diff --git a/templates/minimal_prep_and_run.py b/templates/minimal_prep_and_run.py
index 1e4f7344b..cb292211a 100644
--- a/templates/minimal_prep_and_run.py
+++ b/templates/minimal_prep_and_run.py
@@ -3,7 +3,7 @@
of actual data. It uses the test Scan class
`ptypy.core.data.MoonFlowerScan` to provide "data".
"""
-import ptypy
+
from ptypy.core import Ptycho
from ptypy import utils as u
p = u.Param()
@@ -16,29 +16,30 @@
p.io.home = "/tmp/ptypy/"
p.io.autosave = None
-# max 100 frames (128x128px) of diffraction data
+# max 200 frames (128x128px) of diffraction data
p.scans = u.Param()
p.scans.MF = u.Param()
+# now you have to specify which ScanModel to use with scans.XX.name,
+# just as you have to give 'name' for engines and PtyScan subclasses.
+p.scans.MF.name = 'Vanilla' # or 'Full'
p.scans.MF.data= u.Param()
-p.scans.MF.data.source = 'test'
+p.scans.MF.data.name = 'MoonFlowerScan'
p.scans.MF.data.shape = 128
-p.scans.MF.data.num_frames = 100
+p.scans.MF.data.num_frames = 200
p.scans.MF.data.save = None
-## special recipe paramters for this scan ##
-p.scans.MF.data.recipe = u.Param()
# position distance in fraction of illumination frame
-p.scans.MF.data.recipe.density = 0.2
+p.scans.MF.data.density = 0.2
# total number of photon in empty beam
-p.scans.MF.data.recipe.photons = 1e8
+p.scans.MF.data.photons = 1e8
# Gaussian FWHM of possible detector blurring
-p.scans.MF.data.recipe.psf = 0.
+p.scans.MF.data.psf = 0.
# attach a reconstrucion engine
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
-p.engines.engine00.numiter = 30
+p.engines.engine00.numiter = 80
# prepare and run
P = Ptycho(p,level=5)
diff --git a/templates/minimal_prep_and_run_ML.py b/templates/minimal_prep_and_run_ML.py
index d8496ee0a..b3751bf93 100644
--- a/templates/minimal_prep_and_run_ML.py
+++ b/templates/minimal_prep_and_run_ML.py
@@ -22,8 +22,9 @@
# max 100 frames (128x128px) of diffraction data
p.scans = u.Param()
p.scans.MF = u.Param()
+p.scans.MF.name = 'Full'
p.scans.MF.data= u.Param()
-p.scans.MF.data.source = 'test'
+p.scans.MF.data.name = 'MoonFlowerScan'
p.scans.MF.data.shape = 128
p.scans.MF.data.num_frames = 100
p.scans.MF.data.save = None
diff --git a/templates/minimal_prep_and_run_probe_from_array.py b/templates/minimal_prep_and_run_probe_from_array.py
new file mode 100644
index 000000000..33f9ae910
--- /dev/null
+++ b/templates/minimal_prep_and_run_probe_from_array.py
@@ -0,0 +1,51 @@
+"""
+This script is a test for ptychographic reconstruction in the absence
+of actual data. It uses the test Scan class
+`ptypy.core.data.MoonFlowerScan` to provide "data".
+"""
+
+from ptypy.core import Ptycho
+from ptypy import utils as u
+import numpy as np
+
+p = u.Param()
+
+# for verbose output
+p.verbose_level = 3
+
+# set home path
+p.io = u.Param()
+p.io.home = "/tmp/ptypy/"
+p.io.autosave = None
+
+# max 200 frames (128x128px) of diffraction data
+p.scans = u.Param()
+p.scans.MF = u.Param()
+# now you have to specify which ScanModel to use with scans.XX.name,
+# just as you have to give 'name' for engines and PtyScan subclasses.
+p.scans.MF.name = 'Full' # or 'Vanilla'
+p.scans.MF.data= u.Param()
+p.scans.MF.data.name = 'MoonFlowerScan'
+p.scans.MF.data.shape = 256
+p.scans.MF.data.num_frames = 200
+p.scans.MF.data.save = None
+p.scans.MF.illumination = u.Param()
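+# a numpy array passed directly (instead of a Param) is taken as the
+# incoming illumination wavefront for the initial probe model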
+p.scans.MF.illumination.model = np.random.rand(*u.expect2(p.scans.MF.data.shape))
+# position distance in fraction of illumination frame
+p.scans.MF.data.density = 0.2
+# total number of photon in empty beam
+p.scans.MF.data.photons = 1e8
+# Gaussian FWHM of possible detector blurring
+p.scans.MF.data.psf = 0.
+
+
+
+# attach a reconstruction engine
+p.engines = u.Param()
+p.engines.engine00 = u.Param()
+p.engines.engine00.name = 'DM'
+p.engines.engine00.numiter = 80
+
+# prepare and run
+P = Ptycho(p,level=5)
diff --git a/templates/minimal_prep_and_run_probe_modes.py b/templates/minimal_prep_and_run_probe_modes.py
new file mode 100644
index 000000000..8dbcb4dc4
--- /dev/null
+++ b/templates/minimal_prep_and_run_probe_modes.py
@@ -0,0 +1,48 @@
+"""
+This script is a test for ptychographic reconstruction in the absence
+of actual data. It uses the test Scan class
+`ptypy.core.data.MoonFlowerScan` to provide "data".
+"""
+
+from ptypy.core import Ptycho
+from ptypy import utils as u
+p = u.Param()
+
+# for verbose output
+p.verbose_level = 3
+
+# set home path
+p.io = u.Param()
+p.io.home = "/tmp/ptypy/"
+p.io.autosave = None
+
+# max 200 frames (128x128px) of diffraction data
+p.scans = u.Param()
+p.scans.MF = u.Param()
+# now you have to specify which ScanModel to use with scans.XX.name,
+# just as you have to give 'name' for engines and PtyScan subclasses.
+p.scans.MF.name = 'Full' # or 'Vanilla'
+p.scans.MF.data= u.Param()
+p.scans.MF.data.name = 'MoonFlowerScan'
+p.scans.MF.data.shape = 128
+p.scans.MF.data.num_frames = 200
+p.scans.MF.data.save = None
+
+# position distance in fraction of illumination frame
+p.scans.MF.data.density = 0.2
+# total number of photon in empty beam
+p.scans.MF.data.photons = 1e8
+# Gaussian FWHM of possible detector blurring
+p.scans.MF.data.psf = 0.
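+# ask for two probe modes to account for partial coherence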
+p.scans.MF.coherence = u.Param()
+p.scans.MF.coherence.num_probe_modes = 2
+
+
+# attach a reconstruction engine
+p.engines = u.Param()
+p.engines.engine00 = u.Param()
+p.engines.engine00.name = 'DM'
+p.engines.engine00.numiter = 80
+
+# prepare and run
+P = Ptycho(p,level=5)
diff --git a/templates/on_the_fly_ptyd.py b/templates/on_the_fly_ptyd.py
index 08c14d69e..23aed062f 100644
--- a/templates/on_the_fly_ptyd.py
+++ b/templates/on_the_fly_ptyd.py
@@ -32,6 +32,9 @@
data.rebin = None
data.orientation = None
+# optionally validate the parameter tree
+u.descriptor.defaults_tree['scandata.MoonFlowerScan'].validate(data)
+
# create PtyScan instance
MF = ptypy.core.data.MoonFlowerScan(data)
MF.initialize()
diff --git a/templates/on_the_fly_rec.py b/templates/on_the_fly_rec.py
index ad1b8d38e..34095ba02 100644
--- a/templates/on_the_fly_rec.py
+++ b/templates/on_the_fly_rec.py
@@ -22,49 +22,24 @@
p.scans = u.Param()
p.scans.MF = u.Param()
+p.scans.MF.name = 'Full'
p.scans.MF.data= u.Param()
-p.scans.MF.data.source = 'file'
+p.scans.MF.data.name = 'PtydScan'
p.scans.MF.data.dfile ='/tmp/ptypy/sample.ptyd'
p.scans.MF.data.min_frames = 10
-p.engine = u.Param()
-
-## Common defaults for all engines
-p.engine.common = u.Param()
-# Total number of iterations
-p.engine.common.numiter = 100
-# Number of iterations to be executed in one go
-p.engine.common.numiter_contiguous = 2
-# Fraction of valid probe area (circular) in probe frame
-p.engine.common.probe_support = None
-# Number of iterations before probe update starts
-p.engine.common.probe_update_start = 2
-# Clip object amplitude into this intrervall
-p.engine.common.clip_object = None # [0,1]
-
-## DM default parameters
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-# HIO parameter
-p.engine.DM.alpha = 1
-# Probe fraction kept from iteration to iteration
-p.engine.DM.probe_inertia = 0.01
-# Object fraction kept from iteration to iteration
-p.engine.DM.object_inertia = 0.1
-# If False: update object before probe
-p.engine.DM.update_object_first = True
-# Gaussian smoothing (FWHM, pixel units) of object
-p.engine.DM.obj_smooth_std = None
-# Loop the overlap constraint until probe changes lesser than this fraction
-p.engine.DM.overlap_converge_factor = 0.5
-# Maximum iterations to be spent inoverlap constraint
-p.engine.DM.overlap_max_iterations = 100
-# If rms of model vs diffraction data is smaller than this fraction,
-# Fourier constraint is considered fullfilled
-p.engine.DM.fourier_relax_factor = 0.05
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
p.engines.engine00.numiter = 100
+p.engines.engine00.numiter_contiguous = 2
+p.engines.engine00.probe_support = None
+p.engines.engine00.probe_update_start = 2
+p.engines.engine00.probe_inertia = 0.01
+p.engines.engine00.object_inertia = 0.1
+p.engines.engine00.update_object_first = True
+p.engines.engine00.overlap_converge_factor = 0.5
+p.engines.engine00.overlap_max_iterations = 100
+p.engines.engine00.fourier_relax_factor = 0.05
P = Ptycho(p,level=5)
diff --git a/templates/pars_all_nodoc.py b/templates/pars_all_nodoc.py
deleted file mode 100644
index 316f86902..000000000
--- a/templates/pars_all_nodoc.py
+++ /dev/null
@@ -1,168 +0,0 @@
-"""
-This Script was autogenerated using
-``u.create_default_template("pars_all_nodoc.py",2,0)``
-It is only a TEMPLATE and not a working reconstruction script.
-"""
-
-import numpy as np
-import ptypy
-from ptypy.core import Ptycho
-from ptypy import utils as u
-
-#### Ptypy Parameter Tree ######################################################
-p = u.Param()
-p.verbose_level = 1
-p.data_type = "single"
-p.run = None
-p.dry_run = False
-p.io = u.Param()
-p.io.home = "./"
-p.io.rfile = "recons/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr"
-p.io.autosave = u.Param()
-p.io.autosave.active = True
-p.io.autosave.interval = 10
-p.io.autosave.rfile = "dumps/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr"
-p.io.interaction = u.Param()
-p.io.interaction.active = True
-p.io.interaction.primary_address = "tcp://127.0.0.1"
-p.io.interaction.primary_port = 5560
-p.io.interaction.port_range = ""5571:5571""
-p.io.autoplot = u.Param()
-p.io.autoplot.imfile = "plots/%(run)s/%(run)s_%(engine)s_%(iterations)04d.png"
-p.io.autoplot.interval = 1
-p.io.autoplot.threaded = True
-p.io.autoplot.layout = u.Param()
-p.io.autoplot.dump = True
-p.io.autoplot.make_movie = False
-p.scan = u.Param()
-p.scan.tags = None
-p.scan.if_conflict_use_meta = True
-p.scan.data = u.Param()
-p.scan.data.recipe = None
-p.scan.data.source = None
-p.scan.data.dfile = None
-p.scan.data.label = None
-p.scan.data.shape = None
-p.scan.data.save = None
-p.scan.data.center = None
-p.scan.data.psize = None
-p.scan.data.distance = None
-p.scan.data.rebin = None
-p.scan.data.orientation = None
-p.scan.data.energy = None
-p.scan.data.min_frames = 1
-p.scan.data.num_frames = None
-p.scan.data.chunk_format = ".chunk%02d"
-p.scan.data.auto_center = None
-p.scan.data.load_parallel = "data"
-p.scan.data.positions_theory = None
-p.scan.data.experimentID = None
-p.scan.data.simulation = u.Param()
-p.scan.data.simulation.detector = u.Param()
-p.scan.data.simulation.psf = None
-p.scan.sharing = u.Param()
-p.scan.sharing.object_share_with = None
-p.scan.sharing.object_share_power = 1
-p.scan.sharing.probe_share_with = None
-p.scan.sharing.probe_share_power = 1
-p.scan.geometry = u.Param()
-p.scan.geometry.energy = 6.2
-p.scan.geometry.lam = None
-p.scan.geometry.distance = 7.19
-p.scan.geometry.psize = 0.000172
-p.scan.geometry.resolution = None
-p.scan.geometry.propagation = "farfield"
-p.scan.xy = u.Param()
-p.scan.xy.model = None
-p.scan.xy.spacing = 1.5e-06
-p.scan.xy.steps = 10
-p.scan.xy.extent = 1.5e-05
-p.scan.xy.offset = 0
-p.scan.xy.jitter = 0
-p.scan.xy.count = None
-p.scan.illumination = u.Param()
-p.scan.illumination.model = None
-p.scan.illumination.photons = None
-p.scan.illumination.recon = u.Param()
-p.scan.illumination.recon.rfile = "\*.ptyr"
-p.scan.illumination.recon.ID = None
-p.scan.illumination.recon.layer = None
-p.scan.illumination.stxm = u.Param()
-p.scan.illumination.stxm.label = None
-p.scan.illumination.aperture = u.Param()
-p.scan.illumination.aperture.form = "circ"
-p.scan.illumination.aperture.diffuser = None
-p.scan.illumination.aperture.size = None
-p.scan.illumination.aperture.edge = 2
-p.scan.illumination.aperture.central_stop = None
-p.scan.illumination.aperture.offset = 0
-p.scan.illumination.propagation = u.Param()
-p.scan.illumination.propagation.parallel = None
-p.scan.illumination.propagation.focussed = None
-p.scan.illumination.propagation.antialiasing = 1
-p.scan.illumination.propagation.spot_size = None
-p.scan.illumination.diversity = u.Param()
-p.scan.illumination.diversity.noise = None
-p.scan.illumination.diversity.power = 0.1
-p.scan.illumination.diversity.shift = None
-p.scan.sample = u.Param()
-p.scan.sample.model = None
-p.scan.sample.fill = 1
-p.scan.sample.recon = u.Param()
-p.scan.sample.recon.rfile = "\*.ptyr"
-p.scan.sample.recon.ID = None
-p.scan.sample.recon.layer = None
-p.scan.sample.stxm = u.Param()
-p.scan.sample.stxm.label = None
-p.scan.sample.process = u.Param()
-p.scan.sample.process.offset = (0, 0)
-p.scan.sample.process.zoom = None
-p.scan.sample.process.formula = None
-p.scan.sample.process.density = 1
-p.scan.sample.process.thickness = 1e-06
-p.scan.sample.process.ref_index = (0.5+0j)
-p.scan.sample.process.smoothing = 2
-p.scan.sample.diversity = u.Param()
-p.scan.sample.diversity.noise = None
-p.scan.sample.diversity.power = 0.1
-p.scan.sample.diversity.shift = None
-p.scan.coherence = u.Param()
-p.scan.coherence.num_probe_modes = 1
-p.scan.coherence.num_object_modes = 1
-p.scan.coherence.spectrum = None
-p.scan.coherence.object_dispersion = None
-p.scan.coherence.probe_dispersion = None
-p.scans = u.Param()
-p.scans.scan_00 = None
-p.engine = u.Param()
-p.engine.common = u.Param()
-p.engine.common.name = "DM"
-p.engine.common.numiter = 2000
-p.engine.common.numiter_contiguous = 1
-p.engine.common.probe_support = 0.7
-p.engine.common.clip_object = None
-p.engine.DM = u.Param()
-p.engine.DM.alpha = 1
-p.engine.DM.probe_update_start = 2
-p.engine.DM.update_object_first = True
-p.engine.DM.overlap_converge_factor = 0.05
-p.engine.DM.overlap_max_iterations = 10
-p.engine.DM.probe_inertia = 0.001
-p.engine.DM.object_inertia = 0.1
-p.engine.DM.fourier_relax_factor = 0.01
-p.engine.DM.obj_smooth_std = 20
-p.engine.ML = u.Param()
-p.engine.ML.type = "gaussian"
-p.engine.ML.floating_intensities = False
-p.engine.ML.intensity_renormalization = 1
-p.engine.ML.reg_del2 = True
-p.engine.ML.reg_del2_amplitude = 0.01
-p.engine.ML.smooth_gradient = 0
-p.engine.ML.scale_precond = False
-p.engine.ML.scale_probe_object = 1
-p.engine.ML.probe_update_start = 0
-p.engines = u.Param()
-p.engines.engine_00 = None
-
-
-Ptycho(p,level=5)
diff --git a/templates/pars_few_alldoc.py b/templates/pars_few_alldoc.py
index f640debfc..3457e499f 100644
--- a/templates/pars_few_alldoc.py
+++ b/templates/pars_few_alldoc.py
@@ -1,7 +1,6 @@
"""
-This Script was autogenerated using
-``u.create_default_template("pars_few_alldoc.py",0,2)``
-It is only a TEMPLATE and not a working reconstruction script.
+This template was autogenerated using an EvalDescriptor instance.
+It is only a template and not a working reconstruction script.
"""
import numpy as np
@@ -9,304 +8,587 @@
from ptypy.core import Ptycho
from ptypy import utils as u
-#### Ptypy Parameter Tree ######################################################
+### Ptypy parameter tree ###
-## (00) Verbosity level
+p = u.Param()
+
+
+# Verbosity level
# Verbosity level for information logging.
-# - ``0``: Only errors
-# - ``1``: Warning
-# - ``2``: Process Information
-# - ``3``: Object Information
-# - ``4``: Debug
+# - ``0``: Only critical errors
+# - ``1``: All errors
+# - ``2``: Warning
+# - ``3``: Process Information
+# - ``4``: Object Information
+# - ``5``: Debug
p.verbose_level = 1
-## (02) Reconstruction identifier
-# Reconstruction run identifier. If ``None``, the run name will be constructed at run
-# time from other information.
+# Reconstruction identifier
+# Reconstruction run identifier. If ``None``, the run name will
+# be constructed at run time from other information.
p.run = None
-## (04) Global parameters for I/O
-p.io = u.Param()
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.PtyScan.dfile = None
-## (07) Auto-save options
-p.io.autosave = u.Param()
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.PtyScan.auto_center = None
-## (11) Server / Client parameters
-# If ``None`` or ``False`` is passed here in script instead of a Param, it translates to
-# ``active=False`` i.e. no ZeroMQ interaction server.
-p.io.interaction = u.Param()
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.PtyScan.psize = 0.000172
-## (12) Activation switch
-# Set to ``False`` for no ZeroMQ interaction server
-p.io.interaction.active = True
+# Sample to detector distance
+# In meters.
+p.scandata.PtyScan.distance = 7.19
-## (16) Plotting client parameters
-# In script you may set this parameter to ``None`` or ``False`` for no automatic plotting.
-#
-p.io.autoplot = u.Param()
+# Photon energy of the incident radiation in keV
+#
+p.scandata.PtyScan.energy = 7.2
-## (23) Scan parameters
-# This categrogy specifies defaults for all scans. Scan-specific parameters are stored
-# in scans.scan_%%
-p.scan = u.Param()
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.PtydScan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.PtydScan.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.PtydScan.psize = 0.000172
-## (26) Data preparation parameters
-p.scan.data = u.Param()
+# Sample to detector distance
+# In meters.
+p.scandata.PtydScan.distance = 7.19
-## (27) Data preparation recipe container
-p.scan.data.recipe = None
+# Photon energy of the incident radiation in keV
+#
+p.scandata.PtydScan.energy = 7.2
-## (28) Describes where to get the data from.
-# Accepted values are:
-# - ``'file'``: data will be read from a .ptyd file.
-# - any valid recipe name: data will be prepared using the recipe.
-# - ``'sim'`` : data will be simulated according to parameters in simulation
-p.scan.data.source = None
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.MoonFlowerScan.dfile = None
-## (29) Prepared data file path
-# If source was ``None`` or ``'file'``, data will be loaded from this file and processing
-# as well as saving is deactivated. If source is the name of an experiment recipe or path
-# to a file, data will be saved to this file
-p.scan.data.dfile = None
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.MoonFlowerScan.auto_center = None
-## (34) Detector pixel size
+# Detector pixel size
# Dimensions of the detector pixels (in meters)
-p.scan.data.psize = None
+p.scandata.MoonFlowerScan.psize = 0.000172
-## (35) Sample-to-detector distance
+# Sample to detector distance
# In meters.
-p.scan.data.distance = None
+p.scandata.MoonFlowerScan.distance = 7.19
-## (38) Photon energy of the incident radiation
-p.scan.data.energy = None
+# Photon energy of the incident radiation in keV
+#
+p.scandata.MoonFlowerScan.energy = 7.2
-## (40) Maximum number of frames to be prepared
-# If `positions_theory` are provided, num_frames will be ovverriden with the number
-# of positions available
-p.scan.data.num_frames = None
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.I13ScanFFP.dfile = None
-## (42) Determine if center in data is calculated automatically
+# Determine if center in data is calculated automatically
# - ``False``, no automatic centering
-# - ``None``, only if :py:data:`center` is ``None``
-# - ``True``, it will be enforced
-p.scan.data.auto_center = None
-
-## (43) Determines what will be loaded in parallel
-# Choose from ``None``, ``'data'``, ``'common'``, ``'all'``
-p.scan.data.load_parallel = "data"
-
-## (49) Scan sharing options
-p.scan.sharing = u.Param()
-
-## (54) Physical parameters
-# All distances are in meters. Other units are specified in the documentation strings.
-#
-p.scan.geometry = u.Param()
-
-## (55) Energy (in keV)
-# If ``None``, uses `lam` instead.
-p.scan.geometry.energy = 6.2
-
-## (56) Wavelength
-# Used only if `energy` is ``None``
-p.scan.geometry.lam = None
-
-## (57) Distance from object to detector
-p.scan.geometry.distance = 7.19
-
-## (61) Parameters for scan patterns
-# These parameters are useful in two cases:
-# - When the experimental positions are not known (no encoders)
-# - When using the package to simulate data.
-# In script an array of shape *(N,2)* may be passed here instead of a Param or dictionary
-# as an **override**
-p.scan.xy = u.Param()
-
-## (62) Scan pattern type
-# The type must be one of the following:
-# - ``None``: positions are read from data file.
-# - ``'raster'``: raster grid pattern
-# - ``'round'``: concentric circles pattern
-# - ``'spiral'``: spiral pattern
-# In script an array of shape *(N,2)* may be passed here instead
-p.scan.xy.model = None
-
-## (63) Pattern spacing
-# Spacing between scan positions. If the model supports asymmetric scans, a tuple passed
-# here will be interpreted as *(dy,dx)* with *dx* as horizontal spacing and *dy* as vertical
-# spacing. If ``None`` the value is calculated from `extent` and `steps`
-p.scan.xy.spacing = 1.5e-06
-
-## (64) Pattern step count
-# Number of steps with length *spacing* in the grid. A tuple *(ny,nx)* provided here can
-# be used for a different step in vertical ( *ny* ) and horizontal direction ( *nx* ). If ``None``
-# the, step count is calculated from `extent` and `spacing`
-p.scan.xy.steps = 10
-
-## (65) Rectangular extent of pattern
-# Defines the absolut maximum extent. If a tuple *(ly,lx)* is provided the extent may be
-# rectangular rather than square. All positions outside of `extent` will be discarded.
-# If ``None`` the extent will is `spacing` times `steps`
-p.scan.xy.extent = 1.5e-05
-
-## (69) Illumination model (probe)
-# In script, you may pass directly a three dimensional numpy.ndarray here instead of a
-# `Param`. This array will be copied to the storage instance with no checking whatsoever.
-# Used in `~ptypy.core.illumination`
-p.scan.illumination = u.Param()
-
-## (70) Type of illumination model
-# One of:
-# - ``None`` : model initialitziation defaults to flat array filled with the specified
-# number of photons
-# - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
-# - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
-# - ** : one of ptypys internal image resource strings
-# - ** : one of the templates inillumination module
-# In script, you may pass a numpy.ndarray here directly as the model. It is considered as
-# incoming wavefront and will be propagated according to `propagation` with an optional
-# `aperture` applied before
-p.scan.illumination.model = None
-
-## (72) Parameters to load from previous reconstruction
-p.scan.illumination.recon = u.Param()
-
-## (73) Path to a ``.ptyr`` compatible file
-p.scan.illumination.recon.rfile = "\*.ptyr"
-
-## (74) ID (label) of storage data to load
-# ``None`` means any ID
-p.scan.illumination.recon.ID = None
-
-## (75) Layer (mode) of storage data to load
-# ``None`` means all layers, choose ``0`` for main mode
-p.scan.illumination.recon.layer = None
-
-## (78) Beam aperture parameters
-p.scan.illumination.aperture = u.Param()
-
-## (79) One of None, 'rect' or 'circ'
-# One of:
-# - ``None`` : no aperture, this may be useful for nearfield
-# - ``'rect'`` : rectangular aperture
-# - ``'circ'`` : circular aperture
-p.scan.illumination.aperture.form = "circ"
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.I13ScanFFP.auto_center = False
-## (81) Aperture width or diameter
-# May also be a tuple *(vertical,horizontal)* in case of an asymmetric aperture
-p.scan.illumination.aperture.size = None
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.I13ScanFFP.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.I13ScanFFP.distance = 7.19
-## (85) Parameters for propagation after aperture plane
-# Propagation to focus takes precedence to parallel propagation if `foccused` is not
-# ``None``
-p.scan.illumination.propagation = u.Param()
+# Photon energy of the incident radiation in keV
+#
+p.scandata.I13ScanFFP.energy = 7.2
-## (86) Parallel propagation distance
-# If ``None`` or ``0`` : No parallel propagation
-p.scan.illumination.propagation.parallel = None
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.I13ScanNFP.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.I13ScanNFP.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.I13ScanNFP.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.I13ScanNFP.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.I13ScanNFP.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.DlsScan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.DlsScan.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.DlsScan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.DlsScan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.DlsScan.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.I08Scan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.I08Scan.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.I08Scan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.I08Scan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.I08Scan.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.Savu.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.Savu.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.Savu.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.Savu.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.Savu.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.ID16AScan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.ID16AScan.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.ID16AScan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.ID16AScan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.ID16AScan.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.AMOScan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.AMOScan.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.AMOScan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.AMOScan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.AMOScan.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.DiProIFERMIScan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.DiProIFERMIScan.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.DiProIFERMIScan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.DiProIFERMIScan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.DiProIFERMIScan.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.FliSpecScanMultexp.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.FliSpecScanMultexp.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.FliSpecScanMultexp.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.FliSpecScanMultexp.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.FliSpecScanMultexp.energy = None
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.UCLLaserScan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.UCLLaserScan.auto_center = False
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.UCLLaserScan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.UCLLaserScan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.UCLLaserScan.energy = None
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.NanomaxStepscanNov2016.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.NanomaxStepscanNov2016.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.NanomaxStepscanNov2016.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.NanomaxStepscanNov2016.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.NanomaxStepscanNov2016.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.NanomaxStepscanMay2017.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.NanomaxStepscanMay2017.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.NanomaxStepscanMay2017.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.NanomaxStepscanMay2017.distance = 7.19
-## (87) Propagation distance from aperture to focus
+# Photon energy of the incident radiation in keV
+#
+p.scandata.NanomaxStepscanMay2017.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.NanomaxFlyscanJune2017.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.NanomaxFlyscanJune2017.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.NanomaxFlyscanJune2017.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.NanomaxFlyscanJune2017.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.NanomaxFlyscanJune2017.energy = 7.2
+
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.ALS5321Scan.dfile = None
+
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.ALS5321Scan.auto_center = None
+
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.ALS5321Scan.psize = 0.000172
+
+# Sample to detector distance
+# In meters.
+p.scandata.ALS5321Scan.distance = 7.19
+
+# Photon energy of the incident radiation in keV
+#
+p.scandata.ALS5321Scan.energy = 0.82
+
+# Aperture width or diameter
+# May also be a tuple *(vertical,horizontal)* in case of an asymmetric aperture
+p.scandata.SimScan.illumination.aperture.size = None
+
+# Type of illumination model
+# One of:
+# - ``None`` : model initialization defaults to a flat array filled with the specified number of photons
+# - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
+# - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+# - ** : one of ptypy's internal image resource strings
+# - ** : one of the templates in the illumination module
+# In script, you may pass a numpy.ndarray here directly as the model. It is treated as an incoming wavefront and will be propagated according to `propagation` with an optional `aperture` applied before.
+p.scandata.SimScan.illumination.model = None
+
+# Propagation distance from aperture to focus
# If ``None`` or ``0`` : No focus propagation
-p.scan.illumination.propagation.focussed = None
+p.scandata.SimScan.illumination.propagation.focussed = None
-## (90) Probe mode(s) diversity parameters
-# Can be ``None`` i.e. no diversity
-p.scan.illumination.diversity = u.Param()
+# Parallel propagation distance
+# If ``None`` or ``0`` : No parallel propagation
+p.scandata.SimScan.illumination.propagation.parallel = None
-## (94) Initial object modelization parameters
-# In script, you may pass a numpy.array here directly as the model. This array will be passed
-# to the storage instance with no checking whatsoever. Used in `~ptypy.core.sample`
-#
-p.scan.sample = u.Param()
+# Path to a ``.ptyr`` compatible file
+#
+p.scandata.SimScan.illumination.recon.rfile = '\*.ptyr'
-## (95) Type of initial object model
+# Type of initial object model
# One of:
-# - ``None`` : model initialitziation defaults to flat array filled `fill`
-# - ``'recon'`` : load model from STXM analysis of diffraction data
-# - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
-# - ** : one of ptypys internal model resource strings
-# - ** : one of the templates in sample module
-# In script, you may pass a numpy.array here directly as the model. This array will be processed
-# according to `process` in order to *simulate* a sample from e.g. a thickness profile.
-#
-p.scan.sample.model = None
-
-## (96) Default fill value
-p.scan.sample.fill = 1
-
-## (97) Parameters to load from previous reconstruction
-p.scan.sample.recon = u.Param()
-
-## (98) Path to a ``.ptyr`` compatible file
-p.scan.sample.recon.rfile = "\*.ptyr"
-
-## (103) Model processing parameters
+# - ``None`` : model initialization defaults to a flat array filled with `fill`
+# - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
+# - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+# - ** : one of ptypy's internal model resource strings
+# - ** : one of the templates in the sample module
+# In script, you may pass a numpy.array here directly as the model. This array will be
+# processed according to `process` in order to *simulate* a sample from e.g. a thickness
+# profile.
+p.scandata.SimScan.sample.model = None
+
+# Default fill value
+#
+p.scandata.SimScan.sample.fill = 1
+
+# Parameters to load from previous reconstruction
+#
+p.scandata.SimScan.sample.recon = u.Param()
+
+# Path to a ``.ptyr`` compatible file
+#
+p.scandata.SimScan.sample.recon.rfile = '\*.ptyr'
+
+# Model processing parameters
# Can be ``None``, i.e. no processing
-p.scan.sample.process = u.Param()
+p.scandata.SimScan.sample.process = u.Param()
-## (111) Probe mode(s) diversity parameters
+# Probe mode(s) diversity parameters
# Can be ``None`` i.e. no diversity
-p.scan.sample.diversity = u.Param()
+p.scandata.SimScan.sample.diversity = u.Param()
-## (115) Coherence parameters
-p.scan.coherence = u.Param()
+# Prepared data file path
+# If source was ``None`` or ``'file'``, data will be loaded from this file and processing as
+# well as saving is deactivated. If source is the name of an experiment recipe or path to a
+# file, data will be saved to this file
+p.scandata.SimScan.dfile = None
-## (116) Number of probe modes
-p.scan.coherence.num_probe_modes = 1
-
-## (117) Number of object modes
-p.scan.coherence.num_object_modes = 1
-
-## (121) Param container for instances of `scan` parameters
-# If not specified otherwise, entries in *scans* will use parameter defaults from :py:data:`.scan`
-#
-p.scans = u.Param()
+# Determine if center in data is calculated automatically
+# - ``False``, no automatic centering
+# - ``None``, only if :py:data:`center` is ``None``
+# - ``True``, it will be enforced
+p.scandata.SimScan.auto_center = None
-## (122) Default first scans entry
-# If only a single scan is used in the reconstruction, this entry may be left unchanged.
-# If more than one scan is used, please make an entry for each scan. The name *scan_00* is
-# an arbitrary choice and may be set to any other string.
-p.scans.scan_00 = None
+# Detector pixel size
+# Dimensions of the detector pixels (in meters)
+p.scandata.SimScan.psize = 0.000172
-## (123) Reconstruction engine parameters
-p.engine = u.Param()
+# Sample to detector distance
+# In meters.
+p.scandata.SimScan.distance = 7.19
-## (124) Parameters common to all engines
-p.engine.common = u.Param()
+# Photon energy of the incident radiation in keV
+#
+p.scandata.SimScan.energy = 7.2
-## (125) Name of engine.
-# Dependent on the name given here, the default parameter set will be a superset of `common`
-# and parameters to the entry of the same name.
-p.engine.common.name = "DM"
+# Aperture width or diameter
+# May also be a tuple *(vertical,horizontal)* in case of an asymmetric aperture
+p.scan.Full.illumination.aperture.size = None
-## (126) Total number of iterations
-p.engine.common.numiter = 2000
+# Type of illumination model
+# One of:
+# - ``None`` : model initialization defaults to a flat array filled with the specified number of photons
+# - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
+# - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+# - ** : one of ptypy's internal image resource strings
+# - ** : one of the templates in the illumination module
+# In script, you may pass a numpy.ndarray here directly as the model. It is treated as an incoming wavefront and will be propagated according to `propagation` with an optional `aperture` applied before.
+p.scan.Full.illumination.model = None
+
+# Propagation distance from aperture to focus
+# If ``None`` or ``0`` : No focus propagation
+p.scan.Full.illumination.propagation.focussed = None
-## (128) Fraction of valid probe area (circular) in probe frame
-p.engine.common.probe_support = 0.7
+# Parallel propagation distance
+# If ``None`` or ``0`` : No parallel propagation
+p.scan.Full.illumination.propagation.parallel = None
-## (130) Parameters for Difference map engine
-p.engine.DM = u.Param()
+# Path to a ``.ptyr`` compatible file
+#
+p.scan.Full.illumination.recon.rfile = '\*.ptyr'
-## (140) Maximum Likelihood parameters
-p.engine.ML = u.Param()
+# Type of initial object model
+# One of:
+# - ``None`` : model initialization defaults to a flat array filled with `fill`
+# - ``'recon'`` : load model from previous reconstruction, see `recon` Parameters
+# - ``'stxm'`` : Estimate model from autocorrelation of mean diffraction data
+# - ** : one of ptypy's internal model resource strings
+# - ** : one of the templates in the sample module
+# In script, you may pass a numpy.array here directly as the model. This array will be
+# processed according to `process` in order to *simulate* a sample from e.g. a thickness
+# profile.
+p.scan.Full.sample.model = None
+
+# Default fill value
+#
+p.scan.Full.sample.fill = 1
+
+# Parameters to load from previous reconstruction
+#
+p.scan.Full.sample.recon = u.Param()
+
+# Path to a ``.ptyr`` compatible file
+#
+p.scan.Full.sample.recon.rfile = '\*.ptyr'
+
+# Model processing parameters
+# Can be ``None``, i.e. no processing
+p.scan.Full.sample.process = u.Param()
-## (143) A rescaling of the intensity so they can be interpreted as Poisson counts.
-p.engine.ML.intensity_renormalization = 1
+# Probe mode(s) diversity parameters
+# Can be ``None`` i.e. no diversity
+p.scan.Full.sample.diversity = u.Param()
-## (144) Whether to use a Gaussian prior (smoothing) regularizer.
-p.engine.ML.reg_del2 = True
+# Coherence parameters
+#
+p.scan.Full.coherence = u.Param()
-## (145) Amplitude of the Gaussian prior if used.
-p.engine.ML.reg_del2_amplitude = 0.01
+# Number of probe modes
+#
+p.scan.Full.coherence.num_probe_modes = 1
-## (150) Container for instances of "engine" parameters
-# All engines registered in this structure will be executed sequentially.
-p.engines = u.Param()
+# Number of object modes
+#
+p.scan.Full.coherence.num_object_modes = 1
-## (151) Default first engines entry
-# Default first engine is difference map (DM)
-p.engines.engine_00 = None
+### Reconstruction ###
Ptycho(p,level=5)
diff --git a/templates/probe_sharing.py b/templates/probe_sharing.py
index 267e81677..17b93101d 100644
--- a/templates/probe_sharing.py
+++ b/templates/probe_sharing.py
@@ -43,50 +43,60 @@ def make_sample(outpath):
p.verbose_level = 3
p.io = u.Param()
p.io.home = "/tmp/ptypy/"
-p.autosave = None
+p.io.autosave = None
p.scans = u.Param()
+
p.scans.MF_01 = u.Param()
+p.scans.MF_01.name= "Vanilla"
p.scans.MF_01.data= u.Param()
+p.scans.MF_01.data.name = 'PtydScan'
p.scans.MF_01.data.label = 'MF_01'
p.scans.MF_01.data.source ='file'
p.scans.MF_01.data.dfile = tmp % 'scan1.ptyd'
p.scans.MF_02 = u.Param()
+p.scans.MF_02.name= "Vanilla"
p.scans.MF_02.data= u.Param()
+p.scans.MF_02.data.name = 'PtydScan'
p.scans.MF_02.data.label = 'MF_02'
p.scans.MF_02.data.source = 'file'
p.scans.MF_02.data.dfile = tmp % 'scan2.ptyd'
-p.scans.MF_02.sharing = u.Param()
-p.scans.MF_02.sharing.probe_share_with = 'MF_01'
-
-p.engine = u.Param()
-p.engine.common = u.Param()
-p.engine.common.numiter = 100
-p.engine.common.numiter_contiguous = 1
-p.engine.common.probe_support = None
-p.engine.common.probe_update_start = 2
-p.engine.common.clip_object = None # [0,1]
-
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-p.engine.DM.alpha = 1
-p.engine.DM.probe_inertia = 0.01
-p.engine.DM.object_inertia = 0.1
-p.engine.DM.update_object_first = True
-p.engine.DM.obj_smooth_std = 10
-p.engine.DM.overlap_converge_factor = 0.5
-p.engine.DM.overlap_max_iterations = 100
-p.engine.DM.fourier_relax_factor = 0.05
-
-p.engines = u.Param()
-p.engines.engine00 = u.Param()
-p.engines.engine00.name = 'DM'
-p.engines.engine00.numiter = 30
-P = Ptycho(p,level=5)
-
+p.engines = u.Param()
+p.engines.DM = u.Param()
+p.engines.DM.name = "DM"
+p.engines.DM.alpha = 1
+p.engines.DM.probe_inertia = 0.01
+p.engines.DM.object_inertia = 0.1
+p.engines.DM.update_object_first = True
+p.engines.DM.obj_smooth_std = 10
+p.engines.DM.overlap_converge_factor = 0.5
+p.engines.DM.overlap_max_iterations = 5
+p.engines.DM.fourier_relax_factor = 0.05
+p.engines.DM.numiter = 60
+
+
+
+P = Ptycho(p,level=3)
+
+s1, s2 = P.probe.storages.values()
+# Transfer views
+for v in s2.views:
+ v.storage = s1
+ v.storageID = s1.ID
+P.probe.reformat()
+
+# Unfortunately we need to delete the storage here because DM cannot
+# ignore unused storages. This is a consequence of the /=nrm division in
+# the probe update.
+P.probe.storages.pop(s2.ID)
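+
+# A quick sanity check (a sketch, not required by the reconstruction):
+# after the merge a single probe storage should remain, shared by the
+# views of both scans.
+assert len(P.probe.storages) == 1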
+
+P.print_stats()
+P.init_engine()
+P.run()
+P.finalize()
diff --git a/templates/ptypy_cSAXS_PseudoBone_6p2keV.py b/templates/ptypy_cSAXS_PseudoBone_6p2keV.py
deleted file mode 100644
index c4a5c6ca8..000000000
--- a/templates/ptypy_cSAXS_PseudoBone_6p2keV.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import ptypy
-from ptypy.core import Ptycho
-from ptypy import utils as u
-import ptypy.simulations as sim
-import numpy as np
-p = u.Param()
-
-### PTYCHO PARAMETERS
-p.verbose_level = 3
-p.data_type = "single"
-p.run = None
-p.io = u.Param()
-p.io.home = "/tmp/ptypy/"
-p.io.run = None
-p.io.autosave = None
-p.io.autoplot = False
-
-p.scan = u.Param()
-p.scan.geometry = u.Param()
-p.scan.geometry.energy = 6.2
-p.scan.geometry.lam = None
-p.scan.geometry.distance = 7
-p.scan.geometry.psize = 172e-6
-p.scan.geometry.shape = 256
-p.scan.geometry.propagation = "farfield"
-
-p.scan.illumination = u.Param()
-p.scan.illumination.model = None
-p.scan.illumination.aperture = u.Param()
-p.scan.illumination.propagation = None
-p.scan.illumination.diversity = None
-
-p.scan.sample = u.Param()
-p.scan.sample.model = 'stxm'
-p.scan.sample.process = None
-p.scan.sample.diversity = None
-
-p.scan.coherence = u.Param()
-p.scan.coherence.Nprobe_modes = 1
-p.scan.coherence.Nobject_modes = 1
-p.scan.coherence.energies = [1.0]
-
-p.scan.sharing = u.Param()
-p.scan.sharing.object_shared_with = None
-p.scan.sharing.object_share_power = 1
-p.scan.sharing.probe_shared_with = None
-p.scan.sharing.probe_share_power = 1
-
-sim = u.Param()
-sim.xy = u.Param()
-sim.xy.model = "round"
-sim.xy.spacing = 1e-6
-sim.xy.steps = 10
-sim.xy.extent = 10e-6
-
-sim.illumination = u.Param()
-sim.illumination.model = None
-sim.illumination.photons = 1e8
-sim.illumination.aperture = u.Param()
-sim.illumination.aperture.diffuser = None
-sim.illumination.aperture.form = "circ"
-sim.illumination.aperture.size = 2.5e-6
-sim.illumination.aperture.edge = 1
-sim.illumination.aperture.central_stop = None
-sim.illumination.propagation = u.Param()
-sim.illumination.propagation.focussed = None
-sim.illumination.propagation.parallel = 0.004
-sim.illumination.propagation.spot_size = None
-
-sim.sample = u.Param()
-#sim.sample.model = 255-u.imload('../resources/tree.bmp').astype(float).mean(-1)
-from ptypy.resources import tree_obj
-sim.sample.model = tree_obj()
-sim.sample.process = u.Param()
-sim.sample.process.offset = (100,400)
-sim.sample.process.zoom = 1.0
-sim.sample.process.formula = "Ca"
-sim.sample.process.density = 1.5
-sim.sample.process.thickness = 20e-6
-sim.sample.process.ref_index = None
-sim.sample.process.smoothing = None
-sim.sample.fill = 1.0+0.j
-
-sim.verbose_level = 1
-sim.plot = False
-
-p.scans = u.Param()
-p.scans.CircSim = u.Param()
-p.scans.CircSim.data=u.Param()
-p.scans.CircSim.data.source = 'sim'
-p.scans.CircSim.data.recipe = sim.copy(depth=4)
-p.scans.CircSim.data.recipe.illumination.aperture.form = "circ"
-p.scans.CircSim.data.save = None
-
-p.scans.RectSim = u.Param()
-p.scans.RectSim.sharing= u.Param(object_share_with = 'CircSim')
-p.scans.RectSim.data=u.Param()
-p.scans.RectSim.data.source = 'sim'
-p.scans.RectSim.data.recipe = sim.copy(depth=4)
-p.scans.RectSim.data.recipe.illumination.aperture.form = "rect"
-p.scans.RectSim.data.save = None
-
-
-p.engine = u.Param()
-p.engine.common = u.Param()
-p.engine.common.numiter = 100
-p.engine.common.numiter_contiguous = 1
-p.engine.common.probe_support = 0.7
-p.engine.common.probe_inertia = 0.01
-p.engine.common.object_inertia = 0.1
-p.engine.common.obj_smooth_std = 20
-p.engine.common.clip_object = None
-p.engine.common.probe_update_start = 2
-
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-p.engine.DM.alpha = 1
-p.engine.DM.update_object_first = True
-p.engine.DM.overlap_converge_factor = 0.05
-p.engine.DM.overlap_max_iterations = 100
-p.engine.DM.fourier_relax_factor = 0.1
-
-p.engine.ML = u.Param()
-
-p.engines = u.Param()
-p.engines.engine00 = u.Param()
-p.engines.engine00.name = 'DM'
-p.engines.engine00.numiter = 50
-p.engines.engine00.fourier_relax_factor = 0.05
-p.engines.engine01 = u.Param()
-p.engines.engine01.name = 'ML'
-p.engines.engine01.numiter = 100
-
-
-
-u.verbose.set_level(3)
-P = Ptycho(p,level=5)
-
-
diff --git a/templates/ptypy_i13_AuStar_nearfield_9p7keV.py b/templates/ptypy_i13_AuStar_nearfield_9p7keV.py
index 7690d5f3a..cf2ad546f 100644
--- a/templates/ptypy_i13_AuStar_nearfield_9p7keV.py
+++ b/templates/ptypy_i13_AuStar_nearfield_9p7keV.py
@@ -3,33 +3,26 @@
from ptypy.core import Ptycho
from ptypy import utils as u
import numpy as np
-p = u.Param()
### PTYCHO PARAMETERS
+p = u.Param()
p.verbose_level = 3
p.data_type = "single"
p.run = None
p.io = u.Param()
p.io.home = "/tmp/ptypy/"
-p.io.run = None
p.io.autoplot = u.Param()
p.io.autoplot.layout ='nearfield'
-p.scan = u.Param()
-p.scan.source = None
-p.scan.geometry = u.Param()
-p.scan.geometry.energy = 9.7
-p.scan.geometry.lam = None
-p.scan.geometry.distance = 8.46e-2
-p.scan.geometry.psize = 100e-9
-p.scan.geometry.shape = 1024
-p.scan.geometry.propagation = "nearfield"
-
-p.scan.geometry.precedence = 'meta'
-
+# Simulation parameters
sim = u.Param()
+sim.energy = 9.7
+sim.distance = 8.46e-2
+sim.psize = 100e-9
+sim.shape = 1024
sim.xy = u.Param()
sim.xy.override = u.parallel.MPIrand_uniform(0.0,10e-6,(20,2))
#sim.xy.positions = np.random.normal(0.0,3e-6,(20,2))
@@ -63,64 +56,50 @@
sim.detector = 'GenericCCD32bit'
sim.plot = False
-p.scan.update(sim.copy(depth=4))
-p.scan.coherence = u.Param()
-p.scan.coherence.Nprobe_modes = 1
-p.scan.coherence.Nobject_modes = 1
-p.scan.coherence.energies = [1.0]
-
-# p.scan.sharing = u.Param()
-# p.scan.sharing.EP_sharing = False
-# p.scan.sharing.object_shared_with = None
-# p.scan.sharing.object_share_power = 1
-# p.scan.sharing.probe_shared_with = None
-# p.scan.sharing.probe_share_power = 1
-
-p.scan.sample = u.Param()
-#p.scan.sample.model = 'stxm'
-#p.scan.sample.process = None
-#p.scan.sample.diversity = None
-p.scan.xy = u.Param()
-p.scan.xy.model=None
-#p.scan.illumination = sim.illumination.copy()
-p.scan.illumination.model = 'stxm'
-
+# Scan model and initial value parameters
p.scans = u.Param()
-p.scans.sim = u.Param()
-p.scans.sim.data=u.Param()
-p.scans.sim.data.source = 'sim'
-p.scans.sim.data.recipe = sim.copy(depth=4)
-p.scans.sim.data.save = None #'append'
-p.scans.sim.data.shape = None
-p.scans.sim.data.num_frames = None
-
-p.engine = u.Param()
-p.engine.common = u.Param()
-p.engine.common.numiter = 100
-p.engine.common.numiter_contiguous = 1
-p.engine.common.probe_support = None
-p.engine.common.probe_inertia = 0.001
-p.engine.common.object_inertia = 0.1
-p.engine.common.obj_smooth_std = 10
-p.engine.common.clip_object = None
-
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-p.engine.DM.alpha = 1
-p.engine.DM.probe_update_start = 2
-p.engine.DM.update_object_first = True
-p.engine.DM.overlap_converge_factor = 0.5
-p.engine.DM.overlap_max_iterations = 100
-p.engine.DM.fourier_relax_factor = 0.05
-
-p.engine.ML = u.Param()
-
+p.scans.scan00 = u.Param()
+p.scans.scan00.name = 'Full'
+
+p.scans.scan00.coherence = u.Param()
+p.scans.scan00.coherence.num_probe_modes = 1
+p.scans.scan00.coherence.num_object_modes = 1
+p.scans.scan00.coherence.energies = [1.0]
+
+p.scans.scan00.sample = u.Param()
+
+# (copy simulation illumination and modify some things)
+p.scans.scan00.illumination = sim.illumination.copy(99)
+p.scans.scan00.illumination.aperture.size = 105e-6
+p.scans.scan00.illumination.aperture.central_stop = None
+
+# Scan data (simulation) parameters
+p.scans.scan00.data=u.Param()
+p.scans.scan00.data.name = 'SimScan'
+p.scans.scan00.data.propagation = 'nearfield'
+p.scans.scan00.data.save = None #'append'
+p.scans.scan00.data.shape = None
+p.scans.scan00.data.num_frames = None
+p.scans.scan00.data.update(sim)
+
+# Reconstruction parameters
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
p.engines.engine00.numiter = 100
p.engines.engine00.object_inertia = 1.
-p.engines.engine00.fourier_relax_factor = 0.1
+p.engines.engine00.numiter_contiguous = 1
+p.engines.engine00.probe_support = None
+p.engines.engine00.probe_inertia = 0.001
+p.engines.engine00.obj_smooth_std = 10
+p.engines.engine00.clip_object = None
+p.engines.engine00.alpha = 1
+p.engines.engine00.probe_update_start = 2
+p.engines.engine00.update_object_first = True
+p.engines.engine00.overlap_converge_factor = 0.5
+p.engines.engine00.overlap_max_iterations = 100
+p.engines.engine00.fourier_relax_factor = 0.05
+
#p.engines.engine01 = u.Param()
#p.engines.engine01.name = 'ML'
#p.engines.engine01.numiter = 50
diff --git a/templates/ptypy_id16a_ffp_recons.py b/templates/ptypy_id16a_ffp_recons.py
deleted file mode 100644
index 21d9ab39a..000000000
--- a/templates/ptypy_id16a_ffp_recons.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import ptypy
-from ptypy.core import Ptycho
-from ptypy import utils as u
-import numpy as np
-import sys
-
-"""
-Reconstruction script for already prepared far-field ptychography data taken at ID16A beamline - ESRF
-First version by B. Enders (12/05/2015)
-Modifications by J. C. da Silva (30/05/2015)
-"""
-
-try:
- filename = sys.argv[1]
-except IndexError:
- print('Missing arguments with .ptyd file')
- print('Usage: {} '.format(sys.argv[0]))
-
-p = u.Param()
-
-### PTYCHO PARAMETERS
-p.verbose_level = 3
-
-p.data_type = "single"
-
-p.io = u.Param()
-p.io.autoplot = u.Param()
-p.io.autoplot.interval = 1
-p.io.autoplot.layout = 'weak'
-p.io.autoplot.threaded = False
-p.io.autoplot.make_movie = False
-p.io.autoplot.dump = True
-
-p.autosave = None
-p.paths = u.Param()
-
-p.model = u.Param()
-
-p.scan = u.Param()
-p.scans = u.Param()
-
-# Scan entry for the Siemensstar
-p.scans.sstar = u.Param()
-p.scans.sstar.if_conflict_use_meta = True
-
-p.scans.sstar.data= u.Param()
-p.scans.sstar.data.source = filename
-p.scans.sstar.data.dfile = filename
-
-p.scans.sstar.illumination = u.Param()
-p.scans.sstar.illumination.aperture = u.Param()
-p.scans.sstar.illumination.aperture.form = 'rect'
-p.scans.sstar.illumination.aperture.size = 60e-6
-p.scans.sstar.illumination.propagation=u.Param()
-p.scans.sstar.illumination.propagation.focussed = 0.1
-p.scans.sstar.illumination.propagation.parallel = 2e-3
-p.scans.sstar.illumination.diversity = u.Param()
-p.scans.sstar.illumination.diversity.power = 0.1
-p.scans.sstar.illumination.diversity.noise = (1,2)
-
-p.scans.sstar.sample= u.Param()
-
-p.scans.sstar.coherence = u.Param()
-p.scans.sstar.coherence.num_probe_modes = 3
-
-p.engines = u.Param()
-p.engines.engine00 = u.Param()
-p.engines.engine00.name = 'DM'
-p.engines.engine00.numiter =200
-p.engines.engine00.fourier_relax_factor = 0.02
-p.engines.engine00.overlap_converge_factor = .05
-p.engines.engine00.overlap_max_iterations = 10
-p.engines.engine00.update_object_first = True
-p.engines.engine00.probe_update_start = 2
-p.engines.engine00.object_smooth_std = 5
-p.engines.engine00.probe_inertia = 0.01
-p.engines.engine00.object_inertia = 0.1
-p.engines.engine01 = u.Param()
-p.engines.engine01.name = 'ML'
-p.engines.engine01.numiter = 100
-
-p.engine = u.Param()
-p.engine.ML = u.Param()
-p.engine.ML.floating_intensities = True
-
-P = Ptycho(p,level=5)
diff --git a/templates/ptypy_id16a_nfp_recons.py b/templates/ptypy_id16a_nfp_recons.py
deleted file mode 100644
index 1ff49ee41..000000000
--- a/templates/ptypy_id16a_nfp_recons.py
+++ /dev/null
@@ -1,220 +0,0 @@
-"""
-Reconstruction script for ptypy version 0.2.0
-
--- Additional parameters can be manually added to the tree --
--- Please refer to documentation for further details --
-"""
-
-import numpy as np
-import ptypy
-from ptypy.core import Ptycho
-from ptypy import utils as u
-
-base_path = '/data/2016_07_ID16A_NFP/'
-
-#### ID16A Recipe #####
-
-r = u.Param()
-r.base_path = base_path
-r.scan_label = 'fibbed_chip_5nm_94d_17p_6px_singlenfp1'
-r.flat_division = False
-r.dark_subtraction = True
-r.date = '2016-08-02'
-r.use_h5 = False
-#r.mask_file = [1,2,3]
-#r.motors = ['spy', 'spz']
-
-
-#### Ptypy Parameter Tree ######################################################
-
-
-## General parameter container
-p = u.Param()
-
-p.verbose_level = 3 ## (00) Verbosity level
-p.data_type = "single" ## (01) Reconstruction floating number precision
-p.run = None ## (02) Reconstruction identifier
-p.dry_run = False ## (03) Dry run switch
-p.ipython_kernel = False ## New feature?
-
-
-## (04) Global parameters for I/O
-p.io = u.Param()
-
-p.io.home = "./" ## (05) Base directory for all I/O
-p.io.rfile = "recons/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr" ## (06) Reconstruction file name (or format string)
-
-
-## (07) Auto-save options
-p.io.autosave = u.Param()
-
-p.io.autosave.active = False ## (08) Activation switch
-p.io.autosave.interval = 100 ## (09) Auto-save interval
-p.io.autosave.rfile = "dumps/%(run)s/%(run)s_%(engine)s_%(iterations)04d.ptyr" ## (10) Auto-save file name (or format string)
-
-
-## (11) Server / Client parameters
-p.io.interaction = u.Param()
-
-p.io.interaction.active = True ## (12) Activation switch
-p.io.interaction.address = "tcp://127.0.0.1" ## (13) The address the server is listening to.
-p.io.interaction.port = 5560 ## (14) The port the server is listening to.
-p.io.interaction.connections = 10 ## (15) Number of concurrent connections on the server
-
-
-## (16) Plotting client parameters
-p.io.autoplot = u.Param()
-
-p.io.autoplot.imfile = "plots/%(run)s/%(run)s_%(engine)s_%(iterations)04d.png" ## (17) Plot images file name (or format string)
-p.io.autoplot.interval = 1 ## (18) Number of iterations between plot updates
-p.io.autoplot.threaded = False ## (19) Live plotting switch
-p.io.autoplot.layout = u.Param() ## (20) Options for default plotter or template name
-p.io.autoplot.dump = False ## (21) Switch to dump plots as image files
-p.io.autoplot.make_movie = False ## (22) Produce reconstruction movie after the reconstruction.
-
-
-## (23) Scan parameters
-p.scan = u.Param()
-
-## (51) Physical parameters
-p.scan.geometry = u.Param()
-
-p.scan.geometry.precedence = None ## (52) Where geometry parameters take precence over others
-p.scan.geometry.energy = 17.05 ## (53) Energy (in keV)
-p.scan.geometry.lam = None ## (54) Wavelength
-p.scan.geometry.distance = 0.002 ## (55) Distance from object to detector
-p.scan.geometry.psize = 10e-9 ## (56) Pixel size in Detector plane
-p.scan.geometry.resolution = None ## (57) Pixel size in Sample plane
-p.scan.geometry.propagation = "nearfield" ## (58) Propagation type
-
-
-## (67) Illumination model (probe)
-p.scan.illumination = u.Param()
-
-p.scan.illumination.model = None ## (68) Type of illumination model
-p.scan.illumination.recon = None ## (70) Parameters to load from previous reconstruction
-#p.scan.illumination.recon.rfile = "\*.ptyr" ## (71) Path to a ``.ptyr`` compatible file
-p.scan.illumination.stxm = None ## (74) Parameters to initialize illumination from diffraction data
-#p.scan.illumination.stxm.label = None ## (75) Scan label of diffraction that is to be used for probe estimate
-p.scan.illumination.aperture = u.Param() ## (76) Beam aperture parameters
-p.scan.illumination.aperture.form = 'rect' ## (77) One of None, 'rect' or 'circ'
-p.scan.illumination.aperture.size = 560e-6 ## (79) Aperture width or diameter
-#p.scan.illumination.propagation = u.Param() ## (83) Parameters for propagation after aperture plane
-#p.scan.illumination.propagation.parallel = 1.206 ## (84) Parallel propagation distance
-#p.scan.illumination.propagation.focussed = 0.343 ## (85) Propagation distance from aperture to focus
-p.scan.illumination.diversity = None ## (88) Probe mode(s) diversity parameters
-
-
-## (92) Initial object modelization parameters
-p.scan.sample = u.Param()
-
-p.scan.sample.model = None ## (93) Type of initial object model
-p.scan.sample.fill = 1 ## (94) Default fill value
-p.scan.sample.recon = None ## (95) Parameters to load from previous reconstruction
-#p.scan.sample.process = u.Param() ## (101) Model processing parameters
-#p.scan.sample.process.offset = (0,0) ## (102) Offset between center of object array and scan pattern
-#p.scan.sample.process.zoom = 1.0 ## (103) Zoom value for object simulation
-#p.scan.sample.process.formula = None ## (104) Chemical formula
-#p.scan.sample.process.density = 19.3 ## (105) Density in [g/ccm]
-#p.scan.sample.process.thickness = 2000e-9 ## (106) Maximum thickness of sample
-#p.scan.sample.process.ref_index = None ## (107) Assigned refractive index
-#p.scan.sample.process.smoothing = None ## (108) Smoothing scale
-#p.scan.sample.diversity = u.Param() ## (109) Probe mode(s) diversity parameters
-#p.scan.sample.diversity.noise = (None, 10) ## (110) Noise in the generated modes of the illumination
-
-## (113) Coherence parameters
-p.scan.coherence = u.Param()
-
-p.scan.coherence.num_probe_modes = 1 ## (114) Number of probe modes
-p.scan.coherence.num_object_modes = 1 ## (115) Number of object modes
-
-
-## (119) Param container for instances of `scan` parameters
-p.scans = u.Param()
-p.scans.ID16A = u.Param()
-
-p.scans.ID16A.if_conflict_use_meta = True ## (25) Give priority to metadata relative to input parameters
-
-
-## (26) Data preparation parameters
-p.scans.ID16A.data = u.Param()
-
-p.scans.ID16A.data.recipe = r ## (27) Data preparation recipe container
-p.scans.ID16A.data.source = 'id16a_nfp' ## (28) Describes where to get the data from.
-p.scans.ID16A.data.dfile = 'prepdata/fibbed_chip.ptyd' ## (29) Prepared data file path
-p.scans.ID16A.data.shape = 2048 ## (31) Shape of the region of interest cropped from the raw data
-p.scans.ID16A.data.save = 'append' ## (32) Saving mode
-#p.scans.ID16A.data.center = (1280,1080) ## (33) Center (pixel) of the optical axes in raw data
-p.scans.ID16A.data.psize = 5e-9 ## (34) Detector pixel size before rebinning
-p.scans.ID16A.data.distance = 0.002 ## (35) Sample-to-detector distance
-p.scans.ID16A.data.rebin = 2 ## (36) Rebinning factor
-p.scans.ID16A.data.orientation = 6 ## (37) Data frame orientation
-p.scans.ID16A.data.energy = 17.05 ## (38) Photon energy of the incident radiation
-p.scans.ID16A.data.load_parallel = "data" ## (43) Determines what will be loaded in parallel
-
-## (46) Scan sharing parameters
-p.scans.ID16A.sharing = u.Param()
-
-p.scans.ID16A.sharing.object_share_with = None ## (47) Label or index of scan to share object with
-p.scans.ID16A.sharing.object_share_power = 1 ## (48) Relative power for object sharing
-p.scans.ID16A.sharing.probe_share_with = None ## (49) Label or index of scan to share probe with
-p.scans.ID16A.sharing.probe_share_power = 1 ## (50) Relative power for probe sharing
-
-
-## (121) Reconstruction engine parameters
-p.engine = u.Param()
-
-
-## (122) Parameters common to all engines
-p.engine.common = u.Param()
-
-p.engine.common.name = "DM" ## (123) Name of engine.
-p.engine.common.numiter = 500 ## (124) Total number of iterations
-p.engine.common.numiter_contiguous = 1 ## (125) Number of iterations without interruption
-p.engine.common.probe_support = None ## (126) Fraction of valid probe area (circular) in probe frame
-p.engine.common.clip_object = None ## (127) Clip object amplitude into this intrervall
-
-
-## (128) Parameters for Difference map engine
-p.engine.DM = u.Param()
-
-p.engine.DM.alpha = 1 ## (129) Difference map parameter
-p.engine.DM.probe_update_start = 1 ## (130) Number of iterations before probe update starts
-p.engine.DM.update_object_first = True ## (131) If False update object before probe
-p.engine.DM.overlap_converge_factor = 0.05 ## (132) Threshold for interruption of the inner overlap loop
-p.engine.DM.overlap_max_iterations = 100 ## (133) Maximum of iterations for the overlap constraint inner loop
-p.engine.DM.probe_inertia = 0.001 ## (134) Weight of the current probe estimate in the update
-p.engine.DM.object_inertia = 0.1 ## (135) Weight of the current object in the update
-p.engine.DM.fourier_relax_factor = 0.05 ## (136) If rms error of model vs diffraction data is smaller than this fraction, Fourier constraint is met
-p.engine.DM.obj_smooth_std = 20 ## (137) Gaussian smoothing (pixel) of the current object prior to update
-
-
-## (138) Maximum Likelihood parameters
-p.engine.ML = u.Param()
-
-p.engine.ML.type = "gaussian" ## (139) Likelihood model. One of 'gaussian', 'poisson' or 'euclid'
-p.engine.ML.floating_intensities = False ## (140) If True, allow for adaptative rescaling of the diffraction pattern intensities (to correct for incident beam intensity fluctuations).
-#p.engine.ML.intensity_renormalization = 1 ## (141) A rescaling of the intensity so they can be interpreted as Poisson counts.
-#p.engine.ML.reg_del2 = True ## (142) Whether to use a Gaussian prior (smoothing) regularizer.
-#p.engine.ML.reg_del2_amplitude = 0.01 ## (143) Amplitude of the Gaussian prior if used.
-#p.engine.ML.smooth_gradient = 0 ## (144) Smoothing preconditioner. If 0, not used, if > 0 gaussian filter if < 0 Hann window.
-#p.engine.ML.scale_precond = False ## (145) Whether to use the object/probe scaling preconditioner.
-#p.engine.ML.scale_probe_object = 1. ## (146) Relative scale of probe to object.
-#p.engine.ML.probe_update_start = 0 ## (147) Number of iterations before probe update starts
-
-
-## (148) Container for instances of "engine" parameters
-p.engines = u.Param()
-
-## (149) First engines entry
-p.engines.engine_00 = u.Param()
-p.engines.engine_00.name = 'DM'
-p.engines.engine_00.numiter = 500
-p.engines.engine_01 = u.Param()
-p.engines.engine_01.name = 'ML'
-p.engines.engine_01.numiter = 1
-
-P = Ptycho(p, level=4)
-
-P.run()
-P.finalize()
diff --git a/templates/ptypy_id22ni_AuStar_focussed_17keV.py b/templates/ptypy_id22ni_AuStar_focussed_17keV.py
index b45582d03..c09071fd3 100644
--- a/templates/ptypy_id22ni_AuStar_focussed_17keV.py
+++ b/templates/ptypy_id22ni_AuStar_focussed_17keV.py
@@ -7,19 +7,22 @@
### PTYCHO PARAMETERS
p.verbose_level = 3
-p.interaction = u.Param()
-
p.data_type = "single"
p.run = None
p.io = u.Param()
p.io.home = "/tmp/ptypy/"
-p.io.run = None
p.io.autoplot = u.Param()
p.io.autoplot.layout='weak'
p.io.autosave = None
+p.io.interaction = u.Param()
+# Simulation parameters
sim = u.Param()
+sim.energy = 17.0
+sim.distance = 2.886
+sim.psize = 51e-6
+sim.shape = 256
sim.xy = u.Param()
sim.xy.model = "round"
sim.xy.spacing = 250e-9
@@ -54,74 +57,54 @@
#sim.detector = 'FRELON_TAPER'
sim.detector = 'GenericCCD32bit'
sim.verbose_level = 1
-sim.coherence = u.Param()
-sim.coherence.num_probe_modes = 1
-sim.psf = 1.
+sim.psf = 1. # emulates partial coherence
sim.plot = False
-p.scan = sim.copy(depth=4)
-
-p.scan.geometry = u.Param()
-p.scan.geometry.energy = 17.0
-p.scan.geometry.lam = None
-p.scan.geometry.distance = 2.886
-p.scan.geometry.psize = 51e-6
-p.scan.geometry.shape = 256
-p.scan.geometry.propagation = "farfield"
-
-
-p.scan.coherence = u.Param()
-p.scan.coherence.num_probe_modes = 4
-p.scan.coherence.num_object_modes = 1
-p.scan.coherence.energies = [1.0]
-
-
-p.scan.sample.model = 'stxm'
-p.scan.sample.process = None
-p.scan.illumination.aperture.form = 'circ'
-p.scan.illumination.propagation.focussed = 0.06
-p.scan.illumination.diversity = u.Param()
-p.scan.illumination.diversity.power = 0.1
-p.scan.illumination.diversity.noise = (np.pi,3.0)
-
+# Scan model and initial value parameters
p.scans = u.Param()
-p.scans.sim = u.Param()
-p.scans.sim.data=u.Param()
-p.scans.sim.data.source = 'sim'
-p.scans.sim.data.recipe = sim
-p.scans.sim.data.save = None
-
-
-p.engine = u.Param()
-p.engine.common = u.Param()
-p.engine.common.numiter = 100
-p.engine.common.numiter_contiguous = 1
-p.engine.common.probe_support = 0.7
-p.engine.common.probe_inertia = 0.01
-p.engine.common.object_inertia = 0.1
-p.engine.common.clip_object = [0,1.]
-
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-p.engine.DM.alpha = 1
-p.engine.DM.probe_update_start = 2
-p.engine.DM.update_object_first = True
-p.engine.DM.overlap_converge_factor = 0.05
-p.engine.DM.overlap_max_iterations = 100
-p.engine.DM.fourier_relax_factor = 0.1
-p.engine.DM.obj_smooth_std = 5
-
-p.engine.ML = u.Param()
-
+p.scans.scan00 = u.Param()
+p.scans.scan00.name = 'Full'
+
+p.scans.scan00.coherence = u.Param()
+p.scans.scan00.coherence.num_probe_modes = 4
+p.scans.scan00.coherence.num_object_modes = 1
+p.scans.scan00.coherence.energies = [1.0]
+
+p.scans.scan00.sample = u.Param()
+p.scans.scan00.sample.model = 'stxm'
+p.scans.scan00.sample.process = None
+
+# (copy the simulation illumination and change specific things)
+p.scans.scan00.illumination = sim.illumination.copy(99)
+p.scans.scan00.illumination.aperture.form = 'circ'
+p.scans.scan00.illumination.propagation.focussed = 0.06
+p.scans.scan00.illumination.diversity = u.Param()
+p.scans.scan00.illumination.diversity.power = 0.1
+p.scans.scan00.illumination.diversity.noise = (np.pi,3.0)
+
+# Scan data (simulation) parameters
+p.scans.scan00.data = u.Param()
+p.scans.scan00.data.name = 'SimScan'
+p.scans.scan00.data.update(sim)
+p.scans.scan00.data.save = None
+
+# Reconstruction parameters
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
p.engines.engine00.numiter = 150
p.engines.engine00.fourier_relax_factor = 0.05
+p.engines.engine00.numiter_contiguous = 1
+p.engines.engine00.probe_support = 0.7
+p.engines.engine00.probe_inertia = 0.01
+p.engines.engine00.object_inertia = 0.1
+p.engines.engine00.clip_object = (0, 1.)
+p.engines.engine00.alpha = 1
+p.engines.engine00.probe_update_start = 2
+p.engines.engine00.update_object_first = True
+p.engines.engine00.overlap_converge_factor = 0.05
+p.engines.engine00.overlap_max_iterations = 100
+p.engines.engine00.obj_smooth_std = 5
u.verbose.set_level(3)
P = Ptycho(p,level=5)
-
-
-
-
diff --git a/templates/ptypy_laser_logo_focussed_632nm.py b/templates/ptypy_laser_logo_focussed_632nm.py
index 726793402..fc2dd15b7 100644
--- a/templates/ptypy_laser_logo_focussed_632nm.py
+++ b/templates/ptypy_laser_logo_focussed_632nm.py
@@ -3,32 +3,25 @@
from ptypy import utils as u
import ptypy.simulations as sim
import numpy as np
-p = u.Param()
-
### PTYCHO PARAMETERS
+p = u.Param()
p.verbose_level = 3
p.data_type = "single"
p.run = None
p.io = u.Param()
p.io.home = "/tmp/ptypy/"
-p.io.run = None
p.io.autosave = None
p.io.autoplot = u.Param()
p.io.autoplot.layout='minimal'
-p.scan = u.Param()
-p.scan.geometry = u.Param()
-p.scan.geometry.energy = u.keV2m(1.0)/6.32e-7
-p.scan.geometry.lam = None
-p.scan.geometry.distance = 15e-2
-p.scan.geometry.psize = 24e-6
-p.scan.geometry.shape = 256
-p.scan.geometry.propagation = "farfield"
-
-
+# Simulation parameters
sim = u.Param()
+sim.energy = u.keV2m(1.0)/6.32e-7
+sim.distance = 15e-2
+sim.psize = 24e-6
+sim.shape = 256
sim.xy = u.Param()
sim.xy.model = "round"
sim.xy.spacing = 0.3e-3
@@ -37,7 +30,7 @@
sim.illumination = u.Param()
sim.illumination.model = None
-sim.illumination.photons = 1e9
+sim.illumination.photons = int(1e9)
sim.illumination.aperture = u.Param()
sim.illumination.aperture.diffuser = None#(0.7,3)
sim.illumination.aperture.form = "circ"
@@ -61,13 +54,12 @@
sim.sample.process.smoothing = None
sim.sample.fill = 1.0+0.j
sim.plot=False
-sim.detector = dict(dtype=np.uint32,full_well=2**32-1,psf=None)
+sim.detector = u.Param(dtype=np.uint32,full_well=2**32-1,psf=None)
+# Scan model and initial value parameters
p.scans = u.Param()
p.scans.ptypy = u.Param()
-p.scans.ptypy.data = u.Param()
-p.scans.ptypy.data.source = 'sim'
-p.scans.ptypy.data.recipe = sim.copy(depth=4)
+p.scans.ptypy.name = 'Full'
p.scans.ptypy.coherence = u.Param()
p.scans.ptypy.coherence.num_probe_modes=1
@@ -80,36 +72,17 @@
p.scans.ptypy.illumination.aperture.size = 1.0e-3
p.scans.ptypy.illumination.aperture.edge = 10
-p.engine = u.Param()
-p.engine.common = u.Param()
-p.engine.common.numiter = 100
-p.engine.common.numiter_contiguous = 1
-p.engine.common.probe_support = 0.9
-p.engine.common.probe_inertia = 0.01
-p.engine.common.object_inertia = 0.1
-p.engine.common.clip_object = None
-
-p.engine.DM = u.Param()
-p.engine.DM.name = "DM"
-p.engine.DM.alpha = 1
-p.engine.DM.probe_update_start = 2
-p.engine.DM.update_object_first = True
-p.engine.DM.overlap_converge_factor = 0.05
-p.engine.DM.overlap_max_iterations = 100
-p.engine.DM.fourier_relax_factor = 0.05
-p.engine.DM.obj_smooth_std = 5
-
-p.engine.ML = u.Param()
+# Scan data (simulation) parameters
+p.scans.ptypy.data = u.Param()
+p.scans.ptypy.data.name = 'SimScan'
+p.scans.ptypy.data.update(sim)
+# Reconstruction parameters
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
p.engines.engine00.numiter = 40
p.engines.engine00.fourier_relax_factor = 0.05
-
u.verbose.set_level(3)
P = Ptycho(p,level=5)
-
-
-
diff --git a/tutorial/bragg3d_initial.py b/tutorial/bragg3d_initial.py
new file mode 100644
index 000000000..5f480cd71
--- /dev/null
+++ b/tutorial/bragg3d_initial.py
@@ -0,0 +1,289 @@
+# This script is a demonstration of how the new container and geometry
+# classes can be used to do 3d Bragg ptychography. Everything is done
+# manually on the View and Geo_Bragg instances, no PtyScan objects or
+# model managers or pods are involved yet.
+
+# I've taken the sample and geometry from the numerical experiment in
+# Berenguer et al., PRB 2013. Unlike that example, the probe here is
+# just a flat rectangle.
+
+# The imports. I've found it useful to visualize 3d data in mayavi, but
+# here we just look at projections with matplotlib.
+import ptypy
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Set up a 3D geometry and a scan
+# -------------------------------
+
+# The following geometry corresponds to an oversampling ratio of 1 along
+# the rocking curve, which means that the numerical field of view
+# tightly contains the exit wave.
+g = ptypy.core.geometry_bragg.Geo_Bragg(psize=(0.01/4, 13e-6, 13e-6), shape=(9*4, 128, 128), energy=8.5, distance=2.0, theta_bragg=22.32)
+
+# The Geo_Bragg object contains mostly the same things as Geo, but in
+# three dimensions. The first element of the shape is the number of
+# rocking curve positions, the first element of the psize denotes theta
+# step in degrees.
+print g
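+
+# As a quick sanity check of these semantics (a sketch, using the numbers
+# above): the angular range covered by the rocking scan is the theta step
+# times the number of rocking positions, here (0.01/4) * (9*4) = 0.09 degrees.
+print(g.psize[0] * g.shape[0])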
+
+# Set up scan positions along y, perpendicular to the incoming beam and
+# to the thin layer stripes.
+Npos = 11
+positions = np.zeros((Npos,3))
+positions[:, 2] = np.arange(Npos) - Npos/2.0
+positions *= .5e-6
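+
+# (with these values the scan runs from -2.75 um to +2.25 um along y,
+# in steps of 0.5 um)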
+
+# Set up the object and its views
+# -------------------------------
+
+# Create a container for the object array, which will represent the
+# object in the non-orthogonal coordinate system conjugate to the
+# q-space measurement frame.
+C = ptypy.core.Container(data_type=np.complex128, data_dims=3)
+
+# For each scan position in the orthogonal coordinate system, find the
+# natural coordinates and create a View instance there.
+views = []
+for pos in positions:
+ pos_ = g._r3r1r2(pos)
+ views.append(ptypy.core.View(C, storageID='Sobj', psize=g.resolution, coord=pos_, shape=g.shape))
+S = C.storages['Sobj']
+C.reformat()
+
+# Define the test sample based on the orthogonal position of each voxel.
+# First, the cartesian grid is obtained from the geometry object, then
+# this grid is used as a condition for the sample's magnitude.
+xx, zz, yy = g.transformed_grid(S, input_space='real', input_system='natural')
+S.fill(0.0)
+S.data[(zz >= -90e-9) & (zz < 90e-9) & (yy >= 1e-6) & (yy < 2e-6) & (xx < 1e-6)] = 1
+S.data[(zz >= -90e-9) & (zz < 90e-9) & (yy >= -2e-6) & (yy < -1e-6)] = 1
+
+# Set up the probe and calculate diffraction patterns
+# ---------------------------------------------------
+
+# First set up a two-dimensional representation of the probe, with
+# arbitrary pixel spacing. The probe here is defined as a 1.5 um by 3 um
+# flat square, but this container will typically come from a 2d
+# transmission ptycho scan of an easy test object.
+Cprobe = ptypy.core.Container(data_dims=2, data_type='float')
+Sprobe = Cprobe.new_storage(psize=10e-9, shape=500)
+zi, yi = Sprobe.grids()
+# flat square probe, as described above
+Sprobe.data[(zi > -.75e-6) & (zi < .75e-6) & (yi > -1.5e-6) & (yi < 1.5e-6)] = 1
+# a smooth gaussian probe is an alternative; uncomment the following line
+# to use it instead (note that it replaces the square probe entirely)
+#Sprobe.data = np.exp(-zi**2 / (2 * (.75e-6)**2) - yi**2 / (2 * (1.0e-6)**2))
+
+# The Bragg geometry has a method to prepare a 3d Storage by extruding
+# the 2d probe and interpolating to the right grid. The returned storage
+# contains a single view compatible with the object views.
+Sprobe_3d = g.prepare_3d_probe(Sprobe, system='natural')
+probeView = Sprobe_3d.views[0]
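+
+# A quick compatibility check (a sketch): the extruded probe view should
+# share the shape of the object views, so the two can be multiplied
+# voxel by voxel.
+print(probeView.shape, views[0].shape)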
+
+# Calculate diffraction patterns by using the geometry's propagator.
+diff = []
+for v in views:
+ diff.append(np.abs(g.propagator.fw(v.data * probeView.data))**2)
+
+# Visualize a single field of view with probe and object
+# ------------------------------------------------------
+
+# A risk with 3d Bragg ptycho is that undersampling along theta leads to
+# a situation where the actual exit wave is not contained within the
+# field of view. This plot shows the situation for this combination of
+# object, probe, and geometry. Note that the thin sample is what makes
+# the current test experiment possible.
+
+# In order to visualize the field of view, we'll create a copy of the
+# object storage and set its value equal to 1 where covered by the first
+# view.
+S_display = S.copy(owner=C, ID='Sdisplay')
+S_display.fill(0.0)
+S_display[S.views[0]] = 1
+
+# Then, to see how the probe is contained by this field of view, we add
+# the probe and the object itself to the above view.
+S_display[S.views[0]] += probeView.data
+S_display.data += S.data * 2
+
+# To visualize how this looks in cartesian real space, make a shifted
+# (nearest-neighbor interpolated) copy of the object Storage.
+S_display_cart = g.coordinate_shift(S_display, input_system='natural', input_space='real', keep_dims=False)
+
+# Plot that
+fig, ax = plt.subplots(nrows=1, ncols=2)
+x, z, y = S_display_cart.grids()
+ax[0].imshow(np.mean(np.abs(S_display_cart.data[0]), axis=2).T, extent=[x.min(), x.max(), z.min(), z.max()], interpolation='none', origin='lower')
+plt.setp(ax[0], ylabel='z', xlabel='x', title='side view')
+ax[1].imshow(np.mean(np.abs(S_display_cart.data[0]), axis=1).T, extent=[x.min(), x.max(), y.min(), y.max()], interpolation='none', origin='lower')
+plt.setp(ax[1], ylabel='y', xlabel='x', title='top view')
+
+# Visualize the probe positions along the scan
+# --------------------------------------------
+
+# beam/sample overlap in non-orthogonal coordinates
+import matplotlib.gridspec as gridspec
+plt.figure()
+gs = gridspec.GridSpec(Npos, 2, width_ratios=[3,1], wspace=.0)
+ax, ax2 = [], []
+r3, r1, r2 = Sprobe_3d.grids()
+for i in range(len(S.views)):
+ # overlap
+ ax.append(plt.subplot(gs[i, 0]))
+ ax[-1].imshow(np.mean(np.abs(views[i].data + probeView.data), axis=1), vmin=0, vmax=.07, extent=[r2.min(), r2.max(), r3.min(), r3.max()])
+ plt.setp(ax[-1], xlabel='r2', ylabel='r3', xlim=[r2.min(), r2.max()], ylim=[r3.min(), r3.max()], yticks=[])
+ # diffraction
+ ax2.append(plt.subplot(gs[i, 1]))
+ ax2[-1].imshow(diff[i][18,:,:])
+ plt.setp(ax2[-1], ylabel='q1', xlabel='q2', xticks=[], yticks=[])
+plt.suptitle('Probe, sample, and slices of 3d diffraction peaks along the scan')
+plt.draw()
+
+# Reconstruct the numerical data
+# ------------------------------
+
+# Here I compare different algorithms and scaling options.
+algorithm = 'PIE'
+
+# Keep a copy of the object storage, and fill the actual one with an
+# initial guess (like zeros everywhere).
+S_true = S.copy(owner=C, ID='Strue')
+# zero everything
+S.fill(0.0)
+# unit magnitude, random phase:
+# S.data[:] = 1.0 * np.exp(1j * (2 * np.random.rand(*S.data.shape) - 1) * np.pi)
+# random magnitude, random phase
+# S.data[:] = np.random.rand(*S.data.shape) * np.exp(1j * (2 * np.random.rand(*S.data.shape) - 1) * np.pi)
+
+# Here's an implementation of the OS (preconditioned PIE) algorithm from
+# Pateras' thesis.
+if algorithm == 'OS':
+ alpha, beta = .1, 1.0
+ fig, ax = plt.subplots(ncols=3)
+ errors = []
+ criterion = []
+ # first calculate the weighting factor Lambda, here called scaling = 1/Lambda
+ scaling = S.copy(owner=C, ID='Sscaling')
+ scaling.fill(alpha)
+ for v in views:
+ scaling[v] += np.abs(probeView.data)**2
+ scaling.data[:] = 1 / scaling.data
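+    # written out: Lambda = alpha + sum_j |P_j|^2; the update below divides
+    # the gradient by Lambda, which acts as a per-voxel preconditioner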
+ # then iterate with the appropriate update rule
+ for i in range(100):
+        print(i)
+ criterion_ = 0.0
+ obj_error_ = 0.0
+ for j in range(len(views)):
+ prop = g.propagator.fw(views[j].data * probeView.data)
+            criterion_ += np.sum((np.sqrt(diff[j]) - np.abs(prop))**2)
+ prop_ = np.sqrt(diff[j]) * np.exp(1j * np.angle(prop))
+ gradient = 2 * probeView.data * g.propagator.bw(prop - prop_)
+ views[j].data -= beta * gradient * scaling[views[j]]
+ errors.append(np.abs(S.data - S_true.data).sum())
+ criterion.append(criterion_)
+
+ if not (i % 5):
+ ax[0].clear()
+ ax[0].plot(errors/errors[0])
+ #ax[0].plot(criterion/criterion[0])
+ ax[1].clear()
+ S_cart = g.coordinate_shift(S, input_space='real', input_system='natural', keep_dims=False)
+ x, z, y = S_cart.grids()
+ ax[1].imshow(np.mean(np.abs(S_cart.data[0]), axis=1).T, extent=[x.min(), x.max(), y.min(), y.max()], interpolation='none', origin='lower')
+ plt.setp(ax[1], ylabel='y', xlabel='x', title='top view')
+ ax[2].clear()
+ ax[2].imshow(np.mean(np.abs(S_cart.data[0]), axis=2).T, extent=[x.min(), x.max(), z.min(), z.max()], interpolation='none', origin='lower')
+ plt.setp(ax[2], ylabel='z', xlabel='x', title='side view')
+ plt.draw()
+ plt.pause(.01)
+
+
+# Here's a PIE/cPIE implementation
+if algorithm == 'PIE':
+ beta = 1.0
+ eps = 1e-3
+ fig, ax = plt.subplots(ncols=3)
+ errors = []
+ ferrors = []
+ for i in range(10):
+        print(i)
+ ferrors_ = []
+ for j in range(len(views)):
+ exit_ = views[j].data * probeView.data
+ prop = g.propagator.fw(exit_)
+ ferrors_.append(np.abs(prop)**2 - diff[j])
+ prop[:] = np.sqrt(diff[j]) * np.exp(1j * np.angle(prop))
+ exit = g.propagator.bw(prop)
+ # ePIE scaling (Maiden2009)
+ #views[j].data += beta * np.conj(probeView.data) / (np.abs(probeView.data).max())**2 * (exit - exit_)
+ # PIE and cPIE scaling (Rodenburg2004 and Godard2011b)
+ views[j].data += beta * np.abs(probeView.data) / np.abs(probeView.data).max() * np.conj(probeView.data) / (np.abs(probeView.data)**2 + eps) * (exit - exit_)
+ errors.append(np.abs(S.data - S_true.data).sum())
+ ferrors.append(np.mean(ferrors_))
+
+ if not (i % 5):
+ ax[0].clear()
+ ax[0].plot(errors/errors[0])
+ ax[0].plot(ferrors/ferrors[0])
+ #ax[0].plot(criterion/criterion[0])
+ ax[1].clear()
+ S_cart = g.coordinate_shift(S, input_space='real', input_system='natural', keep_dims=False)
+ x, z, y = S_cart.grids()
+ ax[1].imshow(np.mean(np.abs(S_cart.data[0]), axis=1).T, extent=[x.min(), x.max(), y.min(), y.max()], interpolation='none', origin='lower')
+ plt.setp(ax[1], ylabel='y', xlabel='x', title='top view')
+ ax[2].clear()
+ ax[2].imshow(np.mean(np.abs(S_cart.data[0]), axis=2).T, extent=[x.min(), x.max(), z.min(), z.max()], interpolation='none', origin='lower')
+ plt.setp(ax[2], ylabel='z', xlabel='x', title='side view')
+ plt.draw()
+ plt.pause(.01)
+
+if algorithm == 'DM':
+ alpha = 1.0
+ fig, ax = plt.subplots(ncols=3)
+ errors = []
+ ferrors = []
+ # create initial exit waves
+ exitwaves = []
+ for j in range(len(views)):
+ exitwaves.append(views[j].data * probeView.data)
+ # we also need a constant normalization storage, which contains the
+ # denominator of the DM object update equation.
+ Snorm = S.copy(owner=C)
+ Snorm.fill(0.0)
+ for j in range(len(views)):
+ Snorm[views[j]] += np.abs(probeView.data)**2
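+    # written out, the object update performed below is, per voxel,
+    # O = sum_j conj(P) * psi_j / (sum_j |P|^2 + 1e-10)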
+
+ # iterate
+ for i in range(100):
+        print(i)
+ ferrors_ = []
+ # fourier update, updates all the exit waves
+ for j in range(len(views)):
+ # in DM, you propagate the following linear combination
+            im = g.propagator.fw((1 + alpha) * probeView.data * views[j].data - alpha * exitwaves[j])
+            # record the Fourier mismatch here, otherwise ferrors_ stays
+            # empty and np.mean() below returns nan
+            ferrors_.append(np.abs(im)**2 - diff[j])
+ im = np.sqrt(diff[j]) * np.exp(1j * np.angle(im))
+ exitwaves[j][:] += g.propagator.bw(im) - views[j].data * probeView.data
+        # object update; no inner iteration needed since the probe is kept constant
+ S.fill(0.0)
+ for j in range(len(views)):
+ views[j].data += np.conj(probeView.data) * exitwaves[j]
+ S.data[:] /= Snorm.data + 1e-10
+ errors.append(np.abs(S.data - S_true.data).sum())
+ ferrors.append(np.mean(ferrors_))
+
+ if not (i % 5):
+ ax[0].clear()
+ ax[0].plot(errors/errors[0])
+ #ax[0].plot(criterion/criterion[0])
+ ax[1].clear()
+ S_cart = g.coordinate_shift(S, input_space='real', input_system='natural', keep_dims=False)
+ x, z, y = S_cart.grids()
+ ax[1].imshow(np.mean(np.abs(S_cart.data[0]), axis=1).T, extent=[x.min(), x.max(), y.min(), y.max()], interpolation='none', origin='lower')
+ plt.setp(ax[1], ylabel='y', xlabel='x', title='top view')
+ ax[2].clear()
+ ax[2].imshow(np.mean(np.abs(S_cart.data[0]), axis=2).T, extent=[x.min(), x.max(), z.min(), z.max()], interpolation='none', origin='lower')
+ plt.setp(ax[2], ylabel='z', xlabel='x', title='side view')
+ plt.draw()
+ plt.pause(.01)
+
+plt.show()
\ No newline at end of file
diff --git a/tutorial/minimal_script.py b/tutorial/minimal_script.py
index 014ec7186..94cfc33ca 100644
--- a/tutorial/minimal_script.py
+++ b/tutorial/minimal_script.py
@@ -24,7 +24,7 @@
# We set the verbosity to a high level, in order to have information on the
# reconstruction process printed to the terminal.
-# See :py:data:`.verbose_level`.
+# See :py:data:`~ptycho.verbose_level`.
p.verbose_level = 3
# We limit this reconstruction to single precision. The other choice is to
@@ -32,8 +32,8 @@
p.data_type = "single"
# We give this reconstruction the name ``'minimal'`` although it
-# will automatically choose one from the file name of the script if we put in ``None``.
-# (But then the tutorial may not work on your computer as the chosen
+# will automatically choose one from the file name of the script if we put in
+# ``None``. (But then the tutorial may not work on your computer as the chosen
# run name may differ from the one that this tutorial was created with)
p.run = 'minimal'
@@ -51,11 +51,15 @@
p.io.autosave.interval = 20
# In this tutorial we switch off the threaded plotting client.
-p.io.autoplot = False
+# (alternative one-liners would be `p['io.autoplot.active'] = False`
+# or `p.io['autoplot.active'] = False`)
+p.io.autoplot = u.Param()
+p.io.autoplot.active = False
# Since we do not want to plot anything, we don't need the
# interaction server either.
-p.io.interaction = False
+p.io.interaction = u.Param()
+p.io.interaction.active = False
# Now we have to insert actual parameters associated with a
# ptychographic scan.
@@ -74,20 +78,19 @@
# branch mentioned above.
# Obviously at least the ``data`` branch will differ from
# :py:data:`.scan.data`. In this tutorial we
-# create a new scan parameter branch ``MF`` where we only specify
-# the data branch and tell |ptypy| to use scan meta data when possible.
+# create a new scan parameter branch ``MF``.
p.scans = u.Param()
p.scans.MF = u.Param()
-p.scans.MF.if_conflict_use_meta = True
+p.scans.MF.name = 'Vanilla'
p.scans.MF.data = u.Param()
-# As data source we have choosen the *'test'* source.
+# As data source we have chosen the *'MoonFlowerScan'* test source.
# That will make |ptypy| use the internal
-# :py:class:`~ptypy.core.data.MoonFlowerScan` class.
+# :py:class:`~ptypy.core.data.MoonFlowerScan` class to generate data.
# This class is meant for testing, and it provides/simulates
# diffraction patterns without using the more complex generic
-# :any:`SimScan` class.
-p.scans.MF.data.source = 'test'
+# :py:class:`SimScan` class.
+p.scans.MF.data.name = 'MoonFlowerScan'
# We set the diffraction frame shape to a small value (128x128px) and
# limit the number of diffraction patterns to 100. The
diff --git a/tutorial/ownengine.py b/tutorial/ownengine.py
index 43d9a9b87..a363ed6f3 100644
--- a/tutorial/ownengine.py
+++ b/tutorial/ownengine.py
@@ -1,6 +1,6 @@
# In this tutorial, we want to provide the information
# needed to create an engine compatible with the state mixture
-# expansion of ptychogrpahy as described in Thibault et. al 2013 [Thi2013]_ .
+# expansion of ptychography as described in Thibault et al. 2013 [#modes]_.
# First we import ptypy and the utility module
import ptypy
@@ -23,11 +23,12 @@
# Now, we need to create a set of scans that we wish to reconstruct
# in the reconstruction run. We will use a single scan that we call 'MF' and
# mark the data source as 'test' to use the |ptypy| internal
-# :any:`MoonFlowerScan`
+# :py:class:`MoonFlowerScan`
p.scans = u.Param()
p.scans.MF = u.Param()
+p.scans.MF.name = 'Full'
p.scans.MF.data = u.Param()
-p.scans.MF.data.source = 'test'
+p.scans.MF.data.name = 'MoonFlowerScan'
p.scans.MF.data.shape = 128
p.scans.MF.data.num_frames = 400
@@ -51,7 +52,7 @@
# Probe and object are not so exciting to look at for now. By default,
# probes are initialized with an aperture-like support.
probe_storage = P.probe.storages.values()[0]
-fig = u.plot_storage(P.probe.S['S00G00'], 1)
+fig = u.plot_storage(P.probe.S.values()[0], 1)
fig.savefig('ownengine_%d.png' % fig.number, dpi=300)
# Plot of the starting guess for the probe.
@@ -80,7 +81,7 @@ def fourier_update(pods):
# Propagate the exit waves
for gamma, pod in pods.iteritems():
Dphi[gamma] = pod.fw(2*pod.probe*pod.object - pod.exit)
- Imodel += Dphi[gamma] * Dphi[gamma].conj()
+ Imodel += np.abs(Dphi[gamma] * Dphi[gamma].conj())
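+        # (taking the modulus keeps Imodel real-valued for the np.sqrt below)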
# Calculate common correction factor
factor = (1-mask) + mask * modulus / (np.sqrt(Imodel) + 1e-10)
# Apply correction and propagate back
@@ -152,14 +153,14 @@ def iterate(Ptycho, num):
# We note that the error (here only displayed for 3 iterations) is
# already declining. That is a good sign.
# Let us have a look how the probe has developed.
-fig = u.plot_storage(P.probe.S['S00G00'], 2)
+fig = u.plot_storage(P.probe.S.values()[0], 2)
fig.savefig('ownengine_%d.png' % fig.number, dpi=300)
# Plot of the reconstructed probe after 9 iterations. We observe that
# the actual illumination of the sample must be larger than the initial
# guess.
# Looks like the probe is on a good way. How about the object?
-fig = u.plot_storage(P.obj.S['S00G00'], 3, slices='0,120:-120,120:-120')
+fig = u.plot_storage(P.obj.S.values()[0], 3, slices='0,120:-120,120:-120')
fig.savefig('ownengine_%d.png' % fig.number, dpi=300)
# Plot of the reconstructed object after 9 iterations. It is not quite
# clear what object is reconstructed
@@ -169,16 +170,16 @@ def iterate(Ptycho, num):
# Error is still on a steady descent. Let us look at the final
# reconstructed probe and object.
-fig = u.plot_storage(P.probe.S['S00G00'], 4)
+fig = u.plot_storage(P.probe.S.values()[0], 4)
fig.savefig('ownengine_%d.png' % fig.number, dpi=300)
# Plot of the reconstructed probe after a total of 45 iterations.
# It's a moon !
-fig = u.plot_storage(P.obj.S['S00G00'], 5, slices='0,120:-120,120:-120')
+fig = u.plot_storage(P.obj.S.values()[0], 5, slices='0,120:-120,120:-120')
fig.savefig('ownengine_%d.png' % fig.number, dpi=300)
# Plot of the reconstructed object after a total of 45 iterations.
# It's a bunch of flowers !
-# .. [Thi2013] P. Thibault and A. Menzel, **Nature** 494, 68 (2013)
+# .. [#modes] P. Thibault and A. Menzel, **Nature** 494, 68 (2013)
diff --git a/tutorial/simupod.py b/tutorial/simupod.py
index a0705352b..627ded6aa 100644
--- a/tutorial/simupod.py
+++ b/tutorial/simupod.py
@@ -24,9 +24,18 @@
# We create a managing top-level class instance. We will not use the
# the :any:`Ptycho` class for now, as its rich set of methods may be
-# a bit overwhelming to start with. Instead we take a plain
-# :py:class:`~ptypy.core.classes.Base` instance.
-P = Base()
+# a bit overwhelming to start with. Instead we start off with a plain
+# :py:class:`~ptypy.core.classes.Base` instance.
+
+# As the ``Base`` class is slotted starting with version 0.3.0, we
+# can't attach attributes after initialisation as we could with a normal
+# Python class. To re-enable this feature we have to subclass it once.
+class Base2(Base):
+ pass
+
+P = Base2()
+
+# Now we can start attaching attributes.
P.CType = np.complex128
P.FType = np.float64
@@ -65,8 +74,16 @@
# For convenience, there is a test probing illumination in |ptypy|'s
# resources.
from ptypy.resources import moon_pr
-pr = -moon_pr(G.shape)
-pr = P.probe.new_storage(data=pr, psize=G.resolution)
+moon_probe = -moon_pr(G.shape)
+
+# As of version 0.3.0 the Storage classes are no longer limited to 3d arrays.
+# This means, however, that it is best to specify the shape explicitly upon
+# creation, so that the coordinate system is aligned correctly.
+pr_shape = (1,) + tuple(G.shape)
+pr = P.probe.new_storage(shape=pr_shape, psize=G.resolution)
+
+# Probe is loaded through the :py:meth:`~ptypy.core.classes.Storage.fill` method.
+pr.fill(moon_probe)
fig = u.plot_storage(pr, 0)
fig.savefig('%s_%d.png' % (scriptname, fig.number), dpi=300)
# Ptypy's default testing illumination, an image of the moon.
@@ -75,13 +92,14 @@
# from the propagator to model a probe,
y, x = G.propagator.grids_sam
apert = u.smooth_step(fsize[0]/5-np.sqrt(x**2+y**2), 1e-6)
-pr2 = P.probe.new_storage(data=apert, psize=G.resolution)
+pr2 = P.probe.new_storage(shape=pr_shape, psize=G.resolution)
+pr2.fill(apert)
fig = u.plot_storage(pr2, 1, channel='c')
fig.savefig('%s_%d.png' % (scriptname, fig.number), dpi=300)
# Round test illumination (not necessarily smoothly varying).
# or the coordinate grids from the Storage itself.
-pr3 = P.probe.new_storage(shape=G.shape, psize=G.resolution)
+pr3 = P.probe.new_storage(shape=pr_shape, psize=G.resolution)
y, x = pr3.grids()
apert = u.smooth_step(fsize[0]/5-np.abs(x), 3e-5)*u.smooth_step(fsize[1]/5-np.abs(y), 3e-5)
pr3.fill(apert)
diff --git a/tutorial/subclassptyscan.py b/tutorial/subclassptyscan.py
index 81847f19c..a592129ae 100644
--- a/tutorial/subclassptyscan.py
+++ b/tutorial/subclassptyscan.py
@@ -1,16 +1,13 @@
-# In this tutorial, we learn how to subclass :any:`PtyScan` to make
+# In this tutorial, we learn how to subclass :py:class:`PtyScan` to make
# ptypy work with any experimental setup.
# This tutorial can be used as a direct follow-up to :ref:`simupod`
# if section :ref:`store` was completed.
# Again, the imports first.
-import matplotlib as mpl
import numpy as np
-import ptypy
+from ptypy.core.data import PtyScan
from ptypy import utils as u
-plt = mpl.pyplot
-import sys
# For this tutorial we assume that the data and meta information is
# in this path:
@@ -19,64 +16,64 @@
# Furthermore, we assume that a file about the experimental geometry is
# located at
geofilepath = save_path + 'geometry.txt'
-print geofilepath
+print(geofilepath)
# and has contents of the following form
-print ''.join([line for line in open(geofilepath, 'r')])
+print(''.join([line for line in open(geofilepath, 'r')]))
# The scanning positions are in
positionpath = save_path + 'positions.txt'
-print positionpath
+print(positionpath)
# with a list of positions for vertical and horizontal movement and the
# image frame from the "camera"
-print ''.join([line for line in open(positionpath, 'r')][:6])+'....'
+print(''.join([line for line in open(positionpath, 'r')][:6])+'....')
# Writing a subclass
# ------------------
-# A subclass of :any:`PtyScan` takes the same input parameter
-# tree as PtyScan itself, i.e :py:data:`.scan.data`. As the subclass
-# will most certainly require additional parameters, there has to be
-# a flexible additional container. For PtyScan, that is the
-# :py:data:`.scan.data.recipe` parameter. A subclass must extract all
-# additional parameters from this source and, in script, you fill
-# the recipe with the appropriate items.
-
-# In this case we can assume that the only parameter of the recipe
-# is the base path ``/tmp/ptypy/sim/``\ . Hence we write
-RECIPE = u.Param()
-RECIPE.base_path = '/tmp/ptypy/sim/'
-
-# Now we import the default generic parameter set from
-from ptypy.core.data import PtyScan
-DEFAULT = PtyScan.DEFAULT.copy()
-
-# This would be the perfect point to change any default value.
-# For sure we need to set the recipe parameter
-DEFAULT.recipe = RECIPE
-
-# A default data file location may be handy too and we allow saving of
-# data in a single file. And since we know it is simulated data we do not
-# have to find the optical axes in the diffraction pattern with
-# the help of auto_center
-DEFAULT.dfile = '/tmp/ptypy/sim/npy.ptyd'
-DEFAULT.auto_center = False
-
-# Our defaults are now
-print u.verbose.report(DEFAULT, noheader=True)
-
# The simplest subclass of PtyScan would look like this
class NumpyScan(PtyScan):
- # We overwrite the DEFAULT with the new DEFAULT.
- DEFAULT = DEFAULT
+ """
+ A PtyScan subclass to extract data from a numpy array.
+ """
def __init__(self, pars=None, **kwargs):
# In init we need to call the parent.
super(NumpyScan, self).__init__(pars, **kwargs)
# Of course this class does nothing special beyond PtyScan.
+# As it is, the class also cannot be used as a real PtyScan instance
+# because its defaults are not properly managed. For this, Ptypy provides a
+# powerful self-documenting tool called a "descriptor", which can be applied
+# to any new class using a decorator. The tree of all valid ptypy parameters
+# is located at :ref:`here `. To manage the default
+# parameters of our subclass and document its existence, we would need to write
+from ptypy import defaults_tree
+
+@defaults_tree.parse_doc('scandata.numpyscan')
+class NumpyScan(PtyScan):
+ """
+ A PtyScan subclass to extract data from a numpy array.
+ """
+
+ def __init__(self, pars=None, **kwargs):
+ # In init we need to call the parent.
+ super(NumpyScan, self).__init__(pars, **kwargs)
+
+# The decorator extracts information from the docstring of the subclass and
+# parent classes about the expected input parameters. Currently the docstring
+# of `NumpyScan` does not contain anything special, thus the only parameters
+# registered are those of the parent class, `PtyScan`:
+print(defaults_tree['scandata.numpyscan'].to_string())
-# An additional step of initialisation would be to retrieve
+# As you can see, there are already many parameters documented for the
+# `PtyScan` base class. For each parameter, the most important entries are
+# the *type*, the *default* value and the *help* string. The decorator does
+# more than collect this information: it also
+# generates from it a class variable called `DEFAULT`, which stores all defaults:
+print(u.verbose.report(NumpyScan.DEFAULT, noheader=True))
+
+# Now we are ready to add functionality to our subclass.
+# A first step of initialisation would be to retrieve
# the geometric information that we stored in ``geofilepath`` and update
# the input parameters with it.
@@ -90,21 +87,35 @@ def extract_geo(base_path):
return out
# We test it.
-print extract_geo(save_path)
+print(extract_geo(save_path))
# That seems to work. We can integrate this parser into
# the initialisation as we assume that this small access can be
# done by all MPI nodes without data access problems. Hence,
# our subclass becomes
+@defaults_tree.parse_doc('scandata.numpyscan')
class NumpyScan(PtyScan):
- # We overwrite the DEFAULT with the new DEFAULT.
- DEFAULT = DEFAULT
+ """
+ A PtyScan subclass to extract data from a numpy array.
+
+ Defaults:
+
+ [name]
+ type = str
+ default = numpyscan
+ help =
+
+ [base_path]
+ type = str
+ default = './'
+ help = Base path to extract data files from.
+ """
def __init__(self, pars=None, **kwargs):
- p = DEFAULT.copy(depth=2)
+ p = self.DEFAULT.copy(depth=2)
p.update(pars)
- with open(p.recipe.base_path+'geometry.txt') as f:
+ with open(p.base_path+'geometry.txt') as f:
for line in f:
key, value = line.strip().split()
# we only replace Nones or missing keys
@@ -113,11 +124,20 @@ def __init__(self, pars=None, **kwargs):
super(NumpyScan, self).__init__(p, **kwargs)
+# We now need a new input parameter called `base_path`, so we documented it
+# in the docstring after the section header "Defaults:".
+print(defaults_tree['scandata.numpyscan.base_path'])
+
+# As you can see, the first step in `__init__` is to build a default
+# parameter structure to ensure that all input parameters are available.
+# The next line updates this structure to overwrite the entries specified by
+# the user.
+
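+# A minimal sketch of that merge behaviour (hypothetical values, not data
+# used elsewhere in this tutorial):
+defaults = u.Param()
+defaults.base_path = './'
+user_input = u.Param()
+user_input.base_path = '/tmp/ptypy/sim/'
+merged = defaults.copy(depth=2)
+merged.update(user_input)
+print(merged.base_path)  # -> /tmp/ptypy/sim/
+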
# Good! Next, we need to implement how the class finds out about
# the positions in the scan. The method
# :py:meth:`~ptypy.core.data.PtyScan.load_positions` can be used
# for this purpose.
-print PtyScan.load_positions.__doc__
+print(PtyScan.load_positions.__doc__)
# The parser for the positions file would look like this.
def extract_pos(base_path):
@@ -132,34 +152,48 @@ def extract_pos(base_path):
# And the test:
files, pos = extract_pos(save_path)
-print files[:2]
-print pos[:2]
+print(files[:2])
+print(pos[:2])
+@defaults_tree.parse_doc('scandata.numpyscan')
class NumpyScan(PtyScan):
- # We overwrite the DEFAULT with the new DEFAULT.
- DEFAULT = DEFAULT
+ """
+ A PtyScan subclass to extract data from a numpy array.
- def __init__(self,pars=None, **kwargs):
- p = DEFAULT.copy(depth=2)
+ Defaults:
+
+ [name]
+ type = str
+ default = numpyscan
+ help =
+
+ [base_path]
+ type = str
+ default = /tmp/ptypy/sim/
+ help = Base path to extract data files from.
+ """
+
+ def __init__(self, pars=None, **kwargs):
+ p = self.DEFAULT.copy(depth=2)
p.update(pars)
- with open(p.recipe.base_path+'geometry.txt') as f:
+ with open(p.base_path+'geometry.txt') as f:
for line in f:
key, value = line.strip().split()
# we only replace Nones or missing keys
if p.get(key) is None:
- p[key]=eval(value)
+ p[key] = eval(value)
super(NumpyScan, self).__init__(p, **kwargs)
- # all input data is now in self.info
def load_positions(self):
# the base path is now stored in
- base_path = self.info.recipe.base_path
+ base_path = self.info.base_path
+        files = []
+        pos = []
with open(base_path+'positions.txt') as f:
for line in f:
fname, y, x = line.strip().split()
- pos.append((eval(y),eval(x)))
+ pos.append((eval(y), eval(x)))
files.append(fname)
return np.asarray(pos)
@@ -177,39 +211,59 @@ def load_positions(self):
# and positions, as we have already adapted ``self.load_positions``
# and there were no bad pixels in the (linear) detector
-# The final subclass looks like this.
+# The final subclass looks like this. We overwrite two defaults from
+# `PtyScan`:
+@defaults_tree.parse_doc('scandata.numpyscan')
class NumpyScan(PtyScan):
- # We overwrite the DEFAULT with the new DEFAULT.
- DEFAULT = DEFAULT
+ """
+ A PtyScan subclass to extract data from a numpy array.
+
+ Defaults:
+
+ [name]
+ type = str
+ default = numpyscan
+ help =
+
+ [base_path]
+ type = str
+ default = /tmp/ptypy/sim/
+ help = Base path to extract data files from.
- def __init__(self,pars=None, **kwargs):
- p = DEFAULT.copy(depth=2)
+ [auto_center]
+ default = False
+
+ [dfile]
+ default = /tmp/ptypy/sim/npy.ptyd
+ """
+
+ def __init__(self, pars=None, **kwargs):
+ p = self.DEFAULT.copy(depth=2)
p.update(pars)
- with open(p.recipe.base_path+'geometry.txt') as f:
+ with open(p.base_path+'geometry.txt') as f:
for line in f:
key, value = line.strip().split()
# we only replace Nones or missing keys
if p.get(key) is None:
- p[key]=eval(value)
+ p[key] = eval(value)
super(NumpyScan, self).__init__(p, **kwargs)
- # all input data is now in self.info
def load_positions(self):
# the base path is now stored in
- pos=[]
- base_path = self.info.recipe.base_path
+ base_path = self.info.base_path
+        files = []
+        pos = []
with open(base_path+'positions.txt') as f:
for line in f:
fname, y, x = line.strip().split()
- pos.append((eval(y),eval(x)))
+ pos.append((eval(y), eval(x)))
files.append(fname)
return np.asarray(pos)
def load(self, indices):
raw = {}
- bp = self.info.recipe.base_path
+ bp = self.info.base_path
for ii in indices:
raw[ii] = np.load(bp+'ccd/diffraction_%04d.npy' % ii)
return raw, {}, {}