Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Initial Py3 W/O Tests #17

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions dimarray/__init__.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
#emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the PTSA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##

from dimarray import DimArray,Dim
from attrarray import AttrArray
from .dimarray import DimArray, Dim
from .attrarray import AttrArray
120 changes: 59 additions & 61 deletions dimarray/attrarray.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the PTSA package for the
Expand All @@ -16,7 +16,7 @@
HAS_H5PY = True
except ImportError:
HAS_H5PY = False


#################################
# New array class with attributes
Expand All @@ -38,7 +38,7 @@ class AttrArray(np.ndarray):
None when no attributes are required (as is the case for instances
of AttrArray) or a dictionary that specifies required attributes
(for child classes of AttrArray, such as Dim and DimArray).

Examples
--------
>>> import numpy as np
Expand All @@ -55,7 +55,7 @@ class AttrArray(np.ndarray):

These custom attributes are maintained when copying or
manipulating the data in an AttrArray:

>>> data2 = data.mean()
>>> data2.hello
good bye
Expand All @@ -67,7 +67,7 @@ class AttrArray(np.ndarray):
# type is required, "object" should be specified. E.g.,
# {'name':str} or {'misc':object}
_required_attrs = None

def __new__(cls, data, dtype=None, copy=False,
hdf5_group=None, **kwargs):
# see if linking to hdf5 file
Expand All @@ -79,7 +79,7 @@ def __new__(cls, data, dtype=None, copy=False,
# else:
# cls._hdf5_file = None
# #cls.hdf5_group = hdf5_group

# get the data in the proper format, copied if desired
# PBS: Does this clobber the attrs?
result = np.array(data, dtype=dtype, copy=copy)
Expand All @@ -92,7 +92,7 @@ def __new__(cls, data, dtype=None, copy=False,
# get the new attrs, kwargs has priority
# see if do deep copy of attrs
newattrs = {}
if hasattr(data,'_attrs'):
if hasattr(data, '_attrs'):
# add those to the list of attributes
if copy:
newattrs = copylib.deepcopy(data._attrs)
Expand All @@ -109,35 +109,33 @@ def __new__(cls, data, dtype=None, copy=False,
result._chk_req_attr()

return result


def __array_finalize__(self,obj):

def __array_finalize__(self, obj):
if not hasattr(self, '_attrs'):
self._attrs = copylib.deepcopy(getattr(obj, '_attrs', {}))
# Set all attributes:
self._set_all_attr()
# Ensure that the required attributes are present:
# PBS: I don't think we need to call this here
#self._chk_req_attr()
# self._chk_req_attr()


def __setattr__(self, name, value):
# Do not allow changes to _required_attrs
if name == '_required_attrs':
raise AttributeError(
"The attribute '_required_attrs' is read-only!")
# set the value in the attribute list
if self._required_attrs:
if (self._required_attrs.has_key(name) and
(not isinstance(value,self._required_attrs[name]))):
raise AttributeError("Attribute '"+name +"' must be "+
str(self._required_attrs[name])+
"\nSupplied value and type:\n"+
str(value)+"\n"+str(type(value)))
if (name in self._required_attrs and
(not isinstance(value, self._required_attrs[name]))):
raise AttributeError("Attribute '" + name + "' must be " +
str(self._required_attrs[name]) +
"\nSupplied value and type:\n" +
str(value) + "\n" + str(type(value)))

# save whether it already existed
# must do this before the call to ndarray.__setattr__
if hasattr(self,name):
if hasattr(self, name):
attr_existed = True
else:
attr_existed = False
Expand All @@ -147,7 +145,7 @@ def __setattr__(self, name, value):

# update the attrs if necessary
# CTW: shouldn't _attrs always be updated?
if self._attrs.has_key(name) or \
if name in self._attrs or \
(name != '_attrs' and not attr_existed):
self._attrs[name] = value

Expand All @@ -158,11 +156,11 @@ def __delattr__(self, name):
if name == '_required_attrs':
raise AttributeError(
"The attribute '_required_attrs' is read-only!")
if name in self._required_attrs.keys():
raise AttributeError("Attribute '"+name +"' is required, and cannot "+
if name in list(self._required_attrs.keys()):
raise AttributeError("Attribute '" + name + "' is required, and cannot " +
"be deleted!")
ret = np.ndarray.__delattr__(self, name)
if self._attrs.has_key(name):
if name in self._attrs:
del self._attrs[name]
return ret

Expand All @@ -179,14 +177,15 @@ def _chk_req_attr(self):
Make sure the required attributes are set
"""
# if there are no required attributes, no check is required:
if self._required_attrs is None: return

for name in self._required_attrs.keys():
if ((not self._attrs.has_key(name)) or
(not isinstance(self._attrs[name], self._required_attrs[name]))):
raise AttributeError("Attribute '"+name+"' is required, and "+
"must be "+str(self._required_attrs[name]))

if self._required_attrs is None:
return

for name in list(self._required_attrs.keys()):
if ((name not in self._attrs) or
(not isinstance(self._attrs[name], self._required_attrs[name]))):
raise AttributeError("Attribute '" + name + "' is required, and " +
"must be " + str(self._required_attrs[name]))


# def __repr__(self):
# # make the attribute kwargs list
Expand All @@ -199,7 +198,7 @@ def _chk_req_attr(self):
# else:
# retrepr = "AttrArray(%s)" % \
# (np.ndarray.__repr__(self))

# return retrepr

def __reduce__(self):
Expand All @@ -208,17 +207,17 @@ def __reduce__(self):

# append the custom object attributes
subclass_state = (self._attrs,)
object_state[2] = (object_state[2],subclass_state)
object_state[2] = (object_state[2], subclass_state)

# convert back to tuple and return
return tuple(object_state)
def __setstate__(self,state):

def __setstate__(self, state):
# get the ndarray state and the subclass state
nd_state, own_state = state

# refresh the ndarray state
np.ndarray.__setstate__(self,nd_state)
np.ndarray.__setstate__(self, nd_state)

# get the subclass attributes
attrs, = own_state
Expand Down Expand Up @@ -249,15 +248,15 @@ def h5save(self, filename, group=None, mode='w', **kwargs):
# see if already exists
grp_name = ''
for name in os.path.split(group):
grp_name = '/'.join([grp_name,name])
grp_name = '/'.join([grp_name, name])
if grp_name in f:
grp = f[grp_name]
else:
grp = grp.create_group(name)

# grp now has the group where we're going to put the new group
# for this AttrArray

pass

def nanvar(a, axis=None, ddof=0):
Expand All @@ -268,7 +267,7 @@ def nanvar(a, axis=None, ddof=0):
spread of a distribution, treating nans as missing values. The
variance is computed for the flattened array by default,
otherwise over the specified axis.

Parameters
----------
a : array_like
Expand All @@ -281,7 +280,7 @@ def nanvar(a, axis=None, ddof=0):
calculations is ``N - ddof``, where ``N`` represents the
number of elements. By default `ddof` is zero (biased
estimate).

Returns
-------
variance : {ndarray, scalar}
Expand All @@ -301,7 +300,7 @@ def nanvar(a, axis=None, ddof=0):
If no nan values are present, returns the same value as
numpy.var, otherwise, the variance is calculated as
if the nan values were not present.

The variance is the average of the squared deviations from the
mean, i.e., var = mean(abs(x - x.mean())**2). The mean is
normally calculated as ``x.sum() / N``, where ``N = len(x)``.
Expand All @@ -328,7 +327,7 @@ def nanvar(a, axis=None, ddof=0):
>>> a.nanvar(1)
AttrArray([ 0.0, 0.25, 0.25])
"""

if axis is None:
return a[~np.isnan(a)].var(ddof=ddof)

Expand All @@ -339,30 +338,30 @@ def nanvar(a, axis=None, ddof=0):
n_orig = a.shape[axis]

# number of nans:
n_nan = np.float64(np.sum(np.isnan(a),axis))
n_nan = np.float64(np.sum(np.isnan(a), axis))

# number of non-nan values:
n = n_orig - n_nan

# compute the mean for all non-nan values:
a[np.isnan(a)] = 0.
m1 = np.sum(a,axis)/n
m1 = np.sum(a, axis) / n

# Kludge to subtract m1 from the correct axis
if axis!=0:
if axis != 0:
shape = np.arange(a.ndim).tolist()
shape.remove(axis)
shape.insert(0,axis)
shape.insert(0, axis)
a = a.transpose(tuple(shape))
d = (a-m1)**2.0
d = (a - m1)**2.0
shape = tuple(np.array(shape).argsort())
d = d.transpose(shape)
else:
d = (a-m1)**2.0
d = (a - m1)**2.0

# calculate numerator for variance:
m2 = np.float64(np.sum(d,axis)-(m1*m1)*n_nan)
m2 = np.float64(np.sum(d, axis) - (m1 * m1) * n_nan)

# divide by the appropriate denominator:
m2c = m2 / (n - ddof)
return(m2c)
Expand All @@ -376,7 +375,7 @@ def nanstd(a, axis=None, ddof=0):
distribution, of the array elements, treating nans as missing
values. The standard deviation is computed for the flattened
array by default, otherwise over the specified axis.

Parameters
----------
a : array_like
Expand All @@ -390,7 +389,7 @@ def nanstd(a, axis=None, ddof=0):
calculations is ``N - ddof``, where ``N`` represents the
number of elements. By default `ddof` is zero (biased
estimate).

Returns
-------
standard_deviation : {ndarray, scalar}
Expand All @@ -410,7 +409,7 @@ def nanstd(a, axis=None, ddof=0):
If no nan values are present, returns the same value as
numpy.std, otherwise, the standard deviation is calculated as
if the nan values were not present.

The standard deviation is the square root of the average of
the squared deviations from the mean, i.e., ``var =
sqrt(mean(abs(x - x.mean())**2))``.
Expand Down Expand Up @@ -438,8 +437,8 @@ def nanstd(a, axis=None, ddof=0):
AttrArray([ 1., 1.6329931618554521])
>>> a.nanstd(1)
AttrArray([ 0.0, 0.5, 0.5])
"""
return np.sqrt(a.nanvar(axis,ddof))
"""
return np.sqrt(a.nanvar(axis, ddof))

def nanmean(a, axis=None):
"""
Expand All @@ -458,7 +457,7 @@ def nanmean(a, axis=None):
axis : int, optional
Axis along which the means are computed. The default is
to compute the mean of the flattened array.

Returns
-------
mean : {ndarray, scalar}
Expand Down Expand Up @@ -492,7 +491,7 @@ def nanmean(a, axis=None):
>>> a.nanmean(1)
AttrArray([ 2.0, 3.5, 5.5])
"""

if axis is None:
return a[~np.isnan(a)].mean()

Expand All @@ -502,8 +501,7 @@ def nanmean(a, axis=None):
# number of all observations
n_orig = a.shape[axis]

factor = 1.0-np.sum(np.isnan(a),axis)*1.0/n_orig
factor = 1.0 - np.sum(np.isnan(a), axis) * 1.0 / n_orig
a[np.isnan(a)] = 0

return(np.mean(a,axis)/factor)

return(np.mean(a, axis) / factor)
Loading