[Git][debian-gis-team/python-hdf4][upstream] New upstream version 0.10.2
Antonio Valentino
gitlab at salsa.debian.org
Fri Dec 6 07:17:54 GMT 2019
Antonio Valentino pushed to branch upstream at Debian GIS Project / python-hdf4
Commits:
c1b4f616 by Antonio Valentino at 2019-12-06T07:05:54Z
New upstream version 0.10.2
- - - - -
15 changed files:
- + .github/ISSUE_TEMPLATE
- .travis.yml
- MANIFEST.in
- doc/conf.py
- examples/compress/test-compress.py
- examples/hdfstruct/hdfstruct.py
- pyhdf/HC.py
- pyhdf/HDF.py
- pyhdf/SD.py
- pyhdf/V.py
- pyhdf/VS.py
- pyhdf/hdfext.i
- pyhdf/hdfext_wrap.c
- + pyproject.toml
- setup.py
Changes:
=====================================
.github/ISSUE_TEMPLATE
=====================================
@@ -0,0 +1,34 @@
+<!--
+If this issue is related to conda, please submit it to:
+https://github.com/conda-forge/pyhdf-feedstock/issues.
+
+Please answer these questions before submitting your issue. Thanks!
+-->
+
+### What version of pyhdf, HDF4, and Python are you using?
+
+pyhdf version:
+HDF4 C library version:
+Python version:
+
+### What operating system are you using?
+
+<!--
+Choose between Linux, Windows, and OS X.
+Also indicate the processor architecture if it's relevant.
+-->
+
+
+### What did you do?
+
+<!--
+If possible, provide a recipe for reproducing the error.
+A complete runnable program with the input HDF4 file is best.
+-->
+
+
+### What did you expect to see?
+
+
+
+### What did you see instead?
=====================================
.travis.yml
=====================================
@@ -2,12 +2,8 @@ language: python
matrix:
include:
- - os: linux
- python: "2.6"
- os: linux
python: "2.7"
- - os: linux
- python: "3.3"
- os: linux
python: "3.4"
- os: linux
=====================================
MANIFEST.in
=====================================
@@ -1,6 +1,7 @@
include README.md
include LICENSE
include AUTHORS
+include pyproject.toml
include pyhdf/hdfext.i
recursive-include examples *
recursive-include doc *
=====================================
doc/conf.py
=====================================
@@ -56,7 +56,7 @@ copyright = u'2019, pyhdf authors'
# The short X.Y version.
version = '0.10'
# The full version, including alpha/beta/rc tags.
-release = '0.10.1'
+release = '0.10.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
=====================================
examples/compress/test-compress.py
=====================================
@@ -56,7 +56,7 @@ HDF_DATATYPE = SDC.INT32
def doCompress(compType, value=0, v2=0):
"""Create and validate an HDF file using a compression scheme
- sepcified by the parameters"""
+ specified by the parameters"""
# Build a significant file name
if compType == SDC.COMP_NONE:
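
The hunk above is from pyhdf's compression test example. For readers unfamiliar with the API it exercises, here is a minimal sketch of the underlying call; the file name, shape, and deflate level are illustrative and not taken from the example::

    # Create a dataset and request GZIP (deflate) compression.
    # setcompress() must be called before any data is written.
    import numpy as np
    from pyhdf.SD import SD, SDC

    sd = SD('compressed.hdf', SDC.WRITE | SDC.CREATE)
    ds = sd.create('data', SDC.INT32, (100, 100))
    ds.setcompress(SDC.COMP_DEFLATE, 6)   # scheme, deflate level 1-9
    ds[:] = np.zeros((100, 100), dtype='int32')
    ds.endaccess()
    sd.end()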
=====================================
examples/hdfstruct/hdfstruct.py
=====================================
@@ -31,9 +31,9 @@ hdfFile = sys.argv[1] # Get first command line argument
try: # Catch pyhdf.SD errors
# Open HDF file named on the command line
f = SD(hdfFile)
- # Get global attribute dictionnary
+ # Get global attribute dictionary
attr = f.attributes(full=1)
- # Get dataset dictionnary
+ # Get dataset dictionary
dsets = f.datasets()
# File name, number of attributes and number of variables.
@@ -97,7 +97,7 @@ try: # Catch pyhdf.SD errors
for name in dsNames:
# Access the dataset
dsObj = f.select(name)
- # Get dataset attribute dictionnary
+ # Get dataset attribute dictionary
dsAttr = dsObj.attributes(full=1)
if len(dsAttr) > 0:
printf("%s attributes" % name); eol(2)
@@ -114,7 +114,7 @@ try: # Catch pyhdf.SD errors
printf(" %-20s %3d %-7s %3d %s" %
(nm, t[1], typeTab[t[2]], t[3], t[0])); eol()
eol()
- # Get dataset dimension dictionnary
+ # Get dataset dimension dictionary
dsDim = dsObj.dimensions(full=1)
if len(dsDim) > 0:
printf ("%s dimensions" % name); eol(2)
=====================================
pyhdf/HC.py
=====================================
@@ -7,7 +7,7 @@
from . import hdfext as _C
class HC(object):
- """The HC class holds contants defining opening modes and data types.
+ """The HC class holds constants defining opening modes and data types.
File opening modes (flags ORed together)
=====================================
pyhdf/HDF.py
=====================================
@@ -173,7 +173,7 @@ class HDF(object):
file be opened in read-write mode, or created
if it does not exist.
- nblocks number of data descriptor blocks in a block wit which
+ nblocks number of data descriptor blocks in a block with which
to create the file; the parameter is ignored if the file
is not created; 0 asks to use the default
=====================================
pyhdf/SD.py
=====================================
@@ -72,10 +72,10 @@ SD key features are as follows.
- SDisdimval_bwcomp(), SDsetdimval_comp()
- It is quite straightforward to go from a C version to a python version
- of a program accessing the SD API, and to learn SD usage by refering to
+ of a program accessing the SD API, and to learn SD usage by referring to
the C API documentation.
-- A few high-level python methods have been developped to ease
+- A few high-level python methods have been developed to ease
programmers task. Of greatest interest are those allowing access
to SD datasets through familiar python idioms.
@@ -88,14 +88,14 @@ SD key features are as follows.
See "High level attribute access" and "High level variable access"
sections for details.
- - SD offers methods to retrieve a dictionnary of the attributes,
+ - SD offers methods to retrieve a dictionary of the attributes,
dimensions and variables defined on a dataset, and of the attributes
set on a variable and a dimension. Querying a dataset is thus geatly
simplified.
- SD datasets are read/written through "numpy", a sophisticated
python package for efficiently handling multi-dimensional arrays of
- numbers. numpy can nicely extend the SD functionnality, eg.
+ numbers. numpy can nicely extend the SD functionality, eg.
adding/subtracting arrays with the '+/-' operators.
Accessing the SD module
@@ -180,7 +180,7 @@ the naming conventions and calling sequences documented inside the
to the C API, the manual can be easily used as a documentary source
for pyhdf, once the class to which a function belongs has been
identified, and of course once requirements imposed by the Python
-langage have been taken into account. Consequently, this documentation
+language have been taken into account. Consequently, this documentation
will not attempt to provide an exhaustive coverage of the HDF SD
API. For this, the user is referred to the above manual.
The documentation of each pyhdf method will indicate the name
@@ -473,9 +473,9 @@ In more detail::
HDF file
inquiry
- attributes() return a dictionnary describing every global
+ attributes() return a dictionary describing every global
attribute attached to the HDF file
- datasets() return a dictionnary describing every dataset
+ datasets() return a dictionary describing every dataset
stored inside the file
info() get the number of datasets stored in the file
and the number of attributes attached to it
@@ -514,7 +514,7 @@ In more detail::
values
- SDC The SDC class holds contants defining file opening modes and
+ SDC The SDC class holds constants defining file opening modes and
data types. Constants are named after their C API counterparts.
file opening modes:
@@ -575,10 +575,10 @@ In more detail::
endaccess() terminate access to the dataset
inquiry
- attributes() return a dictionnary describing every
+ attributes() return a dictionary describing every
attribute defined on the dataset
checkempty() determine whether the dataset is empty
- dimensions() return a dictionnary describing all the
+ dimensions() return a dictionary describing all the
dataset dimensions
info() get the dataset name, rank, dimension lengths,
data type and number of attributes
@@ -633,7 +633,7 @@ In more detail::
get and set an attribute
inquiry
- attributes() return a dictionnary describing every
+ attributes() return a dictionary describing every
attribute defined on the dimension
info() get the dimension name, length, scale data type
and number of attributes
@@ -678,7 +678,7 @@ Programming models
Writing
^^^^^^^
The following code can be used as a model to create an SD dataset.
-It shows how to use the most important functionnalities
+It shows how to use the most important functionalities
of the SD interface needed to initialize a dataset.
A real program should of course add error handling::
@@ -694,7 +694,7 @@ A real program should of course add error handling::
hdfFile.priority = 2
# Create a dataset named 'd1' to hold a 3x3 float array.
d1 = hdfFile.create('d1', SDC.FLOAT32, (3,3))
- # Set some attributs on 'd1'
+ # Set some attributes on 'd1'
d1.description = 'Sample 3x3 float array'
d1.units = 'celsius'
# Name 'd1' dimensions and assign them attributes.
@@ -768,8 +768,8 @@ hold matrix values, one row per line. The following procedure will load
into an HDF dataset the contents of any one of those text files. The
procedure computes the matrix min and max values, storing them as
dataset attributes. It also assigns to the variable the group of
-attributes passed as a dictionnary by the calling program. Note how simple
-such an assignment becomes with pyhdf: the dictionnary can contain any
+attributes passed as a dictionary by the calling program. Note how simple
+such an assignment becomes with pyhdf: the dictionary can contain any
number of attributes, of different types, single or multi-valued. Doing
the same in a conventional language would be a much more challenging task.
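
The dictionary-of-attributes idiom described here reduces to a few lines of Python; the helper below is hypothetical (not part of pyhdf) and is shown only to make the pattern concrete::

    # Hypothetical helper: copy a dict of attributes onto a dataset.
    # Dot notation works because pyhdf maps attribute assignment on an
    # SDS instance to HDF attribute creation.
    def set_attrs(dataset, attrs):
        for name, value in attrs.items():
            setattr(dataset, name, value)

    # e.g. set_attrs(d1, {'units': 'celsius', 'valid_range': [0, 100]})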
@@ -846,10 +846,10 @@ We could now call the procedure as follows::
Example 2
^^^^^^^^^
-This example shows a usefull python program that will display the
+This example shows a useful python program that will display the
structure of the SD component of any HDF file whose name is given on
the command line. After the HDF file is opened, high level inquiry methods
-are called to obtain dictionnaries descrybing attributes, dimensions and
+are called to obtain dictionaries describing attributes, dimensions and
datasets. The rest of the program mostly consists in nicely formatting
the contents of those dictionaries::
@@ -857,7 +857,7 @@ the contents of those dictionaries::
from pyhdf.SD import *
from numpy import *
- # Dictionnary used to convert from a numeric data type to its symbolic
+ # Dictionary used to convert from a numeric data type to its symbolic
# representation
typeTab = {
SDC.CHAR: 'CHAR',
@@ -883,9 +883,9 @@ the contents of those dictionaries::
try: # Catch pyhdf.SD errors
# Open HDF file named on the command line
f = SD(hdfFile)
- # Get global attribute dictionnary
+ # Get global attribute dictionary
attr = f.attributes(full=1)
- # Get dataset dictionnary
+ # Get dataset dictionary
dsets = f.datasets()
# File name, number of attributes and number of variables.
@@ -952,7 +952,7 @@ the contents of those dictionaries::
for name in dsNames:
# Access the dataset
dsObj = f.select(name)
- # Get dataset attribute dictionnary
+ # Get dataset attribute dictionary
dsAttr = dsObj.attributes(full=1)
if len(dsAttr) > 0:
printf("%s attributes" % name); eol(2)
@@ -970,7 +970,7 @@ the contents of those dictionaries::
printf(" %-20s %3d %-7s %3d %s" %
(nm, t[1], typeTab[t[2]], t[3], t[0])); eol()
eol()
- # Get dataset dimension dictionnary
+ # Get dataset dimension dictionary
dsDim = dsObj.dimensions(full=1)
if len(dsDim) > 0:
printf ("%s dimensions" % name); eol(2)
@@ -1016,7 +1016,7 @@ except ImportError:
raise HDF4Error("numpy package required but not installed")
class SDC(object):
- """The SDC class holds contants defining opening modes and data types.
+ """The SDC class holds constants defining opening modes and data types.
file opening modes:
========== === ===============================
@@ -1649,7 +1649,7 @@ class SD(object):
def attributes(self, full=0):
- """Return a dictionnary describing every global
+ """Return a dictionary describing every global
attribute attached to the SD interface.
Args::
@@ -1659,8 +1659,8 @@ class SD(object):
Returns::
- Empty dictionnary if no global attribute defined
- Otherwise, dictionnary where each key is the name of a
+ Empty dictionary if no global attribute defined
+ Otherwise, dictionary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
@@ -1689,7 +1689,7 @@ class SD(object):
return res
def datasets(self):
- """Return a dictionnary describing all the file datasets.
+ """Return a dictionary describing all the file datasets.
Args::
@@ -1697,8 +1697,8 @@ class SD(object):
Returns::
- Empty dictionnary if no dataset is defined.
- Otherwise, dictionnary whose keys are the file dataset names,
+ Empty dictionary if no dataset is defined.
+ Otherwise, dictionary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
@@ -1755,7 +1755,7 @@ class SDS(object):
# Private attributes
- # _sd SD intance
+ # _sd SD instance
# _id SDS identifier
self._sd = sd
self._id = id
@@ -1914,7 +1914,7 @@ class SDS(object):
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
if not data_type in SDC.equivNumericTypes:
- raise HDF4Error('get cannot currrently deal with '\
+ raise HDF4Error('get cannot currently deal with '\
'the SDS data type')
return _C._SDreaddata_0(self._id, data_type, start, count, stride)
@@ -1995,7 +1995,7 @@ class SDS(object):
% (dim_sizes[n], n))
# ??? Check support for UINT16
if not data_type in SDC.equivNumericTypes:
- raise HDF4Error('set cannot currrently deal '\
+ raise HDF4Error('set cannot currently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride)
@@ -2758,7 +2758,7 @@ class SDS(object):
return SDAttr(self, name_or_index)
def attributes(self, full=0):
- """Return a dictionnary describing every attribute defined
+ """Return a dictionary describing every attribute defined
on the dataset.
Args::
@@ -2768,8 +2768,8 @@ class SDS(object):
Returns::
- Empty dictionnary if no attribute defined.
- Otherwise, dictionnary where each key is the name of a
+ Empty dictionary if no attribute defined.
+ Otherwise, dictionary where each key is the name of a
dataset attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
@@ -2798,7 +2798,7 @@ class SDS(object):
return res
def dimensions(self, full=0):
- """Return a dictionnary describing every dataset dimension.
+ """Return a dictionary describing every dataset dimension.
Args::
@@ -2807,7 +2807,7 @@ class SDS(object):
Returns::
- Dictionnary where each key is a dimension name. If no name
+ Dictionary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
@@ -2918,7 +2918,7 @@ class SDim(object):
return dim_name, dim_size, data_type, n_attrs
def length(self):
- """Return the dimension length. This method is usefull
+ """Return the dimension length. This method is useful
to quickly retrieve the current length of an unlimited
dimension.
@@ -2943,7 +2943,7 @@ class SDim(object):
dim_name dimension name; setting 2 dimensions to the same
name make the dimensions "shared"; in order to be
- shared, the dimesions must be deined similarly.
+ shared, the dimensions must be defined similarly.
Returns::
@@ -3163,7 +3163,7 @@ class SDim(object):
return SDAttr(self, name_or_index)
def attributes(self, full=0):
- """Return a dictionnary describing every attribute defined
+ """Return a dictionary describing every attribute defined
on the dimension.
Args::
@@ -3173,8 +3173,8 @@ class SDim(object):
Returns::
- Empty dictionnary if no attribute defined.
- Otherwise, dictionnary where each key is the name of a
+ Empty dictionary if no attribute defined.
+ Otherwise, dictionary where each key is the name of a
dimension attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
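
Several hunks in this file touch the docstrings of the SD inquiry methods (attributes(), datasets(), dimensions(), info()). A minimal read-side sketch ties them together; the file and dataset names are illustrative and must refer to existing objects::

    # Open an existing HDF4 file read-only and inspect its SD contents.
    from pyhdf.SD import SD, SDC

    sd = SD('sample.hdf', SDC.READ)
    print(sd.attributes(full=1))   # global attributes, with full info
    print(sd.datasets())           # dataset name -> description tuple

    sds = sd.select('d1')          # attach a dataset by name
    print(sds.info())              # name, rank, dims, type, nattrs
    print(sds.dimensions(full=1))  # per-dimension information
    data = sds[:]                  # read everything as a numpy array
    sds.endaccess()
    sd.end()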
=====================================
pyhdf/V.py
=====================================
@@ -74,7 +74,7 @@ To access the V module a python program can say one of:
This document assumes the last import style is used.
-V is not self-contained, and needs functionnality provided by another
+V is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
@@ -214,7 +214,7 @@ In more detail::
getid() return the reference number of the vgroup
following the one with the given reference number
- VG The VG class encapsulates the functionnality of a vgroup.
+ VG The VG class encapsulates the functionality of a vgroup.
To instantiate a VG class, call the attach() or create() methods
of a V class instance.
@@ -267,7 +267,7 @@ In more detail::
Remember that vgroup attributes can also be set and queried by
applying the standard python "dot notation" on a VG instance.
- get attibute value(s)
+ get attribute value(s)
get() obtain the attribute value(s)
@@ -356,7 +356,7 @@ to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" HDF attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
-the object dictionnary: the python dir() function will not list them.
+the object dictionary: the python dir() function will not list them.
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
@@ -873,12 +873,12 @@ class V(object):
class VG(object):
- """The VG class encapsulates the functionnality of a vgroup.
+ """The VG class encapsulates the functionality of a vgroup.
To instantiate a VG class, call the attach() or create() methods
of a V class instance."""
def __init__(self, vinst, id):
- # This construtor is not intended to be called directly
+ # This constructor is not intended to be called directly
# by the user program. The attach() method of an
# V class instance should be called instead.
@@ -922,7 +922,7 @@ class VG(object):
"""
# NOTE: python will call this method only if the attribute
- # is not found in the object dictionnary.
+ # is not found in the object dictionary.
# Check for a user defined attribute first.
att = self.attr(name)
@@ -1224,7 +1224,7 @@ class VG(object):
Returns::
- dictionnary describing each vgroup attribute; for each attribute,
+ dictionary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
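
For orientation, a minimal sketch of the vgroup workflow these docstrings belong to; the file name, group name, and attribute are illustrative::

    # Create a vgroup, set an attribute via dot notation, and query it.
    from pyhdf.HDF import HDF, HC
    from pyhdf.V import *

    f = HDF('groups.hdf', HC.WRITE | HC.CREATE)
    v = f.vgstart()                  # initialize the V interface
    vg = v.create('measurements')    # create a new vgroup
    vg.note = 'field campaign 2019'  # an HDF attribute, not a python one
    print(vg.attrinfo())             # the attribute dictionary above
    vg.detach()                      # release the vgroup
    v.end()                          # terminate the V interface
    f.close()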
=====================================
pyhdf/VS.py
=====================================
@@ -89,10 +89,10 @@ VS key features are as follows.
- VSlone
- It is quite straightforward to go from a C version to a python version
- of a program accessing the VS API, and to learn VS usage by refering to
+ of a program accessing the VS API, and to learn VS usage by referring to
the C API documentation.
-- A few high-level python methods have been developped to ease
+- A few high-level python methods have been developed to ease
programmers task. Of greatest interest are the following:
- Access to attributes through the familiar "dot notation".
@@ -111,7 +111,7 @@ To access the VS module a python program can say one of:
This document assumes the last import style is used.
-VS is not self-contained, and needs functionnality provided by another
+VS is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
@@ -170,7 +170,7 @@ the naming conventions and calling sequences documented inside the
to the C API, the manual can be easily used as a documentary source
for pyhdf, once the class to which a function belongs has been
identified, and of course once requirements imposed by the Python
-langage have been taken into account. Consequently, this documentation
+language have been taken into account. Consequently, this documentation
will not attempt to provide an exhaustive coverage of the HDF VS
API. For this, the user is referred to the above manual.
The documentation of each pyhdf method will indicate the name
@@ -464,7 +464,7 @@ to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
-the object dictionnary: the python dir() function will not list them.
+the object dictionary: the python dir() function will not list them.
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
@@ -643,7 +643,7 @@ vdata attribute. We want to be able update this attribute (see
following examples). However, the VS API prohibits changing an attribute
type when updating its value. Since the length (order) of an attribute
is part of its type, we make sure of setting the attribute to a length
-long enough to accomodate the longest possible string we migh want to
+long enough to accommodate the longest possible string we migh want to
assign to the attribute.
Appending records to a vdata
@@ -1160,12 +1160,12 @@ class VS(object):
class VD(object):
- """The VD class encapsulates the functionnality of a vdata.
+ """The VD class encapsulates the functionality of a vdata.
To instantiate a VD class, call the attach() or the create()
method of a VS class instance."""
def __init__(self, vsinst, id):
- # This construtor is not intended to be called directly
+ # This constructor is not intended to be called directly
# by the user program. The attach() method of an
# VS class instance should be called instead.
@@ -1338,7 +1338,7 @@ class VD(object):
# like a Python sequence.
#
# When indexing the vdata, 'data' must specify exactly
- # one record, which must be specifed as a sequence. If the index is
+ # one record, which must be specified as a sequence. If the index is
# equal to the current number of records, the record
# is appended to the vdata.
#
@@ -2021,7 +2021,7 @@ class VD(object):
Returns::
- dictionnary describing each vdata attribute; for each attribute
+ dictionary describing each vdata attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
@@ -2265,7 +2265,7 @@ class VDField(object):
Returns::
- dictionnary describing each vdata attribute; for each attribute
+ dictionary describing each vdata attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
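
Similarly, a minimal sketch of the vdata workflow documented here; the vdata name, field layout, and records are illustrative::

    # Create a vdata with two fields and append two records.
    from pyhdf.HDF import HDF, HC
    from pyhdf.VS import *

    f = HDF('inventory.hdf', HC.WRITE | HC.CREATE)
    vs = f.vstart()                        # initialize the VS interface
    vd = vs.create('INVENTORY',            # vdata name and field layout:
                   (('partid', HC.CHAR8, 5),   # 5-char string field
                    ('qty', HC.INT16, 1)))     # 16-bit int field
    vd.write((('P0001', 12),               # record 1
              ('P0002', 7)))               # record 2
    vd.detach()                            # "close" the vdata
    vs.end()                               # terminate the VS interface
    f.close()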
=====================================
pyhdf/hdfext.i
=====================================
@@ -56,9 +56,9 @@
#define DFNT_UINT128 30 /* No current plans for support */
#define DFNT_UCHAR8 3 /* 3 chosen for backward compatibility */
-#define DFNT_UCHAR 3 /* uchar=uchar8 for backward combatibility */
+#define DFNT_UCHAR 3 /* uchar=uchar8 for backward compatibility */
#define DFNT_CHAR8 4 /* 4 chosen for backward compatibility */
-#define DFNT_CHAR 4 /* uchar=uchar8 for backward combatibility */
+#define DFNT_CHAR 4 /* uchar=uchar8 for backward compatibility */
#define DFNT_CHAR16 42 /* No current plans for support */
#define DFNT_UCHAR16 43 /* No current plans for support */
@@ -224,9 +224,9 @@ extern void _HEprint(void);
#define DFNT_UINT64 27
#define DFNT_UCHAR8 3 /* 3 chosen for backward compatibility */
-#define DFNT_UCHAR 3 /* uchar=uchar8 for backward combatibility */
+#define DFNT_UCHAR 3 /* uchar=uchar8 for backward compatibility */
#define DFNT_CHAR8 4 /* 4 chosen for backward compatibility */
-#define DFNT_CHAR 4 /* uchar=uchar8 for backward combatibility */
+#define DFNT_CHAR 4 /* uchar=uchar8 for backward compatibility */
static int HDFtoNumericType(int hdf) {
=====================================
pyhdf/hdfext_wrap.c
=====================================
@@ -3875,9 +3875,9 @@ void _HEprint(void) {
#define DFNT_UINT64 27
#define DFNT_UCHAR8 3 /* 3 chosen for backward compatibility */
-#define DFNT_UCHAR 3 /* uchar=uchar8 for backward combatibility */
+#define DFNT_UCHAR 3 /* uchar=uchar8 for backward compatibility */
#define DFNT_CHAR8 4 /* 4 chosen for backward compatibility */
-#define DFNT_CHAR 4 /* uchar=uchar8 for backward combatibility */
+#define DFNT_CHAR 4 /* uchar=uchar8 for backward compatibility */
static int HDFtoNumericType(int hdf) {
=====================================
pyproject.toml
=====================================
@@ -0,0 +1,7 @@
+[build-system]
+# Minimum requirements for the build system to execute.
+requires = [ # PEP 508 specifications.
+ "numpy",
+ "setuptools",
+ "wheel"
+]
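
The new pyproject.toml declares PEP 518 build requirements: front-ends such as pip install numpy, setuptools, and wheel into an isolated environment before running setup.py. A small sketch of reading the declaration back, assuming Python 3.11+ for the standard-library tomllib::

    # Inspect the build-system table that build front-ends consult.
    import tomllib  # standard library from Python 3.11 onward

    with open('pyproject.toml', 'rb') as fp:
        cfg = tomllib.load(fp)
    print(cfg['build-system']['requires'])  # ['numpy', 'setuptools', 'wheel']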
=====================================
setup.py
=====================================
@@ -153,7 +153,7 @@ setup(name = 'pyhdf',
license = 'MIT',
long_description = "\n".join(DOCLINES[2:]),
url = 'https://github.com/fhs/pyhdf',
- version = '0.10.1',
+ version = '0.10.2',
packages = ['pyhdf'],
ext_modules = [_hdfext],
data_files = data_files,
View it on GitLab: https://salsa.debian.org/debian-gis-team/python-hdf4/commit/c1b4f61662abc47631e1966f9deaec89da0faa2a