[python-rtree] 01/03: Imported Upstream version 0.8.2+ds

Sebastiaan Couwenberg sebastic at moszumanska.debian.org
Mon Aug 31 16:43:33 UTC 2015


This is an automated email from the git hooks/post-receive script.

sebastic pushed a commit to branch master
in repository python-rtree.

commit 78a5396fb01a40b7c54598754f8d6923db1a2d2f
Author: Bas Couwenberg <sebastic at xs4all.nl>
Date:   Mon Aug 31 17:55:22 2015 +0200

    Imported Upstream version 0.8.2+ds
---
 DEPENDENCIES.txt                 |    4 +
 FAQ.txt                          |    0
 LICENSE.txt                      |  504 +++++++++++++++
 MANIFEST.in                      |    7 +
 PKG-INFO                         |   62 ++
 docs/Makefile                    |  110 ++++
 docs/source/README.txt           |   41 ++
 docs/source/changes.txt          |   95 +++
 docs/source/class.txt            |   13 +
 docs/source/conf.py              |  217 +++++++
 docs/source/examples.txt         |    5 +
 docs/source/history.txt          |   47 ++
 docs/source/index.txt            |   41 ++
 docs/source/install.txt          |   46 ++
 docs/source/performance.txt      |   78 +++
 docs/source/tutorial.txt         |  208 ++++++
 rtree/__init__.py                |    5 +
 rtree/core.py                    |  488 ++++++++++++++
 rtree/index.py                   | 1295 ++++++++++++++++++++++++++++++++++++++
 setup.cfg                        |   10 +
 setup.py                         |   47 ++
 tests/BoundsCheck.txt            |   26 +
 tests/__init__.py                |    2 +
 tests/benchmarks.py              |  152 +++++
 tests/boxes_15x15.data           |  100 +++
 tests/boxes_3x3.data             |  100 +++
 tests/data.py                    |   39 ++
 tests/index.txt                  |  308 +++++++++
 tests/off/BoundsCheck.txt        |   26 +
 tests/off/index.txt              |  308 +++++++++
 tests/off/properties.txt         |  257 ++++++++
 tests/off/test_customStorage.txt |  157 +++++
 tests/off/test_misc.txt          |   42 ++
 tests/off/z_cleanup.txt          |   18 +
 tests/point_clusters.data        |  200 ++++++
 tests/properties.txt             |  257 ++++++++
 tests/rungrind.dist              |    3 +
 tests/test_customStorage.txt     |  157 +++++
 tests/test_doctests.py           |   46 ++
 tests/test_index.py              |   19 +
 tests/test_misc.txt              |   42 ++
 tests/z_cleanup.txt              |   18 +
 42 files changed, 5600 insertions(+)

diff --git a/DEPENDENCIES.txt b/DEPENDENCIES.txt
new file mode 100644
index 0000000..e07cc08
--- /dev/null
+++ b/DEPENDENCIES.txt
@@ -0,0 +1,4 @@
+- setuptools
+- libspatialindex C library 1.7.0+: 
+  http://libspatialindex.github.com
+
diff --git a/FAQ.txt b/FAQ.txt
new file mode 100644
index 0000000..e69de29
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..5ab7695
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,504 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+

+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+

+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+

+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+

+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+

+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+

+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+

+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+

+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+

+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..6e71403
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,7 @@
+include README.txt
+include MANIFEST.in
+include DEPENDENCIES.txt
+include FAQ.txt
+include LICENSE.txt
+recursive-include tests *
+recursive-include docs *
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..07f8041
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,62 @@
+Metadata-Version: 1.1
+Name: Rtree
+Version: 0.8.2
+Summary: R-Tree spatial index for Python GIS
+Home-page: http://toblerity.github.com/rtree/
+Author: Howard Butler
+Author-email: hobu at hobu.net
+License: LGPL
+Description: Rtree: Spatial indexing for Python
+        ------------------------------------------------------------------------------
+        
+        `Rtree`_ is a `ctypes`_ Python wrapper of `libspatialindex`_ that provides a 
+        number of advanced spatial indexing features for the spatially curious Python 
+        user.  These features include:
+        
+        * Nearest neighbor search
+        * Intersection search
+        * Multi-dimensional indexes
+        * Clustered indexes (store Python pickles directly with index entries)
+        * Bulk loading
+        * Deletion
+        * Disk serialization
+        * Custom storage implementation (to implement spatial indexing in ZODB, for example)
+        
+        Documentation and Website
+        ..............................................................................
+        
+        http://toblerity.github.com/rtree/
+        
+        Requirements
+        ..............................................................................
+        
+        * `libspatialindex`_ 1.7.0+.
+        
+        Download
+        ..............................................................................
+        
+        * PyPI http://pypi.python.org/pypi/Rtree/
+        * Windows binaries http://www.lfd.uci.edu/~gohlke/pythonlibs/#rtree
+        
+        Development
+        ..............................................................................
+        
+        * https://github.com/Toblerity/Rtree
+        
+        .. _`R-trees`: http://en.wikipedia.org/wiki/R-tree
+        .. _`ctypes`: http://docs.python.org/library/ctypes.html
+        .. _`libspatialindex`: http://libspatialindex.github.com
+        .. _`Rtree`: http://toblerity.github.com/rtree/
+        
+Keywords: gis spatial index r-tree
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: C
+Classifier: Programming Language :: C++
+Classifier: Programming Language :: Python
+Classifier: Topic :: Scientific/Engineering :: GIS
+Classifier: Topic :: Database
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..dec94d2
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,110 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  devhelp   to make HTML files and a Devhelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf  to make LaTeX files and run them through pdflatex"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf build/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	@echo
+	@echo "Build finished. The HTML pages are in build/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in build/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in build/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in build/qthelp, like this:"
+	@echo "# qcollectiongenerator build/qthelp/Rtree.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile build/qthelp/Rtree.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) build/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/Rtree"
+	@echo "# ln -s build/devhelp $$HOME/.local/share/devhelp/Rtree"
+	@echo "# devhelp"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+latexpdf: latex
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo "Running LaTeX files through pdflatex..."
+	make -C build/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in build/latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	@echo
+	@echo "The overview file is in build/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in build/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in build/doctest/output.txt."
+
+pdf:
+	$(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) build/pdf
+	@echo
+	@echo "Build finished; now you can process the PDF files."
diff --git a/docs/source/README.txt b/docs/source/README.txt
new file mode 100644
index 0000000..9e00ce7
--- /dev/null
+++ b/docs/source/README.txt
@@ -0,0 +1,41 @@
+Rtree: Spatial indexing for Python
+------------------------------------------------------------------------------
+
+`Rtree`_ is a `ctypes`_ Python wrapper of `libspatialindex`_ that provides a 
+number of advanced spatial indexing features for the spatially curious Python 
+user.  These features include:
+
+* Nearest neighbor search
+* Intersection search
+* Multi-dimensional indexes
+* Clustered indexes (store Python pickles directly with index entries)
+* Bulk loading
+* Deletion
+* Disk serialization
+* Custom storage implementation (to implement spatial indexing in ZODB, for example)
+
+Documentation and Website
+..............................................................................
+
+http://toblerity.github.com/rtree/
+
+Requirements
+..............................................................................
+
+* `libspatialindex`_ 1.7.0+.
+
+Download
+..............................................................................
+
+* PyPI http://pypi.python.org/pypi/Rtree/
+* Windows binaries http://www.lfd.uci.edu/~gohlke/pythonlibs/#rtree
+
+Development
+..............................................................................
+
+* https://github.com/Toblerity/Rtree
+
+.. _`R-trees`: http://en.wikipedia.org/wiki/R-tree
+.. _`ctypes`: http://docs.python.org/library/ctypes.html
+.. _`libspatialindex`: http://libspatialindex.github.com
+.. _`Rtree`: http://toblerity.github.com/rtree/
diff --git a/docs/source/changes.txt b/docs/source/changes.txt
new file mode 100644
index 0000000..52b61c7
--- /dev/null
+++ b/docs/source/changes.txt
@@ -0,0 +1,95 @@
+.. _changes:
+
+Changes
+..............................................................................
+
+0.8: 2014-07-17
+===============
+
+- Support for Python 3 added.
+
+0.7.0: 2011-12-29
+=================
+
+- 0.7.0 relies on libspatialindex 1.7.1+.
+- int64_t's should be used for IDs instead of uint64_t (requires libspatialindex 1.7.1 C API changes)
+- Fix __version__
+- More documentation at http://toblerity.github.com/rtree/
+- Class documentation at http://toblerity.github.com/rtree/class.html
+- Tweaks for PyPy compatibility. Still not compatible yet, however.
+- Custom storage support by Mattias (requires libspatialindex 1.7.1)
+
+0.6.0: 2010-04-13
+=================
+
+- 0.6.0 relies on libspatialindex 1.5.0+.
+- :py:meth:`~rtree.index.Index.intersection` and :py:meth:`~rtree.index.Index.nearest` methods return iterators over results instead of
+  lists.
+- Number of results for :py:meth:`~rtree.index.Index.nearest` defaults to 1.
+- libsidx C library of 0.5.0 removed and included in libspatialindex
+- objects="raw" in :py:meth:`~rtree.index.Index.intersection` to return the object sent in (for speed).
+- :py:meth:`~rtree.index.Index.count` method to return the intersection count without the overhead 
+  of returning a list (thanks Leonard Norrgård).
+- Improved bulk loading performance
+- Supposedly no memory leaks :)
+- Many other performance tweaks (see docs).
+- Bulk loader supports interleaved coordinates
+- Leaf queries.  You can return the box and ids of the leaf nodes of the index.  
+  Useful for visualization, etc.
+- Many more docstrings, sphinx docs, etc
+
+
+0.5.0: 2009-08-XX
+=================
+
+0.5.0 was a complete refactoring to use libsidx - a C API for libspatialindex.
+The code is now ctypes over libsidx, and a number of new features are now
+available as a result of this refactoring.
+
+* ability to store pickles within the index (clustered index)
+* ability to use custom extension names for disk-based indexes
+* ability to modify many index parameters at instantiation time
+* storage of point data reduced by a factor of 4
+* bulk loading of indexes at instantiation time
+* ability to quickly return the bounds of the entire index
+* ability to return the bounds of index entries
+* much better windows support 
+* libspatialindex 1.4.0 required.
+  
+0.4.3: 2009-06-05
+=================
+- Fix reference counting leak #181
+
+0.4.2: 2009-05-25
+=================
+- Windows support
+
+0.4.1: 2008-03-24
+=================
+
+- Eliminate uncounted references in add, delete, nearestNeighbor (#157).
+
+0.4: 2008-01-24
+===============
+
+- Testing improvements.
+- Switch dependency to the single consolidated spatialindex library (1.3).
+
+0.3: 26 November 2007
+=====================
+- Change to Python long integer identifiers (#126).
+- Allow deletion of objects from indexes.
+- Reraise index query errors as Python exceptions.
+- Improved persistence.
+
+0.2: 
+==================
+- Link spatialindex system library.
+
+0.1: 13 April 2007
+==================
+- Add disk storage option for indexes (#320).
+- Change license to LGPL.
+- Moved from Pleiades to GIS-Python repo.
+- Initial release.
+
diff --git a/docs/source/class.txt b/docs/source/class.txt
new file mode 100644
index 0000000..f024e49
--- /dev/null
+++ b/docs/source/class.txt
@@ -0,0 +1,13 @@
+.. _class:
+
+Class Documentation
+------------------------------------------------------------------------------
+
+.. autoclass:: rtree.index.Index
+    :members: __init__, insert, intersection, nearest, delete, bounds, count, close, dumps, loads, interleaved
+    
+.. autoclass:: rtree.index.Property
+    :members: 
+
+.. autoclass:: rtree.index.Item
+    :members:  __init__, bbox, object
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..3684034
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+#
+# Rtree documentation build configuration file, created by
+# sphinx-quickstart on Tue Aug 18 13:21:07 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+sys.path.append('../../')
+
+import rtree
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.txt'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Rtree'
+copyright = u'2011, Howard Butler, Brent Pedersen, Sean Gilles, and others.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = rtree.__version__
+# The full version, including alpha/beta/rc tags.
+release = rtree.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'nature'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Rtreedoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'Rtree.tex', u'Rtree Documentation',
+   u'Sean Gilles', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+pdf_documents = [
+    ('index', u'Rtree', u'Rtree Documentation', u'The Rtree Team'),
+]
+
+# Options for rst2pdf output.
+pdf_language = "en_US"
+pdf_fit_mode = "overflow"
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/docs/source/examples.txt b/docs/source/examples.txt
new file mode 100644
index 0000000..76ebd01
--- /dev/null
+++ b/docs/source/examples.txt
@@ -0,0 +1,5 @@
+.. _examples:
+
+
+.. include:: ../../tests/index.txt
+    
\ No newline at end of file
diff --git a/docs/source/history.txt b/docs/source/history.txt
new file mode 100644
index 0000000..5431550
--- /dev/null
+++ b/docs/source/history.txt
@@ -0,0 +1,47 @@
+.. _history:
+
+History of Rtree
+------------------------------------------------------------------------------
+
+`Rtree`_ was started by `Sean Gillies`_ as a port of the `libspatialindex`_
+linkages that `QGIS`_ maintained to provide on-the-fly indexing support for
+GUI operations. A notable feature of `R-trees`_ is the ability to insert data
+into the structure without the need for a global partitioning bounds, and this
+drove Sean's adoption of this code. `Howard Butler`_ later picked up `Rtree`_
+and added a number of features that `libspatialindex`_ provided including disk
+serialization and bulk loading by writing a C API for `libspatialindex`_ and
+re-writing `Rtree`_ as a `ctypes`_ wrapper to utilize this C API. `Brent
+Pedersen`_ came along and added features to support alternative coordinate
+ordering, augmentation of the pickle storage, and lots of documentation.
+Mattias (http://dr-code.org) added support for custom storage backends to
+support using `Rtree`_ as an indexing type in `ZODB`_.
+
+`Rtree`_ has gone through a number of iterations, and at
+0.5.0, it was completely refactored to use a new internal architecture (ctypes
++ a C API over `libspatialindex`_). This refactoring has resulted in a number
+of new features and much more flexibility. See :ref:`changes` for more detail.
+
+.. note::
+    A significant bug in the 1.6.1+ `libspatialindex`_ C API was found where
+    it was using unsigned integers for index entry IDs instead of signed
+    integers. Because `Rtree`_ appeared to be the only significant user of the 
+    C API at this time, it was corrected immediately.  You should update 
+    immediately and re-insert data into new indexes if this is an important 
+    consideration for your application.
+    
+Rtree 0.5.0 included a C library that is now the C API for libspatialindex and
+is part of that source tree. The code bases are independent from each other
+and can now evolve separately. Rtree is pure Python as of 0.6.0+.
+
+
+.. _`Sean Gillies`: http://sgillies.net/blog/
+.. _`Howard Butler`: http://hobu.biz
+.. _`Brent Pedersen`: http://hackmap.blogspot.com/
+.. _`QGIS`: http://qgis.org
+
+
+.. _`ZODB`: http://www.zodb.org/
+.. _`R-trees`: http://en.wikipedia.org/wiki/R-tree
+.. _`ctypes`: http://docs.python.org/library/ctypes.html
+.. _`libspatialindex`: http://libspatialindex.github.com
+.. _`Rtree`: http://rtree.github.com
\ No newline at end of file
diff --git a/docs/source/index.txt b/docs/source/index.txt
new file mode 100644
index 0000000..c34349d
--- /dev/null
+++ b/docs/source/index.txt
@@ -0,0 +1,41 @@
+.. _home:
+
+Rtree: Spatial indexing for Python
+------------------------------------------------------------------------------
+
+`Rtree`_ is a `ctypes`_ Python wrapper of `libspatialindex`_ that provides a 
+number of advanced spatial indexing features for the spatially curious Python 
+user.  These features include:
+
+* Nearest neighbor search
+* Intersection search
+* Multi-dimensional indexes
+* Clustered indexes (store Python pickles directly with index entries)
+* Bulk loading
+* Deletion
+* Disk serialization
+* Custom storage implementation (to implement spatial indexing in ZODB, for example)
+
+Documentation
+..............................................................................
+
+.. toctree::
+   :maxdepth: 2
+   
+   install
+   tutorial
+   Mailing List <http://lists.gispython.org/mailman/listinfo/community>
+   class
+   changes
+   performance
+   examples
+   history
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+.. _`R-trees`: http://en.wikipedia.org/wiki/R-tree
+.. _`ctypes`: http://docs.python.org/library/ctypes.html
+.. _`libspatialindex`: http://libspatialindex.github.com
+.. _`Rtree`: http://toblerity.github.com/rtree/
diff --git a/docs/source/install.txt b/docs/source/install.txt
new file mode 100644
index 0000000..1dca3f2
--- /dev/null
+++ b/docs/source/install.txt
@@ -0,0 +1,46 @@
+.. _installation:
+
+Installation
+------------------------------------------------------------------------------
+
+\*nix 
+..............................................................................
+
+First, download and install version 1.7.0 of the `libspatialindex`_ library from:
+
+http://libspatialindex.github.com
+
+The library is a GNU-style build, so it is a matter of::
+
+  $ ./configure; make; make install
+
+You may need to run the ``ldconfig`` command after installing the library to 
+ensure that applications can find it at startup time.  
+
+At this point you can get Rtree 0.7.0 via easy_install::
+
+  $ easy_install Rtree
+
+or by running the local setup.py::
+
+  $ python setup.py install
+
+You can build and test in place like::
+
+  $ python setup.py test
+
+Windows 
+..............................................................................
+
+The Windows DLLs of `libspatialindex`_ are pre-compiled in 
+windows installers that are available from `PyPI`_.  Installation on Windows 
+is as easy as::
+
+  c:\python2x\scripts\easy_install.exe Rtree
+
+
+.. _`PyPI`: http://pypi.python.org/pypi/Rtree/
+.. _`Rtree`: http://pypi.python.org/pypi/Rtree/
+
+.. _`libspatialindex`: http://libspatialindex.github.com
+
diff --git a/docs/source/performance.txt b/docs/source/performance.txt
new file mode 100644
index 0000000..af97dbc
--- /dev/null
+++ b/docs/source/performance.txt
@@ -0,0 +1,78 @@
+.. _performance:
+
+Performance
+------------------------------------------------------------------------------
+
+See the `tests/benchmarks.py`_ file for a comparison of various query methods 
+and how much acceleration can be obtained from using Rtree.
+
+.. _tests/benchmarks.py: https://raw.github.com/Toblerity/Rtree/master/tests/benchmarks.py
+
+There are a few simple things that will improve performance.
+
+Use stream loading
+..............................................................................
+ 
+This will substantially (orders of magnitude in many cases) improve
+performance over :py:meth:`~rtree.index.Index.insert` by allowing the data to
+be pre-sorted   
+
+:: 
+
+   >>> def generator_function():
+   ...    for i, obj in enumerate(somedata):
+   ...        yield (i, (obj.xmin, obj.ymin, obj.xmax, obj.ymax), obj)
+   >>> r = index.Index(generator_function())
+
+After bulk loading the index, you can then insert additional records into 
+the index using :py:meth:`~rtree.index.Index.insert`
+
+Override :py:data:`~rtree.index.Index.dumps` to use the highest pickle protocol
+...............................................................................
+
+::
+
+    >>> import cPickle, rtree
+    >>> class FastRtree(rtree.Rtree):
+    ...     def dumps(self, obj):
+    ...         return cPickle.dumps(obj, -1)
+    >>> r = FastRtree()
+
+
+Use objects='raw'
+...............................................................................
+
+In any :py:meth:`~rtree.index.Index.intersection` or 
+:py:meth:`~rtree.index.Index.nearest` or query, use objects='raw' keyword
+argument ::
+
+    >>> objs = r.intersection((xmin, ymin, xmax, ymax), objects="raw")
+
+
+Adjust index properties
+...............................................................................
+
+Adjust :py:class:`rtree.index.Property` appropriate to your index.
+
+   * Set your :py:data:`~rtree.index.Property.leaf_capacity` to a higher value
+     than the default 100. 1000+ is fine for the default pagesize of 4096 in
+     many cases.
+
+   * Increase the :py:data:`~rtree.index.Property.fill_factor` to something
+     near 0.9. Smaller fill factors mean more splitting, which means more
+     nodes. This may be bad or good depending on your usage.
+   
+Limit dimensionality to the amount you need
+...............................................................................
+
+Don't use more dimensions than you actually need. If you only need 2, only use
+two. Otherwise, you will waste lots of storage and add that many more floating
+point comparisons for each query, search, and insert operation of the index.
+
+Use the correct query method
+...............................................................................
+ 
+Use :py:meth:`~rtree.index.Index.count` if you only need a count and
+:py:meth:`~rtree.index.Index.intersection` if you only need the ids.
+Otherwise, lots of data may potentially be copied.
+
diff --git a/docs/source/tutorial.txt b/docs/source/tutorial.txt
new file mode 100644
index 0000000..a56ecdd
--- /dev/null
+++ b/docs/source/tutorial.txt
@@ -0,0 +1,208 @@
+.. _tutorial:
+
+Tutorial
+------------------------------------------------------------------------------
+
+This tutorial demonstrates how to take advantage of :ref:`Rtree <home>` for 
+querying data that have a spatial component that can be modeled as bounding 
+boxes.
+
+
+Creating an index
+..............................................................................
+
+The following section describes the basic instantiation and usage of 
+:ref:`Rtree <home>`.
+
+Import
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After :ref:`installing <installation>` :ref:`Rtree <home>`, you should be able to 
+open up a Python prompt and issue the following::
+
+  >>> from rtree import index
+
+:py:mod:`rtree` is organized as a Python package with a couple of modules
+and two major classes - :py:class:`rtree.index.Index` and
+:py:class:`rtree.index.Property`. Users manipulate these classes to interact
+with the index.
+
+Construct an instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After importing the index module, construct an index with the default 
+construction::
+
+  >>> idx = index.Index()
+
+.. note::
+
+    While the default construction is useful in many cases, if you want to 
+    manipulate how the index is constructed you will need pass in a 
+    :py:class:`rtree.index.Property` instance when creating the index. 
+
+Create a bounding box
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After instantiating the index, create a bounding box that we can 
+insert into the index::
+
+  >>> left, bottom, right, top = (0.0, 0.0, 1.0, 1.0)
+
+.. note::
+
+    The coordinate ordering for all functions is sensitive to the index's
+    :py:attr:`~rtree.index.Index.interleaved` data member. If
+    :py:attr:`~rtree.index.Index.interleaved` is False, the coordinates must
+    be in the form [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If
+    :py:attr:`~rtree.index.Index.interleaved` is True, the coordinates must be
+    in the form [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax].
+
+Insert records into the index
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Insert an entry into the index::
+
+  >>> idx.insert(0, (left, bottom, right, top))
+
+.. note::
+
+    Entries that are inserted into the index are not unique in either the 
+    sense of the `id` or of the bounding box that is inserted with index 
+    entries. If you need to maintain uniqueness, you need to manage that before 
+    inserting entries into the Rtree.
+
+.. note::
+
+    Inserting a point, i.e. where left == right && top == bottom, will
+    essentially insert a single point entry into the index instead of copying
+    extra coordinates and inserting them. There is no shortcut to explicitly 
+    insert a single point, however.
+
+Query the index
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are three primary methods for querying the index:
+:py:meth:`rtree.index.Index.intersection`, :py:meth:`rtree.index.Index.nearest`,
+and :py:meth:`rtree.index.Index.count`.
+:py:meth:`~rtree.index.Index.intersection` will return index entries that
+*cross* or are *contained* within the given query window.
+
+Intersection
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Given a query window, return ids that are contained within the window::
+
+  >>> list(idx.intersection((1.0, 1.0, 2.0, 2.0)))
+  [0]
+
+Given a query window that is beyond the bounds of data we have in the 
+index::
+
+  >>> list(idx.intersection((1.0000001, 1.0000001, 2.0, 2.0)))
+  []
+
+Nearest Neighbors
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The following finds the 1 nearest item to the given bounds. If multiple items
+are of equal distance to the bounds, all of them are returned::
+  
+  >>> idx.insert(1, (left, bottom, right, top))
+  >>> list(idx.nearest((1.0000001, 1.0000001, 2.0, 2.0), 1))
+  [0, 1]
+
+
+.. _clustered:
+
+Using Rtree as a cheapo spatial database
+..............................................................................
+
+Rtree also supports inserting any object you can pickle into the index (called
+a clustered index in `libspatialindex`_ parlance). The following inserts the
+picklable object ``42`` into the index with the given id::
+
+  >>> idx.insert(id=id, bounds=(left, bottom, right, top), obj=42)
+
+You can then return a list of objects by giving the ``objects=True`` flag
+to intersection::
+
+  >>> [n.object for n in idx.intersection((left, bottom, right, top), objects=True)]
+  [None, None, 42]
+
+.. warning::
+    `libspatialindex`_'s clustered indexes were not designed to be a database.
+    You get none of the data integrity protections that a database would
+    purport to offer, but this behavior of :ref:`Rtree <home>` can be useful
+    nonetheless. Consider yourself warned. Now go do cool things with it.
+
+Serializing your index to a file
+..............................................................................
+
+One of :ref:`Rtree <home>`'s most useful properties is the ability to 
+serialize Rtree indexes to disk. These include the clustered indexes 
+described :ref:`here <clustered>`::
+  
+  >>> file_idx = index.Rtree('rtree')
+  >>> file_idx.insert(1, (left, bottom, right, top))
+  >>> file_idx.insert(2, (left - 1.0, bottom - 1.0, right + 1.0, top + 1.0))
+  >>> [n for n in file_idx.intersection((left, bottom, right, top))]
+  [1, 2]
+
+.. note::
+
+    By default, if an index file with the given name `rtree` in the example
+    above already exists on the file system, it will be opened in append mode
+    and not be re-created. You can control this behavior with the
+    :py:attr:`rtree.index.Property.overwrite` property of the index property
+    that can be given to the :py:class:`rtree.index.Index` constructor.
+
+.. seealso::
+
+    :ref:`performance` describes some parameters you can tune to make
+    file-based indexes run a bit faster. The choices you make for the
+    parameters is entirely dependent on your usage.
+
+Modifying file names
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Rtree uses the extensions `dat` and `idx` by default for the two index files
+that are created when serializing index data to disk. These file extensions
+are controllable using the :py:attr:`rtree.index.Property.dat_extension` and
+:py:attr:`rtree.index.Property.idx_extension` index properties.
+
+::
+
+    >>> p = rtree.index.Property()
+    >>> p.dat_extension = 'data'
+    >>> p.idx_extension = 'index'
+    >>> file_idx = index.Index('rtree', properties = p)
+
+3D indexes
+..............................................................................
+
+As of Rtree version 0.5.0, you can create 3D (actually kD) indexes. The
+following is a 3D index that is to be stored on disk. Persisted indexes are
+stored on disk using two files -- an index file (.idx) and a data (.dat) file.
+You can modify the extensions these files use by altering the properties of
+the index at instantiation time. The following creates a 3D index that is
+stored on disk as the files ``3d_index.data`` and ``3d_index.index``::
+
+  >>> from rtree import index
+  >>> p = index.Property()
+  >>> p.dimension = 3
+  >>> p.dat_extension = 'data'
+  >>> p.idx_extension = 'index'  
+  >>> idx3d = index.Index('3d_index',properties=p)
+  >>> idx3d.insert(1, (0, 0, 60, 60, 23.0, 42.0))
+  >>> idx3d.intersection( (-1, -1, 62, 62, 22, 43))
+  [1L]
+
+ZODB and Custom Storages
+..............................................................................
+
+https://mail.zope.org/pipermail/zodb-dev/2010-June/013491.html contains a custom 
+storage backend for `ZODB`_
+
+.. _ZODB: http://www.zodb.org/
+
+.. _`libspatialindex`: http://libspatialindex.github.com    
\ No newline at end of file
diff --git a/rtree/__init__.py b/rtree/__init__.py
new file mode 100644
index 0000000..c62fc81
--- /dev/null
+++ b/rtree/__init__.py
@@ -0,0 +1,5 @@
+from .index import Rtree
+
+from .core import rt
+
+__version__ = '0.8.2'
diff --git a/rtree/core.py b/rtree/core.py
new file mode 100644
index 0000000..38179e5
--- /dev/null
+++ b/rtree/core.py
@@ -0,0 +1,488 @@
+import atexit, os, re, sys
+import ctypes
+from ctypes.util import find_library
+
+import ctypes
+
class RTreeError(Exception):
    """Base exception for errors originating in the R-Tree layer."""
+
def check_return(result, func, cargs):
    """ctypes errcheck handler for calls returning an int status code.

    A non-zero status indicates failure: the last error message is read
    from the C library, the library's error state is reset, and an
    RTreeError is raised.  On success, returns True.
    """
    if result != 0:
        # Fixed message prefix: was 'LASError', a copy/paste remnant from
        # libLAS; this library's errors are not LAS errors.
        msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg())
        rt.Error_Reset()
        raise RTreeError(msg)
    return True
+
def check_void(result, func, cargs):
    """ctypes errcheck handler for calls that must return a non-NULL void*.

    Returns the pointer unchanged on success; on a NULL result, fetches
    the last error message, resets the library error state, and raises
    RTreeError.
    """
    if bool(result):
        return result
    msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg())
    rt.Error_Reset()
    raise RTreeError(msg)
+
def check_void_done(result, func, cargs):
    """ctypes errcheck handler for void returns that may legitimately be
    empty: failure is detected solely via the library's error count."""
    count = rt.Error_GetErrorCount()
    if not count:
        return result
    msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg())
    rt.Error_Reset()
    raise RTreeError(msg)
+
def check_value(result, func, cargs):
    """ctypes errcheck handler for plain value returns.

    The returned value itself carries no error information, so any error
    accumulated by the library during the call triggers an RTreeError.
    """
    if rt.Error_GetErrorCount() != 0:
        message = 'Error in "%s": %s' % (func.__name__,
                                         rt.Error_GetLastErrorMsg())
        rt.Error_Reset()
        raise RTreeError(message)
    return result
+
def check_value_free(result, func, cargs):
    """ctypes errcheck handler for value returns; body is identical to
    :func:`check_value`.

    NOTE(review): presumably kept as a distinct name so prototypes whose
    results must later be freed can be told apart -- consider aliasing.
    """
    if rt.Error_GetErrorCount() != 0:
        message = 'Error in "%s": %s' % (func.__name__,
                                         rt.Error_GetLastErrorMsg())
        rt.Error_Reset()
        raise RTreeError(message)
    return result
+
def free_returned_char_p(result, func, cargs):
    """Copy a C-allocated char* result into a Python byte string, then
    return the buffer to the library via Index_Free."""
    value = ctypes.string_at(result)
    void_pp = ctypes.cast(result, ctypes.POINTER(ctypes.c_void_p))
    rt.Index_Free(void_pp)
    return value
+    
def free_error_msg_ptr(result, func, cargs):
    """Copy the returned error-message char* into bytes and free the
    C-side allocation via Index_Free."""
    message = ctypes.string_at(result)
    rt.Index_Free(ctypes.cast(result, ctypes.POINTER(ctypes.c_void_p)))
    return message
+    
+
# Locate and load the libspatialindex C API shared library, binding it to
# the module-level name ``rt``.
if os.name == 'nt':

    def _load_library(dllname, loadfunction, dllpaths=('', )):
        """Load a DLL via ctypes load function. Return None on failure.

        Try loading the DLL from the current package directory first,
        then from the Windows DLL search path.
        """
        try:
            dllpaths = (os.path.abspath(os.path.dirname(__file__)),
                        ) + dllpaths
        except NameError:
            pass  # no __file__ attribute on PyPy and some frozen distributions
        for path in dllpaths:
            if path:
                # temporarily add the path to the PATH environment variable
                # so Windows can find additional DLL dependencies.
                try:
                    oldenv = os.environ['PATH']
                    os.environ['PATH'] = path + ';' + oldenv
                except KeyError:
                    oldenv = None
            try:
                return loadfunction(os.path.join(path, dllname))
            except (WindowsError, OSError):
                pass
            finally:
                if path and oldenv is not None:
                    os.environ['PATH'] = oldenv
        return None

    rt = _load_library('spatialindex_c.dll', ctypes.cdll.LoadLibrary)
    if not rt:
        raise OSError("could not find or load spatialindex_c.dll")

elif os.name == 'posix':
    platform = os.uname()[0]
    lib_name = find_library('spatialindex_c')
    if lib_name is None:
        # Previously ctypes.CDLL(None) was attempted here, which loads the
        # main program and only fails much later with a confusing
        # AttributeError when the first symbol is bound.  Fail fast instead.
        raise OSError("could not find libspatialindex_c shared library")
    rt = ctypes.CDLL(lib_name)
else:
    raise RTreeError('Unsupported OS "%s"' % os.name)
+
# ---------------------------------------------------------------------------
# ctypes prototypes for the libspatialindex C API.  Each entry point gets
# explicit argtypes/restype plus an errcheck handler that converts the
# library's error state into RTreeError exceptions.
# ---------------------------------------------------------------------------

# Error-state accessors.
rt.Error_GetLastErrorNum.restype = ctypes.c_int

rt.Error_GetLastErrorMsg.argtypes = []
rt.Error_GetLastErrorMsg.restype = ctypes.POINTER(ctypes.c_char)
rt.Error_GetLastErrorMsg.errcheck = free_error_msg_ptr

rt.Error_GetLastErrorMethod.restype = ctypes.POINTER(ctypes.c_char)
rt.Error_GetLastErrorMethod.errcheck = free_returned_char_p

rt.Error_GetErrorCount.argtypes = []
rt.Error_GetErrorCount.restype=ctypes.c_int

rt.Error_Reset.argtypes = []
rt.Error_Reset.restype = None

# Index lifecycle.
rt.Index_Create.argtypes = [ctypes.c_void_p]
rt.Index_Create.restype = ctypes.c_void_p
rt.Index_Create.errcheck = check_void

# Bulk-load callback signature: the callback fills (in this order) an item
# id, the mins/maxs coordinate arrays, the dimension count, the serialized
# data pointer and the data length, returning an int status.  The field
# order mirrors Index_InsertData's parameters below.
NEXTFUNC = ctypes.CFUNCTYPE(ctypes.c_int,
                            ctypes.POINTER(ctypes.c_int64),
                            ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                            ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                            ctypes.POINTER(ctypes.c_uint32),
                            ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),
                            ctypes.POINTER(ctypes.c_size_t))

rt.Index_CreateWithStream.argtypes = [ctypes.c_void_p, NEXTFUNC]
rt.Index_CreateWithStream.restype = ctypes.c_void_p
rt.Index_CreateWithStream.errcheck = check_void

rt.Index_Destroy.argtypes = [ctypes.c_void_p]
rt.Index_Destroy.restype = None
rt.Index_Destroy.errcheck = check_void_done

rt.Index_GetProperties.argtypes = [ctypes.c_void_p]
rt.Index_GetProperties.restype = ctypes.c_void_p
rt.Index_GetProperties.errcheck = check_void

# Item insertion / removal: (handle, id, mins, maxs, dimension[, data, len]).
rt.Index_DeleteData.argtypes = [ctypes.c_void_p,
                                ctypes.c_int64,
                                ctypes.POINTER(ctypes.c_double),
                                ctypes.POINTER(ctypes.c_double),
                                ctypes.c_uint32]
rt.Index_DeleteData.restype = ctypes.c_int
rt.Index_DeleteData.errcheck = check_return

rt.Index_InsertData.argtypes = [ctypes.c_void_p,
                                ctypes.c_int64,
                                ctypes.POINTER(ctypes.c_double),
                                ctypes.POINTER(ctypes.c_double),
                                ctypes.c_uint32,
                                ctypes.POINTER(ctypes.c_ubyte),
                                ctypes.c_uint32]
rt.Index_InsertData.restype = ctypes.c_int
rt.Index_InsertData.errcheck = check_return

rt.Index_GetBounds.argtypes = [ ctypes.c_void_p,
                                ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                                ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                                ctypes.POINTER(ctypes.c_uint32)]
rt.Index_GetBounds.restype = ctypes.c_int
rt.Index_GetBounds.errcheck = check_value

rt.Index_IsValid.argtypes = [ctypes.c_void_p]
rt.Index_IsValid.restype = ctypes.c_int
rt.Index_IsValid.errcheck = check_value

# Queries: the *_obj variants fill an array of item handles (void*), the
# *_id variants an array of int64 ids; both write the result count.
rt.Index_Intersects_obj.argtypes = [ctypes.c_void_p,
                                    ctypes.POINTER(ctypes.c_double),
                                    ctypes.POINTER(ctypes.c_double),
                                    ctypes.c_uint32,
                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)),
                                    ctypes.POINTER(ctypes.c_uint64)]
rt.Index_Intersects_obj.restype = ctypes.c_int
rt.Index_Intersects_obj.errcheck = check_return

rt.Index_Intersects_id.argtypes = [ctypes.c_void_p,
                                    ctypes.POINTER(ctypes.c_double),
                                    ctypes.POINTER(ctypes.c_double),
                                    ctypes.c_uint32,
                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
                                    ctypes.POINTER(ctypes.c_uint64)]
rt.Index_Intersects_id.restype = ctypes.c_int
rt.Index_Intersects_id.errcheck = check_return
+
# Count-only intersection query: writes the number of hits into the final
# uint64* out-parameter.
rt.Index_Intersects_count.argtypes = [  ctypes.c_void_p,
                                        ctypes.POINTER(ctypes.c_double),
                                        ctypes.POINTER(ctypes.c_double),
                                        ctypes.c_uint32,
                                        ctypes.POINTER(ctypes.c_uint64)]
# Fix: restype/errcheck were missing here, unlike every sibling Index_*
# prototype, so a failing C call went undetected instead of raising
# RTreeError.
rt.Index_Intersects_count.restype = ctypes.c_int
rt.Index_Intersects_count.errcheck = check_return
+
# Nearest-neighbour queries: same out-parameter shapes as the intersection
# queries above (handle arrays for *_obj, int64 id arrays for *_id).
rt.Index_NearestNeighbors_obj.argtypes = [  ctypes.c_void_p,
                                            ctypes.POINTER(ctypes.c_double),
                                            ctypes.POINTER(ctypes.c_double),
                                            ctypes.c_uint32,
                                            ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)),
                                            ctypes.POINTER(ctypes.c_uint64)]
rt.Index_NearestNeighbors_obj.restype = ctypes.c_int
rt.Index_NearestNeighbors_obj.errcheck = check_return

rt.Index_NearestNeighbors_id.argtypes = [  ctypes.c_void_p,
                                            ctypes.POINTER(ctypes.c_double),
                                            ctypes.POINTER(ctypes.c_double),
                                            ctypes.c_uint32,
                                            ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
                                            ctypes.POINTER(ctypes.c_uint64)]
rt.Index_NearestNeighbors_id.restype = ctypes.c_int
rt.Index_NearestNeighbors_id.errcheck = check_return

rt.Index_GetLeaves.argtypes = [ ctypes.c_void_p,
                                ctypes.POINTER(ctypes.c_uint32),
                                ctypes.POINTER(ctypes.POINTER(ctypes.c_uint32)),
                                ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
                                ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_int64))),
                                ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
                                ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
                                ctypes.POINTER(ctypes.c_uint32)]
rt.Index_GetLeaves.restype = ctypes.c_int
rt.Index_GetLeaves.errcheck = check_return

# Memory management for result arrays and library-allocated buffers.
rt.Index_DestroyObjResults.argtypes = [ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.c_uint32]
rt.Index_DestroyObjResults.restype = None
rt.Index_DestroyObjResults.errcheck = check_void_done

rt.Index_ClearBuffer.argtypes = [ctypes.c_void_p]
rt.Index_ClearBuffer.restype = None
rt.Index_ClearBuffer.errcheck = check_void_done

rt.Index_Free.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
rt.Index_Free.restype = None

# IndexItem accessors (items returned by the *_obj queries).
rt.IndexItem_Destroy.argtypes = [ctypes.c_void_p]
rt.IndexItem_Destroy.restype = None
rt.IndexItem_Destroy.errcheck = check_void_done

rt.IndexItem_GetData.argtypes = [   ctypes.c_void_p,
                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),
                                    ctypes.POINTER(ctypes.c_uint64)]
rt.IndexItem_GetData.restype = ctypes.c_int
rt.IndexItem_GetData.errcheck = check_value

rt.IndexItem_GetBounds.argtypes = [ ctypes.c_void_p,
                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                                    ctypes.POINTER(ctypes.c_uint32)]
rt.IndexItem_GetBounds.restype = ctypes.c_int
rt.IndexItem_GetBounds.errcheck = check_value

rt.IndexItem_GetID.argtypes = [ctypes.c_void_p]
rt.IndexItem_GetID.restype = ctypes.c_int64
rt.IndexItem_GetID.errcheck = check_value

# IndexProperty getters/setters.  Setters take (handle, value) and return
# an int status checked by check_return; getters return the value itself
# and rely on check_value to surface errors.
rt.IndexProperty_Create.argtypes = []
rt.IndexProperty_Create.restype = ctypes.c_void_p
rt.IndexProperty_Create.errcheck = check_void

rt.IndexProperty_Destroy.argtypes = [ctypes.c_void_p]
rt.IndexProperty_Destroy.restype = None
rt.IndexProperty_Destroy.errcheck = check_void_done

rt.IndexProperty_SetIndexType.argtypes = [ctypes.c_void_p, ctypes.c_int32]
rt.IndexProperty_SetIndexType.restype = ctypes.c_int
rt.IndexProperty_SetIndexType.errcheck = check_return

rt.IndexProperty_GetIndexType.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexType.restype = ctypes.c_int
rt.IndexProperty_GetIndexType.errcheck = check_value

rt.IndexProperty_SetDimension.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetDimension.restype = ctypes.c_int
rt.IndexProperty_SetDimension.errcheck = check_return

rt.IndexProperty_GetDimension.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetDimension.restype = ctypes.c_int
rt.IndexProperty_GetDimension.errcheck = check_value

rt.IndexProperty_SetIndexVariant.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetIndexVariant.restype = ctypes.c_int
rt.IndexProperty_SetIndexVariant.errcheck = check_return

rt.IndexProperty_GetIndexVariant.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexVariant.restype = ctypes.c_int
rt.IndexProperty_GetIndexVariant.errcheck = check_value

rt.IndexProperty_SetIndexStorage.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetIndexStorage.restype = ctypes.c_int
rt.IndexProperty_SetIndexStorage.errcheck = check_return

rt.IndexProperty_GetIndexStorage.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexStorage.restype = ctypes.c_int
rt.IndexProperty_GetIndexStorage.errcheck = check_value

rt.IndexProperty_SetIndexCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetIndexCapacity.restype = ctypes.c_int
rt.IndexProperty_SetIndexCapacity.errcheck = check_return

rt.IndexProperty_GetIndexCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexCapacity.restype = ctypes.c_int
rt.IndexProperty_GetIndexCapacity.errcheck = check_value

rt.IndexProperty_SetLeafCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetLeafCapacity.restype = ctypes.c_int
rt.IndexProperty_SetLeafCapacity.errcheck = check_return

rt.IndexProperty_GetLeafCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetLeafCapacity.restype = ctypes.c_int
rt.IndexProperty_GetLeafCapacity.errcheck = check_value

rt.IndexProperty_SetPagesize.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetPagesize.restype = ctypes.c_int
rt.IndexProperty_SetPagesize.errcheck = check_return

rt.IndexProperty_GetPagesize.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetPagesize.restype = ctypes.c_int
rt.IndexProperty_GetPagesize.errcheck = check_value

rt.IndexProperty_SetLeafPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetLeafPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_SetLeafPoolCapacity.errcheck = check_return

rt.IndexProperty_GetLeafPoolCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetLeafPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_GetLeafPoolCapacity.errcheck = check_value

rt.IndexProperty_SetIndexPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetIndexPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_SetIndexPoolCapacity.errcheck = check_return

rt.IndexProperty_GetIndexPoolCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_GetIndexPoolCapacity.errcheck = check_value

rt.IndexProperty_SetRegionPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetRegionPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_SetRegionPoolCapacity.errcheck = check_return

rt.IndexProperty_GetRegionPoolCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetRegionPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_GetRegionPoolCapacity.errcheck = check_value

rt.IndexProperty_SetPointPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetPointPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_SetPointPoolCapacity.errcheck = check_return

rt.IndexProperty_GetPointPoolCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetPointPoolCapacity.restype = ctypes.c_int
rt.IndexProperty_GetPointPoolCapacity.errcheck = check_value

rt.IndexProperty_SetBufferingCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetBufferingCapacity.restype = ctypes.c_int
rt.IndexProperty_SetBufferingCapacity.errcheck = check_return

rt.IndexProperty_GetBufferingCapacity.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetBufferingCapacity.restype = ctypes.c_int
rt.IndexProperty_GetBufferingCapacity.errcheck = check_value

rt.IndexProperty_SetEnsureTightMBRs.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetEnsureTightMBRs.restype = ctypes.c_int
rt.IndexProperty_SetEnsureTightMBRs.errcheck = check_return

rt.IndexProperty_GetEnsureTightMBRs.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetEnsureTightMBRs.restype = ctypes.c_int
rt.IndexProperty_GetEnsureTightMBRs.errcheck = check_value

rt.IndexProperty_SetOverwrite.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetOverwrite.restype = ctypes.c_int
rt.IndexProperty_SetOverwrite.errcheck = check_return

rt.IndexProperty_GetOverwrite.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetOverwrite.restype = ctypes.c_int
rt.IndexProperty_GetOverwrite.errcheck = check_value

rt.IndexProperty_SetNearMinimumOverlapFactor.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetNearMinimumOverlapFactor.restype = ctypes.c_int
rt.IndexProperty_SetNearMinimumOverlapFactor.errcheck = check_return

rt.IndexProperty_GetNearMinimumOverlapFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetNearMinimumOverlapFactor.restype = ctypes.c_int
rt.IndexProperty_GetNearMinimumOverlapFactor.errcheck = check_value

rt.IndexProperty_SetWriteThrough.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetWriteThrough.restype = ctypes.c_int
rt.IndexProperty_SetWriteThrough.errcheck = check_return

rt.IndexProperty_GetWriteThrough.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetWriteThrough.restype = ctypes.c_int
rt.IndexProperty_GetWriteThrough.errcheck = check_value

# Double-valued tuning properties.
rt.IndexProperty_SetFillFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetFillFactor.restype = ctypes.c_int
rt.IndexProperty_SetFillFactor.errcheck = check_return

rt.IndexProperty_GetFillFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFillFactor.restype = ctypes.c_double
rt.IndexProperty_GetFillFactor.errcheck = check_value

rt.IndexProperty_SetSplitDistributionFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetSplitDistributionFactor.restype = ctypes.c_int
rt.IndexProperty_SetSplitDistributionFactor.errcheck = check_return

rt.IndexProperty_GetSplitDistributionFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetSplitDistributionFactor.restype = ctypes.c_double
rt.IndexProperty_GetSplitDistributionFactor.errcheck = check_value

rt.IndexProperty_SetTPRHorizon.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetTPRHorizon.restype = ctypes.c_int
rt.IndexProperty_SetTPRHorizon.errcheck = check_return

rt.IndexProperty_GetTPRHorizon.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetTPRHorizon.restype = ctypes.c_double
rt.IndexProperty_GetTPRHorizon.errcheck = check_value

rt.IndexProperty_SetReinsertFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetReinsertFactor.restype = ctypes.c_int
rt.IndexProperty_SetReinsertFactor.errcheck = check_return

rt.IndexProperty_GetReinsertFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetReinsertFactor.restype = ctypes.c_double
rt.IndexProperty_GetReinsertFactor.errcheck = check_value

# String-valued properties: setters take char*, getters return a char*
# that must be copied and freed (free_returned_char_p).
rt.IndexProperty_SetFileName.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
rt.IndexProperty_SetFileName.restype = ctypes.c_int
rt.IndexProperty_SetFileName.errcheck = check_return

rt.IndexProperty_GetFileName.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFileName.errcheck = free_returned_char_p
rt.IndexProperty_GetFileName.restype = ctypes.POINTER(ctypes.c_char)

rt.IndexProperty_SetFileNameExtensionDat.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
rt.IndexProperty_SetFileNameExtensionDat.restype = ctypes.c_int
rt.IndexProperty_SetFileNameExtensionDat.errcheck = check_return

rt.IndexProperty_GetFileNameExtensionDat.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFileNameExtensionDat.errcheck = free_returned_char_p
rt.IndexProperty_GetFileNameExtensionDat.restype = ctypes.POINTER(ctypes.c_char)

rt.IndexProperty_SetFileNameExtensionIdx.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
rt.IndexProperty_SetFileNameExtensionIdx.restype = ctypes.c_int
rt.IndexProperty_SetFileNameExtensionIdx.errcheck = check_return

rt.IndexProperty_GetFileNameExtensionIdx.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFileNameExtensionIdx.errcheck = free_returned_char_p
rt.IndexProperty_GetFileNameExtensionIdx.restype = ctypes.POINTER(ctypes.c_char)

rt.IndexProperty_SetCustomStorageCallbacksSize.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetCustomStorageCallbacksSize.restype = ctypes.c_int
rt.IndexProperty_SetCustomStorageCallbacksSize.errcheck = check_return

rt.IndexProperty_GetCustomStorageCallbacksSize.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetCustomStorageCallbacksSize.restype = ctypes.c_uint32
rt.IndexProperty_GetCustomStorageCallbacksSize.errcheck = check_value

rt.IndexProperty_SetCustomStorageCallbacks.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
rt.IndexProperty_SetCustomStorageCallbacks.restype = ctypes.c_int
rt.IndexProperty_SetCustomStorageCallbacks.errcheck = check_return

rt.IndexProperty_GetCustomStorageCallbacks.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetCustomStorageCallbacks.restype = ctypes.c_void_p
rt.IndexProperty_GetCustomStorageCallbacks.errcheck = check_value

rt.IndexProperty_SetIndexID.argtypes = [ctypes.c_void_p, ctypes.c_int64]
rt.IndexProperty_SetIndexID.restype = ctypes.c_int
rt.IndexProperty_SetIndexID.errcheck = check_return

rt.IndexProperty_GetIndexID.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexID.restype = ctypes.c_int64
rt.IndexProperty_GetIndexID.errcheck = check_value

# Library-level helpers: scratch buffers and the version string.
rt.SIDX_NewBuffer.argtypes = [ctypes.c_uint]
rt.SIDX_NewBuffer.restype = ctypes.c_void_p
rt.SIDX_NewBuffer.errcheck = check_void

rt.SIDX_DeleteBuffer.argtypes = [ctypes.c_void_p]
rt.SIDX_DeleteBuffer.restype = None

rt.SIDX_Version.argtypes = []
rt.SIDX_Version.restype = ctypes.POINTER(ctypes.c_char)
rt.SIDX_Version.errcheck = free_returned_char_p
diff --git a/rtree/index.py b/rtree/index.py
new file mode 100644
index 0000000..def4643
--- /dev/null
+++ b/rtree/index.py
@@ -0,0 +1,1295 @@
+
+import os
+import os.path
+import pprint
+
+from . import core
+import ctypes
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+import sys
+if sys.version_info[0] == 2:
+    range = xrange
+    string_types = basestring
+elif sys.version_info[0] == 3:
+    string_types = str
+
# Index storage backends (values passed to IndexProperty_SetIndexStorage).
RT_Memory = 0
RT_Disk = 1
RT_Custom = 2

# Index variants (splitting algorithms).
RT_Linear = 0
RT_Quadratic = 1
RT_Star = 2

# Index types.
RT_RTree = 0
RT_MVRTree = 1
RT_TPRTree = 2

# Version of the underlying libspatialindex C library, e.g. b'1.8.5'.
__c_api_version__ = core.rt.SIDX_Version()

major_version, minor_version, patch_version = [
    int(t) for t in __c_api_version__.decode('utf-8').split('.')]

# Require libspatialindex >= 1.7.0.  Fixed to compare (major, minor) as a
# tuple: the previous expression ``major < 2 and minor < 7`` would have
# accepted any hypothetical 0.x release whose minor component was >= 7.
if (major_version, minor_version) < (1, 7):
    raise Exception("This version of Rtree requires libspatialindex 1.7.0 or greater")

__all__ = ['Rtree', 'Index', 'Property']
+
def _get_bounds(handle, bounds_fn, interleaved):
    """Call *bounds_fn* on *handle* and return the bounds as a flat list.

    *bounds_fn* is one of the C accessors with the signature
    (handle, double** mins, double** maxs, uint32* dimension).  Returns
    None when the reported dimension is 0.  With ``interleaved`` True the
    result is [min1, ..., mink, max1, ..., maxk] (bbox order); otherwise
    it is converted through Index.deinterleave.
    """
    pp_mins = ctypes.pointer(ctypes.c_double())
    pp_maxs = ctypes.pointer(ctypes.c_double())
    dimension = ctypes.c_uint32(0)

    bounds_fn(handle,
            ctypes.byref(pp_mins),
            ctypes.byref(pp_maxs),
            ctypes.byref(dimension))
    if (dimension.value == 0): return None

    # Reinterpret the returned double* as fixed-size arrays of the
    # dimension the library reported.
    mins = ctypes.cast(pp_mins,ctypes.POINTER(ctypes.c_double \
                                                      * dimension.value))
    maxs = ctypes.cast(pp_maxs,ctypes.POINTER(ctypes.c_double \
                                                      * dimension.value))

    results = [mins.contents[i] for i in range(dimension.value)]
    results += [maxs.contents[i] for i in range(dimension.value)]

    # The coordinate buffers were allocated by the C library; return them
    # via Index_Free once their values have been copied out.
    p_mins = ctypes.cast(mins,ctypes.POINTER(ctypes.c_double))
    p_maxs = ctypes.cast(maxs,ctypes.POINTER(ctypes.c_double))
    core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
    core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))
    if interleaved: # they want bbox order.
        return results
    return Index.deinterleave(results)
+
def _get_data(handle):
    """Return the serialized data blob stored on an IndexItem *handle*,
    or None when the item carries no data.  The C-allocated buffer is
    always handed back to the library via Index_Free."""
    nbytes = ctypes.c_uint64(0)
    buf = ctypes.pointer(ctypes.c_uint8(0))
    core.rt.IndexItem_GetData(handle, ctypes.byref(buf), ctypes.byref(nbytes))
    raw = ctypes.cast(buf, ctypes.POINTER(ctypes.c_void_p))
    data = None
    if nbytes.value:
        data = ctypes.string_at(buf, nbytes.value)
    core.rt.Index_Free(raw)
    return data
+
+class Index(object):
+    """An R-Tree, MVR-Tree, or TPR-Tree indexing object"""
+
    def __init__(self,  *args, **kwargs):
        """Creates a new index

        :param filename:
            The first argument in the constructor is assumed to be a filename
            determining that a file-based storage for the index should be used.
            If the first argument is not of type basestring, it is then assumed
            to be an instance of ICustomStorage or derived class.
            If the first argument is neither of type basestring nor an instance
            of ICustomStorage, it is then assumed to be an input index item
            stream.

        :param stream:
            If the first argument in the constructor is not of type basestring,
            it is assumed to be an iterable stream of data that will raise a
            StopIteration.  It must be in the form defined by the :attr:`interleaved`
            attribute of the index.  The following example would assume
            :attr:`interleaved` is False::

            (id, (minx, maxx, miny, maxy, minz, maxz, ..., ..., mink, maxk), object)

            The object can be None, but you must put a place holder of ``None`` there.

        :param storage:
            If the first argument in the constructor is an instance of ICustomStorage
            then the given custom storage is used.

        :param interleaved: True or False, defaults to True.
            This parameter determines the coordinate order for all methods that
            take in coordinates.

        :param properties: An :class:`index.Property` object
            This object sets both the creation and instantiation properties
            for the object and they are passed down into libspatialindex.
            A few properties are curried from instantiation parameters
            for you like ``pagesize`` and ``overwrite``
            to ensure compatibility with previous versions of the library.  All
            other properties must be set on the object.

        .. warning::
            The coordinate ordering for all functions are sensitive the the
            index's :attr:`interleaved` data member.  If :attr:`interleaved`
            is False, the coordinates must be in the form
            [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If :attr:`interleaved`
            is True, the coordinates must be in the form
            [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax].

        A basic example
        ::

            >>> from rtree import index
            >>> p = index.Property()

            >>> idx = index.Index(properties=p)
            >>> idx  # doctest: +ELLIPSIS
            <rtree.index.Index object at 0x...>

        Insert an item into the index::

            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)

        Query::

            >>> hits = idx.intersection((0, 0, 60, 60), objects=True)
            >>> for i in hits:
            ...     if i.id == 4321:
            ...         i.object
            ...         i.bbox
            42
            [34.3776829412, 26.737585373400002, 49.3776829412, 41.737585373400002]


        Using custom serializers
        ::

            >>> import simplejson
            >>> class JSONIndex(index.Index):
            ...     dumps = staticmethod(simplejson.dumps)
            ...     loads = staticmethod(simplejson.loads)

            >>> json_idx = JSONIndex()
            >>> json_idx.insert(1, (0, 1, 0, 1), {"nums": [23, 45], "letters": "abcd"})
            >>> list(json_idx.nearest((0, 0), 1, objects="raw"))
            [{'letters': 'abcd', 'nums': [23, 45]}]

        """
        self.properties = kwargs.get('properties', Property())

        # interleaved True gives 'bbox' order.
        self.interleaved = bool(kwargs.get('interleaved', True))

        # Classify the positional arguments: a (bytes/str) filename, a
        # custom storage instance, or a raw item stream; an optional
        # second positional argument is always a stream.
        stream = None
        basename = None
        storage = None
        if args:
            if isinstance(args[0], string_types) or isinstance(args[0], bytes):
                # they sent in a filename
                basename = args[0]
                # they sent in a filename, stream
                if len(args) > 1:
                    stream = args[1]
            elif isinstance(args[0], ICustomStorage):
                storage = args[0]
                # they sent in a storage, stream
                if len(args) > 1:
                    stream = args[1]
            else:
                stream = args[0]


        if basename:
            # File-backed index: configure disk storage on the properties.
            self.properties.storage = RT_Disk
            self.properties.filename = basename

            # check we can read the file
            f = basename + "." + self.properties.idx_extension
            p = os.path.abspath(f)


            # assume if the file exists, we're not going to overwrite it
            # unless the user explicitly set the property to do so
            if os.path.exists(p):

                self.properties.overwrite = bool(kwargs.get('overwrite', False))

                # assume we're fetching the first index_id.  If the user
                # set it, we'll fetch that one.
                if not self.properties.overwrite:
                    try:
                        self.properties.index_id
                    except core.RTreeError:
                        self.properties.index_id=1

            d = os.path.dirname(p)
            if not os.access(d, os.W_OK):
                message = "Unable to open file '%s' for index storage"%f
                raise IOError(message)
        elif storage:
            # Custom-storage index: requires libspatialindex >= 1.8.
            if (major_version < 2 and minor_version < 8):
                raise core.RTreeError("libspatialindex {0} does not support custom storage".format(__c_api_version__))

            self.properties.storage = RT_Custom
            if storage.hasData:
                self.properties.overwrite = bool(kwargs.get('overwrite', False))
                if not self.properties.overwrite:
                    try:
                        self.properties.index_id
                    except core.RTreeError:
                        self.properties.index_id=1
                else:
                    storage.clear()
            self.customstorage = storage
            storage.registerCallbacks( self.properties )
        else:
            # No filename or storage given: keep the index in memory.
            self.properties.storage = RT_Memory

        try:
            self.properties.pagesize = int(kwargs['pagesize'])
        except KeyError:
            pass

        # Bulk-load from the stream when one was supplied; otherwise create
        # an empty index.  This wrapper owns the resulting C handle.
        if stream:
            self.handle = self._create_idx_from_stream(stream)
        else:
            self.handle = core.rt.Index_Create(self.properties.handle)
        self.owned = True
+
    def __del__(self):
        # Finalizer: destroy the underlying C index, but only when this
        # wrapper owns a live handle.  The guards protect against
        # partially-constructed instances and interpreter teardown, where
        # module globals may already have been cleared.
        try:
            self.owned
        except AttributeError:
            # we were partially constructed.  We're going to let it leak
            # in that case
            return
        if self.owned:
            if self.handle and core:
                try:
                    core.rt
                except AttributeError:
                    # uh, leak?  We're owned, and have a handle
                    # but for some reason the dll isn't active
                    return

                core.rt.Index_Destroy(self.handle)
                self.owned = False
                self.handle = None
+
+    def dumps(self, obj):
+        """Serialize *obj* to bytes with pickle before storage in the index."""
+        return pickle.dumps(obj)
+
+    def loads(self, string):
+        """Deserialize bytes previously produced by :meth:`dumps`."""
+        return pickle.loads(string)
+
+    def close(self):
+        """Force a flush of the index to storage. Renders index
+        inaccessible.
+        """
+        if self.handle and core:
+            core.rt.Index_Destroy(self.handle)
+            self.handle = None
+            self.owned = False
+        else:
+            raise IOError("Unclosable index")
+
+    def get_coordinate_pointers(self, coordinates):
+
+        try:
+            iter(coordinates)
+        except TypeError:
+            raise TypeError('Bounds must be a sequence')
+        dimension = self.properties.dimension
+
+        mins = ctypes.c_double * dimension
+        maxs = ctypes.c_double * dimension
+
+        if not self.interleaved:
+            coordinates = Index.interleave(coordinates)
+
+        # it's a point make it into a bbox. [x, y] => [x, y, x, y]
+        if len(coordinates) == dimension:
+            coordinates += coordinates
+
+        if len(coordinates) != dimension * 2:
+            raise core.RTreeError("Coordinates must be in the form "
+                                    "(minx, miny, maxx, maxy) or (x, y) for 2D indexes")
+
+        # so here all coords are in the form:
+        # [xmin, ymin, zmin, xmax, ymax, zmax]
+        for i in range(dimension):
+            if not coordinates[i] <= coordinates[i + dimension]:
+                raise core.RTreeError("Coordinates must not have minimums more than maximums")
+
+        p_mins = mins(*[ctypes.c_double(\
+                            coordinates[i]) for i in range(dimension)])
+        p_maxs = maxs(*[ctypes.c_double(\
+                        coordinates[i + dimension]) for i in range(dimension)])
+
+        return (p_mins, p_maxs)
+
+    def _serialize(self, obj):
+        """Pickle *obj* and return ``(size, ptr, serialized)``.
+
+        ``ptr`` is a ``POINTER(c_uint8)`` into a ctypes buffer holding a
+        copy of the pickled bytes; ``serialized`` is returned as well so
+        the caller can keep a Python reference alive while the pointer
+        is in use.
+        """
+        serialized = self.dumps(obj)
+        size = len(serialized)
+
+        d = ctypes.create_string_buffer(serialized)
+        #d.value = serialized
+        p = ctypes.pointer(d)
+
+        # return serialized to keep it alive for the pointer.
+        return size, ctypes.cast(p, ctypes.POINTER(ctypes.c_uint8)), serialized
+
+    def insert(self, id, coordinates, obj = None):
+        """Inserts an item into the index with the given coordinates.
+
+        :param id: long integer
+            A long integer that is the identifier for this index entry.  IDs
+            need not be unique to be inserted into the index, and it is up
+            to the user to ensure they are unique if this is a requirement.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        :param obj: a pickleable object.  If not None, this object will be
+            stored in the index with the :attr:`id`.
+
+        The following example inserts an entry into the index with id `4321`,
+        and the object it stores with that id is the number `42`.  The coordinate
+        ordering in this instance is the default (interleaved=True) ordering::
+
+            >>> from rtree import index
+            >>> idx = index.Index()
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+        """
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+        data = ctypes.c_ubyte(0)
+        size = 0
+        pyserialized = None
+        if obj is not None:
+            size, data, pyserialized = self._serialize(obj)
+        # pyserialized keeps the pickled bytes referenced so the data
+        # pointer stays valid for the duration of the C call below.
+        core.rt.Index_InsertData(self.handle, id, p_mins, p_maxs, self.properties.dimension, data, size)
+    add = insert
+
+    def count(self, coordinates):
+        """Return number of objects that intersect the given coordinates.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        The following example queries the index for the number of stored
+        entries whose bounds intersect the given query window::
+
+            >>> from rtree import index
+            >>> idx = index.Index()
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+            >>> idx.count((0, 0, 60, 60))
+            1
+
+        """
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+
+        # Output parameter filled in by the C library.
+        p_num_results = ctypes.c_uint64(0)
+
+
+        core.rt.Index_Intersects_count(    self.handle,
+                                        p_mins,
+                                        p_maxs,
+                                        self.properties.dimension,
+                                        ctypes.byref(p_num_results))
+
+
+        return p_num_results.value
+
+    def intersection(self, coordinates, objects=False):
+        """Return ids or objects in the index that intersect the given coordinates.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        :param objects: True or False or 'raw'
+            If True, the intersection method will return index objects that
+            were pickled when they were stored with each index entry, as well
+            as the id and bounds of the index entries. If 'raw', the objects
+            will be returned without the :class:`rtree.index.Item` wrapper.
+
+        The following example queries the index for objects stored in the
+        index whose bounds intersect the given query window::
+
+            >>> from rtree import index
+            >>> idx = index.Index()
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+            >>> hits = list(idx.intersection((0, 0, 60, 60), objects=True))
+            >>> [(item.object, item.bbox) for item in hits if item.id == 4321]
+            [(42, [34.3776829412, 26.737585373400002, 49.3776829412, 41.737585373400002])]
+
+        If the :class:`rtree.index.Item` wrapper is not used, it is faster to
+        request the 'raw' objects::
+
+            >>> list(idx.intersection((0, 0, 60, 60), objects="raw"))
+            [42]
+
+
+        """
+
+        # Object-returning queries go through a different C entry point.
+        if objects: return self._intersection_obj(coordinates, objects)
+
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+
+        p_num_results = ctypes.c_uint64(0)
+
+        # The C library allocates and fills an int64 id array; _get_ids
+        # reads it out and frees it.
+        it = ctypes.pointer(ctypes.c_int64())
+
+        core.rt.Index_Intersects_id(    self.handle,
+                                        p_mins,
+                                        p_maxs,
+                                        self.properties.dimension,
+                                        ctypes.byref(it),
+                                        ctypes.byref(p_num_results))
+        return self._get_ids(it, p_num_results.value)
+
+    def _intersection_obj(self, coordinates, objects):
+        """Object-returning intersection query; *objects* is True (wrap
+        each result in :class:`Item`) or 'raw' (unpickled objects only)."""
+
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+
+        p_num_results = ctypes.c_uint64(0)
+
+        # Receives a C-allocated array of opaque item handles.
+        it = ctypes.pointer(ctypes.c_void_p())
+
+        core.rt.Index_Intersects_obj(   self.handle,
+                                        p_mins,
+                                        p_maxs,
+                                        self.properties.dimension,
+                                        ctypes.byref(it),
+                                        ctypes.byref(p_num_results))
+        return self._get_objects(it, p_num_results.value, objects)
+
+    def _get_objects(self, it, num_results, objects):
+        # take the pointer, yield the result objects and free
+        # Generator: casts the opaque result pointer to a fixed-size array
+        # of item handles, yields each one (wrapped or raw), and hands the
+        # allocation back to the C library when done.
+        items = ctypes.cast(it, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p * num_results)))
+        its = ctypes.cast(items, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)))
+
+        try:
+            if objects != 'raw':
+                for i in range(num_results):
+                    yield Item(self.loads, items[i])
+            else:
+                for i in range(num_results):
+                    data = _get_data(items[i])
+                    if data is None:
+                        yield data
+                    else:
+                        yield self.loads(data)
+
+            core.rt.Index_DestroyObjResults(its, num_results)
+        except: # need to catch all exceptions, not just rtree.
+            # Also fires on GeneratorExit when the consumer abandons the
+            # generator early — free the C results before propagating.
+            core.rt.Index_DestroyObjResults(its, num_results)
+            raise
+
+    def _get_ids(self, it, num_results):
+        # take the pointer, yield the results  and free
+        # Cast the raw int64 pointer to a fixed-size array so the ids can
+        # be read out, then return the allocation to the C library.
+        items = ctypes.cast(it, ctypes.POINTER(ctypes.c_int64 * num_results))
+        its = ctypes.cast(items, ctypes.POINTER(ctypes.c_void_p))
+
+        try:
+            for i in range(num_results):
+                yield items.contents[i]
+            core.rt.Index_Free(its)
+        except:
+            # Free on any error (including GeneratorExit from an early
+            # abandoned generator) before re-raising.
+            core.rt.Index_Free(its)
+            raise
+
+    def _nearest_obj(self, coordinates, num_results, objects):
+        """Object-returning k-nearest query; see :meth:`nearest` for the
+        meaning of the parameters."""
+
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+
+        # In/out parameter: the C library may raise the count when ties
+        # at the same distance are returned.
+        p_num_results = ctypes.pointer(ctypes.c_uint64(num_results))
+
+        it = ctypes.pointer(ctypes.c_void_p())
+
+        core.rt.Index_NearestNeighbors_obj( self.handle,
+                                            p_mins,
+                                            p_maxs,
+                                            self.properties.dimension,
+                                            ctypes.byref(it),
+                                            p_num_results)
+
+        return self._get_objects(it, p_num_results.contents.value, objects)
+
+    def nearest(self, coordinates, num_results=1, objects=False):
+        """Returns the ``k``-nearest objects to the given coordinates.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        :param num_results: integer
+            The number of results to return nearest to the given coordinates.
+            If two index entries are equidistant, *both* are returned.
+            This property means that :attr:`num_results` may return more
+            items than specified
+
+        :param objects: True / False / 'raw'
+            If True, the nearest method will return index objects that
+            were pickled when they were stored with each index entry, as
+            well as the id and bounds of the index entries.
+            If 'raw', it will return the object as entered into the database
+            without the :class:`rtree.index.Item` wrapper.
+
+        Example of finding the three items nearest to this one::
+
+            >>> from rtree import index
+            >>> idx = index.Index()
+            >>> idx.insert(4321, (34.37, 26.73, 49.37, 41.73), obj=42)
+            >>> hits = idx.nearest((0, 0, 10, 10), 3, objects=True)
+        """
+        if objects: return self._nearest_obj(coordinates, num_results, objects)
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+
+        # In/out parameter: the C library may raise the count to include
+        # equidistant ties (see the docstring above).
+        p_num_results = ctypes.pointer(ctypes.c_uint64(num_results))
+
+        it = ctypes.pointer(ctypes.c_int64())
+
+        core.rt.Index_NearestNeighbors_id(  self.handle,
+                                            p_mins,
+                                            p_maxs,
+                                            self.properties.dimension,
+                                            ctypes.byref(it),
+                                            p_num_results)
+
+        return self._get_ids(it, p_num_results.contents.value)
+
+    def get_bounds(self, coordinate_interleaved=None):
+        """Returns the bounds of the index
+
+        :param coordinate_interleaved: If True, the coordinates are returned
+            in the form [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax],
+            otherwise they are returned as
+            [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax].  If not specified,
+            the :attr:`interleaved` member of the index is used, which
+            defaults to True.
+
+        """
+        if coordinate_interleaved is None:
+            coordinate_interleaved = self.interleaved
+        return _get_bounds(self.handle, core.rt.Index_GetBounds, coordinate_interleaved)
+    bounds = property(get_bounds)
+
+    def delete(self, id, coordinates):
+        """Deletes items from the index with the given ``'id'`` within the
+        specified coordinates.
+
+        :param id: long integer
+            A long integer that is the identifier for this index entry.  IDs
+            need not be unique to be inserted into the index, and it is up
+            to the user to ensure they are unique if this is a requirement.
+
+        :param coordinates: sequence or array
+            Dimension * 2 coordinate pairs, representing the min
+            and max coordinates in each dimension of the item to be
+            deleted from the index. Their ordering will depend on the
+            index's :attr:`interleaved` data member.
+            These are not the coordinates of a space containing the
+            item, but those of the item itself. Together with the
+            id parameter, they determine which item will be deleted.
+            This may be an object that satisfies the numpy array protocol.
+
+        Example::
+
+            >>> from rtree import index
+            >>> idx = index.Index()
+            >>> idx.delete(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734) )
+
+        """
+        # Both the id and the exact insertion bounds identify the entry.
+        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
+        core.rt.Index_DeleteData(self.handle, id, p_mins, p_maxs, self.properties.dimension)
+
+    def valid(self):
+        """Return the result of libspatialindex's index validity check."""
+        return bool(core.rt.Index_IsValid(self.handle))
+
+    def clearBuffer(self):
+        """Delegate to ``Index_ClearBuffer`` in the C library."""
+        return core.rt.Index_ClearBuffer(self.handle)
+
+    @classmethod
+    def deinterleave(self, interleaved):
+        """
+        [xmin, ymin, xmax, ymax] => [xmin, xmax, ymin, ymax]
+
+        >>> Index.deinterleave([0, 10, 1, 11])
+        [0, 1, 10, 11]
+
+        >>> Index.deinterleave([0, 1, 2, 10, 11, 12])
+        [0, 10, 1, 11, 2, 12]
+
+        """
+        assert len(interleaved) % 2 == 0, ("must be a pairwise list")
+        dimension = len(interleaved) // 2
+        di = []
+        for i in range(dimension):
+            di.extend([interleaved[i], interleaved[i + dimension]])
+        return di
+
+    @classmethod
+    def interleave(self, deinterleaved):
+        """
+        [xmin, xmax, ymin, ymax, zmin, zmax] => [xmin, ymin, zmin, xmax, ymax, zmax]
+
+        >>> Index.interleave([0, 1, 10, 11])
+        [0, 10, 1, 11]
+
+        >>> Index.interleave([0, 10, 1, 11, 2, 12])
+        [0, 1, 2, 10, 11, 12]
+
+        >>> Index.interleave((-1, 1, 58, 62, 22, 24))
+        [-1, 58, 22, 1, 62, 24]
+
+        """
+        assert len(deinterleaved) % 2 == 0, ("must be a pairwise list")
+        dimension = len(deinterleaved) / 2
+        interleaved = []
+        for i in range(2):
+            interleaved.extend([deinterleaved[i + j] \
+                                for j in range(0, len(deinterleaved), 2)])
+        return interleaved
+
+    def _create_idx_from_stream(self, stream):
+        """This function is used to instantiate the index given an
+        iterable stream of data.  """
+
+        stream_iter = iter(stream)
+        dimension = self.properties.dimension
+        darray = ctypes.c_double * dimension
+        # Reusable scratch arrays; the callback refills them per entry.
+        mins = darray()
+        maxs = darray()
+        no_data = ctypes.cast(ctypes.pointer(ctypes.c_ubyte(0)),
+                              ctypes.POINTER(ctypes.c_ubyte))
+
+        def py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length):
+            """This function must fill pointers to individual entries that will
+            be added to the index.  The C API will actually call this function
+            to fill out the pointers.  If this function returns anything other
+            than 0, it is assumed that the stream of data is done."""
+
+            try:
+                p_id[0], coordinates, obj = next(stream_iter)
+            except StopIteration:
+               # we're done
+               return -1
+
+            # set the id
+            if self.interleaved:
+                coordinates = Index.deinterleave(coordinates)
+
+            # this code assumes the coords are not interleaved:
+            # xmin, xmax, ymin, ymax, zmin, zmax
+            for i in range(dimension):
+                mins[i] = coordinates[i*2]
+                maxs[i] = coordinates[(i*2)+1]
+
+            p_mins[0] = ctypes.cast(mins, ctypes.POINTER(ctypes.c_double))
+            p_maxs[0] = ctypes.cast(maxs, ctypes.POINTER(ctypes.c_double))
+
+            # set the dimension
+            p_dimension[0] = dimension
+            if obj is None:
+                p_data[0] = no_data
+                p_length[0] = 0
+            else:
+                # NOTE(review): the Python bytes returned by _serialize are
+                # discarded here; the cast pointer appears to keep the
+                # underlying ctypes buffer referenced — confirm lifetime.
+                p_length[0], data, _ = self._serialize(obj)
+                p_data[0] = ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))
+
+            return 0
+
+
+        stream = core.NEXTFUNC(py_next_item)
+        return core.rt.Index_CreateWithStream(self.properties.handle, stream)
+
+    def leaves(self):
+        """Return ``[(leaf_id, child_ids, bounds), ...]`` — one tuple per
+        leaf node of the index — freeing every C-allocated array as the
+        result is assembled."""
+        leaf_node_count = ctypes.c_uint32()
+        p_leafsizes = ctypes.pointer(ctypes.c_uint32())
+        p_leafids  = ctypes.pointer(ctypes.c_int64())
+        pp_childids = ctypes.pointer(ctypes.pointer(ctypes.c_int64()))
+
+        pp_mins = ctypes.pointer(ctypes.pointer(ctypes.c_double()))
+        pp_maxs = ctypes.pointer(ctypes.pointer(ctypes.c_double()))
+        dimension = ctypes.c_uint32(0)
+
+
+        core.rt.Index_GetLeaves(   self.handle,
+                                ctypes.byref(leaf_node_count),
+                                ctypes.byref(p_leafsizes),
+                                ctypes.byref(p_leafids),
+                                ctypes.byref(pp_childids),
+                                ctypes.byref(pp_mins),
+                                ctypes.byref(pp_maxs),
+                                ctypes.byref(dimension)
+                            )
+
+        output = []
+
+        # Re-cast the raw output pointers into fixed-size arrays now that
+        # the leaf count is known.
+        count = leaf_node_count.value
+        sizes = ctypes.cast(p_leafsizes, ctypes.POINTER(ctypes.c_uint32 * count))
+        ids = ctypes.cast(p_leafids, ctypes.POINTER(ctypes.c_int64 * count))
+        child =  ctypes.cast(pp_childids, ctypes.POINTER(ctypes.POINTER(ctypes.c_int64) * count))
+        mins =  ctypes.cast(pp_mins, ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count))
+        maxs =  ctypes.cast(pp_maxs, ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count))
+        for i in range(count):
+            p_child_ids = child.contents[i]
+
+            id = ids.contents[i]
+            size = sizes.contents[i]
+            child_ids_array =  ctypes.cast(p_child_ids, ctypes.POINTER(ctypes.c_int64 * size))
+
+            child_ids = []
+            for j in range(size):
+                child_ids.append(child_ids_array.contents[j])
+
+            # free the child ids list
+            core.rt.Index_Free(ctypes.cast(p_child_ids, ctypes.POINTER(ctypes.c_void_p)))
+
+            p_mins = mins.contents[i]
+            p_maxs = maxs.contents[i]
+
+            p_mins = ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_double * dimension.value))
+            p_maxs = ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_double * dimension.value))
+
+            bounds = []
+            bounds = [p_mins.contents[i] for i in range(dimension.value)]
+            bounds += [p_maxs.contents[i] for i in range(dimension.value)]
+
+            # free the bounds
+            p_mins = ctypes.cast(p_mins,ctypes.POINTER(ctypes.c_double))
+            p_maxs = ctypes.cast(p_maxs,ctypes.POINTER(ctypes.c_double))
+            core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
+            core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))
+
+            output.append((id, child_ids, bounds))
+
+        return output
+
+# An alias to preserve backward compatibility for callers that still
+# import the older ``Rtree`` name.
+Rtree = Index
+
+class Item(object):
+    """A container for index entries"""
+    __slots__ = ('handle', 'owned', 'id', 'object', 'bounds')
+    def __init__(self, loads, handle, owned=False):
+        """There should be no reason to instantiate these yourself. Items are
+        created automatically when you call
+        :meth:`rtree.index.Index.intersection` (or other index querying
+        methods) with objects=True given the parameters of the function."""
+
+        if handle:
+            self.handle = handle
+
+        # NOTE(review): when ``handle`` is falsy, ``self.handle`` is never
+        # assigned and the GetID call below raises AttributeError; the
+        # query methods appear to always supply a valid handle — confirm.
+        self.owned = owned
+
+        self.id = core.rt.IndexItem_GetID(self.handle)
+
+        # Eagerly unpickle the stored object at construction time.
+        self.object = None
+        self.object = self.get_object(loads)
+        self.bounds = _get_bounds(self.handle, core.rt.IndexItem_GetBounds, False)
+
+    @property
+    def bbox(self):
+        """Returns the bounding box of the index entry"""
+        return Index.interleave(self.bounds)
+
+    def get_object(self, loads):
+        # short circuit this so we only do it at construction time
+        if self.object is not None: return self.object
+        data = _get_data(self.handle)
+        if data is None: return None
+        return loads(data)
+
+class Property(object):
+    """An index property object is a container that contains a number of
+    settable index properties.  Many of these properties must be set at
+    index creation times, while others can be used to adjust performance
+    or behavior."""
+
+    # Names of every readable property; as_dict() iterates this tuple to
+    # snapshot the current settings.
+    pkeys = (
+        'buffering_capacity', 'custom_storage_callbacks',
+        'custom_storage_callbacks_size', 'dat_extension', 'dimension',
+        'filename', 'fill_factor', 'idx_extension', 'index_capacity',
+        'index_id', 'leaf_capacity', 'near_minimum_overlap_factor',
+        'overwrite', 'pagesize', 'point_pool_capacity',
+        'region_pool_capacity', 'reinsert_factor',
+        'split_distribution_factor', 'storage', 'tight_mbr', 'tpr_horizon',
+        'type', 'variant', 'writethrough' )
+
+    def __init__(self, handle=None, owned=True, **kwargs):
+        if handle:
+            self.handle = handle
+        else:
+            self.handle = core.rt.IndexProperty_Create()
+        self.owned = owned
+        for k, v in list(kwargs.items()):
+            if v is not None:
+                setattr(self, k, v)
+
+    def __del__(self):
+        """Destroy the C property handle if owned, tolerating interpreter
+        shutdown where the dll binding may already be unloaded."""
+        if self.owned:
+            if self.handle and core:
+                try:
+                    core.rt
+                except AttributeError:
+                    # uh, leak?  We're owned, and have a handle
+                    # but for some reason the dll isn't active
+                    return
+                core.rt.IndexProperty_Destroy(self.handle)
+
+    def as_dict(self):
+        d = {}
+        for k in self.pkeys:
+            try:
+                v = getattr(self, k)
+            except core.RTreeError:
+                v = None
+            d[k] = v
+        return d
+
+    def __repr__(self):
+        return repr(self.as_dict())
+
+    def __str__(self):
+        return pprint.pformat(self.as_dict())
+
+    # Thin property wrappers around the libspatialindex IndexProperty C API.
+    def get_index_type(self):
+        return core.rt.IndexProperty_GetIndexType(self.handle)
+    def set_index_type(self, value):
+        return core.rt.IndexProperty_SetIndexType(self.handle, value)
+
+    type = property(get_index_type, set_index_type)
+    """Index type. Valid index type values are
+        :data:`RT_RTree`, :data:`RT_MVTree`, or :data:`RT_TPRTree`.  Only
+        RT_RTree (the default) is practically supported at this time."""
+
+    def get_variant(self):
+        return core.rt.IndexProperty_GetIndexVariant(self.handle)
+    def set_variant(self, value):
+        return core.rt.IndexProperty_SetIndexVariant(self.handle, value)
+
+    variant = property(get_variant, set_variant)
+    """Index variant.  Valid index variant values are
+    :data:`RT_Linear`, :data:`RT_Quadratic`, and :data:`RT_Star`"""
+
+    def get_dimension(self):
+        return core.rt.IndexProperty_GetDimension(self.handle)
+    def set_dimension(self, value):
+        if (value <= 0):
+            raise core.RTreeError("Negative or 0 dimensional indexes are not allowed")
+        return core.rt.IndexProperty_SetDimension(self.handle, value)
+
+    dimension = property(get_dimension, set_dimension)
+    """Index dimension.  Must be greater than 0, though a dimension of 1 might
+    have undefined behavior."""
+
+    # Storage backend selection; the Index constructor chooses the value
+    # based on whether a filename or a CustomStorage instance was given.
+    def get_storage(self):
+        return core.rt.IndexProperty_GetIndexStorage(self.handle)
+    def set_storage(self, value):
+        return core.rt.IndexProperty_SetIndexStorage(self.handle, value)
+
+    storage = property(get_storage, set_storage)
+    """Index storage. One of :data:`RT_Disk`, :data:`RT_Memory` or :data:`RT_Custom`.
+    If a filename is passed as the first parameter to :class:index.Index, :data:`RT_Disk`
+    is assumed. If a CustomStorage instance is passed, :data:`RT_Custom` is assumed.
+    Otherwise, :data:`RT_Memory` is the default."""
+
+    def get_pagesize(self):
+        return core.rt.IndexProperty_GetPagesize(self.handle)
+    def set_pagesize(self, value):
+        if (value <= 0):
+            raise core.RTreeError("Pagesize must be > 0")
+        return core.rt.IndexProperty_SetPagesize(self.handle, value)
+
+    pagesize = property(get_pagesize, set_pagesize)
+    """The pagesize when disk storage is used.  It is ideal to ensure that your
+    index entries fit within a single page for best performance.  """
+
+    # Capacity and pool-size settings below share the same validated-setter
+    # pattern: the value must be strictly positive.
+    def get_index_capacity(self):
+        return core.rt.IndexProperty_GetIndexCapacity(self.handle)
+    def set_index_capacity(self, value):
+        if (value <= 0):
+            raise core.RTreeError("index_capacity must be > 0")
+        return core.rt.IndexProperty_SetIndexCapacity(self.handle, value)
+
+    index_capacity = property(get_index_capacity, set_index_capacity)
+    """Index capacity"""
+
+    def get_leaf_capacity(self):
+        return core.rt.IndexProperty_GetLeafCapacity(self.handle)
+    def set_leaf_capacity(self, value):
+        if (value <= 0):
+            raise core.RTreeError("leaf_capacity must be > 0")
+        return core.rt.IndexProperty_SetLeafCapacity(self.handle, value)
+
+    leaf_capacity = property(get_leaf_capacity, set_leaf_capacity)
+    """Leaf capacity"""
+
+    def get_index_pool_capacity(self):
+        return core.rt.IndexProperty_GetIndexPoolCapacity(self.handle)
+    def set_index_pool_capacity(self, value):
+        if (value <= 0):
+            raise core.RTreeError("index_pool_capacity must be > 0")
+        return core.rt.IndexProperty_SetIndexPoolCapacity(self.handle, value)
+
+    index_pool_capacity = property(get_index_pool_capacity, set_index_pool_capacity)
+    """Index pool capacity"""
+
+    def get_point_pool_capacity(self):
+        return core.rt.IndexProperty_GetPointPoolCapacity(self.handle)
+    def set_point_pool_capacity(self, value):
+        if (value <= 0):
+            raise core.RTreeError("point_pool_capacity must be > 0")
+        return core.rt.IndexProperty_SetPointPoolCapacity(self.handle, value)
+
+    point_pool_capacity = property(get_point_pool_capacity, set_point_pool_capacity)
+    """Point pool capacity"""
+
+    def get_region_pool_capacity(self):
+        return core.rt.IndexProperty_GetRegionPoolCapacity(self.handle)
+    def set_region_pool_capacity(self, value):
+        if (value <= 0):
+            raise core.RTreeError("region_pool_capacity must be > 0")
+        return core.rt.IndexProperty_SetRegionPoolCapacity(self.handle, value)
+
+    region_pool_capacity = property(get_region_pool_capacity, set_region_pool_capacity)
+    """Region pool capacity"""
+
+    def get_buffering_capacity(self):
+        return core.rt.IndexProperty_GetBufferingCapacity(self.handle)
+    def set_buffering_capacity(self, value):
+        if (value <= 0):
+            raise core.RTreeError("buffering_capacity must be > 0")
+        return core.rt.IndexProperty_SetBufferingCapacity(self.handle, value)
+
+    buffering_capacity = property(get_buffering_capacity, set_buffering_capacity)
+    """Buffering capacity"""
+
+    # Boolean flags are normalized with bool() on both get and set.
+    def get_tight_mbr(self):
+        return bool(core.rt.IndexProperty_GetEnsureTightMBRs(self.handle))
+    def set_tight_mbr(self, value):
+        value = bool(value)
+        return bool(core.rt.IndexProperty_SetEnsureTightMBRs(self.handle, value))
+
+    tight_mbr = property(get_tight_mbr, set_tight_mbr)
+    """Uses tight bounding rectangles"""
+
+    def get_overwrite(self):
+        return bool(core.rt.IndexProperty_GetOverwrite(self.handle))
+    def set_overwrite(self, value):
+        value = bool(value)
+        return bool(core.rt.IndexProperty_SetOverwrite(self.handle, value))
+
+    overwrite = property(get_overwrite, set_overwrite)
+    """Overwrite existing index files"""
+
+    def get_near_minimum_overlap_factor(self):
+        return core.rt.IndexProperty_GetNearMinimumOverlapFactor(self.handle)
+    def set_near_minimum_overlap_factor(self, value):
+        if (value <= 0):
+            raise core.RTreeError("near_minimum_overlap_factor must be > 0")
+        return core.rt.IndexProperty_SetNearMinimumOverlapFactor(self.handle, value)
+
+    near_minimum_overlap_factor = property(get_near_minimum_overlap_factor, set_near_minimum_overlap_factor)
+    """Overlap factor for MVRTrees"""
+
+    def get_writethrough(self):
+        return bool(core.rt.IndexProperty_GetWriteThrough(self.handle))
+    def set_writethrough(self, value):
+        value = bool(value)
+        return bool(core.rt.IndexProperty_SetWriteThrough(self.handle, value))
+
+    writethrough = property(get_writethrough, set_writethrough)
+    """Write through caching"""
+
+    # Tuning factors passed straight through to the C API without
+    # Python-side validation.
+    def get_fill_factor(self):
+        return core.rt.IndexProperty_GetFillFactor(self.handle)
+    def set_fill_factor(self, value):
+        return core.rt.IndexProperty_SetFillFactor(self.handle, value)
+
+    fill_factor = property(get_fill_factor, set_fill_factor)
+    """Index node fill factor before branching"""
+
+    def get_split_distribution_factor(self):
+        return core.rt.IndexProperty_GetSplitDistributionFactor(self.handle)
+    def set_split_distribution_factor(self, value):
+        return core.rt.IndexProperty_SetSplitDistributionFactor(self.handle, value)
+
+    split_distribution_factor = property(get_split_distribution_factor, set_split_distribution_factor)
+    """Split distribution factor"""
+
+    def get_tpr_horizon(self):
+        return core.rt.IndexProperty_GetTPRHorizon(self.handle)
+    def set_tpr_horizon(self, value):
+        return core.rt.IndexProperty_SetTPRHorizon(self.handle, value)
+
+    tpr_horizon = property(get_tpr_horizon, set_tpr_horizon)
+    """TPR horizon"""
+
+    def get_reinsert_factor(self):
+        return core.rt.IndexProperty_GetReinsertFactor(self.handle)
+    def set_reinsert_factor(self, value):
+        return core.rt.IndexProperty_SetReinsertFactor(self.handle, value)
+
+    reinsert_factor = property(get_reinsert_factor, set_reinsert_factor)
+    """Reinsert factor"""
+
+    def get_filename(self):
+        return core.rt.IndexProperty_GetFileName(self.handle)
+    def set_filename(self, value):
+        v = value.encode('utf-8')
+        return core.rt.IndexProperty_SetFileName(self.handle, v)
+
+    filename = property(get_filename, set_filename)
+    """Index filename for disk storage"""
+
+    def get_dat_extension(self):
+        return core.rt.IndexProperty_GetFileNameExtensionDat(self.handle)
+    def set_dat_extension(self, value):
+        v = value.encode('utf-8')
+        return core.rt.IndexProperty_SetFileNameExtensionDat(self.handle, v)
+
+    dat_extension = property(get_dat_extension, set_dat_extension)
+    """Extension for .dat file"""
+
+    def get_idx_extension(self):
+        return core.rt.IndexProperty_GetFileNameExtensionIdx(self.handle)
+    def set_idx_extension(self, value):
+        v = value.encode('utf-8')
+        return core.rt.IndexProperty_SetFileNameExtensionIdx(self.handle, v)
+
+    idx_extension = property(get_idx_extension, set_idx_extension)
+    """Extension for .idx file"""
+
+    def get_custom_storage_callbacks_size(self):
+        return core.rt.IndexProperty_GetCustomStorageCallbacksSize(self.handle)
+    def set_custom_storage_callbacks_size(self, value):
+        return core.rt.IndexProperty_SetCustomStorageCallbacksSize(self.handle, value)
+
+    custom_storage_callbacks_size = property(get_custom_storage_callbacks_size, set_custom_storage_callbacks_size)
+    """Size of callbacks for custom storage"""
+
+    def get_custom_storage_callbacks(self):
+        return core.rt.IndexProperty_GetCustomStorageCallbacks(self.handle)
+    def set_custom_storage_callbacks(self, value):
+        return core.rt.IndexProperty_SetCustomStorageCallbacks(self.handle, value)
+
+    custom_storage_callbacks = property(get_custom_storage_callbacks, set_custom_storage_callbacks)
+    """Callbacks for custom storage"""
+
+    def get_index_id(self):
+        return core.rt.IndexProperty_GetIndexID(self.handle)
+    def set_index_id(self, value):
+        return core.rt.IndexProperty_SetIndexID(self.handle, value)
+
+    index_id = property(get_index_id, set_index_id)
+    """First node index id"""
+
+
+# custom storage implementation
+
+id_type = ctypes.c_int64
+
+class CustomStorageCallbacks(ctypes.Structure):
+    # callback types
+    createCallbackType  = ctypes.CFUNCTYPE(
+                            None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
+                          )
+    destroyCallbackType = ctypes.CFUNCTYPE(
+                            None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
+                          )
+    flushCallbackType = ctypes.CFUNCTYPE(
+                          None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
+                        )
+
+    loadCallbackType    = ctypes.CFUNCTYPE(
+                            None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_uint32),
+                            ctypes.POINTER(ctypes.POINTER(ctypes.c_uint8)), ctypes.POINTER(ctypes.c_int)
+                          )
+    storeCallbackType   = ctypes.CFUNCTYPE(
+                            None, ctypes.c_void_p, ctypes.POINTER(id_type), ctypes.c_uint32,
+                            ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.c_int)
+                          )
+    deleteCallbackType  = ctypes.CFUNCTYPE(
+                            None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_int)
+                          )
+
+    _fields_ = [ ('context', ctypes.c_void_p),
+                 ('createCallback', createCallbackType),
+                 ('destroyCallback', destroyCallbackType),
+                 ('flushCallback', flushCallbackType),
+                 ('loadCallback', loadCallbackType),
+                 ('storeCallback', storeCallbackType),
+                 ('deleteCallback', deleteCallbackType),
+               ]
+
+    def __init__(self, context, createCallback, destroyCallback, flushCallback, loadCallback, storeCallback, deleteCallback):
+        ctypes.Structure.__init__( self,
+                                   ctypes.c_void_p( context ),
+                                   self.createCallbackType( createCallback ),
+                                   self.destroyCallbackType( destroyCallback ),
+                                   self.flushCallbackType ( flushCallback ),
+                                   self.loadCallbackType  ( loadCallback ),
+                                   self.storeCallbackType ( storeCallback ),
+                                   self.deleteCallbackType( deleteCallback ),
+                                  )
+
+class ICustomStorage(object):
+    # error codes
+    NoError = 0
+    InvalidPageError = 1
+    IllegalStateError = 2
+
+    # special pages
+    EmptyPage = -0x1
+    NewPage = -0x1
+
+    def allocateBuffer(self, length):
+        return core.rt.SIDX_NewBuffer( length )
+
+    def registerCallbacks(self, properties):
+        raise NotImplementedError()
+
+    def clear(self):
+        raise NotImplementedError()
+
+    hasData = property( lambda self: False )
+    ''' Override this property to allow for reloadable storages '''
+
+
+class CustomStorageBase(ICustomStorage):
+    """ Derive from this class to create your own storage manager with access
+        to the raw C buffers.
+    """
+
+    def registerCallbacks(self, properties):
+        callbacks = CustomStorageCallbacks( ctypes.c_void_p(), self.create,
+                                            self.destroy, self.flush,
+                                            self.loadByteArray, self.storeByteArray,
+                                            self.deleteByteArray )
+        properties.custom_storage_callbacks_size = ctypes.sizeof( callbacks )
+        self.callbacks = callbacks
+        properties.custom_storage_callbacks      = ctypes.cast( ctypes.pointer(callbacks), ctypes.c_void_p )
+
+    # the user must override these callback functions
+    def create(self, context, returnError):
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def destroy(self, context, returnError):
+        """ please override """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def loadByteArray(self, context, page, resultLen, resultData, returnError):
+        """ please override """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def storeByteArray(self, context, page, len, data, returnError):
+        """ please override """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def deleteByteArray(self, context, page, returnError):
+        """ please override """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def flush(self, context, returnError):
+        """ please override """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+
+class CustomStorage(ICustomStorage):
+    """ Provides a useful default custom storage implementation which marshals
+        the buffers on the C side from/to python strings.
+        Derive from this class and override the necessary methods to provide
+        your own custom storage manager.
+    """
+
+    def registerCallbacks(self, properties):
+        callbacks = CustomStorageCallbacks( 0, self._create, self._destroy, self._flush, self._loadByteArray,
+                                               self._storeByteArray, self._deleteByteArray )
+        properties.custom_storage_callbacks_size = ctypes.sizeof( callbacks )
+        self.callbacks = callbacks
+        properties.custom_storage_callbacks      = ctypes.cast( ctypes.pointer(callbacks), ctypes.c_void_p )
+
+    # these functions handle the C callbacks and massage the data, then delegate
+    #  to the function without underscore below
+    def _create(self, context, returnError):
+        self.create( returnError )
+
+    def _destroy(self, context, returnError):
+        self.destroy( returnError )
+
+    def _flush(self, context, returnError):
+        self.flush( returnError )
+
+    def _loadByteArray(self, context, page, resultLen, resultData, returnError):
+        resultString = self.loadByteArray( page, returnError )
+        if returnError.contents.value != self.NoError:
+            return
+        # Copy python string over into a buffer allocated on the C side.
+        #  The buffer will later be freed by the C side. This prevents
+        #  possible heap corruption issues as buffers allocated by ctypes
+        #  and the c library might be allocated on different heaps.
+        # Freeing a buffer allocated on another heap might make the application
+        #  crash.
+        count = len(resultString)
+        resultLen.contents.value = count
+        buffer = self.allocateBuffer( count )
+        ctypes.memmove( buffer, ctypes.c_char_p(resultString), count )
+        resultData[0] = ctypes.cast( buffer, ctypes.POINTER(ctypes.c_uint8) )
+
+    def _storeByteArray(self, context, page, len, data, returnError):
+        str = ctypes.string_at( data, len )
+        newPageId = self.storeByteArray( page.contents.value, str, returnError )
+        page.contents.value = newPageId
+
+    def _deleteByteArray(self, context, page, returnError):
+        self.deleteByteArray( page, returnError )
+
+
+    # the user must override these callback functions
+    def create(self, returnError):
+        """ Must be overridden. No return value. """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def destroy(self, returnError):
+        """ Must be overridden. No return value. """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def flush(self, returnError):
+        """ Must be overridden. No return value. """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+
+    def loadByteArray(self, page, returnError):
+        """ Must be overridden. Must return a string with the loaded data. """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+        return ''
+
+    def storeByteArray(self, page, data, returnError):
+        """ Must be overridden. Must return the new 64-bit page ID of the stored
+            data if a new page had to be created (i.e. page is not NewPage).
+        """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
+        return 0
+
+    def deleteByteArray(self, page, returnError):
+        """ please override """
+        returnError.contents.value = self.IllegalStateError
+        raise NotImplementedError( "You must override this method." )
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..b6a2e65
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,10 @@
+[build_ext]
+define = 
+include-dirs = /usr/local/include:/usr/local/include/spatialindex
+library-dirs = /usr/local/lib
+
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..eb31397
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,47 @@
+from glob import glob
+from setuptools import setup
+
+import rtree
+
+# Get text from README.txt
+readme_text = open('docs/source/README.txt', 'r').read()
+
+import os
+
+if os.name == 'nt':
+    data_files=[('Lib/site-packages/rtree',
+                 [r'D:\libspatialindex\bin\spatialindex.dll',
+                  r'D:\libspatialindex\bin\spatialindex_c.dll',]),]
+else:
+    data_files = None
+    
+setup(name          = 'Rtree',
+      version       = rtree.__version__,
+      description   = 'R-Tree spatial index for Python GIS',
+      license       = 'LGPL',
+      keywords      = 'gis spatial index r-tree',
+      author        = 'Sean Gillies',
+      author_email  = 'sean.gillies at gmail.com',
+      maintainer        = 'Howard Butler',
+      maintainer_email  = 'hobu at hobu.net',
+      url   = 'http://toblerity.github.com/rtree/',
+      long_description = readme_text,
+      packages      = ['rtree'],
+      install_requires = ['setuptools'],
+      test_suite = 'tests.test_suite',
+      data_files = data_files,
+      zip_safe = False,
+      classifiers   = [
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'Intended Audience :: Science/Research',
+        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+        'Operating System :: OS Independent',
+        'Programming Language :: C',
+        'Programming Language :: C++',
+        'Programming Language :: Python',
+        'Topic :: Scientific/Engineering :: GIS',
+        'Topic :: Database',
+        ],
+)
+
diff --git a/tests/BoundsCheck.txt b/tests/BoundsCheck.txt
new file mode 100644
index 0000000..b068e2c
--- /dev/null
+++ b/tests/BoundsCheck.txt
@@ -0,0 +1,26 @@
+Bounding Box Checking
+=====================
+
+See http://trac.gispython.org/projects/PCL/ticket/127.
+
+Adding with bogus bounds
+------------------------
+
+  >>> import rtree
+  >>> index = rtree.Rtree()
+  >>> index.add(1, (0.0, 0.0, -1.0, 1.0))
+  Traceback (most recent call last):
+  ...
+  RTreeError: Coordinates must not have minimums more than maximums
+  
+  >>> index.intersection((0.0, 0.0, -1.0, 1.0))
+  Traceback (most recent call last):
+  ...
+  RTreeError: Coordinates must not have minimums more than maximums
+  
+Adding with invalid bounds argument should raise an exception
+
+  >>> index.add(1, 1)
+  Traceback (most recent call last):
+  ...
+  TypeError: Bounds must be a sequence
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..7f6a095
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,2 @@
+# package
+from .test_doctests import test_suite
diff --git a/tests/benchmarks.py b/tests/benchmarks.py
new file mode 100644
index 0000000..d01ca56
--- /dev/null
+++ b/tests/benchmarks.py
@@ -0,0 +1,152 @@
+# hobu's latest results on his 2006-era machine
+
+# Stream load:
+# 293710.04 usec/pass
+# 
+# One-at-a-time load:
+# 527883.95 usec/pass
+# 
+# 
+# 30000 points
+# Query box:  (1240000, 1010000, 1400000, 1390000)
+# 
+# 
+# Brute Force:
+# 46 hits
+# 13533.60 usec/pass
+# 
+# Memory-based Rtree Intersection:
+# 46 hits
+# 7516.19 usec/pass
+# 
+# Disk-based Rtree Intersection:
+# 46 hits
+# 7543.00 usec/pass
+# 
+# Disk-based Rtree Intersection without Item() wrapper (objects='raw'):
+# 46 raw hits
+# 347.60 usec/pass
+
+import random
+import timeit
+
+try:
+    import pkg_resources
+    pkg_resources.require('Rtree')
+except:
+    pass
+
+from rtree import Rtree as _Rtree
+
+TEST_TIMES = 20
+
+# a very basic Geometry
+class Point(object):
+    def __init__(self, x, y):
+        self.x = x
+        self.y = y
+
+# Scatter points randomly in a 1x1 box
+# 
+
+class Rtree(_Rtree):
+    pickle_protocol = -1
+
+bounds = (0, 0, 6000000, 6000000)
+count = 30000
+points = []
+
+insert_object = None
+insert_object = {'a': list(range(100)), 'b': 10, 'c': object(), 'd': dict(x=1), 'e': Point(2, 3)}
+
+index = Rtree()
+disk_index = Rtree('test', overwrite=1)
+
+coordinates = []
+for i in range(count):
+    x = random.randrange(bounds[0], bounds[2]) + random.random()
+    y = random.randrange(bounds[1], bounds[3]) + random.random()
+    point = Point(x, y)
+    points.append(point)
+
+    index.add(i, (x, y), insert_object)
+    disk_index.add(i, (x, y), insert_object)
+    coordinates.append((i, (x, y, x, y), insert_object))
+
+s ="""
+bulk = Rtree(coordinates[:2000])
+"""
+t = timeit.Timer(stmt=s, setup='from __main__ import coordinates, Rtree, insert_object')
+print("\nStream load:")
+print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+
+s ="""
+idx = Rtree()
+i = 0
+for point in points[:2000]:
+    idx.add(i, (point.x, point.y), insert_object)
+    i+=1
+"""
+t = timeit.Timer(stmt=s, setup='from __main__ import points, Rtree, insert_object')
+print("\nOne-at-a-time load:")
+print("%.2f usec/pass\n\n" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+
+
+bbox = (1240000, 1010000, 1400000, 1390000)
+print(count, "points")
+print("Query box: ", bbox)
+print("")
+
+# Brute force all points within a 0.1x0.1 box
+s = """
+hits = [p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]]
+"""
+t = timeit.Timer(stmt=s, setup='from __main__ import points, bbox')
+print("\nBrute Force:")
+print(len([p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]]), "hits")
+print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+
+# 0.1x0.1 box using intersection
+
+if insert_object is None:
+    s = """
+    hits = [points[id] for id in index.intersection(bbox)]
+    """
+else:
+    s = """
+    hits = [p.object for p in index.intersection(bbox, objects=insert_object)]
+    """
+
+t = timeit.Timer(stmt=s, setup='from __main__ import points, index, bbox, insert_object')
+print("\nMemory-based Rtree Intersection:")
+print(len([points[id] for id in index.intersection(bbox)]), "hits")
+print("%.2f usec/pass" % (1000000 * t.timeit(number=100)/100))
+
+
+# run same test on disk_index.
+s = s.replace("index.", "disk_index.")
+
+t = timeit.Timer(stmt=s, setup='from __main__ import points, disk_index, bbox, insert_object')
+print("\nDisk-based Rtree Intersection:")
+hits = list(disk_index.intersection(bbox))
+print(len(hits), "hits")
+print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+
+
+if insert_object:
+    s = """
+        hits = disk_index.intersection(bbox, objects="raw")
+        """
+    t = timeit.Timer(stmt=s, setup='from __main__ import points, disk_index, bbox, insert_object')
+    print("\nDisk-based Rtree Intersection without Item() wrapper (objects='raw'):")
+    result = list(disk_index.intersection(bbox, objects="raw"))
+    print(len(result), "raw hits")
+    print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+    assert 'a' in result[0], result[0]
+
+import os
+try:
+    os.remove('test.dat')
+    os.remove('test.idx')
+except:
+    pass
diff --git a/tests/boxes_15x15.data b/tests/boxes_15x15.data
new file mode 100644
index 0000000..d0994d5
--- /dev/null
+++ b/tests/boxes_15x15.data
@@ -0,0 +1,100 @@
+34.3776829412 26.7375853734 49.3776829412 41.7375853734
+-51.7912278527 56.5716384064 -36.7912278527 71.5716384064
+-132.417278478 -96.7177218184 -117.417278478 -81.7177218184
+19.9788779448 -53.1068061438 34.9788779448 -38.1068061438
+50.9432853241 53.830194296 65.9432853241 68.830194296
+114.777310066 -42.0534139041 129.777310066 -27.0534139041
+-80.5201136918 -60.5173650142 -65.5201136918 -45.5173650142
+-109.709042971 -88.8853631128 -94.7090429709 -73.8853631128
+163.797701593 49.0535662325 178.797701593 64.0535662325
+119.52474488 -47.8047995045 134.52474488 -32.8047995045
+-49.6358346107 25.7591536504 -34.6358346107 40.7591536504
+43.1951329802 -61.7003551556 58.1951329802 -46.7003551556
+5.07182469992 -32.9621617938 20.0718246999 -17.9621617938
+157.392784956 -59.9967638674 172.392784956 -44.9967638674
+169.761387556 77.3118040104 184.761387556 92.3118040104
+-90.9030625259 23.7969275036 -75.9030625259 38.7969275036
+13.3161023563 35.5651016032 28.3161023563 50.5651016032
+-71.4124633746 -27.8098115487 -56.4124633746 -12.8098115487
+-101.490578923 40.5161619529 -86.4905789231 55.5161619529
+-22.5493804457 -9.48190527182 -7.54938044566 5.51809472818
+22.7819453953 81.6043699778 37.7819453953 96.6043699778
+163.851232856 52.6576397095 178.851232856 67.6576397095
+8.7520267341 -82.9532179134 23.7520267341 -67.9532179134
+-25.1295517688 -52.9753074372 -10.1295517688 -37.9753074372
+125.380855923 53.093317371 140.380855923 68.093317371
+-79.9963004315 -8.58901526761 -64.9963004315 6.41098473239
+-3.49476632412 -93.5592177527 11.5052336759 -78.5592177527
+5.12311663372 38.9766284779 20.1231166337 53.9766284779
+-126.802193031 72.7620993955 -111.802193031 87.7620993955
+144.816733092 33.8296664631 159.816733092 48.8296664631
+-124.187243051 30.4856075292 -109.187243051 45.4856075292
+63.8011147852 -64.8232471563 78.8011147852 -49.8232471563
+125.091625278 10.0243913301 140.091625278 25.0243913301
+-79.6265618345 37.4238531184 -64.6265618345 52.4238531184
+84.0917344559 -61.9889564492 99.0917344559 -46.9889564492
+44.1303873224 36.9948838398 59.1303873224 51.9948838398
+57.579189376 -44.3308895399 72.579189376 -29.3308895399
+-135.915887605 -68.4604833795 -120.915887605 -53.4604833795
+-52.5931165731 -83.132095062 -37.5931165731 -68.132095062
+-3.66134703734 -24.6160151663 11.3386529627 -9.61601516627
+50.9138603775 6.66349450637 65.9138603775 21.6634945064
+-59.0308862561 -28.7050068456 -44.0308862561 -13.7050068456
+51.6601755093 -32.4794848001 66.6601755093 -17.4794848001
+-174.739939684 35.8453347176 -159.739939684 50.8453347176
+-107.905359545 -33.9905804035 -92.9053595447 -18.9905804035
+-43.8298865873 -38.8139629115 -28.8298865873 -23.8139629115
+-186.673789279 15.8707951216 -171.673789279 30.8707951216
+13.0878151873 18.9267257542 28.0878151873 33.9267257542
+-19.7764534411 -15.1648038653 -4.7764534411 -0.16480386529
+-136.725385806 -62.3357813894 -121.725385806 -47.3357813894
+56.3180682679 27.7748493606 71.3180682679 42.7748493606
+-117.234207271 -95.984091959 -102.234207271 -80.984091959
+-112.676334783 69.8614225716 -97.6763347829 84.8614225716
+63.4481415226 49.5185084111 78.4481415226 64.5185084111
+-164.583933393 -24.3224792074 -149.583933393 -9.32247920738
+29.8740632141 -94.4036564677 44.8740632141 -79.4036564677
+111.222002785 27.3091348937 126.222002785 42.3091348937
+153.388416036 -51.7982686059 168.388416036 -36.7982686059
+101.187835391 -79.2096166175 116.187835391 -64.2096166175
+88.5716895369 -0.592196575665 103.571689537 14.4078034243
+121.697565289 -20.4740930579 136.697565289 -5.47409305786
+-57.6430699458 32.6596016791 -42.6430699458 47.6596016791
+-51.9988160106 -16.5263906642 -36.9988160106 -1.52639066423
+-128.45654531 40.0833021378 -113.45654531 55.0833021378
+104.084274855 1.04302798395 119.084274855 16.0430279839
+-65.3078063084 52.8659272125 -50.3078063084 67.8659272125
+-185.575231871 0.603830128936 -170.575231871 15.6038301289
+-99.670852574 63.077063843 -84.670852574 78.077063843
+-97.5397037499 24.1544066414 -82.5397037499 39.1544066414
+17.1213365558 80.8998469932 32.1213365558 95.8998469932
+-66.0514693697 -67.879371904 -51.0514693697 -52.879371904
+-165.624597131 -28.2121530482 -150.624597131 -13.2121530482
+-153.938620771 -22.5333324395 -138.938620771 -7.5333324395
+108.059653776 -30.1015722619 123.059653776 -15.1015722619
+66.3357992327 33.4460170804 81.3357992327 48.4460170804
+122.051245261 62.1986667929 137.051245261 77.1986667929
+-9.14331797752 -4.94220638202 5.85668202248 10.057793618
+-6.21767716831 -37.4474638489 8.78232283169 -22.4474638489
+-10.2422235441 -36.7771789022 4.75777645591 -21.7771789022
+151.39952872 5.78259379576 166.39952872 20.7825937958
+53.0412866301 27.1060539476 68.0412866301 42.1060539476
+-179.969415049 -86.9431323167 -164.969415049 -71.9431323167
+-122.143517094 52.4812451482 -107.143517094 67.4812451482
+126.651232891 -71.3593917404 141.651232891 -56.3593917404
+35.5628371672 -44.4833782826 50.5628371672 -29.4833782826
+106.338230585 74.4980976394 121.338230585 89.4980976394
+2.49246106376 64.4571886404 17.4924610638 79.4571886404
+26.9239556956 74.8154250821 41.9239556956 89.8154250821
+-145.467051901 -23.3901235678 -130.467051901 -8.39012356782
+-31.1747618493 -78.3450857919 -16.1747618493 -63.3450857919
+-45.6363494594 41.8549865381 -30.6363494594 56.8549865381
+-139.598628861 -76.0620586165 -124.598628861 -61.0620586165
+75.3893757582 -96.3227872859 90.3893757582 -81.3227872859
+66.4127845964 -29.3758752649 81.4127845964 -14.3758752649
+71.002709831 5.93248532466 86.002709831 20.9324853247
+-166.73585749 -91.958750292 -151.73585749 -76.958750292
+-122.966652056 -44.5184865975 -107.966652056 -29.5184865975
+-114.787601823 -21.1179486167 -99.7876018227 -6.11794861667
+-37.7449906403 -70.1494304858 -22.7449906403 -55.1494304858
+70.2802523802 34.6578320934 85.2802523802 49.6578320934
diff --git a/tests/boxes_3x3.data b/tests/boxes_3x3.data
new file mode 100755
index 0000000..90be256
--- /dev/null
+++ b/tests/boxes_3x3.data
@@ -0,0 +1,100 @@
+-77.6266074937 17.9253077286 -74.6266074937 20.9253077286
+146.760813507 -66.1176158519 149.760813507 -63.1176158519
+-61.5952714867 1.53336501911 -58.5952714867 4.53336501911
+-97.6541571808 78.9279172851 -94.6541571808 81.9279172851
+-26.9653607563 -48.4712157725 -23.9653607563 -45.4712157725
+-143.552516091 14.3494488115 -140.552516091 17.3494488115
+-80.4613341911 17.1488336406 -77.4613341911 20.1488336406
+-170.539134443 -8.03564691796 -167.539134443 -5.03564691796
+41.1324604695 53.1528891157 44.1324604695 56.1528891157
+-16.4280335397 15.7994413301 -13.4280335397 18.7994413301
+-13.0608137513 79.8825849424 -10.0608137513 82.8825849424
+11.0220907685 -25.1820010025 14.0220907685 -22.1820010025
+-10.853938973 -83.415598855 -7.85393897295 -80.415598855
+154.64572196 -36.9887910088 157.64572196 -33.9887910088
+18.573136694 21.4354048786 21.573136694 24.4354048786
+-44.6555011074 -71.282412391 -41.6555011074 -68.282412391
+-145.411701186 4.4541144677 -142.411701186 7.4541144677
+5.40282526442 -35.1352567283 8.40282526442 -32.1352567283
+62.5944808962 -43.6191170071 65.5944808962 -40.6191170071
+-146.213229942 13.3263433101 -143.213229942 16.3263433101
+-77.4449126588 -22.6182449882 -74.4449126588 -19.6182449882
+-106.789240681 78.8103222748 -103.789240681 81.8103222748
+-3.73652421112 -19.5291285896 -0.736524211117 -16.5291285896
+-58.0281342568 32.8106143002 -55.0281342568 35.8106143002
+-13.8033575384 -50.2089822292 -10.8033575384 -47.2089822292
+-172.843024283 16.5468581097 -169.843024283 19.5468581097
+-172.107799874 -11.7749825659 -169.107799874 -8.7749825659
+-73.0310329326 54.460547423 -70.0310329326 57.460547423
+-21.5697127876 72.3233077645 -18.5697127876 75.3233077645
+-146.309829213 50.2891374472 -143.309829213 53.2891374472
+-26.1978262524 74.0518428004 -23.1978262524 77.0518428004
+-19.4804067324 -56.2863939382 -16.4804067324 -53.2863939382
+164.888440702 6.83914583504 167.888440702 9.83914583504
+-20.4710471678 60.8436455137 -17.4710471678 63.8436455137
+-162.20464081 14.3482977242 -159.20464081 17.3482977242
+-46.823944655 -57.0836083862 -43.823944655 -54.0836083862
+-116.933648325 -74.2067851587 -113.933648325 -71.2067851587
+14.9470581084 -3.10178427836 17.9470581084 -0.101784278362
+-174.718634431 -42.9059514464 -171.718634431 -39.9059514464
+-39.4478339796 -21.9917960894 -36.4478339796 -18.9917960894
+115.730938802 21.2135753799 118.730938802 24.2135753799
+10.8416658737 72.0678680529 13.8416658737 75.0678680529
+-95.9535577321 -49.1590716919 -92.9535577321 -46.1590716919
+-125.459235693 19.2496047235 -122.459235693 22.2496047235
+-132.545232353 -58.25552454 -129.545232353 -55.25552454
+-67.5395639913 -70.2635622306 -64.5395639913 -67.2635622306
+114.897795224 81.6755123276 117.897795224 84.6755123276
+-45.8275691135 51.4851475055 -42.8275691135 54.4851475055
+67.7315695721 81.6891131584 70.7315695721 84.6891131584
+-143.646124789 65.8539305596 -140.646124789 68.8539305596
+39.600377171 -65.278784271 42.600377171 -62.278784271
+-50.4241198338 -61.0588571002 -47.4241198338 -58.0588571002
+29.6470115345 -69.1121010466 32.6470115345 -66.1121010466
+74.7933751695 -87.0504609911 77.7933751695 -84.0504609911
+-44.6451547594 -21.6787016415 -41.6451547594 -18.6787016415
+-125.896784285 57.6216177466 -122.896784285 60.6216177466
+-177.918010191 39.075981359 -174.918010191 42.075981359
+149.458654065 -63.1555370915 152.458654065 -60.1555370915
+-93.8541244608 14.6920424922 -90.8541244608 17.6920424922
+103.015148455 -82.0537507881 106.015148455 -79.0537507881
+-14.1875994263 5.92016732751 -11.1875994263 8.92016732751
+-10.5260324823 -66.999980844 -7.5260324823 -63.999980844
+-77.4049966201 76.698477819 -74.4049966201 79.698477819
+163.365893138 36.3937967838 166.365893138 39.3937967838
+-77.6637113634 -20.3921897679 -74.6637113634 -17.3921897679
+-118.209984451 -89.6757056733 -115.209984451 -86.6757056733
+24.5096630884 -39.4951326405 27.5096630884 -36.4951326405
+104.683305708 -50.5163367082 107.683305708 -47.5163367082
+89.9633794652 -49.8790576673 92.9633794652 -46.8790576673
+74.1792004231 76.939779241 77.1792004231 79.939779241
+159.611093819 24.3012006505 162.611093819 27.3012006505
+-33.9960825337 48.0848879862 -30.9960825337 51.0848879862
+-74.0378541877 -74.4126488941 -71.0378541877 -71.4126488941
+92.6624431726 55.1115098398 95.6624431726 58.1115098398
+-115.093677605 27.8478080505 -112.093677605 30.8478080505
+-170.037980591 58.2298099844 -167.037980591 61.2298099844
+166.197199218 -38.4613177937 169.197199218 -35.4613177937
+63.6008145168 60.8908437143 66.6008145168 63.8908437143
+41.6381666956 74.698625008 44.6381666956 77.698625008
+30.4199356009 24.6821280736 33.4199356009 27.6821280736
+-160.657901861 46.5236688914 -157.657901861 49.5236688914
+124.039804763 -3.75214084639 127.039804763 -0.752140846393
+-98.4364817072 -34.640932721 -95.4364817072 -31.640932721
+85.2576310296 52.0416775746 88.2576310296 55.0416775746
+-135.299373946 -39.8575058091 -132.299373946 -36.8575058091
+-81.1726623037 -38.2018616886 -78.1726623037 -35.2018616886
+86.1432448082 -81.4944583964 89.1432448082 -78.4944583964
+-12.7133836326 12.0678158492 -9.71338363261 15.0678158492
+65.0162301938 -67.6995457631 68.0162301938 -64.6995457631
+169.200931012 32.4585152701 172.200931012 35.4585152701
+-105.391368296 42.7902931996 -102.391368296 45.7902931996
+-139.704228408 24.0433792599 -136.704228408 27.0433792599
+-153.800381092 16.5046872988 -150.800381092 19.5046872988
+-97.0657162703 27.2524937158 -94.0657162703 30.2524937158
+-1.60098774744 -14.9988726034 1.39901225256 -11.9988726034
+-81.0423533346 51.1588554456 -78.0423533346 54.1588554456
+157.601695863 -75.891662644 160.601695863 -72.891662644
+10.433405189 86.8920650943 13.433405189 89.8920650943
+113.813941489 -24.5868189503 116.813941489 -21.5868189503
+47.0050784943 -52.865903321 50.0050784943 -49.865903321
diff --git a/tests/data.py b/tests/data.py
new file mode 100755
index 0000000..6601814
--- /dev/null
+++ b/tests/data.py
@@ -0,0 +1,39 @@
+import os.path
+
+boxes15 = []
+f = open(os.path.join(os.path.dirname(__file__), 'boxes_15x15.data'), 'r')
+for line in f.readlines():
+    if not line:
+        break
+    [left, bottom, right, top] = [float(x) for x in line.split()]
+    boxes15.append((left, bottom, right, top))
+
+boxes3 = []
+f = open(os.path.join(os.path.dirname(__file__), 'boxes_3x3.data'), 'r')
+for line in f.readlines():
+    if not line:
+        break
+    [left, bottom, right, top] = [float(x) for x in line.split()]
+    boxes3.append((left, bottom, right, top))
+                
+points = []
+f = open(os.path.join(os.path.dirname(__file__), 'point_clusters.data'), 'r')
+for line in f.readlines():
+    if not line:
+        break
+    [left, bottom] = [float(x) for x in line.split()]
+    points.append((left, bottom))
+
+def draw_data(filename):
+    from PIL import Image, ImageDraw
+    im = Image.new('RGB', (1440, 720))
+    d = ImageDraw.Draw(im)
+    for box in boxes15:
+        coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
+        d.rectangle(coords, outline='red')
+    for box in boxes3:
+        coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
+        d.rectangle(coords, outline='blue')
+
+    im.save(filename)
+    
diff --git a/tests/index.txt b/tests/index.txt
new file mode 100644
index 0000000..6c4f9b3
--- /dev/null
+++ b/tests/index.txt
@@ -0,0 +1,308 @@
+.. _index_test:
+
+Examples
+..............................................................................
+
+    >>> from rtree import index
+    >>> from rtree.index import Rtree
+
+Ensure libspatialindex version is >= 1.7.0
+
+    >>> index.__c_api_version__.split('.')[1] >= 7
+    True
+    
+Make an instance, index stored in memory
+    
+    >>> p = index.Property()
+    
+    >>> idx = index.Index(properties=p)
+    >>> idx
+    <rtree.index.Index object at 0x...>
+    
+Add 100 largish boxes randomly distributed over the domain
+    
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx.add(i, coords)
+    
+    >>> 0 in idx.intersection((0, 0, 60, 60))
+    True
+    >>> hits = list(idx.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    >>> hits
+    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+Insert an object into the index that can be pickled 
+
+    >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+Fetch our straggler that contains a pickled object    
+    >>> hits = idx.intersection((0, 0, 60, 60), objects=True)
+    >>> for i in hits:
+    ...     if i.id == 4321:
+    ...         i.object
+    ...         ['%.10f' % t for t in i.bbox]
+    42
+    ['34.3776829412', '26.7375853734', '49.3776829412', '41.7375853734']
+
+
+Find the three items nearest to this one
+    >>> hits = list(idx.nearest((0,0,10,10), 3))
+    >>> hits
+    [76, 48, 19]
+    >>> len(hits)
+    3
+    
+
+Default order is [xmin, ymin, xmax, ymax]
+    >>> ['%.10f' % t for t in idx.bounds]
+    ['-186.6737892790', '-96.7177218184', '184.7613875560', '96.6043699778']
+
+To get in order [xmin, xmax, ymin, ymax (... for n-d indexes)] use the kwarg:
+    >>> ['%.10f' % t for t in idx.get_bounds(coordinate_interleaved=False)]
+    ['-186.6737892790', '184.7613875560', '-96.7177218184', '96.6043699778']
+
+Delete index members
+
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx.delete(i, coords)
+
+Delete our straggler too
+    >>> idx.delete(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734) )
+
+Check that we have deleted stuff
+
+    >>> hits = 0
+    >>> hits = list(idx.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    0
+    
+Check that nearest returns *all* of the items that are nearby
+
+    >>> idx2 = Rtree()
+    >>> idx2
+    <rtree.index.Index object at 0x...>
+
+    >>> locs = [(14, 10, 14, 10), 
+    ...         (16, 10, 16, 10)]
+    
+    >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
+    ...        idx2.add(i, (minx, miny, maxx, maxy))
+
+    >>> sorted(idx2.nearest((15, 10, 15, 10), 1))
+    [0, 1]
+
+
+Check that nearest returns *all* of the items that are nearby (with objects)
+    >>> idx2 = Rtree()
+    >>> idx2
+    <rtree.index.Index object at 0x...>
+
+    >>> locs = [(14, 10, 14, 10), 
+    ...         (16, 10, 16, 10)]
+    
+    >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
+    ...        idx2.add(i, (minx, miny, maxx, maxy), obj={'a': 42})
+
+    >>> sorted([(i.id, i.object) for i in idx2.nearest((15, 10, 15, 10), 1, objects=True)])
+    [(0, {'a': 42}), (1, {'a': 42})]
+
+
+    >>> idx2 = Rtree()
+    >>> idx2
+    <rtree.index.Index object at 0x...>
+            
+    >>> locs = [(2, 4), (6, 8), (10, 12), (11, 13), (15, 17), (13, 20)]
+    
+    >>> for i, (start, stop) in enumerate(locs):
+    ...        idx2.add(i, (start, 1, stop, 1))
+    
+    >>> sorted(idx2.nearest((13, 0, 20, 2), 1))
+    [3, 4, 5]
+
+Default page size 4096
+
+    >>> idx3 = Rtree("defaultidx")
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx3.add(i, coords)
+    >>> hits = list(idx3.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+
+Make sure to delete the index or the file is not flushed and it 
+will be invalid
+
+    >>> del idx3
+
+Page size 3
+
+    >>> idx4 = Rtree("pagesize3", pagesize=3)
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx4.add(i, coords)
+    >>> hits = list(idx4.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    
+    >>> idx4.close()
+    >>> del idx4
+    
+Test invalid name
+
+    >>> inv = Rtree("bogus/foo")
+    Traceback (most recent call last):
+    ...
+    IOError: Unable to open file 'bogus/foo.idx' for index storage
+
+Load a persisted index
+
+    >>> import shutil
+    >>> _ = shutil.copy("defaultidx.dat", "testing.dat")
+    >>> _ = shutil.copy("defaultidx.idx", "testing.idx")
+
+    # >>> import pdb;pdb.set_trace()
+
+    >>> idx = Rtree("testing")
+    >>> hits = list(idx.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+
+Make a 3D index
+    >>> p = index.Property()
+    >>> p.dimension = 3
+    
+
+with interleaved=False, the order of input and output is: 
+(xmin, xmax, ymin, ymax, zmin, zmax)
+
+    >>> idx3d = index.Index(properties=p, interleaved=False)
+    >>> idx3d
+    <rtree.index.Index object at 0x...>
+    
+    >>> idx3d.insert(1, (0, 0, 60, 60, 22, 22.0))
+    
+    >>> 1 in idx3d.intersection((-1, 1, 58, 62, 22, 24))
+    True
+
+
+Make a 4D index
+    >>> p = index.Property()
+    >>> p.dimension = 4
+    
+
+with interleaved=False, the order of input and output is: (xmin, xmax, ymin, ymax, zmin, zmax, kmin, kmax)
+
+    >>> idx4d = index.Index(properties=p, interleaved=False)
+    >>> idx4d
+    <rtree.index.Index object at 0x...>
+    
+    >>> idx4d.insert(1, (0, 0, 60, 60, 22, 22.0, 128, 142))
+    
+    >>> 1 in idx4d.intersection((-1, 1, 58, 62, 22, 24, 120, 150))
+    True
+
+Check that we can make an index with custom filename extensions
+
+    >>> p = index.Property()
+    >>> p.dat_extension = 'data'
+    >>> p.idx_extension = 'index'
+    
+    >>> idx_cust = Rtree('custom', properties=p)
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx_cust.add(i, coords)
+    >>> hits = list(idx_cust.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    
+    >>> del idx_cust
+    
+Reopen the index
+    >>> p2 = index.Property()
+    >>> p2.dat_extension = 'data'
+    >>> p2.idx_extension = 'index'
+    
+    >>> idx_cust2 = Rtree('custom', properties=p2)    
+    >>> hits = list(idx_cust2.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    
+    >>> del idx_cust2
+
+Adding the same id twice does not overwrite existing data
+
+    >>> r = Rtree()
+    >>> r.add(1, (2, 2))
+    >>> r.add(1, (3, 3))
+    >>> list(r.intersection((0, 0, 5, 5)))
+    [1, 1]
+
+A stream of data needs to be an iterator that will raise a 
+StopIteration. The order depends on the interleaved kwarg sent to the 
+constructor.
+
+The object can be None, but you must put a placeholder of 'None' there.
+
+    >>> p = index.Property()
+    >>> def data_gen(interleaved=True):
+    ...    for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
+    ...        if interleaved:
+    ...            yield (i, (minx, miny, maxx, maxy), 42)
+    ...        else:
+    ...            yield (i, (minx, maxx, miny, maxy), 42)
+
+    >>> strm_idx = index.Rtree(data_gen(), properties = p)
+
+    >>> hits = list(strm_idx.intersection((0, 0, 60, 60)))
+
+    >>> len(hits)
+    10
+    
+    
+    >>> sorted(hits)
+    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+    >>> hits = list(strm_idx.intersection((0, 0, 60, 60), objects=True))
+    >>> len(hits)
+    10
+    
+    >>> hits[0].object
+    42
+
+Try streaming against a persisted index without interleaving.
+    >>> strm_idx = index.Rtree('streamed', data_gen(interleaved=False), properties = p, interleaved=False)
+
+Note the arguments to intersection must be xmin, xmax, ymin, ymax for interleaved=False
+    >>> hits = list(strm_idx.intersection((0, 60, 0, 60)))
+    >>> len(hits)
+    10
+    
+    >>> sorted(hits)
+    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+    >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects=True))
+    >>> len(hits)
+    10
+    
+    >>> hits[0].object
+    42
+
+    >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects='raw'))
+    >>> hits[0]
+    42
+    >>> len(hits)
+    10
+    
+    >>> strm_idx.count((0, 60, 0, 60))
+    10L
+    
+    >>> del strm_idx
+
+    >>> p = index.Property()
+    >>> p.leaf_capacity = 100
+    >>> p.fill_factor = 0.5
+    >>> p.index_capacity = 10
+    >>> p.near_minimum_overlap_factor = 7
+    >>> idx = index.Index(data_gen(interleaved=False), properties = p, interleaved=False)
+
+    >>> leaves = idx.leaves()
+
+    >>> del idx
diff --git a/tests/off/BoundsCheck.txt b/tests/off/BoundsCheck.txt
new file mode 100644
index 0000000..b068e2c
--- /dev/null
+++ b/tests/off/BoundsCheck.txt
@@ -0,0 +1,26 @@
+Bounding Box Checking
+=====================
+
+See http://trac.gispython.org/projects/PCL/ticket/127.
+
+Adding with bogus bounds
+------------------------
+
+  >>> import rtree
+  >>> index = rtree.Rtree()
+  >>> index.add(1, (0.0, 0.0, -1.0, 1.0))
+  Traceback (most recent call last):
+  ...
+  RTreeError: Coordinates must not have minimums more than maximums
+  
+  >>> index.intersection((0.0, 0.0, -1.0, 1.0))
+  Traceback (most recent call last):
+  ...
+  RTreeError: Coordinates must not have minimums more than maximums
+  
+Adding with invalid bounds argument should raise an exception
+
+  >>> index.add(1, 1)
+  Traceback (most recent call last):
+  ...
+  TypeError: Bounds must be a sequence
diff --git a/tests/off/index.txt b/tests/off/index.txt
new file mode 100644
index 0000000..6c4f9b3
--- /dev/null
+++ b/tests/off/index.txt
@@ -0,0 +1,308 @@
+.. _index_test:
+
+Examples
+..............................................................................
+
+    >>> from rtree import index
+    >>> from rtree.index import Rtree
+
+Ensure libspatialindex version is >= 1.7.0
+
+    >>> index.__c_api_version__.split('.')[1] >= 7
+    True
+    
+Make an instance, index stored in memory
+    
+    >>> p = index.Property()
+    
+    >>> idx = index.Index(properties=p)
+    >>> idx
+    <rtree.index.Index object at 0x...>
+    
+Add 100 largish boxes randomly distributed over the domain
+    
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx.add(i, coords)
+    
+    >>> 0 in idx.intersection((0, 0, 60, 60))
+    True
+    >>> hits = list(idx.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    >>> hits
+    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+Insert an object into the index that can be pickled 
+
+    >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+Fetch our straggler that contains a pickled object    
+    >>> hits = idx.intersection((0, 0, 60, 60), objects=True)
+    >>> for i in hits:
+    ...     if i.id == 4321:
+    ...         i.object
+    ...         ['%.10f' % t for t in i.bbox]
+    42
+    ['34.3776829412', '26.7375853734', '49.3776829412', '41.7375853734']
+
+
+Find the three items nearest to this one
+    >>> hits = list(idx.nearest((0,0,10,10), 3))
+    >>> hits
+    [76, 48, 19]
+    >>> len(hits)
+    3
+    
+
+Default order is [xmin, ymin, xmax, ymax]
+    >>> ['%.10f' % t for t in idx.bounds]
+    ['-186.6737892790', '-96.7177218184', '184.7613875560', '96.6043699778']
+
+To get in order [xmin, xmax, ymin, ymax (... for n-d indexes)] use the kwarg:
+    >>> ['%.10f' % t for t in idx.get_bounds(coordinate_interleaved=False)]
+    ['-186.6737892790', '184.7613875560', '-96.7177218184', '96.6043699778']
+
+Delete index members
+
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx.delete(i, coords)
+
+Delete our straggler too
+    >>> idx.delete(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734) )
+
+Check that we have deleted stuff
+
+    >>> hits = 0
+    >>> hits = list(idx.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    0
+    
+Check that nearest returns *all* of the items that are nearby
+
+    >>> idx2 = Rtree()
+    >>> idx2
+    <rtree.index.Index object at 0x...>
+
+    >>> locs = [(14, 10, 14, 10), 
+    ...         (16, 10, 16, 10)]
+    
+    >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
+    ...        idx2.add(i, (minx, miny, maxx, maxy))
+
+    >>> sorted(idx2.nearest((15, 10, 15, 10), 1))
+    [0, 1]
+
+
+Check that nearest returns *all* of the items that are nearby (with objects)
+    >>> idx2 = Rtree()
+    >>> idx2
+    <rtree.index.Index object at 0x...>
+
+    >>> locs = [(14, 10, 14, 10), 
+    ...         (16, 10, 16, 10)]
+    
+    >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
+    ...        idx2.add(i, (minx, miny, maxx, maxy), obj={'a': 42})
+
+    >>> sorted([(i.id, i.object) for i in idx2.nearest((15, 10, 15, 10), 1, objects=True)])
+    [(0, {'a': 42}), (1, {'a': 42})]
+
+
+    >>> idx2 = Rtree()
+    >>> idx2
+    <rtree.index.Index object at 0x...>
+            
+    >>> locs = [(2, 4), (6, 8), (10, 12), (11, 13), (15, 17), (13, 20)]
+    
+    >>> for i, (start, stop) in enumerate(locs):
+    ...        idx2.add(i, (start, 1, stop, 1))
+    
+    >>> sorted(idx2.nearest((13, 0, 20, 2), 1))
+    [3, 4, 5]
+
+Default page size 4096
+
+    >>> idx3 = Rtree("defaultidx")
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx3.add(i, coords)
+    >>> hits = list(idx3.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+
+Make sure to delete the index or the file is not flushed and it 
+will be invalid
+
+    >>> del idx3
+
+Page size 3
+
+    >>> idx4 = Rtree("pagesize3", pagesize=3)
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx4.add(i, coords)
+    >>> hits = list(idx4.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    
+    >>> idx4.close()
+    >>> del idx4
+    
+Test invalid name
+
+    >>> inv = Rtree("bogus/foo")
+    Traceback (most recent call last):
+    ...
+    IOError: Unable to open file 'bogus/foo.idx' for index storage
+
+Load a persisted index
+
+    >>> import shutil
+    >>> _ = shutil.copy("defaultidx.dat", "testing.dat")
+    >>> _ = shutil.copy("defaultidx.idx", "testing.idx")
+
+    # >>> import pdb;pdb.set_trace()
+
+    >>> idx = Rtree("testing")
+    >>> hits = list(idx.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+
+Make a 3D index
+    >>> p = index.Property()
+    >>> p.dimension = 3
+    
+
+with interleaved=False, the order of input and output is: 
+(xmin, xmax, ymin, ymax, zmin, zmax)
+
+    >>> idx3d = index.Index(properties=p, interleaved=False)
+    >>> idx3d
+    <rtree.index.Index object at 0x...>
+    
+    >>> idx3d.insert(1, (0, 0, 60, 60, 22, 22.0))
+    
+    >>> 1 in idx3d.intersection((-1, 1, 58, 62, 22, 24))
+    True
+
+
+Make a 4D index
+    >>> p = index.Property()
+    >>> p.dimension = 4
+    
+
+with interleaved=False, the order of input and output is: (xmin, xmax, ymin, ymax, zmin, zmax, kmin, kmax)
+
+    >>> idx4d = index.Index(properties=p, interleaved=False)
+    >>> idx4d
+    <rtree.index.Index object at 0x...>
+    
+    >>> idx4d.insert(1, (0, 0, 60, 60, 22, 22.0, 128, 142))
+    
+    >>> 1 in idx4d.intersection((-1, 1, 58, 62, 22, 24, 120, 150))
+    True
+
+Check that we can make an index with custom filename extensions
+
+    >>> p = index.Property()
+    >>> p.dat_extension = 'data'
+    >>> p.idx_extension = 'index'
+    
+    >>> idx_cust = Rtree('custom', properties=p)
+    >>> for i, coords in enumerate(boxes15):
+    ...     idx_cust.add(i, coords)
+    >>> hits = list(idx_cust.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    
+    >>> del idx_cust
+    
+Reopen the index
+    >>> p2 = index.Property()
+    >>> p2.dat_extension = 'data'
+    >>> p2.idx_extension = 'index'
+    
+    >>> idx_cust2 = Rtree('custom', properties=p2)    
+    >>> hits = list(idx_cust2.intersection((0, 0, 60, 60)))
+    >>> len(hits)
+    10
+    
+    >>> del idx_cust2
+
+Adding the same id twice does not overwrite existing data
+
+    >>> r = Rtree()
+    >>> r.add(1, (2, 2))
+    >>> r.add(1, (3, 3))
+    >>> list(r.intersection((0, 0, 5, 5)))
+    [1, 1]
+
+A stream of data needs to be an iterator that will raise a 
+StopIteration. The order depends on the interleaved kwarg sent to the 
+constructor.
+
+The object can be None, but you must put a placeholder of 'None' there.
+
+    >>> p = index.Property()
+    >>> def data_gen(interleaved=True):
+    ...    for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
+    ...        if interleaved:
+    ...            yield (i, (minx, miny, maxx, maxy), 42)
+    ...        else:
+    ...            yield (i, (minx, maxx, miny, maxy), 42)
+
+    >>> strm_idx = index.Rtree(data_gen(), properties = p)
+
+    >>> hits = list(strm_idx.intersection((0, 0, 60, 60)))
+
+    >>> len(hits)
+    10
+    
+    
+    >>> sorted(hits)
+    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+    >>> hits = list(strm_idx.intersection((0, 0, 60, 60), objects=True))
+    >>> len(hits)
+    10
+    
+    >>> hits[0].object
+    42
+
+Try streaming against a persisted index without interleaving.
+    >>> strm_idx = index.Rtree('streamed', data_gen(interleaved=False), properties = p, interleaved=False)
+
+Note the arguments to intersection must be xmin, xmax, ymin, ymax for interleaved=False
+    >>> hits = list(strm_idx.intersection((0, 60, 0, 60)))
+    >>> len(hits)
+    10
+    
+    >>> sorted(hits)
+    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+    >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects=True))
+    >>> len(hits)
+    10
+    
+    >>> hits[0].object
+    42
+
+    >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects='raw'))
+    >>> hits[0]
+    42
+    >>> len(hits)
+    10
+    
+    >>> strm_idx.count((0, 60, 0, 60))
+    10L
+    
+    >>> del strm_idx
+
+    >>> p = index.Property()
+    >>> p.leaf_capacity = 100
+    >>> p.fill_factor = 0.5
+    >>> p.index_capacity = 10
+    >>> p.near_minimum_overlap_factor = 7
+    >>> idx = index.Index(data_gen(interleaved=False), properties = p, interleaved=False)
+
+    >>> leaves = idx.leaves()
+
+    >>> del idx
diff --git a/tests/off/properties.txt b/tests/off/properties.txt
new file mode 100644
index 0000000..22842ae
--- /dev/null
+++ b/tests/off/properties.txt
@@ -0,0 +1,257 @@
+Testing rtree properties
+==========================
+
+Make a simple properties object
+
+    >>> from rtree import index
+    >>> p = index.Property()
+
+Test as_dict()
+
+    >>> d = p.as_dict()
+    >>> d['index_id'] is None
+    True
+
+Test creation from kwargs and eval() of its repr()
+
+    >>> q = index.Property(**d)
+    >>> eval(repr(q))['index_id'] is None
+    True
+
+Test pretty printed string
+
+    >>> print q
+    {'buffering_capacity': 10,
+     'custom_storage_callbacks': None,
+     'custom_storage_callbacks_size': 0L,
+     'dat_extension': 'dat',
+     'dimension': 2,
+     'filename': '',
+     'fill_factor': 0...,
+     'idx_extension': 'idx',
+     'index_capacity': 100,
+     'index_id': None,
+     'leaf_capacity': 100,
+     'near_minimum_overlap_factor': 32,
+     'overwrite': True,
+     'pagesize': 4096,
+     'point_pool_capacity': 500,
+     'region_pool_capacity': 1000,
+     'reinsert_factor': 0...,
+     'split_distribution_factor': 0...,
+     'storage': 1,
+     'tight_mbr': True,
+     'tpr_horizon': 20.0,
+     'type': 0,
+     'variant': 2,
+     'writethrough': False}
+
+Test property setting
+
+    >>> p = index.Property()
+    >>> p.type = 0
+    >>> p.type
+    0
+    
+    >>> p.type = 2
+    >>> p.type
+    2
+
+    >>> p.type = 6
+    Traceback (most recent call last):
+    ...
+    RTreeError: LASError in "IndexProperty_SetIndexType": Inputted value is not a valid index type
+
+    >>> p.dimension = 3
+    >>> p.dimension
+    3
+    
+    >>> p.dimension = 2
+    >>> p.dimension
+    2
+    
+    >>> p.dimension = -2
+    Traceback (most recent call last):
+    ...
+    RTreeError: Negative or 0 dimensional indexes are not allowed
+    
+    >>> p.variant = 0
+    >>> p.variant
+    0
+    
+    >>> p.variant = 6
+    Traceback (most recent call last):
+    ...
+    RTreeError: LASError in "IndexProperty_SetIndexVariant": Inputted value is not a valid index variant
+    
+    >>> p.storage = 0
+    >>> p.storage 
+    0
+    
+    >>> p.storage = 1
+    >>> p.storage
+    1
+    
+    >>> p.storage = 3
+    Traceback (most recent call last):
+    ...
+    RTreeError: LASError in "IndexProperty_SetIndexStorage": Inputted value is not a valid index storage type
+    
+    >>> p.index_capacity
+    100
+    
+    >>> p.index_capacity = 300
+    >>> p.index_capacity
+    300
+    
+    >>> p.index_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: index_capacity must be > 0
+    
+    >>> p.pagesize
+    4096
+    
+    >>> p.pagesize = 8192
+    >>> p.pagesize
+    8192
+    
+    >>> p.pagesize = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: Pagesize must be > 0
+
+    >>> p.leaf_capacity
+    100
+    
+    >>> p.leaf_capacity = 1000
+    >>> p.leaf_capacity
+    1000
+    >>> p.leaf_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: leaf_capacity must be > 0
+    
+    >>> p.index_pool_capacity
+    100
+    
+    >>> p.index_pool_capacity = 1500
+    >>> p.index_pool_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: index_pool_capacity must be > 0
+
+    >>> p.point_pool_capacity
+    500
+    
+    >>> p.point_pool_capacity = 1500
+    >>> p.point_pool_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: point_pool_capacity must be > 0
+
+    >>> p.region_pool_capacity
+    1000
+    
+    >>> p.region_pool_capacity = 1500
+    >>> p.region_pool_capacity
+    1500
+    >>> p.region_pool_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: region_pool_capacity must be > 0
+
+    >>> p.buffering_capacity
+    10
+    
+    >>> p.buffering_capacity = 100
+    >>> p.buffering_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: buffering_capacity must be > 0    
+
+    >>> p.tight_mbr
+    True
+    
+    >>> p.tight_mbr = 100
+    >>> p.tight_mbr
+    True
+    
+    >>> p.tight_mbr = False
+    >>> p.tight_mbr
+    False
+
+    >>> p.overwrite
+    True
+    
+    >>> p.overwrite = 100
+    >>> p.overwrite
+    True
+    
+    >>> p.overwrite = False
+    >>> p.overwrite
+    False
+
+    >>> p.near_minimum_overlap_factor
+    32
+    
+    >>> p.near_minimum_overlap_factor = 100
+    >>> p.near_minimum_overlap_factor = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: near_minimum_overlap_factor must be > 0  
+
+    >>> p.writethrough
+    False
+    
+    >>> p.writethrough = 100
+    >>> p.writethrough
+    True
+    
+    >>> p.writethrough = False
+    >>> p.writethrough
+    False    
+
+    >>> '%.2f' % p.fill_factor
+    '0.70'
+
+    >>> p.fill_factor = 0.99
+    >>> '%.2f' % p.fill_factor
+    '0.99'
+
+    >>> '%.2f' % p.split_distribution_factor
+    '0.40'
+
+    >>> p.tpr_horizon
+    20.0
+    
+    >>> '%.2f' % p.reinsert_factor
+    '0.30'
+
+    >>> p.filename
+    ''
+    
+    >>> p.filename = 'testing123testing'
+    >>> p.filename
+    'testing123testing'
+    
+    >>> p.dat_extension
+    'dat'
+
+    >>> p.dat_extension = 'data'
+    >>> p.dat_extension
+    'data'
+    
+    >>> p.idx_extension
+    'idx'
+    >>> p.idx_extension = 'index'
+    >>> p.idx_extension
+    'index'
+    
+    >>> p.index_id
+    Traceback (most recent call last):
+    ...
+    RTreeError: Error in "IndexProperty_GetIndexID": Property IndexIdentifier was empty
+    >>> p.index_id = -420
+    >>> int(p.index_id)
+    -420
diff --git a/tests/off/test_customStorage.txt b/tests/off/test_customStorage.txt
new file mode 100644
index 0000000..75cbd38
--- /dev/null
+++ b/tests/off/test_customStorage.txt
@@ -0,0 +1,157 @@
+
+Shows how to create custom storage backend.
+
+Derive your custom storage from rtree.index.CustomStorage and override the methods
+shown in this example.
+You can also derive from rtree.index.CustomStorageBase to get at the raw C buffers
+if you need the extra speed and want to avoid translating from/to python strings.
+
+The essential methods are the load/store/deleteByteArray. The rtree library calls
+them whenever it needs to access the data in any way.
+
+Example storage which maps the page (ids) to the page data.
+
+   >>> from rtree.index import Rtree, CustomStorage, Property
+   
+   >>> class DictStorage(CustomStorage):
+   ...     """ A simple storage which saves the pages in a python dictionary """
+   ...     def __init__(self):
+   ...         CustomStorage.__init__( self )
+   ...         self.clear()
+   ... 
+   ...     def create(self, returnError):
+   ...         """ Called when the storage is created on the C side """
+   ... 
+   ...     def destroy(self, returnError):
+   ...         """ Called when the storage is destroyed on the C side """
+   ... 
+   ...     def clear(self):
+   ...         """ Clear all our data """   
+   ...         self.dict = {}
+   ... 
+   ...     def loadByteArray(self, page, returnError):
+   ...         """ Returns the data for page or returns an error """   
+   ...         try:
+   ...             return self.dict[page]
+   ...         except KeyError:
+   ...             returnError.contents.value = self.InvalidPageError
+   ... 
+   ...     def storeByteArray(self, page, data, returnError):
+   ...         """ Stores the data for page """   
+   ...         if page == self.NewPage:
+   ...             newPageId = len(self.dict)
+   ...             self.dict[newPageId] = data
+   ...             return newPageId
+   ...         else:
+   ...             if page not in self.dict:
+   ...                 returnError.value = self.InvalidPageError
+   ...                 return 0
+   ...             self.dict[page] = data
+   ...             return page
+   ... 
+   ...     def deleteByteArray(self, page, returnError):
+   ...         """ Deletes a page """   
+   ...         try:
+   ...             del self.dict[page]
+   ...         except KeyError:
+   ...             returnError.contents.value = self.InvalidPageError
+   ... 
+   ...     hasData = property( lambda self: bool(self.dict) )
+   ...     """ Returns true if we contains some data """   
+
+
+Now let's test drive our custom storage.
+
+First let's define the basic properties we will use for all rtrees:
+
+    >>> settings = Property()
+    >>> settings.writethrough = True
+    >>> settings.buffering_capacity = 1
+
+Notice that there is a small in-memory buffer by default. We effectively disable
+it here so our storage directly receives any load/store/delete calls.
+This is not necessary in general and can hamper performance; we just use it here
+for illustrative and testing purposes.
+
+Let's start with a basic test:
+
+Create the storage and hook it up with a new rtree:
+
+    >>> storage = DictStorage()
+    >>> r = Rtree( storage, properties = settings )
+
+Interestingly enough, if we take a look at the contents of our storage now, we
+can see the Rtree has already written two pages to it. This is for header and
+index.
+
+    >>> state1 = storage.dict.copy()
+    >>> list(state1.keys())
+    [0, 1]
+    
+Let's add an item:
+
+    >>> r.add(123, (0, 0, 1, 1))
+
+Make sure the data in the storage before and after the addition of the new item
+is different:
+
+    >>> state2 = storage.dict.copy()
+    >>> state1 != state2
+    True
+
+Now perform a few queries and assure the tree is still valid:
+
+    >>> item = list(r.nearest((0, 0), 1, objects=True))[0]
+    >>> int(item.id)
+    123
+    >>> r.valid()
+    True
+    
+Check if the stored data is a byte string
+
+    >>> isinstance(list(storage.dict.values())[0], bytes)
+    True
+    
+Delete an item
+
+    >>> r.delete(123, (0, 0, 1, 1))
+    >>> r.valid()
+    True
+    
+Just for reference show how to flush the internal buffers (e.g. when
+properties.buffer_capacity is > 1)
+
+    >>> r.clearBuffer()
+    >>> r.valid()
+    True
+
+Let's get rid of the tree, we're done with it
+    
+    >>> del r
+
+Show how to empty the storage
+    
+    >>> storage.clear()
+    >>> storage.hasData
+    False
+    >>> del storage
+
+    
+Ok, let's create another small test. This time we'll test reopening our custom
+storage. This is useful for persistent storages.
+
+First create a storage and put some data into it:
+
+    >>> storage = DictStorage()
+    >>> r1 = Rtree( storage, properties = settings, overwrite = True )
+    >>> r1.add(555, (2, 2))
+    >>> del r1
+    >>> storage.hasData
+    True
+    
+Then reopen the storage with a new tree and see if the data is still there
+
+    >>> r2 = Rtree( storage, properties = settings, overwrite = False )
+    >>> r2.count( (0,0,10,10) ) == 1
+    True
+    >>> del r2
diff --git a/tests/off/test_misc.txt b/tests/off/test_misc.txt
new file mode 100644
index 0000000..fc02bac
--- /dev/null
+++ b/tests/off/test_misc.txt
@@ -0,0 +1,42 @@
+
+make sure a file-based index is overwriteable.
+
+    >>> from rtree.index import Rtree
+    >>> r = Rtree('overwriteme')
+    >>> del r
+    >>> r = Rtree('overwriteme', overwrite=True)
+
+
+the default serializer is pickle, can use any by overriding dumps, loads
+
+    >>> r = Rtree()
+    >>> some_data = {"a": 22, "b": [1, "ccc"]}
+    >>> try:
+    ...     import simplejson
+    ...     r.dumps = simplejson.dumps
+    ...     r.loads = simplejson.loads
+    ...     r.add(0, (0, 0, 1, 1), some_data)
+    ...     list(r.nearest((0, 0), 1, objects="raw"))[0] == some_data
+    ... except ImportError:
+    ...     # "no import, failed"
+    ...     True
+    True
+
+
+    >>> r = Rtree()
+    >>> r.add(123, (0, 0, 1, 1))
+    >>> item = list(r.nearest((0, 0), 1, objects=True))[0]
+    >>> item.id
+    123
+
+    >>> r.valid()
+    True
+
+test UTF-8 filenames
+
+    >>> f = u'gilename\u4500abc'
+
+    >>> r = Rtree(f)
+    >>> r.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+    >>> del r
diff --git a/tests/off/z_cleanup.txt b/tests/off/z_cleanup.txt
new file mode 100644
index 0000000..5af62a9
--- /dev/null
+++ b/tests/off/z_cleanup.txt
@@ -0,0 +1,18 @@
+    >>> from rtree import core
+    >>> del core.rt
+    >>> files = ['defaultidx.dat','defaultidx.idx',
+    ...          'pagesize3.dat','pagesize3.idx',
+    ...          'testing.dat','testing.idx',
+    ...          'custom.data','custom.index',
+    ...          'streamed.idx','streamed.dat',
+    ...          'gilename䔀abc.dat','gilename䔀abc.idx',
+    ...          'overwriteme.idx', 'overwriteme.dat']
+    >>> import os
+    >>> import time
+    >>> for f in files:
+    ...     try:
+    ...         os.remove(f)
+    ...     except OSError:
+    ...         time.sleep(0.1)
+    ...         os.remove(f)
+
diff --git a/tests/point_clusters.data b/tests/point_clusters.data
new file mode 100755
index 0000000..2e3dc38
--- /dev/null
+++ b/tests/point_clusters.data
@@ -0,0 +1,200 @@
+-99.6892107034 39.8916853627
+-119.929145785 54.6975115102
+-87.9404916748 46.7461477222
+-103.34859903 27.3051127003
+-103.647596618 53.8807507917
+-91.8927833592 35.7753372219
+-89.1652931686 36.1142561824
+-110.126101209 23.1717348361
+-80.0045033941 38.1454752139
+-106.64011996 46.0721918659
+-112.18238306 50.0387811785
+-115.52666575 49.8014009131
+-84.7813828591 26.1819403837
+-106.036014203 20.4490174512
+-101.757539351 41.9304244741
+-95.8296753559 56.7690801746
+-92.2014788887 22.7196420456
+-92.4694650257 36.0372197509
+-95.9693778032 24.7743480763
+-116.892131043 58.3921666824
+-81.7116145102 43.0997450632
+-80.4274533745 57.264509789
+-100.141449392 28.7945317045
+-83.938309905 24.4706430566
+-116.825766821 54.4155675643
+-109.041900666 48.4070794953
+-104.948562232 46.755055732
+-93.3182267273 25.7153440898
+-105.692452228 34.1668233688
+-90.0421361623 44.3240654845
+-104.539878551 21.6407826234
+-101.237553395 41.1370129094
+-86.6978722357 42.3737781109
+-85.6212971271 54.3973766798
+-84.7087435581 30.9745681929
+-83.5560972991 27.2395110389
+-112.224540205 34.0412265793
+-112.217509712 21.0944835429
+-92.3342976535 23.0815733884
+-87.7231811048 42.0253658091
+-83.7711891214 35.846982396
+-114.489815147 39.9102600009
+-110.230199031 55.6397960984
+-89.7312460371 40.195281519
+-114.839230733 30.9793913832
+-91.8211592397 44.5025580473
+-80.6740618977 27.4621539695
+-89.0016046553 25.614226888
+-109.529957004 35.76933736
+-116.308211506 51.619356631
+-108.039179673 48.8786346853
+-114.688504417 38.6402036876
+-101.453481649 50.5354288275
+-89.2420289071 38.439732812
+-86.7313281606 27.3379253528
+-88.3419470249 31.4306493133
+-81.3513268328 34.3657607419
+-107.41532032 59.6742008238
+-115.004114428 49.2723700185
+-89.6973489591 22.8852207708
+-83.8397837025 39.9841988713
+-97.0186983466 22.0471178349
+-106.424157679 32.895362106
+-95.1848371325 51.5043201437
+-86.3616752194 21.9548312528
+-105.991425461 22.5286573131
+-80.5668902367 42.9792375503
+-112.783634301 40.0776845685
+-82.5477767834 28.0613388183
+-91.0515968771 23.1905128055
+-110.149810556 47.1166200287
+-83.1182736485 25.2447131214
+-84.5076122639 44.5323462279
+-105.648654014 20.1862355065
+-110.475101921 31.8228885917
+-99.8038044984 31.7219492613
+-99.2715379504 55.995839678
+-88.2132334567 43.2039556608
+-106.08196914 48.2430863629
+-112.208186359 53.1787640688
+-95.5574424354 37.5192150379
+-100.198900483 45.5786586794
+-102.750565726 39.2778615954
+-99.7750925736 39.5666262605
+-101.473586825 57.4200835182
+-80.299431091 32.2200518727
+-93.1827073287 29.7365078678
+-105.079941409 23.6106407608
+-108.660717273 50.6341222608
+-109.720823995 59.8332354648
+-99.0121042978 48.4492204324
+-116.279364519 56.5768009089
+-94.2288025492 55.4285838669
+-115.549238258 24.072445234
+-99.157995656 30.0012169357
+-97.0117440317 40.2556293841
+-113.839305491 43.173923535
+-80.4396012705 43.3278307342
+-80.4410933026 31.5999201013
+-91.4651975999 41.2493114192
+4.60579495227 49.8831194463
+3.20672814175 51.3609018809
+3.36845381787 51.2257002999
+5.07383783754 50.1890984029
+3.47062525041 54.4034196606
+9.42944118113 49.3310802203
+9.65029800292 52.3849774631
+3.15390911197 53.4356410134
+2.1641889441 46.3160467484
+2.76615096474 51.9333052166
+5.92610984385 52.3525895826
+8.27672843388 47.3971697671
+8.09018831965 45.0103660536
+0.295422443428 45.7047614237
+7.08973842847 50.6442234879
+9.17602294413 48.5340026698
+4.15118930812 51.8952792855
+2.28117868048 47.1178084718
+1.58374188707 52.2081804176
+2.53872504734 45.0849317176
+3.28711441668 45.432312898
+0.758226492991 53.2607340635
+3.25985095142 45.2573310672
+8.00601068476 47.0850066017
+1.28830154784 52.4991772013
+1.66996778024 51.6367913296
+3.9815072575 52.3482628229
+5.88977607375 53.9423490524
+8.96337945228 51.0139312609
+5.24818348639 46.134455909
+5.61289802982 54.5954320121
+3.62429244309 52.3359637309
+4.21198096829 50.0855475321
+7.94160546253 45.4384582083
+0.858870058152 47.360936282
+0.268199044279 54.6205941141
+8.8517468866 47.0177541617
+3.82634390799 49.6129475929
+7.18121039316 50.9102230118
+6.33987705575 48.5869965232
+4.27748859389 46.8445771091
+8.16102273984 52.0542397504
+8.90629765654 46.8267304009
+8.50418457135 54.2788836483
+2.83298541659 54.4015795236
+5.05727814519 46.8966220104
+5.42722723233 49.2522002876
+3.74779199012 47.3024407957
+5.5540513786 48.0000667884
+4.22046721694 48.4813710125
+7.77423935705 54.7851361942
+1.69100522731 49.1479444596
+9.88831364423 46.6307793199
+0.557972287014 49.2862367442
+3.833420502 54.5873993944
+6.46076039362 48.8972713871
+2.63955752349 54.232602505
+5.8885693494 45.5366348259
+4.43826955483 46.7887436394
+2.17158704423 48.0543749425
+7.72137464151 45.4366964436
+6.17791100255 50.2728073458
+8.76313855735 48.3295373887
+8.55934310911 45.7877488932
+5.44000186467 49.4093319122
+5.79751516319 54.5349162318
+8.1429940015 48.3476815956
+0.835864306517 49.0796610468
+0.696120353103 46.4340363637
+7.18918891741 52.2180907225
+2.52640501319 48.1556192069
+3.83156688721 48.4500486034
+9.22538921 45.0064651048
+3.14537149103 51.1287933121
+1.83951586739 50.9044177555
+2.71188429211 45.8851076637
+9.71187132506 47.3613845231
+4.88830479883 46.8400164971
+2.73227645129 52.5950604826
+0.0139182499152 49.8607853636
+2.72072453562 47.8977681874
+9.36967823942 46.9493204587
+4.10066520654 49.1711325962
+3.31840270526 52.9602125993
+2.4887190236 54.9710679569
+7.37847486987 52.8425110492
+1.0003078803 51.0441658531
+9.24208567999 54.4966902592
+3.29499693052 50.4276526188
+0.0878474024123 49.9046355191
+0.0235438079302 53.0495224168
+5.69863091011 46.2782685579
+8.32974835131 46.6179198084
+4.42226544448 50.304272252
+8.89982687198 51.7079435384
+7.32317795781 48.7452952961
+0.922452349264 47.7145307105
+4.83907582793 52.185114017
+4.30389764594 47.9144865159
+7.63593976114 47.452190841
diff --git a/tests/properties.txt b/tests/properties.txt
new file mode 100644
index 0000000..22842ae
--- /dev/null
+++ b/tests/properties.txt
@@ -0,0 +1,257 @@
+Testing rtree properties
+==========================
+
+Make a simple properties object
+
+    >>> from rtree import index
+    >>> p = index.Property()
+
+Test as_dict()
+
+    >>> d = p.as_dict()
+    >>> d['index_id'] is None
+    True
+
+Test creation from kwargs and eval() of its repr()
+
+    >>> q = index.Property(**d)
+    >>> eval(repr(q))['index_id'] is None
+    True
+
+Test pretty printed string
+
+    >>> print q
+    {'buffering_capacity': 10,
+     'custom_storage_callbacks': None,
+     'custom_storage_callbacks_size': 0L,
+     'dat_extension': 'dat',
+     'dimension': 2,
+     'filename': '',
+     'fill_factor': 0...,
+     'idx_extension': 'idx',
+     'index_capacity': 100,
+     'index_id': None,
+     'leaf_capacity': 100,
+     'near_minimum_overlap_factor': 32,
+     'overwrite': True,
+     'pagesize': 4096,
+     'point_pool_capacity': 500,
+     'region_pool_capacity': 1000,
+     'reinsert_factor': 0...,
+     'split_distribution_factor': 0...,
+     'storage': 1,
+     'tight_mbr': True,
+     'tpr_horizon': 20.0,
+     'type': 0,
+     'variant': 2,
+     'writethrough': False}
+
+Test property setting
+
+    >>> p = index.Property()
+    >>> p.type = 0
+    >>> p.type
+    0
+    
+    >>> p.type = 2
+    >>> p.type
+    2
+
+    >>> p.type = 6
+    Traceback (most recent call last):
+    ...
+    RTreeError: LASError in "IndexProperty_SetIndexType": Inputted value is not a valid index type
+
+    >>> p.dimension = 3
+    >>> p.dimension
+    3
+    
+    >>> p.dimension = 2
+    >>> p.dimension
+    2
+    
+    >>> p.dimension = -2
+    Traceback (most recent call last):
+    ...
+    RTreeError: Negative or 0 dimensional indexes are not allowed
+    
+    >>> p.variant = 0
+    >>> p.variant
+    0
+    
+    >>> p.variant = 6
+    Traceback (most recent call last):
+    ...
+    RTreeError: LASError in "IndexProperty_SetIndexVariant": Inputted value is not a valid index variant
+    
+    >>> p.storage = 0
+    >>> p.storage 
+    0
+    
+    >>> p.storage = 1
+    >>> p.storage
+    1
+    
+    >>> p.storage = 3
+    Traceback (most recent call last):
+    ...
+    RTreeError: LASError in "IndexProperty_SetIndexStorage": Inputted value is not a valid index storage type
+    
+    >>> p.index_capacity
+    100
+    
+    >>> p.index_capacity = 300
+    >>> p.index_capacity
+    300
+    
+    >>> p.index_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: index_capacity must be > 0
+    
+    >>> p.pagesize
+    4096
+    
+    >>> p.pagesize = 8192
+    >>> p.pagesize
+    8192
+    
+    >>> p.pagesize = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: Pagesize must be > 0
+
+    >>> p.leaf_capacity
+    100
+    
+    >>> p.leaf_capacity = 1000
+    >>> p.leaf_capacity
+    1000
+    >>> p.leaf_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: leaf_capacity must be > 0
+    
+    >>> p.index_pool_capacity
+    100
+    
+    >>> p.index_pool_capacity = 1500
+    >>> p.index_pool_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: index_pool_capacity must be > 0
+
+    >>> p.point_pool_capacity
+    500
+    
+    >>> p.point_pool_capacity = 1500
+    >>> p.point_pool_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: point_pool_capacity must be > 0
+
+    >>> p.region_pool_capacity
+    1000
+    
+    >>> p.region_pool_capacity = 1500
+    >>> p.region_pool_capacity
+    1500
+    >>> p.region_pool_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: region_pool_capacity must be > 0
+
+    >>> p.buffering_capacity
+    10
+    
+    >>> p.buffering_capacity = 100
+    >>> p.buffering_capacity = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: buffering_capacity must be > 0    
+
+    >>> p.tight_mbr
+    True
+    
+    >>> p.tight_mbr = 100
+    >>> p.tight_mbr
+    True
+    
+    >>> p.tight_mbr = False
+    >>> p.tight_mbr
+    False
+
+    >>> p.overwrite
+    True
+    
+    >>> p.overwrite = 100
+    >>> p.overwrite
+    True
+    
+    >>> p.overwrite = False
+    >>> p.overwrite
+    False
+
+    >>> p.near_minimum_overlap_factor
+    32
+    
+    >>> p.near_minimum_overlap_factor = 100
+    >>> p.near_minimum_overlap_factor = -4321
+    Traceback (most recent call last):
+    ...
+    RTreeError: near_minimum_overlap_factor must be > 0  
+
+    >>> p.writethrough
+    False
+    
+    >>> p.writethrough = 100
+    >>> p.writethrough
+    True
+    
+    >>> p.writethrough = False
+    >>> p.writethrough
+    False    
+
+    >>> '%.2f' % p.fill_factor
+    '0.70'
+
+    >>> p.fill_factor = 0.99
+    >>> '%.2f' % p.fill_factor
+    '0.99'
+
+    >>> '%.2f' % p.split_distribution_factor
+    '0.40'
+
+    >>> p.tpr_horizon
+    20.0
+    
+    >>> '%.2f' % p.reinsert_factor
+    '0.30'
+
+    >>> p.filename
+    ''
+    
+    >>> p.filename = 'testing123testing'
+    >>> p.filename
+    'testing123testing'
+    
+    >>> p.dat_extension
+    'dat'
+
+    >>> p.dat_extension = 'data'
+    >>> p.dat_extension
+    'data'
+    
+    >>> p.idx_extension
+    'idx'
+    >>> p.idx_extension = 'index'
+    >>> p.idx_extension
+    'index'
+    
+    >>> p.index_id
+    Traceback (most recent call last):
+    ...
+    RTreeError: Error in "IndexProperty_GetIndexID": Property IndexIdentifier was empty
+    >>> p.index_id = -420
+    >>> int(p.index_id)
+    -420
diff --git a/tests/rungrind.dist b/tests/rungrind.dist
new file mode 100644
index 0000000..c1d7d4e
--- /dev/null
+++ b/tests/rungrind.dist
@@ -0,0 +1,3 @@
+#!/bin/sh
+valgrind --tool=memcheck --leak-check=yes --suppressions=/home/sean/Projects/valgrind-python.supp python test_doctests.py
+
diff --git a/tests/test_customStorage.txt b/tests/test_customStorage.txt
new file mode 100644
index 0000000..75cbd38
--- /dev/null
+++ b/tests/test_customStorage.txt
@@ -0,0 +1,157 @@
+
+Shows how to create custom storage backend.
+
+Derive your custom storage from rtree.index.CustomStorage and override the
+methods shown in this example.
+You can also derive from rtree.index.CustomStorageBase to get at the raw C buffers
+if you need the extra speed and want to avoid translating from/to python strings.
+
+The essential methods are the load/store/deleteByteArray. The rtree library calls
+them whenever it needs to access the data in any way.
+
+Example storage which maps the page (ids) to the page data.
+
+   >>> from rtree.index import Rtree, CustomStorage, Property
+   
+   >>> class DictStorage(CustomStorage):
+   ...     """ A simple storage which saves the pages in a python dictionary """
+   ...     def __init__(self):
+   ...         CustomStorage.__init__( self )
+   ...         self.clear()
+   ... 
+   ...     def create(self, returnError):
+   ...         """ Called when the storage is created on the C side """
+   ... 
+   ...     def destroy(self, returnError):
+   ...         """ Called when the storage is destroyed on the C side """
+   ... 
+   ...     def clear(self):
+   ...         """ Clear all our data """   
+   ...         self.dict = {}
+   ... 
+   ...     def loadByteArray(self, page, returnError):
+   ...         """ Returns the data for page or returns an error """   
+   ...         try:
+   ...             return self.dict[page]
+   ...         except KeyError:
+   ...             returnError.contents.value = self.InvalidPageError
+   ... 
+   ...     def storeByteArray(self, page, data, returnError):
+   ...         """ Stores the data for page """   
+   ...         if page == self.NewPage:
+   ...             newPageId = len(self.dict)
+   ...             self.dict[newPageId] = data
+   ...             return newPageId
+   ...         else:
+   ...             if page not in self.dict:
   ...                 returnError.contents.value = self.InvalidPageError
+   ...                 return 0
+   ...             self.dict[page] = data
+   ...             return page
+   ... 
+   ...     def deleteByteArray(self, page, returnError):
+   ...         """ Deletes a page """   
+   ...         try:
+   ...             del self.dict[page]
+   ...         except KeyError:
+   ...             returnError.contents.value = self.InvalidPageError
+   ... 
+   ...     hasData = property( lambda self: bool(self.dict) )
   ...     """ Returns True if the storage contains some data """
+
+
+Now let's test drive our custom storage.
+
+First let's define the basic properties we will use for all rtrees:
+
+    >>> settings = Property()
+    >>> settings.writethrough = True
+    >>> settings.buffering_capacity = 1
+
+Notice that there is a small in-memory buffer by default. We effectively disable
+it here so our storage directly receives any load/store/delete calls.
+This is not necessary in general and can hamper performance; we just use it here
+for illustrative and testing purposes.
+
+Let's start with a basic test:
+
+Create the storage and hook it up with a new rtree:
+
+    >>> storage = DictStorage()
+    >>> r = Rtree( storage, properties = settings )
+
+Interestingly enough, if we take a look at the contents of our storage now, we
+can see the Rtree has already written two pages to it: one for the header and
+one for the index.
+
+    >>> state1 = storage.dict.copy()
+    >>> list(state1.keys())
+    [0, 1]
+    
+Let's add an item:
+
+    >>> r.add(123, (0, 0, 1, 1))
+
+Make sure the data in the storage before and after the addition of the new item
+is different:
+
+    >>> state2 = storage.dict.copy()
+    >>> state1 != state2
+    True
+
+Now perform a few queries and assure the tree is still valid:
+
+    >>> item = list(r.nearest((0, 0), 1, objects=True))[0]
+    >>> int(item.id)
+    123
+    >>> r.valid()
+    True
+    
+Check if the stored data is a byte string
+
+    >>> isinstance(list(storage.dict.values())[0], bytes)
+    True
+    
+Delete an item
+
+    >>> r.delete(123, (0, 0, 1, 1))
+    >>> r.valid()
+    True
+    
+Just for reference show how to flush the internal buffers (e.g. when
+properties.buffering_capacity is > 1)
+
+    >>> r.clearBuffer()
+    >>> r.valid()
+    True
+
+Let's get rid of the tree, we're done with it
+    
+    >>> del r
+
+Show how to empty the storage
+    
+    >>> storage.clear()
+    >>> storage.hasData
+    False
+    >>> del storage
+
+    
+Ok, let's create another small test. This time we'll test reopening our custom
+storage. This is useful for persistent storages.
+
+First create a storage and put some data into it:
+
+    >>> storage = DictStorage()
+    >>> r1 = Rtree( storage, properties = settings, overwrite = True )
+    >>> r1.add(555, (2, 2))
+    >>> del r1
+    >>> storage.hasData
+    True
+    
+Then reopen the storage with a new tree and see if the data is still there
+
+    >>> r2 = Rtree( storage, properties = settings, overwrite = False )
+    >>> r2.count( (0,0,10,10) ) == 1
+    True
+    >>> del r2
diff --git a/tests/test_doctests.py b/tests/test_doctests.py
new file mode 100644
index 0000000..fc2b53a
--- /dev/null
+++ b/tests/test_doctests.py
@@ -0,0 +1,46 @@
+import doctest
+import unittest
+import glob
+import os
+
+#from zope.testing import doctest
+from rtree.index import major_version, minor_version, patch_version
+
+from .data import boxes15, boxes3, points
+
+optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
+               doctest.NORMALIZE_WHITESPACE |
+               doctest.ELLIPSIS)
+
+def list_doctests():
+    # Collect every doctest file (*.txt) that lives next to this module.
+    # Skip the custom storage test unless we have libspatialindex 1.8+.
+    # NOTE(review): the guard assumes the "too old" versions all have
+    # major_version 0 or 1; any major_version >= 2 always keeps the test.
+    return [filename
+            for filename
+            in glob.glob(os.path.join(os.path.dirname(__file__), '*.txt'))
+            if not (
+                filename.endswith('customStorage.txt') 
+                and major_version < 2 and minor_version < 8)]
+
+def open_file(filename, mode='r'):
+    """Helper function to open files from within the tests package."""
+    # Resolve the path relative to this package's directory so the doctests
+    # work regardless of the current working directory.
+    return open(os.path.join(os.path.dirname(__file__), filename), mode)
+
+def setUp(test):
+    # Doctest setUp hook: inject the shared fixtures (the file-opening
+    # helper and the sample datasets imported from .data) into every
+    # doctest's global namespace.
+    test.globs.update(dict(
+            open_file = open_file,
+            boxes15=boxes15,
+            boxes3=boxes3,
+            points=points
+            ))
+
+def test_suite():
+    # Build one DocFileSuite per doctest file. Sorting the filenames keeps
+    # the run order deterministic; in particular z_cleanup.txt sorts last,
+    # so the index files created by earlier doctests get removed at the end.
+    return unittest.TestSuite(
+        [doctest.DocFileSuite(os.path.basename(filename),
+                              optionflags=optionflags,
+                              setUp=setUp)
+         for filename
+         in sorted(list_doctests())])
+
+if __name__ == "__main__":
+    # Allow running the doctest suite directly: python test_doctests.py
+    runner = unittest.TextTestRunner()
+    runner.run(test_suite())
diff --git a/tests/test_index.py b/tests/test_index.py
new file mode 100644
index 0000000..faacd16
--- /dev/null
+++ b/tests/test_index.py
@@ -0,0 +1,19 @@
+from rtree import index
+
+from .data import boxes15
+
+def boxes15_stream(interleaved=True):
+   # Yield (id, coordinates, object) triples for every box in the boxes15
+   # sample data, in the coordinate order the index expects:
+   # interleaved -> (minx, miny, maxx, maxy); else (minx, maxx, miny, maxy).
+   for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
+       if interleaved:
+           yield (i, (minx, miny, maxx, maxy), 42)
+       else:
+           yield (i, (minx, maxx, miny, maxy), 42)
+
+
+def test_rtree_constructor_stream_input():
+    # The Rtree constructor must accept a generator of (id, coords, obj)
+    # tuples and bulk-load it; verify a window query returns the expected ids.
+    p = index.Property()
+    sindex = index.Rtree(boxes15_stream(), properties=p)
+
+    bounds = (0, 0, 60, 60)
+    hits = list(sindex.intersection(bounds))
+    assert sorted(hits) == [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
diff --git a/tests/test_misc.txt b/tests/test_misc.txt
new file mode 100644
index 0000000..fc02bac
--- /dev/null
+++ b/tests/test_misc.txt
@@ -0,0 +1,42 @@
+
+Make sure a file-based index is overwritable.
+
+    >>> from rtree.index import Rtree
+    >>> r = Rtree('overwriteme')
+    >>> del r
+    >>> r = Rtree('overwriteme', overwrite=True)
+
+
+the default serializer is pickle, can use any by overriding dumps, loads
+
+    >>> r = Rtree()
+    >>> some_data = {"a": 22, "b": [1, "ccc"]}
+    >>> try:
+    ...     import simplejson
+    ...     r.dumps = simplejson.dumps
+    ...     r.loads = simplejson.loads
+    ...     r.add(0, (0, 0, 1, 1), some_data)
+    ...     list(r.nearest((0, 0), 1, objects="raw"))[0] == some_data
+    ... except ImportError:
+    ...     # "no import, failed"
+    ...     True
+    True
+
+
+    >>> r = Rtree()
+    >>> r.add(123, (0, 0, 1, 1))
+    >>> item = list(r.nearest((0, 0), 1, objects=True))[0]
+    >>> item.id
+    123
+
+    >>> r.valid()
+    True
+
+test UTF-8 filenames
+
+    >>> f = u'gilename\u4500abc'
+
+    >>> r = Rtree(f)
+    >>> r.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+
+    >>> del r
diff --git a/tests/z_cleanup.txt b/tests/z_cleanup.txt
new file mode 100644
index 0000000..5af62a9
--- /dev/null
+++ b/tests/z_cleanup.txt
@@ -0,0 +1,18 @@
+    >>> from rtree import core
+    >>> del core.rt
+    >>> files = ['defaultidx.dat','defaultidx.idx',
+    ...          'pagesize3.dat','pagesize3.idx',
+    ...          'testing.dat','testing.idx',
+    ...          'custom.data','custom.index',
+    ...          'streamed.idx','streamed.dat',
+    ...          'gilename䔀abc.dat','gilename䔀abc.idx',
+    ...          'overwriteme.idx', 'overwriteme.dat']
+    >>> import os
+    >>> import time
+    >>> for f in files:
+    ...     try:
+    ...         os.remove(f)
+    ...     except OSError:
+    ...         time.sleep(0.1)
+    ...         os.remove(f)
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/python-rtree.git



More information about the Pkg-grass-devel mailing list