[python-rtree] 04/08: Imported Upstream version 0.8.3+ds

Bas Couwenberg sebastic at debian.org
Thu Dec 15 07:00:59 UTC 2016


This is an automated email from the git hooks/post-receive script.

sebastic pushed a commit to branch master
in repository python-rtree.

commit 3ded805f0e1bdb48d0bbbb64bdc8843c7520304e
Author: Bas Couwenberg <sebastic at xs4all.nl>
Date:   Thu Dec 15 07:31:51 2016 +0100

    Imported Upstream version 0.8.3+ds
---
 MANIFEST.in                                     |   2 +-
 PKG-INFO                                        |   2 +-
 README.md                                       |   7 +
 rtree/__init__.py                               |   2 +-
 rtree/core.py                                   | 227 +++---
 rtree/index.py                                  | 973 ++++++++++++++++--------
 setup.py                                        |  99 +--
 tests/benchmarks.py                             |  65 +-
 tests/data.py                                   |  12 +-
 tests/off/BoundsCheck.txt                       |  26 -
 tests/off/index.txt                             | 308 --------
 tests/off/properties.txt                        | 257 -------
 tests/off/test_customStorage.txt                | 157 ----
 tests/off/test_misc.txt                         |  42 -
 tests/stream-check.py                           |  81 ++
 tests/{BoundsCheck.txt => test_bounds.txt}      |  10 +-
 tests/test_doctests.py                          |  22 +-
 tests/test_index.py                             |  28 +-
 tests/{index.txt => test_index_doctests.txt}    | 123 +--
 tests/test_pickle.py                            |  20 +
 tests/{properties.txt => test_properties.txt}   | 127 ++--
 tests/{off/z_cleanup.txt => test_z_cleanup.txt} |   0
 tests/z_cleanup.txt                             |  18 -
 23 files changed, 1182 insertions(+), 1426 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 6e71403..d0411ea 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README.txt
+include README.md
 include MANIFEST.in
 include DEPENDENCIES.txt
 include FAQ.txt
diff --git a/PKG-INFO b/PKG-INFO
index 07f8041..7b9f902 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: Rtree
-Version: 0.8.2
+Version: 0.8.3
 Summary: R-Tree spatial index for Python GIS
 Home-page: http://toblerity.github.com/rtree/
 Author: Howard Butler
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..ed7deb3
--- /dev/null
+++ b/README.md
@@ -0,0 +1,7 @@
+Rtree
+=====
+
+[![Build Status](https://travis-ci.org/Toblerity/rtree.svg)](https://travis-ci.org/Toblerity/rtree)
+
+Python bindings for libspatialindex 1.7+.
+
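
The new README stays terse. A minimal usage sketch of the bindings it advertises, assuming Rtree and libspatialindex are installed (illustrative, not part of the upstream sources):

from rtree import index

idx = index.Index()                                  # in-memory 2D index
idx.insert(0, (0.0, 0.0, 1.0, 1.0), obj="left")      # interleaved: xmin, ymin, xmax, ymax
idx.insert(1, (2.0, 2.0, 3.0, 3.0), obj="right")

print(list(idx.intersection((0.5, 0.5, 2.5, 2.5))))  # ids of the intersecting boxes
print([hit.object for hit in idx.nearest((0.0, 0.0), 1, objects=True)])
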
diff --git a/rtree/__init__.py b/rtree/__init__.py
index c62fc81..dfa8349 100644
--- a/rtree/__init__.py
+++ b/rtree/__init__.py
@@ -2,4 +2,4 @@ from .index import Rtree
 
 from .core import rt
 
-__version__ = '0.8.2'
+__version__ = '0.8.3'
diff --git a/rtree/core.py b/rtree/core.py
index 38179e5..80ce6ba 100644
--- a/rtree/core.py
+++ b/rtree/core.py
@@ -1,68 +1,79 @@
-import atexit, os, re, sys
+import os
 import ctypes
 from ctypes.util import find_library
 
-import ctypes
 
 class RTreeError(Exception):
     "RTree exception, indicates a RTree-related error."
     pass
 
+
 def check_return(result, func, cargs):
     "Error checking for Error calls"
     if result != 0:
-        msg = 'LASError in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg() )
+        s = rt.Error_GetLastErrorMsg().decode()
+        msg = 'LASError in "%s": %s' % \
+            (func.__name__, s)
         rt.Error_Reset()
         raise RTreeError(msg)
     return True
 
+
 def check_void(result, func, cargs):
     "Error checking for void* returns"
     if not bool(result):
-        msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg() )
+        s = rt.Error_GetLastErrorMsg().decode()
+        msg = 'Error in "%s": %s' % (func.__name__, s)
         rt.Error_Reset()
         raise RTreeError(msg)
     return result
 
+
 def check_void_done(result, func, cargs):
     "Error checking for void* returns that might be empty with no error"
     if rt.Error_GetErrorCount():
-        msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg() )
+        s = rt.Error_GetLastErrorMsg().decode()
+        msg = 'Error in "%s": %s' % (func.__name__, s)
         rt.Error_Reset()
         raise RTreeError(msg)
-        
     return result
 
+
 def check_value(result, func, cargs):
     "Error checking proper value returns"
     count = rt.Error_GetErrorCount()
     if count != 0:
-        msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg() )
+        s = rt.Error_GetLastErrorMsg().decode()
+        msg = 'Error in "%s": %s' % (func.__name__, s)
         rt.Error_Reset()
         raise RTreeError(msg)
     return result
 
+
 def check_value_free(result, func, cargs):
     "Error checking proper value returns"
     count = rt.Error_GetErrorCount()
     if count != 0:
-        msg = 'Error in "%s": %s' % (func.__name__, rt.Error_GetLastErrorMsg() )
+        s = rt.Error_GetLastErrorMsg().decode()
+        msg = 'Error in "%s": %s' % (func.__name__, s)
         rt.Error_Reset()
         raise RTreeError(msg)
     return result
 
+
 def free_returned_char_p(result, func, cargs):
     retvalue = ctypes.string_at(result)
     p = ctypes.cast(result, ctypes.POINTER(ctypes.c_void_p))
     rt.Index_Free(p)
     return retvalue
-    
+
+
 def free_error_msg_ptr(result, func, cargs):
     retvalue = ctypes.string_at(result)
     p = ctypes.cast(result, ctypes.POINTER(ctypes.c_void_p))
     rt.Index_Free(p)
     return retvalue
-    
+
 
 if os.name == 'nt':
 
@@ -77,7 +88,7 @@ if os.name == 'nt':
             dllpaths = (os.path.abspath(os.path.dirname(__file__)),
                         ) + dllpaths
         except NameError:
-            pass # no __file__ attribute on PyPy and some frozen distributions
+            pass  # no __file__ attribute on PyPy and some frozen distributions
         for path in dllpaths:
             if path:
                 # temporarily add the path to the PATH environment variable
@@ -96,13 +107,23 @@ if os.name == 'nt':
                     os.environ['PATH'] = oldenv
         return None
 
-    rt = _load_library('spatialindex_c.dll', ctypes.cdll.LoadLibrary)
+    if 'SPATIALINDEX_C_LIBRARY' in os.environ:
+        lib_path, lib_name = os.path.split(os.environ['SPATIALINDEX_C_LIBRARY'])
+        rt = _load_library(lib_name, ctypes.cdll.LoadLibrary, (lib_path,))
+    else:
+        rt = _load_library('spatialindex_c.dll', ctypes.cdll.LoadLibrary)
     if not rt:
         raise OSError("could not find or load spatialindex_c.dll")
 
 elif os.name == 'posix':
-    platform = os.uname()[0]
-    lib_name = find_library('spatialindex_c')
+    if 'SPATIALINDEX_C_LIBRARY' in os.environ:
+        lib_name = os.environ['SPATIALINDEX_C_LIBRARY']
+    else:
+        lib_name = find_library('spatialindex_c')
+
+    if lib_name is None:
+        raise OSError("Could not find libspatialindex_c library file")
+
     rt = ctypes.CDLL(lib_name)
 else:
     raise RTreeError('Unsupported OS "%s"' % os.name)
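
The new SPATIALINDEX_C_LIBRARY hook lets users point the bindings at a specific library file instead of relying on ctypes.util.find_library. A sketch of how it is meant to be used (the path shown is an example, not something the package ships); the variable must be set before rtree is first imported:

import os
os.environ['SPATIALINDEX_C_LIBRARY'] = '/opt/spatialindex/lib/libspatialindex_c.so'  # example path

import rtree.core          # picks up the override on both the POSIX and Windows branches
print(rtree.core.rt)
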
@@ -117,7 +138,7 @@ rt.Error_GetLastErrorMethod.restype = ctypes.POINTER(ctypes.c_char)
 rt.Error_GetLastErrorMethod.errcheck = free_returned_char_p
 
 rt.Error_GetErrorCount.argtypes = []
-rt.Error_GetErrorCount.restype=ctypes.c_int
+rt.Error_GetErrorCount.restype = ctypes.c_int
 
 rt.Error_Reset.argtypes = []
 rt.Error_Reset.restype = None
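
The .decode() calls threaded through the error checkers above exist because on Python 3 ctypes hands these messages back as bytes, not str. A standalone illustration of that behaviour, assuming a POSIX libc (unrelated to libspatialindex itself):

import ctypes

libc = ctypes.CDLL(None)               # POSIX: handle to the already-loaded C library
libc.strerror.restype = ctypes.c_char_p

raw = libc.strerror(2)                 # bytes on Python 3, e.g. b'No such file or directory'
print('undecoded: %s' % raw)           # renders as b'...'
print('decoded:   %s' % raw.decode())  # renders as plain text
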
@@ -126,15 +147,15 @@ rt.Index_Create.argtypes = [ctypes.c_void_p]
 rt.Index_Create.restype = ctypes.c_void_p
 rt.Index_Create.errcheck = check_void
 
-NEXTFUNC = ctypes.CFUNCTYPE(ctypes.c_int, 
+NEXTFUNC = ctypes.CFUNCTYPE(ctypes.c_int,
                             ctypes.POINTER(ctypes.c_int64),
                             ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                             ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
                             ctypes.POINTER(ctypes.c_uint32),
                             ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),
-                            ctypes.POINTER(ctypes.c_size_t))
+                            ctypes.POINTER(ctypes.c_uint32))
 
-rt.Index_CreateWithStream.argtypes = [ctypes.c_void_p, NEXTFUNC] 
+rt.Index_CreateWithStream.argtypes = [ctypes.c_void_p, NEXTFUNC]
 rt.Index_CreateWithStream.restype = ctypes.c_void_p
 rt.Index_CreateWithStream.errcheck = check_void
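
NEXTFUNC is the ctypes callback type used for bulk loading: libspatialindex calls back into Python to pull the next item, and the type of the final out-parameter (the data length) changes here from size_t to uint32. The callback mechanics themselves are plain ctypes.CFUNCTYPE; the classic qsort example, assuming a POSIX libc, shows them in isolation:

import ctypes

libc = ctypes.CDLL(None)                      # POSIX: handle to the C library
CMPFUNC = ctypes.CFUNCTYPE(ctypes.c_int,
                           ctypes.POINTER(ctypes.c_int),
                           ctypes.POINTER(ctypes.c_int))

def py_cmp(a, b):
    return a[0] - b[0]                        # called from C during the sort

values = (ctypes.c_int * 5)(5, 1, 7, 33, 99)
libc.qsort.restype = None
libc.qsort(values, len(values), ctypes.sizeof(ctypes.c_int), CMPFUNC(py_cmp))
print(list(values))                           # [1, 5, 7, 33, 99]
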
 
@@ -146,28 +167,28 @@ rt.Index_GetProperties.argtypes = [ctypes.c_void_p]
 rt.Index_GetProperties.restype = ctypes.c_void_p
 rt.Index_GetProperties.errcheck = check_void
 
-rt.Index_DeleteData.argtypes = [ctypes.c_void_p, 
-                                ctypes.c_int64, 
-                                ctypes.POINTER(ctypes.c_double), 
-                                ctypes.POINTER(ctypes.c_double), 
+rt.Index_DeleteData.argtypes = [ctypes.c_void_p,
+                                ctypes.c_int64,
+                                ctypes.POINTER(ctypes.c_double),
+                                ctypes.POINTER(ctypes.c_double),
                                 ctypes.c_uint32]
 rt.Index_DeleteData.restype = ctypes.c_int
 rt.Index_DeleteData.errcheck = check_return
 
-rt.Index_InsertData.argtypes = [ctypes.c_void_p, 
-                                ctypes.c_int64, 
-                                ctypes.POINTER(ctypes.c_double), 
-                                ctypes.POINTER(ctypes.c_double), 
-                                ctypes.c_uint32, 
-                                ctypes.POINTER(ctypes.c_ubyte), 
+rt.Index_InsertData.argtypes = [ctypes.c_void_p,
+                                ctypes.c_int64,
+                                ctypes.POINTER(ctypes.c_double),
+                                ctypes.POINTER(ctypes.c_double),
+                                ctypes.c_uint32,
+                                ctypes.POINTER(ctypes.c_ubyte),
                                 ctypes.c_uint32]
 rt.Index_InsertData.restype = ctypes.c_int
 rt.Index_InsertData.errcheck = check_return
 
-rt.Index_GetBounds.argtypes = [ ctypes.c_void_p,
-                                ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
-                                ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
-                                ctypes.POINTER(ctypes.c_uint32)]
+rt.Index_GetBounds.argtypes = [ctypes.c_void_p,
+                               ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
+                               ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
+                               ctypes.POINTER(ctypes.c_uint32)]
 rt.Index_GetBounds.restype = ctypes.c_int
 rt.Index_GetBounds.errcheck = check_value
 
@@ -176,59 +197,67 @@ rt.Index_IsValid.restype = ctypes.c_int
 rt.Index_IsValid.errcheck = check_value
 
 rt.Index_Intersects_obj.argtypes = [ctypes.c_void_p,
-                                    ctypes.POINTER(ctypes.c_double), 
-                                    ctypes.POINTER(ctypes.c_double), 
-                                    ctypes.c_uint32, 
-                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)),
+                                    ctypes.POINTER(ctypes.c_double),
+                                    ctypes.POINTER(ctypes.c_double),
+                                    ctypes.c_uint32,
+                                    ctypes.POINTER(
+                                        ctypes.POINTER(ctypes.c_void_p)),
                                     ctypes.POINTER(ctypes.c_uint64)]
 rt.Index_Intersects_obj.restype = ctypes.c_int
 rt.Index_Intersects_obj.errcheck = check_return
 
 rt.Index_Intersects_id.argtypes = [ctypes.c_void_p,
-                                    ctypes.POINTER(ctypes.c_double), 
-                                    ctypes.POINTER(ctypes.c_double), 
-                                    ctypes.c_uint32, 
-                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
-                                    ctypes.POINTER(ctypes.c_uint64)]
+                                   ctypes.POINTER(ctypes.c_double),
+                                   ctypes.POINTER(ctypes.c_double),
+                                   ctypes.c_uint32,
+                                   ctypes.POINTER(
+                                       ctypes.POINTER(ctypes.c_int64)),
+                                   ctypes.POINTER(ctypes.c_uint64)]
 rt.Index_Intersects_id.restype = ctypes.c_int
 rt.Index_Intersects_id.errcheck = check_return
 
-rt.Index_Intersects_count.argtypes = [  ctypes.c_void_p,
-                                        ctypes.POINTER(ctypes.c_double),
-                                        ctypes.POINTER(ctypes.c_double),
-                                        ctypes.c_uint32,
-                                        ctypes.POINTER(ctypes.c_uint64)]
-
-rt.Index_NearestNeighbors_obj.argtypes = [  ctypes.c_void_p,
-                                            ctypes.POINTER(ctypes.c_double), 
-                                            ctypes.POINTER(ctypes.c_double), 
-                                            ctypes.c_uint32, 
-                                            ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)),
-                                            ctypes.POINTER(ctypes.c_uint64)]
+rt.Index_Intersects_count.argtypes = [ctypes.c_void_p,
+                                      ctypes.POINTER(ctypes.c_double),
+                                      ctypes.POINTER(ctypes.c_double),
+                                      ctypes.c_uint32,
+                                      ctypes.POINTER(ctypes.c_uint64)]
+
+rt.Index_NearestNeighbors_obj.argtypes = [ctypes.c_void_p,
+                                          ctypes.POINTER(ctypes.c_double),
+                                          ctypes.POINTER(ctypes.c_double),
+                                          ctypes.c_uint32,
+                                          ctypes.POINTER(
+                                              ctypes.POINTER(ctypes.c_void_p)),
+                                          ctypes.POINTER(ctypes.c_uint64)]
 rt.Index_NearestNeighbors_obj.restype = ctypes.c_int
 rt.Index_NearestNeighbors_obj.errcheck = check_return
 
-rt.Index_NearestNeighbors_id.argtypes = [  ctypes.c_void_p,
-                                            ctypes.POINTER(ctypes.c_double), 
-                                            ctypes.POINTER(ctypes.c_double), 
-                                            ctypes.c_uint32, 
-                                            ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
-                                            ctypes.POINTER(ctypes.c_uint64)]
+rt.Index_NearestNeighbors_id.argtypes = [ctypes.c_void_p,
+                                         ctypes.POINTER(ctypes.c_double),
+                                         ctypes.POINTER(ctypes.c_double),
+                                         ctypes.c_uint32,
+                                         ctypes.POINTER(
+                                             ctypes.POINTER(ctypes.c_int64)),
+                                         ctypes.POINTER(ctypes.c_uint64)]
 rt.Index_NearestNeighbors_id.restype = ctypes.c_int
 rt.Index_NearestNeighbors_id.errcheck = check_return
 
-rt.Index_GetLeaves.argtypes = [ ctypes.c_void_p,
-                                ctypes.POINTER(ctypes.c_uint32), 
-                                ctypes.POINTER(ctypes.POINTER(ctypes.c_uint32)), 
-                                ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)), 
-                                ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_int64))),
-                                ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
-                                ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
-                                ctypes.POINTER(ctypes.c_uint32)]
+rt.Index_GetLeaves.argtypes = [ctypes.c_void_p,
+                               ctypes.POINTER(ctypes.c_uint32),
+                               ctypes.POINTER(ctypes.POINTER(ctypes.c_uint32)),
+                               ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
+                               ctypes.POINTER(ctypes.POINTER(
+                                   ctypes.POINTER(ctypes.c_int64))),
+                               ctypes.POINTER(ctypes.POINTER(
+                                   ctypes.POINTER(ctypes.c_double))),
+                               ctypes.POINTER(ctypes.POINTER(
+                                   ctypes.POINTER(ctypes.c_double))),
+                               ctypes.POINTER(ctypes.c_uint32)]
 rt.Index_GetLeaves.restype = ctypes.c_int
 rt.Index_GetLeaves.errcheck = check_return
 
-rt.Index_DestroyObjResults.argtypes = [ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.c_uint32]
+rt.Index_DestroyObjResults.argtypes = \
+    [ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.c_uint32]
 rt.Index_DestroyObjResults.restype = None
 rt.Index_DestroyObjResults.errcheck = check_void_done
 
@@ -243,16 +272,19 @@ rt.IndexItem_Destroy.argtypes = [ctypes.c_void_p]
 rt.IndexItem_Destroy.restype = None
 rt.IndexItem_Destroy.errcheck = check_void_done
 
-rt.IndexItem_GetData.argtypes = [   ctypes.c_void_p, 
-                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), 
-                                    ctypes.POINTER(ctypes.c_uint64)]
+rt.IndexItem_GetData.argtypes = [ctypes.c_void_p,
+                                 ctypes.POINTER(
+                                     ctypes.POINTER(ctypes.c_ubyte)),
+                                 ctypes.POINTER(ctypes.c_uint64)]
 rt.IndexItem_GetData.restype = ctypes.c_int
 rt.IndexItem_GetData.errcheck = check_value
 
-rt.IndexItem_GetBounds.argtypes = [ ctypes.c_void_p,
-                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
-                                    ctypes.POINTER(ctypes.POINTER(ctypes.c_double)),
-                                    ctypes.POINTER(ctypes.c_uint32)]
+rt.IndexItem_GetBounds.argtypes = [ctypes.c_void_p,
+                                   ctypes.POINTER(
+                                       ctypes.POINTER(ctypes.c_double)),
+                                   ctypes.POINTER(
+                                       ctypes.POINTER(ctypes.c_double)),
+                                   ctypes.POINTER(ctypes.c_uint32)]
 rt.IndexItem_GetBounds.restype = ctypes.c_int
 rt.IndexItem_GetBounds.errcheck = check_value
 
@@ -324,7 +356,8 @@ rt.IndexProperty_GetPagesize.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetPagesize.restype = ctypes.c_int
 rt.IndexProperty_GetPagesize.errcheck = check_value
 
-rt.IndexProperty_SetLeafPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetLeafPoolCapacity.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetLeafPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_SetLeafPoolCapacity.errcheck = check_return
 
@@ -332,7 +365,8 @@ rt.IndexProperty_GetLeafPoolCapacity.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetLeafPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_GetLeafPoolCapacity.errcheck = check_value
 
-rt.IndexProperty_SetIndexPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetIndexPoolCapacity.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetIndexPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_SetIndexPoolCapacity.errcheck = check_return
 
@@ -340,7 +374,8 @@ rt.IndexProperty_GetIndexPoolCapacity.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetIndexPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_GetIndexPoolCapacity.errcheck = check_value
 
-rt.IndexProperty_SetRegionPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetRegionPoolCapacity.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetRegionPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_SetRegionPoolCapacity.errcheck = check_return
 
@@ -348,7 +383,8 @@ rt.IndexProperty_GetRegionPoolCapacity.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetRegionPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_GetRegionPoolCapacity.errcheck = check_value
 
-rt.IndexProperty_SetPointPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetPointPoolCapacity.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetPointPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_SetPointPoolCapacity.errcheck = check_return
 
@@ -356,7 +392,8 @@ rt.IndexProperty_GetPointPoolCapacity.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetPointPoolCapacity.restype = ctypes.c_int
 rt.IndexProperty_GetPointPoolCapacity.errcheck = check_value
 
-rt.IndexProperty_SetBufferingCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetBufferingCapacity.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetBufferingCapacity.restype = ctypes.c_int
 rt.IndexProperty_SetBufferingCapacity.errcheck = check_return
 
@@ -364,7 +401,8 @@ rt.IndexProperty_GetBufferingCapacity.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetBufferingCapacity.restype = ctypes.c_int
 rt.IndexProperty_GetBufferingCapacity.errcheck = check_value
 
-rt.IndexProperty_SetEnsureTightMBRs.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetEnsureTightMBRs.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetEnsureTightMBRs.restype = ctypes.c_int
 rt.IndexProperty_SetEnsureTightMBRs.errcheck = check_return
 
@@ -380,7 +418,8 @@ rt.IndexProperty_GetOverwrite.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetOverwrite.restype = ctypes.c_int
 rt.IndexProperty_GetOverwrite.errcheck = check_value
 
-rt.IndexProperty_SetNearMinimumOverlapFactor.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetNearMinimumOverlapFactor.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetNearMinimumOverlapFactor.restype = ctypes.c_int
 rt.IndexProperty_SetNearMinimumOverlapFactor.errcheck = check_return
 
@@ -404,7 +443,8 @@ rt.IndexProperty_GetFillFactor.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetFillFactor.restype = ctypes.c_double
 rt.IndexProperty_GetFillFactor.errcheck = check_value
 
-rt.IndexProperty_SetSplitDistributionFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
+rt.IndexProperty_SetSplitDistributionFactor.argtypes = \
+    [ctypes.c_void_p, ctypes.c_double]
 rt.IndexProperty_SetSplitDistributionFactor.restype = ctypes.c_int
 rt.IndexProperty_SetSplitDistributionFactor.errcheck = check_return
 
@@ -420,7 +460,8 @@ rt.IndexProperty_GetTPRHorizon.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetTPRHorizon.restype = ctypes.c_double
 rt.IndexProperty_GetTPRHorizon.errcheck = check_value
 
-rt.IndexProperty_SetReinsertFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
+rt.IndexProperty_SetReinsertFactor.argtypes = \
+    [ctypes.c_void_p, ctypes.c_double]
 rt.IndexProperty_SetReinsertFactor.restype = ctypes.c_int
 rt.IndexProperty_SetReinsertFactor.errcheck = check_return
 
@@ -436,23 +477,28 @@ rt.IndexProperty_GetFileName.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetFileName.errcheck = free_returned_char_p
 rt.IndexProperty_GetFileName.restype = ctypes.POINTER(ctypes.c_char)
 
-rt.IndexProperty_SetFileNameExtensionDat.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
+rt.IndexProperty_SetFileNameExtensionDat.argtypes = \
+    [ctypes.c_void_p, ctypes.c_char_p]
 rt.IndexProperty_SetFileNameExtensionDat.restype = ctypes.c_int
 rt.IndexProperty_SetFileNameExtensionDat.errcheck = check_return
 
 rt.IndexProperty_GetFileNameExtensionDat.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetFileNameExtensionDat.errcheck = free_returned_char_p
-rt.IndexProperty_GetFileNameExtensionDat.restype = ctypes.POINTER(ctypes.c_char)
+rt.IndexProperty_GetFileNameExtensionDat.restype = \
+    ctypes.POINTER(ctypes.c_char)
 
-rt.IndexProperty_SetFileNameExtensionIdx.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
+rt.IndexProperty_SetFileNameExtensionIdx.argtypes = \
+    [ctypes.c_void_p, ctypes.c_char_p]
 rt.IndexProperty_SetFileNameExtensionIdx.restype = ctypes.c_int
 rt.IndexProperty_SetFileNameExtensionIdx.errcheck = check_return
 
 rt.IndexProperty_GetFileNameExtensionIdx.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetFileNameExtensionIdx.errcheck = free_returned_char_p
-rt.IndexProperty_GetFileNameExtensionIdx.restype = ctypes.POINTER(ctypes.c_char)
+rt.IndexProperty_GetFileNameExtensionIdx.restype = \
+    ctypes.POINTER(ctypes.c_char)
 
-rt.IndexProperty_SetCustomStorageCallbacksSize.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+rt.IndexProperty_SetCustomStorageCallbacksSize.argtypes = \
+    [ctypes.c_void_p, ctypes.c_uint32]
 rt.IndexProperty_SetCustomStorageCallbacksSize.restype = ctypes.c_int
 rt.IndexProperty_SetCustomStorageCallbacksSize.errcheck = check_return
 
@@ -460,7 +506,8 @@ rt.IndexProperty_GetCustomStorageCallbacksSize.argtypes = [ctypes.c_void_p]
 rt.IndexProperty_GetCustomStorageCallbacksSize.restype = ctypes.c_uint32
 rt.IndexProperty_GetCustomStorageCallbacksSize.errcheck = check_value
 
-rt.IndexProperty_SetCustomStorageCallbacks.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+rt.IndexProperty_SetCustomStorageCallbacks.argtypes = \
+    [ctypes.c_void_p, ctypes.c_void_p]
 rt.IndexProperty_SetCustomStorageCallbacks.restype = ctypes.c_int
 rt.IndexProperty_SetCustomStorageCallbacks.errcheck = check_return
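
All of these IndexProperty_* getters and setters back the rtree.index.Property class; tuning them from Python looks like this (a sketch, values purely illustrative):

from rtree import index

p = index.Property()
p.dimension = 3              # number of dimensions indexed
p.leaf_capacity = 50         # maximum entries per leaf node
p.fill_factor = 0.8          # target fill factor on node splits

idx = index.Index(properties=p, interleaved=False)
idx.insert(1, (0.0, 1.0, 0.0, 1.0, 0.0, 1.0))   # xmin, xmax, ymin, ymax, zmin, zmax
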
 
diff --git a/rtree/index.py b/rtree/index.py
index def4643..b0e1033 100644
--- a/rtree/index.py
+++ b/rtree/index.py
@@ -1,4 +1,3 @@
-
 import os
 import os.path
 import pprint
@@ -17,6 +16,13 @@ if sys.version_info[0] == 2:
 elif sys.version_info[0] == 3:
     string_types = str
 
+
+def string_output(s):
+    if sys.version_info[0] == 2:
+        return s
+    elif sys.version_info[0] == 3:
+        return s.decode('UTF-8')
+
 RT_Memory = 0
 RT_Disk = 1
 RT_Custom = 2
@@ -35,37 +41,42 @@ major_version, minor_version, patch_version = [
     int(t) for t in __c_api_version__.decode('utf-8').split('.')]
 
 if (major_version < 2 and minor_version < 7):
-    raise Exception("This version of Rtree requires libspatialindex 1.7.0 or greater")
+    raise Exception(
+        "This version of Rtree requires libspatialindex 1.7.0 or greater")
 
 __all__ = ['Rtree', 'Index', 'Property']
 
+
 def _get_bounds(handle, bounds_fn, interleaved):
     pp_mins = ctypes.pointer(ctypes.c_double())
     pp_maxs = ctypes.pointer(ctypes.c_double())
     dimension = ctypes.c_uint32(0)
 
-    bounds_fn(handle,
-            ctypes.byref(pp_mins),
-            ctypes.byref(pp_maxs),
-            ctypes.byref(dimension))
-    if (dimension.value == 0): return None
+    bounds_fn(
+        handle,
+        ctypes.byref(pp_mins),
+        ctypes.byref(pp_maxs),
+        ctypes.byref(dimension))
+    if (dimension.value == 0):
+        return None
 
-    mins = ctypes.cast(pp_mins,ctypes.POINTER(ctypes.c_double \
-                                                      * dimension.value))
-    maxs = ctypes.cast(pp_maxs,ctypes.POINTER(ctypes.c_double \
-                                                      * dimension.value))
+    mins = ctypes.cast(pp_mins, ctypes.POINTER(ctypes.c_double
+                                               * dimension.value))
+    maxs = ctypes.cast(pp_maxs, ctypes.POINTER(ctypes.c_double
+                                               * dimension.value))
 
     results = [mins.contents[i] for i in range(dimension.value)]
     results += [maxs.contents[i] for i in range(dimension.value)]
 
-    p_mins = ctypes.cast(mins,ctypes.POINTER(ctypes.c_double))
-    p_maxs = ctypes.cast(maxs,ctypes.POINTER(ctypes.c_double))
+    p_mins = ctypes.cast(mins, ctypes.POINTER(ctypes.c_double))
+    p_maxs = ctypes.cast(maxs, ctypes.POINTER(ctypes.c_double))
     core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
     core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))
-    if interleaved: # they want bbox order.
+    if interleaved:  # they want bbox order.
         return results
     return Index.deinterleave(results)
 
+
 def _get_data(handle):
     length = ctypes.c_uint64(0)
     d = ctypes.pointer(ctypes.c_uint8(0))
@@ -78,6 +89,7 @@ def _get_data(handle):
     core.rt.Index_Free(c)
     return s
 
+
 class Index(object):
     """An R-Tree, MVR-Tree, or TPR-Tree indexing object"""
 
@@ -96,17 +108,19 @@ class Index(object):
         :param stream:
             If the first argument in the constructor is not of type basestring,
             it is assumed to be an iterable stream of data that will raise a
-            StopIteration.  It must be in the form defined by the :attr:`interleaved`
-            attribute of the index.  The following example would assume
-            :attr:`interleaved` is False::
+            StopIteration.  It must be in the form defined by the
+            :attr:`interleaved` attribute of the index. The following example
+            would assume :attr:`interleaved` is False::
 
-            (id, (minx, maxx, miny, maxy, minz, maxz, ..., ..., mink, maxk), object)
+            (id, (minx, maxx, miny, maxy, minz, maxz, ..., ..., mink, maxk),
+             object)
 
-            The object can be None, but you must put a place holder of ``None`` there.
+            The object can be None, but you must put a place holder of
+            ``None`` there.
 
         :param storage:
-            If the first argument in the constructor is an instance of ICustomStorage
-            then the given custom storage is used.
+            If the first argument in the constructor is an instance of
+            ICustomStorage then the given custom storage is used.
 
         :param interleaved: True or False, defaults to True.
             This parameter determines the coordinate order for all methods that
@@ -121,11 +135,11 @@ class Index(object):
             other properties must be set on the object.
 
         .. warning::
-            The coordinate ordering for all functions are sensitive the the
+            The coordinate ordering for all functions is sensitive to the
             index's :attr:`interleaved` data member.  If :attr:`interleaved`
             is False, the coordinates must be in the form
-            [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If :attr:`interleaved`
-            is True, the coordinates must be in the form
+            [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If
+            :attr:`interleaved` is True, the coordinates must be in the form
             [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax].
 
         A basic example
@@ -140,7 +154,8 @@ class Index(object):
 
         Insert an item into the index::
 
-            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412,
+            41.7375853734), obj=42)
 
         Query::
 
@@ -150,11 +165,11 @@ class Index(object):
             ...         i.object
             ...         i.bbox
             42
-            [34.3776829412, 26.737585373400002, 49.3776829412, 41.737585373400002]
+            [34.3776829412, 26.737585373400002, 49.3776829412,
+            41.737585373400002]
 
 
-        Using custom serializers
-        ::
+        Using custom serializers::
 
             >>> import simplejson
             >>> class JSONIndex(index.Index):
@@ -162,7 +177,8 @@ class Index(object):
             ...     loads = staticmethod(simplejson.loads)
 
             >>> json_idx = JSONIndex()
-            >>> json_idx.insert(1, (0, 1, 0, 1), {"nums": [23, 45], "letters": "abcd"})
+            >>> json_idx.insert(1, (0, 1, 0, 1), {"nums": [23, 45],
+            "letters": "abcd"})
             >>> list(json_idx.nearest((0, 0), 1, objects="raw"))
             [{'letters': 'abcd', 'nums': [23, 45]}]
 
@@ -190,7 +206,6 @@ class Index(object):
             else:
                 stream = args[0]
 
-
         if basename:
             self.properties.storage = RT_Disk
             self.properties.filename = basename
@@ -199,12 +214,12 @@ class Index(object):
             f = basename + "." + self.properties.idx_extension
             p = os.path.abspath(f)
 
-
             # assume if the file exists, we're not going to overwrite it
             # unless the user explicitly set the property to do so
             if os.path.exists(p):
 
-                self.properties.overwrite = bool(kwargs.get('overwrite', False))
+                self.properties.overwrite = \
+                    bool(kwargs.get('overwrite', False))
 
                 # assume we're fetching the first index_id.  If the user
                 # set it, we'll fetch that one.
@@ -212,61 +227,52 @@ class Index(object):
                     try:
                         self.properties.index_id
                     except core.RTreeError:
-                        self.properties.index_id=1
+                        self.properties.index_id = 1
 
             d = os.path.dirname(p)
             if not os.access(d, os.W_OK):
-                message = "Unable to open file '%s' for index storage"%f
-                raise IOError(message)
+                message = "Unable to open file '%s' for index storage" % f
+                raise OSError(message)
         elif storage:
             if (major_version < 2 and minor_version < 8):
-                raise core.RTreeError("libspatialindex {0} does not support custom storage".format(__c_api_version__))
+                raise core.RTreeError(
+                    "libspatialindex {0} does not support custom storage"
+                    .format(__c_api_version__))
 
             self.properties.storage = RT_Custom
             if storage.hasData:
-                self.properties.overwrite = bool(kwargs.get('overwrite', False))
+                self.properties.overwrite = \
+                    bool(kwargs.get('overwrite', False))
                 if not self.properties.overwrite:
                     try:
                         self.properties.index_id
                     except core.RTreeError:
-                        self.properties.index_id=1
+                        self.properties.index_id = 1
                 else:
                     storage.clear()
             self.customstorage = storage
-            storage.registerCallbacks( self.properties )
+            storage.registerCallbacks(self.properties)
         else:
             self.properties.storage = RT_Memory
 
-        try:
-            self.properties.pagesize = int(kwargs['pagesize'])
-        except KeyError:
-            pass
+
+        ps = kwargs.get('pagesize', None)
+        if ps:
+            self.properties.pagesize = int(ps)
 
         if stream:
             self.handle = self._create_idx_from_stream(stream)
         else:
-            self.handle = core.rt.Index_Create(self.properties.handle)
-        self.owned = True
+            self.handle = IndexHandle(self.properties.handle)
 
-    def __del__(self):
-        try:
-            self.owned
-        except AttributeError:
-            # we were partially constructed.  We're going to let it leak
-            # in that case
-            return
-        if self.owned:
-            if self.handle and core:
-                try:
-                    core.rt
-                except AttributeError:
-                    # uh, leak?  We're owned, and have a handle
-                    # but for some reason the dll isn't active
-                    return
-
-                core.rt.Index_Destroy(self.handle)
-                self.owned = False
-                self.handle = None
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state["handle"]
+        return state
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+        self.handle = IndexHandle(self.properties.handle)
 
     def dumps(self, obj):
         return pickle.dumps(obj)
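
The __getstate__/__setstate__ pair added above is what makes Index instances picklable: the raw native handle is dropped from the state and a fresh IndexHandle is rebuilt from the (equally picklable) properties on load. A round-trip sketch for an in-memory index; note that only the configuration travels this way, not the stored entries:

import pickle
from rtree import index

idx = index.Index()
idx.insert(1, (0.0, 0.0, 1.0, 1.0), obj={'name': 'a'})

clone = pickle.loads(pickle.dumps(idx))       # exercises __getstate__/__setstate__
print(clone.properties.dimension)             # properties survive the round trip
print(clone.count((0.0, 0.0, 2.0, 2.0)))      # expected 0: the memory index is rebuilt empty
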
@@ -276,12 +282,10 @@ class Index(object):
 
     def close(self):
         """Force a flush of the index to storage. Renders index
-        inaccessible.
-        """
-        if self.handle and core:
-            core.rt.Index_Destroy(self.handle)
+        inaccessible."""
+        if self.handle:
+            self.handle.destroy()
             self.handle = None
-            self.owned = False
         else:
             raise IOError("Unclosable index")
 
@@ -304,19 +308,22 @@ class Index(object):
             coordinates += coordinates
 
         if len(coordinates) != dimension * 2:
-            raise core.RTreeError("Coordinates must be in the form "
-                                    "(minx, miny, maxx, maxy) or (x, y) for 2D indexes")
+            raise core.RTreeError(
+                "Coordinates must be in the form "
+                "(minx, miny, maxx, maxy) or (x, y) for 2D indexes")
 
         # so here all coords are in the form:
         # [xmin, ymin, zmin, xmax, ymax, zmax]
         for i in range(dimension):
             if not coordinates[i] <= coordinates[i + dimension]:
-                raise core.RTreeError("Coordinates must not have minimums more than maximums")
+                raise core.RTreeError(
+                    "Coordinates must not have minimums more than maximums")
 
-        p_mins = mins(*[ctypes.c_double(\
-                            coordinates[i]) for i in range(dimension)])
-        p_maxs = maxs(*[ctypes.c_double(\
-                        coordinates[i + dimension]) for i in range(dimension)])
+        p_mins = mins(
+            *[ctypes.c_double(coordinates[i]) for i in range(dimension)])
+        p_maxs = maxs(
+            *[ctypes.c_double(coordinates[i + dimension])
+              for i in range(dimension)])
 
         return (p_mins, p_maxs)
 
@@ -325,13 +332,13 @@ class Index(object):
         size = len(serialized)
 
         d = ctypes.create_string_buffer(serialized)
-        #d.value = serialized
+        # d.value = serialized
         p = ctypes.pointer(d)
 
         # return serialized to keep it alive for the pointer.
         return size, ctypes.cast(p, ctypes.POINTER(ctypes.c_uint8)), serialized
 
-    def insert(self, id, coordinates, obj = None):
+    def insert(self, id, coordinates, obj=None):
         """Inserts an item into the index with the given coordinates.
 
         :param id: long integer
@@ -349,12 +356,14 @@ class Index(object):
             stored in the index with the :attr:`id`.
 
         The following example inserts an entry into the index with id `4321`,
-        and the object it stores with that id is the number `42`.  The coordinate
-        ordering in this instance is the default (interleaved=True) ordering::
+        and the object it stores with that id is the number `42`.  The
+        coordinate ordering in this instance is the default (interleaved=True)
+        ordering::
 
             >>> from rtree import index
             >>> idx = index.Index()
-            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412,
+            41.7375853734), obj=42)
 
         """
         p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
@@ -363,7 +372,8 @@ class Index(object):
         pyserialized = None
         if obj is not None:
             size, data, pyserialized = self._serialize(obj)
-        core.rt.Index_InsertData(self.handle, id, p_mins, p_maxs, self.properties.dimension, data, size)
+        core.rt.Index_InsertData(self.handle, id, p_mins, p_maxs,
+                                 self.properties.dimension, data, size)
     add = insert
 
     def count(self, coordinates):
@@ -376,11 +386,13 @@ class Index(object):
             each dimension defining the bounds of the query window.
 
         The following example queries the index for any objects any objects
-        that were stored in the index intersect the bounds given in the coordinates::
+        that were stored in the index intersect the bounds given in the
+        coordinates::
 
             >>> from rtree import index
             >>> idx = index.Index()
-            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412,
+            41.7375853734), obj=42)
 
             >>> idx.count((0, 0, 60, 60))
             1
@@ -390,18 +402,17 @@ class Index(object):
 
         p_num_results = ctypes.c_uint64(0)
 
-
-        core.rt.Index_Intersects_count(    self.handle,
-                                        p_mins,
-                                        p_maxs,
-                                        self.properties.dimension,
-                                        ctypes.byref(p_num_results))
-
+        core.rt.Index_Intersects_count(self.handle,
+                                       p_mins,
+                                       p_maxs,
+                                       self.properties.dimension,
+                                       ctypes.byref(p_num_results))
 
         return p_num_results.value
 
     def intersection(self, coordinates, objects=False):
-        """Return ids or objects in the index that intersect the given coordinates.
+        """Return ids or objects in the index that intersect the given
+        coordinates.
 
         :param coordinates: sequence or array
             This may be an object that satisfies the numpy array
@@ -416,15 +427,18 @@ class Index(object):
             will be returned without the :class:`rtree.index.Item` wrapper.
 
         The following example queries the index for any objects any objects
-        that were stored in the index intersect the bounds given in the coordinates::
+        that were stored in the index intersect the bounds given in the
+        coordinates::
 
             >>> from rtree import index
             >>> idx = index.Index()
-            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
+            >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412,
+            41.7375853734), obj=42)
 
             >>> hits = list(idx.intersection((0, 0, 60, 60), objects=True))
             >>> [(item.object, item.bbox) for item in hits if item.id == 4321]
-            [(42, [34.3776829412, 26.737585373400002, 49.3776829412, 41.737585373400002])]
+            [(42, [34.3776829412, 26.737585373400002, 49.3776829412,
+            41.737585373400002])]
 
         If the :class:`rtree.index.Item` wrapper is not used, it is faster to
         request the 'raw' objects::
@@ -432,10 +446,10 @@ class Index(object):
             >>> list(idx.intersection((0, 0, 60, 60), objects="raw"))
             [42]
 
-
         """
 
-        if objects: return self._intersection_obj(coordinates, objects)
+        if objects:
+            return self._intersection_obj(coordinates, objects)
 
         p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
 
@@ -443,12 +457,12 @@ class Index(object):
 
         it = ctypes.pointer(ctypes.c_int64())
 
-        core.rt.Index_Intersects_id(    self.handle,
-                                        p_mins,
-                                        p_maxs,
-                                        self.properties.dimension,
-                                        ctypes.byref(it),
-                                        ctypes.byref(p_num_results))
+        core.rt.Index_Intersects_id(self.handle,
+                                    p_mins,
+                                    p_maxs,
+                                    self.properties.dimension,
+                                    ctypes.byref(it),
+                                    ctypes.byref(p_num_results))
         return self._get_ids(it, p_num_results.value)
 
     def _intersection_obj(self, coordinates, objects):
@@ -459,18 +473,20 @@ class Index(object):
 
         it = ctypes.pointer(ctypes.c_void_p())
 
-        core.rt.Index_Intersects_obj(   self.handle,
-                                        p_mins,
-                                        p_maxs,
-                                        self.properties.dimension,
-                                        ctypes.byref(it),
-                                        ctypes.byref(p_num_results))
+        core.rt.Index_Intersects_obj(self.handle,
+                                     p_mins,
+                                     p_maxs,
+                                     self.properties.dimension,
+                                     ctypes.byref(it),
+                                     ctypes.byref(p_num_results))
         return self._get_objects(it, p_num_results.value, objects)
 
     def _get_objects(self, it, num_results, objects):
         # take the pointer, yield the result objects and free
-        items = ctypes.cast(it, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p * num_results)))
-        its = ctypes.cast(items, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)))
+        items = ctypes.cast(
+            it, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p * num_results)))
+        its = ctypes.cast(
+            items, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)))
 
         try:
             if objects != 'raw':
@@ -485,7 +501,7 @@ class Index(object):
                         yield self.loads(data)
 
             core.rt.Index_DestroyObjResults(its, num_results)
-        except: # need to catch all exceptions, not just rtree.
+        except:  # need to catch all exceptions, not just rtree.
             core.rt.Index_DestroyObjResults(its, num_results)
             raise
 
@@ -510,12 +526,12 @@ class Index(object):
 
         it = ctypes.pointer(ctypes.c_void_p())
 
-        core.rt.Index_NearestNeighbors_obj( self.handle,
-                                            p_mins,
-                                            p_maxs,
-                                            self.properties.dimension,
-                                            ctypes.byref(it),
-                                            p_num_results)
+        core.rt.Index_NearestNeighbors_obj(self.handle,
+                                           p_mins,
+                                           p_maxs,
+                                           self.properties.dimension,
+                                           ctypes.byref(it),
+                                           p_num_results)
 
         return self._get_objects(it, p_num_results.contents.value, objects)
 
@@ -548,19 +564,20 @@ class Index(object):
             >>> idx.insert(4321, (34.37, 26.73, 49.37, 41.73), obj=42)
             >>> hits = idx.nearest((0, 0, 10, 10), 3, objects=True)
         """
-        if objects: return self._nearest_obj(coordinates, num_results, objects)
+        if objects:
+            return self._nearest_obj(coordinates, num_results, objects)
         p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
 
         p_num_results = ctypes.pointer(ctypes.c_uint64(num_results))
 
         it = ctypes.pointer(ctypes.c_int64())
 
-        core.rt.Index_NearestNeighbors_id(  self.handle,
-                                            p_mins,
-                                            p_maxs,
-                                            self.properties.dimension,
-                                            ctypes.byref(it),
-                                            p_num_results)
+        core.rt.Index_NearestNeighbors_id(self.handle,
+                                          p_mins,
+                                          p_maxs,
+                                          self.properties.dimension,
+                                          ctypes.byref(it),
+                                          p_num_results)
 
         return self._get_ids(it, p_num_results.contents.value)
 
@@ -573,11 +590,11 @@ class Index(object):
             [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax].  If not specified,
             the :attr:`interleaved` member of the index is used, which
             defaults to True.
-
         """
         if coordinate_interleaved is None:
             coordinate_interleaved = self.interleaved
-        return _get_bounds(self.handle, core.rt.Index_GetBounds, coordinate_interleaved)
+        return _get_bounds(
+            self.handle, core.rt.Index_GetBounds, coordinate_interleaved)
     bounds = property(get_bounds)
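
A quick sketch of the two coordinate orders get_bounds can hand back (values illustrative):

from rtree import index

idx = index.Index()                      # interleaved=True by default
idx.insert(1, (0.0, 0.0, 2.0, 3.0))      # xmin, ymin, xmax, ymax

print(idx.bounds)                                       # [0.0, 0.0, 2.0, 3.0]
print(idx.get_bounds(coordinate_interleaved=False))     # [0.0, 2.0, 0.0, 3.0]
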
 
     def delete(self, id, coordinates):
@@ -603,11 +620,13 @@ class Index(object):
 
             >>> from rtree import index
             >>> idx = index.Index()
-            >>> idx.delete(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734) )
+            >>> idx.delete(4321, (34.3776829412, 26.7375853734, 49.3776829412,
+            41.7375853734))
 
         """
         p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
-        core.rt.Index_DeleteData(self.handle, id, p_mins, p_maxs, self.properties.dimension)
+        core.rt.Index_DeleteData(
+            self.handle, id, p_mins, p_maxs, self.properties.dimension)
 
     def valid(self):
         return bool(core.rt.Index_IsValid(self.handle))
@@ -637,7 +656,8 @@ class Index(object):
     @classmethod
     def interleave(self, deinterleaved):
         """
-        [xmin, xmax, ymin, ymax, zmin, zmax] => [xmin, ymin, zmin, xmax, ymax, zmax]
+        [xmin, xmax, ymin, ymax, zmin, zmax]
+            => [xmin, ymin, zmin, xmax, ymax, zmax]
 
         >>> Index.interleave([0, 1, 10, 11])
         [0, 10, 1, 11]
@@ -650,16 +670,16 @@ class Index(object):
 
         """
         assert len(deinterleaved) % 2 == 0, ("must be a pairwise list")
-        dimension = len(deinterleaved) / 2
+        #  dimension = len(deinterleaved) / 2
         interleaved = []
         for i in range(2):
-            interleaved.extend([deinterleaved[i + j] \
+            interleaved.extend([deinterleaved[i + j]
                                 for j in range(0, len(deinterleaved), 2)])
         return interleaved
 
     def _create_idx_from_stream(self, stream):
         """This function is used to instantiate the index given an
-        iterable stream of data.  """
+        iterable stream of data."""
 
         stream_iter = iter(stream)
         dimension = self.properties.dimension
@@ -678,14 +698,13 @@ class Index(object):
             try:
                 p_id[0], coordinates, obj = next(stream_iter)
             except StopIteration:
-               # we're done
-               return -1
+                # we're done
+                return -1
 
-            # set the id
             if self.interleaved:
                 coordinates = Index.deinterleave(coordinates)
 
-            # this code assumes the coords ar not interleaved.
+            # this code assumes the coords are not interleaved.
             # xmin, xmax, ymin, ymax, zmin, zmax
             for i in range(dimension):
                 mins[i] = coordinates[i*2]
@@ -705,22 +724,20 @@ class Index(object):
 
             return 0
 
-
         stream = core.NEXTFUNC(py_next_item)
-        return core.rt.Index_CreateWithStream(self.properties.handle, stream)
+        return IndexStreamHandle(self.properties.handle, stream)
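
py_next_item above is the Python half of that stream callback; from user code it is reached simply by handing an iterable to the constructor, which bulk-loads the index. A sketch with made-up data:

from rtree import index

def data_gen():
    # interleaved order: (id, (xmin, ymin, xmax, ymax), obj)
    for i in range(1000):
        yield (i, (float(i), float(i), i + 1.0, i + 1.0), None)

idx = index.Index(data_gen())             # routed through _create_idx_from_stream
print(idx.count((0.0, 0.0, 10.0, 10.0)))  # boxes overlapping the query window
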
 
     def leaves(self):
         leaf_node_count = ctypes.c_uint32()
         p_leafsizes = ctypes.pointer(ctypes.c_uint32())
-        p_leafids  = ctypes.pointer(ctypes.c_int64())
+        p_leafids = ctypes.pointer(ctypes.c_int64())
         pp_childids = ctypes.pointer(ctypes.pointer(ctypes.c_int64()))
 
         pp_mins = ctypes.pointer(ctypes.pointer(ctypes.c_double()))
         pp_maxs = ctypes.pointer(ctypes.pointer(ctypes.c_double()))
         dimension = ctypes.c_uint32(0)
 
-
-        core.rt.Index_GetLeaves(   self.handle,
+        core.rt.Index_GetLeaves(self.handle,
                                 ctypes.byref(leaf_node_count),
                                 ctypes.byref(p_leafsizes),
                                 ctypes.byref(p_leafids),
@@ -728,45 +745,58 @@ class Index(object):
                                 ctypes.byref(pp_mins),
                                 ctypes.byref(pp_maxs),
                                 ctypes.byref(dimension)
-                            )
+                                )
 
         output = []
 
         count = leaf_node_count.value
-        sizes = ctypes.cast(p_leafsizes, ctypes.POINTER(ctypes.c_uint32 * count))
+        sizes = ctypes.cast(
+            p_leafsizes, ctypes.POINTER(ctypes.c_uint32 * count))
         ids = ctypes.cast(p_leafids, ctypes.POINTER(ctypes.c_int64 * count))
-        child =  ctypes.cast(pp_childids, ctypes.POINTER(ctypes.POINTER(ctypes.c_int64) * count))
-        mins =  ctypes.cast(pp_mins, ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count))
-        maxs =  ctypes.cast(pp_maxs, ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count))
+        child = ctypes.cast(
+            pp_childids,
+            ctypes.POINTER(ctypes.POINTER(ctypes.c_int64) * count))
+        mins = ctypes.cast(
+            pp_mins,
+            ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count))
+        maxs = ctypes.cast(
+            pp_maxs,
+            ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count))
         for i in range(count):
             p_child_ids = child.contents[i]
 
             id = ids.contents[i]
             size = sizes.contents[i]
-            child_ids_array =  ctypes.cast(p_child_ids, ctypes.POINTER(ctypes.c_int64 * size))
+            child_ids_array = ctypes.cast(
+                p_child_ids, ctypes.POINTER(ctypes.c_int64 * size))
 
             child_ids = []
             for j in range(size):
                 child_ids.append(child_ids_array.contents[j])
 
             # free the child ids list
-            core.rt.Index_Free(ctypes.cast(p_child_ids, ctypes.POINTER(ctypes.c_void_p)))
+            core.rt.Index_Free(
+                ctypes.cast(p_child_ids, ctypes.POINTER(ctypes.c_void_p)))
 
             p_mins = mins.contents[i]
             p_maxs = maxs.contents[i]
 
-            p_mins = ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_double * dimension.value))
-            p_maxs = ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_double * dimension.value))
+            p_mins = ctypes.cast(
+                p_mins, ctypes.POINTER(ctypes.c_double * dimension.value))
+            p_maxs = ctypes.cast(
+                p_maxs, ctypes.POINTER(ctypes.c_double * dimension.value))
 
             bounds = []
             bounds = [p_mins.contents[i] for i in range(dimension.value)]
             bounds += [p_maxs.contents[i] for i in range(dimension.value)]
 
             # free the bounds
-            p_mins = ctypes.cast(p_mins,ctypes.POINTER(ctypes.c_double))
-            p_maxs = ctypes.cast(p_maxs,ctypes.POINTER(ctypes.c_double))
-            core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
-            core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))
+            p_mins = ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_double))
+            p_maxs = ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_double))
+            core.rt.Index_Free(
+                ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
+            core.rt.Index_Free(
+                ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))
 
             output.append((id, child_ids, bounds))
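
Once the pointer unpacking above is done, leaves() hands back plain Python tuples. A usage sketch:

from rtree import index

idx = index.Index()
for i in range(5):
    idx.insert(i, (float(i), float(i), i + 0.5, i + 0.5))

for leaf_id, child_ids, bounds in idx.leaves():
    # (node id, ids of the entries in that leaf, [xmin, ymin, xmax, ymax])
    print(leaf_id, child_ids, bounds)
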
 
@@ -775,9 +805,12 @@ class Index(object):
 # An alias to preserve backward compatibility
 Rtree = Index
 
+
 class Item(object):
     """A container for index entries"""
+
     __slots__ = ('handle', 'owned', 'id', 'object', 'bounds')
+
     def __init__(self, loads, handle, owned=False):
         """There should be no reason to instantiate these yourself. Items are
         created automatically when you call
@@ -793,7 +826,8 @@ class Item(object):
 
         self.object = None
         self.object = self.get_object(loads)
-        self.bounds = _get_bounds(self.handle, core.rt.IndexItem_GetBounds, False)
+        self.bounds = _get_bounds(
+            self.handle, core.rt.IndexItem_GetBounds, False)
 
     @property
     def bbox(self):
@@ -802,11 +836,68 @@ class Item(object):
 
     def get_object(self, loads):
         # short circuit this so we only do it at construction time
-        if self.object is not None: return self.object
+        if self.object is not None:
+            return self.object
         data = _get_data(self.handle)
-        if data is None: return None
+        if data is None:
+            return None
         return loads(data)
 
+
+class InvalidHandleException(Exception):
+    """Handle has been destroyed and can no longer be used"""
+
+
+class Handle(object):
+
+    def __init__(self, *args, **kwargs):
+        self._ptr = self._create(*args, **kwargs)
+
+    def _create(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def _destroy(self, ptr):
+        raise NotImplementedError
+
+    def destroy(self):
+        if self._ptr is not None:
+            self._destroy(self._ptr)
+            self._ptr = None
+
+    @property
+    def _as_parameter_(self):
+        if self._ptr is None:
+            raise InvalidHandleException
+        return self._ptr
+
+    def __del__(self):
+        try:
+            self.destroy()
+        except NameError:
+            # The core.py module no longer has core.rt
+            # available because it has already been torn
+            # down at interpreter shutdown. We don't want
+            # to try to do anything in that instance.
+            return
+
+
+class IndexHandle(Handle):
+
+    _create = core.rt.Index_Create
+    _destroy = core.rt.Index_Destroy
+
+
+class IndexStreamHandle(IndexHandle):
+
+    _create = core.rt.Index_CreateWithStream
+
+
+class PropertyHandle(Handle):
+
+    _create = core.rt.IndexProperty_Create
+    _destroy = core.rt.IndexProperty_Destroy
+
+
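A minimal sketch of how these Handle wrappers behave, assuming the 0.8.3
layout introduced here (PropertyHandle living in rtree.index and core.rt
exposing the libspatialindex C API):

    from rtree import core, index

    h = index.PropertyHandle()             # wraps core.rt.IndexProperty_Create()
    core.rt.IndexProperty_GetDimension(h)  # ctypes passes h via Handle._as_parameter_
    h.destroy()                            # wraps core.rt.IndexProperty_Destroy()
    h.destroy()                            # safe no-op: the cleared pointer is never freed twice
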
 class Property(object):
     """An index property object is a container that contains a number of
     settable index properties.  Many of these properties must be set at
@@ -821,28 +912,25 @@ class Property(object):
         'overwrite', 'pagesize', 'point_pool_capacity',
         'region_pool_capacity', 'reinsert_factor',
         'split_distribution_factor', 'storage', 'tight_mbr', 'tpr_horizon',
-        'type', 'variant', 'writethrough' )
+        'type', 'variant', 'writethrough')
 
     def __init__(self, handle=None, owned=True, **kwargs):
-        if handle:
-            self.handle = handle
-        else:
-            self.handle = core.rt.IndexProperty_Create()
-        self.owned = owned
-        for k, v in list(kwargs.items()):
+        if handle is None:
+            handle = PropertyHandle()
+        self.handle = handle
+        self.initialize_from_dict(kwargs)
+
+    def initialize_from_dict(self, state):
+        for k, v in state.items():
             if v is not None:
                 setattr(self, k, v)
 
-    def __del__(self):
-        if self.owned:
-            if self.handle and core:
-                try:
-                    core.rt
-                except AttributeError:
-                    # uh, leak?  We're owned, and have a handle
-                    # but for some reason the dll isn't active
-                    return
-                core.rt.IndexProperty_Destroy(self.handle)
+    def __getstate__(self):
+        return self.as_dict()
+
+    def __setstate__(self, state):
+        self.handle = PropertyHandle()
+        self.initialize_from_dict(state)
 
     def as_dict(self):
         d = {}
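The __getstate__/__setstate__ pair above is what makes Property picklable:
the state is simply as_dict(), and unpickling rebuilds a fresh
PropertyHandle before replaying the settable values. A short sketch,
assuming rtree 0.8.3:

    import pickle
    from rtree import index

    p = index.Property()
    p.dimension = 3
    p.pagesize = 8192

    q = pickle.loads(pickle.dumps(p))   # new C-side handle, same settable values
    assert (q.dimension, q.pagesize) == (3, 8192)
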
@@ -862,16 +950,18 @@ class Property(object):
 
     def get_index_type(self):
         return core.rt.IndexProperty_GetIndexType(self.handle)
+
     def set_index_type(self, value):
         return core.rt.IndexProperty_SetIndexType(self.handle, value)
 
     type = property(get_index_type, set_index_type)
     """Index type. Valid index type values are
-        :data:`RT_RTree`, :data:`RT_MVTree`, or :data:`RT_TPRTree`.  Only
-        RT_RTree (the default) is practically supported at this time."""
+    :data:`RT_RTree`, :data:`RT_MVTree`, or :data:`RT_TPRTree`.  Only
+    RT_RTree (the default) is practically supported at this time."""
 
     def get_variant(self):
         return core.rt.IndexProperty_GetIndexVariant(self.handle)
+
     def set_variant(self, value):
         return core.rt.IndexProperty_SetIndexVariant(self.handle, value)
 
@@ -881,9 +971,11 @@ class Property(object):
 
     def get_dimension(self):
         return core.rt.IndexProperty_GetDimension(self.handle)
+
     def set_dimension(self, value):
         if (value <= 0):
-            raise core.RTreeError("Negative or 0 dimensional indexes are not allowed")
+            raise core.RTreeError(
+                "Negative or 0 dimensional indexes are not allowed")
         return core.rt.IndexProperty_SetDimension(self.handle, value)
 
     dimension = property(get_dimension, set_dimension)
@@ -892,17 +984,23 @@ class Property(object):
 
     def get_storage(self):
         return core.rt.IndexProperty_GetIndexStorage(self.handle)
+
     def set_storage(self, value):
         return core.rt.IndexProperty_SetIndexStorage(self.handle, value)
 
     storage = property(get_storage, set_storage)
-    """Index storage. One of :data:`RT_Disk`, :data:`RT_Memory` or :data:`RT_Custom`.
-    If a filename is passed as the first parameter to :class:index.Index, :data:`RT_Disk`
-    is assumed. If a CustomStorage instance is passed, :data:`RT_Custom` is assumed.
-    Otherwise, :data:`RT_Memory` is the default."""
+    """Index storage.
+
+    One of :data:`RT_Disk`, :data:`RT_Memory` or :data:`RT_Custom`.
+
+    If a filename is passed as the first parameter to :class:index.Index,
+    :data:`RT_Disk` is assumed. If a CustomStorage instance is passed,
+    :data:`RT_Custom` is assumed. Otherwise, :data:`RT_Memory` is the default.
+    """
 
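For illustration, the first two storage selections described above look like
this in practice (the filename is a placeholder; sketch assumes rtree 0.8.3):

    from rtree import index

    mem_idx = index.Index()                # no filename -> in-memory index (RT_Memory)
    disk_idx = index.Index('example_idx')  # basename -> RT_Disk, backed by example_idx.dat/.idx
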
     def get_pagesize(self):
         return core.rt.IndexProperty_GetPagesize(self.handle)
+
     def set_pagesize(self, value):
         if (value <= 0):
             raise core.RTreeError("Pagesize must be > 0")
@@ -910,10 +1008,11 @@ class Property(object):
 
     pagesize = property(get_pagesize, set_pagesize)
     """The pagesize when disk storage is used.  It is ideal to ensure that your
-    index entries fit within a single page for best performance.  """
+    index entries fit within a single page for best performance."""
 
     def get_index_capacity(self):
         return core.rt.IndexProperty_GetIndexCapacity(self.handle)
+
     def set_index_capacity(self, value):
         if (value <= 0):
             raise core.RTreeError("index_capacity must be > 0")
@@ -924,6 +1023,7 @@ class Property(object):
 
     def get_leaf_capacity(self):
         return core.rt.IndexProperty_GetLeafCapacity(self.handle)
+
     def set_leaf_capacity(self, value):
         if (value <= 0):
             raise core.RTreeError("leaf_capacity must be > 0")
@@ -934,55 +1034,66 @@ class Property(object):
 
     def get_index_pool_capacity(self):
         return core.rt.IndexProperty_GetIndexPoolCapacity(self.handle)
+
     def set_index_pool_capacity(self, value):
         if (value <= 0):
             raise core.RTreeError("index_pool_capacity must be > 0")
         return core.rt.IndexProperty_SetIndexPoolCapacity(self.handle, value)
 
-    index_pool_capacity = property(get_index_pool_capacity, set_index_pool_capacity)
+    index_pool_capacity = property(
+        get_index_pool_capacity, set_index_pool_capacity)
     """Index pool capacity"""
 
     def get_point_pool_capacity(self):
         return core.rt.IndexProperty_GetPointPoolCapacity(self.handle)
+
     def set_point_pool_capacity(self, value):
         if (value <= 0):
             raise core.RTreeError("point_pool_capacity must be > 0")
         return core.rt.IndexProperty_SetPointPoolCapacity(self.handle, value)
 
-    point_pool_capacity = property(get_point_pool_capacity, set_point_pool_capacity)
+    point_pool_capacity = property(
+        get_point_pool_capacity, set_point_pool_capacity)
     """Point pool capacity"""
 
     def get_region_pool_capacity(self):
         return core.rt.IndexProperty_GetRegionPoolCapacity(self.handle)
+
     def set_region_pool_capacity(self, value):
         if (value <= 0):
             raise core.RTreeError("region_pool_capacity must be > 0")
         return core.rt.IndexProperty_SetRegionPoolCapacity(self.handle, value)
 
-    region_pool_capacity = property(get_region_pool_capacity, set_region_pool_capacity)
+    region_pool_capacity = property(
+        get_region_pool_capacity, set_region_pool_capacity)
     """Region pool capacity"""
 
     def get_buffering_capacity(self):
         return core.rt.IndexProperty_GetBufferingCapacity(self.handle)
+
     def set_buffering_capacity(self, value):
         if (value <= 0):
             raise core.RTreeError("buffering_capacity must be > 0")
         return core.rt.IndexProperty_SetBufferingCapacity(self.handle, value)
 
-    buffering_capacity = property(get_buffering_capacity, set_buffering_capacity)
+    buffering_capacity = property(
+        get_buffering_capacity, set_buffering_capacity)
     """Buffering capacity"""
 
     def get_tight_mbr(self):
         return bool(core.rt.IndexProperty_GetEnsureTightMBRs(self.handle))
+
     def set_tight_mbr(self, value):
         value = bool(value)
-        return bool(core.rt.IndexProperty_SetEnsureTightMBRs(self.handle, value))
+        return bool(
+            core.rt.IndexProperty_SetEnsureTightMBRs(self.handle, value))
 
     tight_mbr = property(get_tight_mbr, set_tight_mbr)
     """Uses tight bounding rectangles"""
 
     def get_overwrite(self):
         return bool(core.rt.IndexProperty_GetOverwrite(self.handle))
+
     def set_overwrite(self, value):
         value = bool(value)
         return bool(core.rt.IndexProperty_SetOverwrite(self.handle, value))
@@ -992,16 +1103,20 @@ class Property(object):
 
     def get_near_minimum_overlap_factor(self):
         return core.rt.IndexProperty_GetNearMinimumOverlapFactor(self.handle)
+
     def set_near_minimum_overlap_factor(self, value):
         if (value <= 0):
             raise core.RTreeError("near_minimum_overlap_factor must be > 0")
-        return core.rt.IndexProperty_SetNearMinimumOverlapFactor(self.handle, value)
+        return core.rt.IndexProperty_SetNearMinimumOverlapFactor(
+            self.handle, value)
 
-    near_minimum_overlap_factor = property(get_near_minimum_overlap_factor, set_near_minimum_overlap_factor)
+    near_minimum_overlap_factor = property(
+        get_near_minimum_overlap_factor, set_near_minimum_overlap_factor)
     """Overlap factor for MVRTrees"""
 
     def get_writethrough(self):
         return bool(core.rt.IndexProperty_GetWriteThrough(self.handle))
+
     def set_writethrough(self, value):
         value = bool(value)
         return bool(core.rt.IndexProperty_SetWriteThrough(self.handle, value))
@@ -1011,6 +1126,7 @@ class Property(object):
 
     def get_fill_factor(self):
         return core.rt.IndexProperty_GetFillFactor(self.handle)
+
     def set_fill_factor(self, value):
         return core.rt.IndexProperty_SetFillFactor(self.handle, value)
 
@@ -1019,14 +1135,18 @@ class Property(object):
 
     def get_split_distribution_factor(self):
         return core.rt.IndexProperty_GetSplitDistributionFactor(self.handle)
+
     def set_split_distribution_factor(self, value):
-        return core.rt.IndexProperty_SetSplitDistributionFactor(self.handle, value)
+        return core.rt.IndexProperty_SetSplitDistributionFactor(
+            self.handle, value)
 
-    split_distribution_factor = property(get_split_distribution_factor, set_split_distribution_factor)
+    split_distribution_factor = property(
+        get_split_distribution_factor, set_split_distribution_factor)
     """Split distribution factor"""
 
     def get_tpr_horizon(self):
         return core.rt.IndexProperty_GetTPRHorizon(self.handle)
+
     def set_tpr_horizon(self, value):
         return core.rt.IndexProperty_SetTPRHorizon(self.handle, value)
 
@@ -1035,6 +1155,7 @@ class Property(object):
 
     def get_reinsert_factor(self):
         return core.rt.IndexProperty_GetReinsertFactor(self.handle)
+
     def set_reinsert_factor(self, value):
         return core.rt.IndexProperty_SetReinsertFactor(self.handle, value)
 
@@ -1042,50 +1163,68 @@ class Property(object):
     """Reinsert factor"""
 
     def get_filename(self):
-        return core.rt.IndexProperty_GetFileName(self.handle)
+        s = core.rt.IndexProperty_GetFileName(self.handle)
+        return string_output(s)
+
     def set_filename(self, value):
-        v = value.encode('utf-8')
-        return core.rt.IndexProperty_SetFileName(self.handle, v)
+        if isinstance(value, string_types):
+            value = value.encode('utf-8')
+        return core.rt.IndexProperty_SetFileName(self.handle, value)
 
     filename = property(get_filename, set_filename)
     """Index filename for disk storage"""
 
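With the encode/decode added here, the filename property round-trips as text
on both Python 2 and 3 (string_output handles the bytes-to-str step on the
way out). Sketch, assuming rtree 0.8.3:

    from rtree import index

    p = index.Property()
    p.filename = 'example_index'            # text in; encoded to UTF-8 for the C API
    assert p.filename == 'example_index'    # decoded back to text by string_output()
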
     def get_dat_extension(self):
-        return core.rt.IndexProperty_GetFileNameExtensionDat(self.handle)
+        s = core.rt.IndexProperty_GetFileNameExtensionDat(self.handle)
+        return string_output(s)
+
     def set_dat_extension(self, value):
-        v = value.encode('utf-8')
-        return core.rt.IndexProperty_SetFileNameExtensionDat(self.handle, value)
+        if isinstance(value, string_types):
+            value = value.encode('utf-8')
+        return core.rt.IndexProperty_SetFileNameExtensionDat(
+            self.handle, value)
 
     dat_extension = property(get_dat_extension, set_dat_extension)
     """Extension for .dat file"""
 
     def get_idx_extension(self):
-        return core.rt.IndexProperty_GetFileNameExtensionIdx(self.handle)
+        s = core.rt.IndexProperty_GetFileNameExtensionIdx(self.handle)
+        return string_output(s)
+
     def set_idx_extension(self, value):
-        v = value.encode('utf-8')
-        return core.rt.IndexProperty_SetFileNameExtensionIdx(self.handle, value)
+        if isinstance(value, string_types):
+            value = value.encode('utf-8')
+        return core.rt.IndexProperty_SetFileNameExtensionIdx(
+            self.handle, value)
 
     idx_extension = property(get_idx_extension, set_idx_extension)
     """Extension for .idx file"""
 
     def get_custom_storage_callbacks_size(self):
         return core.rt.IndexProperty_GetCustomStorageCallbacksSize(self.handle)
+
     def set_custom_storage_callbacks_size(self, value):
-        return core.rt.IndexProperty_SetCustomStorageCallbacksSize(self.handle, value)
+        return core.rt.IndexProperty_SetCustomStorageCallbacksSize(
+            self.handle, value)
 
-    custom_storage_callbacks_size = property(get_custom_storage_callbacks_size, set_custom_storage_callbacks_size)
+    custom_storage_callbacks_size = property(
+        get_custom_storage_callbacks_size, set_custom_storage_callbacks_size)
     """Size of callbacks for custom storage"""
 
     def get_custom_storage_callbacks(self):
         return core.rt.IndexProperty_GetCustomStorageCallbacks(self.handle)
+
     def set_custom_storage_callbacks(self, value):
-        return core.rt.IndexProperty_SetCustomStorageCallbacks(self.handle, value)
+        return core.rt.IndexProperty_SetCustomStorageCallbacks(
+            self.handle, value)
 
-    custom_storage_callbacks = property(get_custom_storage_callbacks, set_custom_storage_callbacks)
+    custom_storage_callbacks = property(
+        get_custom_storage_callbacks, set_custom_storage_callbacks)
     """Callbacks for custom storage"""
 
     def get_index_id(self):
         return core.rt.IndexProperty_GetIndexID(self.handle)
+
     def set_index_id(self, value):
         return core.rt.IndexProperty_SetIndexID(self.handle, value)
 
@@ -1097,50 +1236,48 @@ class Property(object):
 
 id_type = ctypes.c_int64
 
+
 class CustomStorageCallbacks(ctypes.Structure):
     # callback types
-    createCallbackType  = ctypes.CFUNCTYPE(
-                            None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
-                          )
+    createCallbackType = ctypes.CFUNCTYPE(
+        None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int))
     destroyCallbackType = ctypes.CFUNCTYPE(
-                            None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
-                          )
+        None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int))
     flushCallbackType = ctypes.CFUNCTYPE(
-                          None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
-                        )
-
-    loadCallbackType    = ctypes.CFUNCTYPE(
-                            None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_uint32),
-                            ctypes.POINTER(ctypes.POINTER(ctypes.c_uint8)), ctypes.POINTER(ctypes.c_int)
-                          )
-    storeCallbackType   = ctypes.CFUNCTYPE(
-                            None, ctypes.c_void_p, ctypes.POINTER(id_type), ctypes.c_uint32,
-                            ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.c_int)
-                          )
-    deleteCallbackType  = ctypes.CFUNCTYPE(
-                            None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_int)
-                          )
-
-    _fields_ = [ ('context', ctypes.c_void_p),
-                 ('createCallback', createCallbackType),
-                 ('destroyCallback', destroyCallbackType),
-                 ('flushCallback', flushCallbackType),
-                 ('loadCallback', loadCallbackType),
-                 ('storeCallback', storeCallbackType),
-                 ('deleteCallback', deleteCallbackType),
-               ]
-
-    def __init__(self, context, createCallback, destroyCallback, flushCallback, loadCallback, storeCallback, deleteCallback):
-        ctypes.Structure.__init__( self,
-                                   ctypes.c_void_p( context ),
-                                   self.createCallbackType( createCallback ),
-                                   self.destroyCallbackType( destroyCallback ),
-                                   self.flushCallbackType ( flushCallback ),
-                                   self.loadCallbackType  ( loadCallback ),
-                                   self.storeCallbackType ( storeCallback ),
-                                   self.deleteCallbackType( deleteCallback ),
+        None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int))
+
+    loadCallbackType = ctypes.CFUNCTYPE(
+        None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_uint32),
+        ctypes.POINTER(ctypes.POINTER(ctypes.c_uint8)),
+        ctypes.POINTER(ctypes.c_int))
+    storeCallbackType = ctypes.CFUNCTYPE(
+        None, ctypes.c_void_p, ctypes.POINTER(id_type), ctypes.c_uint32,
+        ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.c_int))
+    deleteCallbackType = ctypes.CFUNCTYPE(
+        None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_int))
+
+    _fields_ = [('context', ctypes.c_void_p),
+                ('createCallback', createCallbackType),
+                ('destroyCallback', destroyCallbackType),
+                ('flushCallback', flushCallbackType),
+                ('loadCallback', loadCallbackType),
+                ('storeCallback', storeCallbackType),
+                ('deleteCallback', deleteCallbackType),
+                ]
+
+    def __init__(self, context, createCallback, destroyCallback,
+                 flushCallback, loadCallback, storeCallback, deleteCallback):
+        ctypes.Structure.__init__(self,
+                                  ctypes.c_void_p(context),
+                                  self.createCallbackType(createCallback),
+                                  self.destroyCallbackType(destroyCallback),
+                                  self.flushCallbackType(flushCallback),
+                                  self.loadCallbackType(loadCallback),
+                                  self.storeCallbackType(storeCallback),
+                                  self.deleteCallbackType(deleteCallback),
                                   )
 
+
 class ICustomStorage(object):
     # error codes
     NoError = 0
@@ -1152,7 +1289,7 @@ class ICustomStorage(object):
     NewPage = -0x1
 
     def allocateBuffer(self, length):
-        return core.rt.SIDX_NewBuffer( length )
+        return core.rt.SIDX_NewBuffer(length)
 
     def registerCallbacks(self, properties):
         raise NotImplementedError()
@@ -1160,82 +1297,83 @@ class ICustomStorage(object):
     def clear(self):
         raise NotImplementedError()
 
-    hasData = property( lambda self: False )
-    ''' Override this property to allow for reloadable storages '''
+    hasData = property(lambda self: False)
+    '''Override this property to allow for reloadable storages'''
 
 
 class CustomStorageBase(ICustomStorage):
-    """ Derive from this class to create your own storage manager with access
-        to the raw C buffers.
-    """
+    """Derive from this class to create your own storage manager with access
+    to the raw C buffers."""
 
     def registerCallbacks(self, properties):
-        callbacks = CustomStorageCallbacks( ctypes.c_void_p(), self.create,
-                                            self.destroy, self.flush,
-                                            self.loadByteArray, self.storeByteArray,
-                                            self.deleteByteArray )
-        properties.custom_storage_callbacks_size = ctypes.sizeof( callbacks )
+        callbacks = CustomStorageCallbacks(
+            ctypes.c_void_p(), self.create, self.destroy, self.flush,
+            self.loadByteArray, self.storeByteArray, self.deleteByteArray)
+        properties.custom_storage_callbacks_size = ctypes.sizeof(callbacks)
         self.callbacks = callbacks
-        properties.custom_storage_callbacks      = ctypes.cast( ctypes.pointer(callbacks), ctypes.c_void_p )
+        properties.custom_storage_callbacks = \
+            ctypes.cast(ctypes.pointer(callbacks), ctypes.c_void_p)
 
     # the user must override these callback functions
     def create(self, context, returnError):
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def destroy(self, context, returnError):
-        """ please override """
+        """please override"""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def loadByteArray(self, context, page, resultLen, resultData, returnError):
-        """ please override """
+        """please override"""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def storeByteArray(self, context, page, len, data, returnError):
-        """ please override """
+        """please override"""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def deleteByteArray(self, context, page, returnError):
-        """ please override """
+        """please override"""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def flush(self, context, returnError):
-        """ please override """
+        """please override"""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
 
 class CustomStorage(ICustomStorage):
-    """ Provides a useful default custom storage implementation which marshals
-        the buffers on the C side from/to python strings.
-        Derive from this class and override the necessary methods to provide
-        your own custom storage manager.
-    """
+    """Provides a useful default custom storage implementation which marshals
+    the buffers on the C side from/to python strings.
+    Derive from this class and override the necessary methods to provide
+    your own custom storage manager."""
 
     def registerCallbacks(self, properties):
-        callbacks = CustomStorageCallbacks( 0, self._create, self._destroy, self._flush, self._loadByteArray,
-                                               self._storeByteArray, self._deleteByteArray )
-        properties.custom_storage_callbacks_size = ctypes.sizeof( callbacks )
+        callbacks = CustomStorageCallbacks(
+            0, self._create, self._destroy, self._flush, self._loadByteArray,
+            self._storeByteArray, self._deleteByteArray)
+        properties.custom_storage_callbacks_size = ctypes.sizeof(callbacks)
         self.callbacks = callbacks
-        properties.custom_storage_callbacks      = ctypes.cast( ctypes.pointer(callbacks), ctypes.c_void_p )
+        properties.custom_storage_callbacks = \
+            ctypes.cast(ctypes.pointer(callbacks), ctypes.c_void_p)
 
-    # these functions handle the C callbacks and massage the data, then delegate
-    #  to the function without underscore below
+    # these functions handle the C callbacks and massage the data, then
+    # delegate to the function without underscore below
     def _create(self, context, returnError):
-        self.create( returnError )
+        self.create(returnError)
 
     def _destroy(self, context, returnError):
-        self.destroy( returnError )
+        self.destroy(returnError)
 
     def _flush(self, context, returnError):
-        self.flush( returnError )
+        self.flush(returnError)
 
-    def _loadByteArray(self, context, page, resultLen, resultData, returnError):
-        resultString = self.loadByteArray( page, returnError )
+    def _loadByteArray(self, context, page, resultLen,
+                       resultData, returnError):
+        resultString = self.loadByteArray(page, returnError)
         if returnError.contents.value != self.NoError:
             return
         # Copy python string over into a buffer allocated on the C side.
@@ -1246,50 +1384,279 @@ class CustomStorage(ICustomStorage):
         #  crash.
         count = len(resultString)
         resultLen.contents.value = count
-        buffer = self.allocateBuffer( count )
-        ctypes.memmove( buffer, ctypes.c_char_p(resultString), count )
-        resultData[0] = ctypes.cast( buffer, ctypes.POINTER(ctypes.c_uint8) )
+        buffer = self.allocateBuffer(count)
+        ctypes.memmove(buffer, ctypes.c_char_p(resultString), count)
+        resultData[0] = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_uint8))
 
     def _storeByteArray(self, context, page, len, data, returnError):
-        str = ctypes.string_at( data, len )
-        newPageId = self.storeByteArray( page.contents.value, str, returnError )
+        str = ctypes.string_at(data, len)
+        newPageId = self.storeByteArray(page.contents.value, str, returnError)
         page.contents.value = newPageId
 
     def _deleteByteArray(self, context, page, returnError):
-        self.deleteByteArray( page, returnError )
-
+        self.deleteByteArray(page, returnError)
 
     # the user must override these callback functions
     def create(self, returnError):
-        """ Must be overriden. No return value. """
+        """Must be overridden. No return value."""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def destroy(self, returnError):
-        """ Must be overriden. No return value. """
+        """Must be overridden. No return value."""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def flush(self, returnError):
-        """ Must be overriden. No return value. """
+        """Must be overridden. No return value."""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
 
     def loadByteArray(self, page, returnError):
-        """ Must be overriden. Must return a string with the loaded data. """
+        """Must be overridden. Must return a string with the loaded data."""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
         return ''
 
     def storeByteArray(self, page, data, returnError):
-        """ Must be overriden. Must return the new 64-bit page ID of the stored
-            data if a new page had to be created (i.e. page is not NewPage).
-        """
+        """Must be overridden. Must return the new 64-bit page ID of the stored
+        data if a new page had to be created (i.e. page is not NewPage)."""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
         return 0
 
     def deleteByteArray(self, page, returnError):
-        """ please override """
+        """please override"""
         returnError.contents.value = self.IllegalStateError
-        raise NotImplementedError( "You must override this method." )
+        raise NotImplementedError("You must override this method.")
+
+
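As a concrete illustration of the "derive and override" contract described in
the CustomStorage docstring above, here is a hedged sketch of a dict-backed
storage. DictStorage is a made-up name, the error-code attributes (NoError,
InvalidPageError) are assumed to be the ones defined on ICustomStorage, and
the sketch assumes index.Index accepts the storage instance as its first
argument, as the storage property docstring above describes:

    from rtree import index

    class DictStorage(index.CustomStorage):
        """A toy page store kept in a Python dict."""

        def __init__(self):
            self.pages = {}
            self.next_page = 0

        def create(self, returnError):
            pass                        # nothing to allocate up front

        def destroy(self, returnError):
            pass

        def flush(self, returnError):
            pass

        def clear(self):
            self.pages = {}

        def loadByteArray(self, page, returnError):
            try:
                return self.pages[page]          # bytes, exactly as stored below
            except KeyError:
                returnError.contents.value = self.InvalidPageError

        def storeByteArray(self, page, data, returnError):
            if page == self.NewPage:
                page = self.next_page
                self.next_page += 1
            self.pages[page] = data
            return page                          # report which page id was used

        def deleteByteArray(self, page, returnError):
            try:
                del self.pages[page]
            except KeyError:
                returnError.contents.value = self.InvalidPageError

    storage = DictStorage()
    p = index.Property()
    idx = index.Index(storage, properties=p)     # RT_Custom is selected automatically
    idx.insert(1, (0.0, 0.0, 1.0, 1.0))
    print(list(idx.intersection((0.0, 0.0, 2.0, 2.0))))   # -> [1]
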
+class RtreeContainer(Rtree):
+    """An R-Tree, MVR-Tree, or TPR-Tree indexed container for python objects"""
+
+    def __init__(self, *args, **kwargs):
+        """Creates a new index
+
+        :param stream:
+            If the first argument in the constructor is not of type basestring,
+            it is assumed to be an iterable stream of data that will raise a
+            StopIteration.  It must be in the form defined by the
+            :attr:`interleaved` attribute of the index. The following example
+            would assume :attr:`interleaved` is False::
+
+            (obj, (minx, maxx, miny, maxy, minz, maxz, ..., ..., mink, maxk))
+
+        :param interleaved: True or False, defaults to True.
+            This parameter determines the coordinate order for all methods that
+            take in coordinates.
+
+        :param properties: An :class:`index.Property` object
+            This object sets both the creation and instantiation properties
+            for the object and they are passed down into libspatialindex.
+            A few properties are curried from instantiation parameters
+            for you like ``pagesize`` to ensure compatibility with previous
+            versions of the library.  All other properties must be set on the
+            object.
+
+        .. warning::
+            The coordinate ordering for all functions is sensitive to the
+            index's :attr:`interleaved` data member.  If :attr:`interleaved`
+            is False, the coordinates must be in the form
+            [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If
+            :attr:`interleaved` is True, the coordinates must be in the form
+            [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax].
+
+        A basic example
+        ::
+
+            >>> from rtree import index
+            >>> p = index.Property()
+
+            >>> idx = index.RtreeContainer(properties=p)
+            >>> idx  # doctest: +ELLIPSIS
+            <rtree.index.RtreeContainer object at 0x...>
+
+        Insert an item into the index::
+
+            >>> idx.insert(object(), (34.3776829412, 26.7375853734,
+            ...             49.3776829412, 41.7375853734))
+
+        Query::
+
+            >>> hits = idx.intersection((0, 0, 60, 60), bbox=True)
+            >>> for item in hits:
+            ...     item.object
+            ...     item.bbox  # doctest: +ELLIPSIS
+            <object object at 0x...>
+            [34.3776829412, 26.737585373400002, 49.3776829412,
+            41.737585373400002]
+        """
+        if args:
+            if isinstance(args[0], rtree.index.string_types) \
+                    or isinstance(args[0], bytes) \
+                    or isinstance(args[0], rtree.index.ICustomStorage):
+                raise ValueError('%s supports only in-memory indexes'
+                                 % self.__class__)
+        self._objects = {}
+        return super(RtreeContainer, self).__init__(*args, **kwargs)
+
+    def insert(self, obj, coordinates):
+        """Inserts an item into the index with the given coordinates.
+
+        :param obj: object
+            Any object.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        The following example inserts a simple object into the container.
+        The coordinate ordering in this instance is the default
+        (interleaved=True) ordering::
+
+            >>> from rtree import index
+            >>> idx = index.RtreeContainer()
+            >>> idx.insert(object(), (34.3776829412, 26.7375853734,
+            ...             49.3776829412, 41.7375853734))
+
+        """
+        try:
+            count = self._objects[id(obj)][0] + 1
+        except KeyError:
+            count = 1
+        self._objects[id(obj)] = (count, obj)
+        return super(RtreeContainer, self).insert(id(obj), coordinates, None)
+
+    add = insert
+
+    def intersection(self, coordinates, bbox=False):
+        """Return the stored objects in the container whose extents
+        intersect the given coordinates.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        :param bbox: True or False
+            If True, the intersection method will return the stored objects,
+            as well as the bounds of the entry.
+
+        The following example queries the container for any stored objects that
+        intersect the bounds given in the coordinates::
+
+            >>> from rtree import index
+            >>> idx = index.RtreeContainer()
+            >>> idx.insert(object(), (34.3776829412, 26.7375853734,
+            ...             49.3776829412, 41.7375853734))
+
+            >>> hits = list(idx.intersection((0, 0, 60, 60), bbox=True))
+            >>> [(item.object, item.bbox)
+            ...  for item in hits]   # doctest: +ELLIPSIS
+            [(<object object at 0x...>, [34.3776829412, 26.7375853734,
+            49.3776829412, 41.7375853734])]
+
+        If the :class:`rtree.index.Item` wrapper is not used, it is faster to
+        request only the stored objects::
+
+            >>> list(idx.intersection((0, 0, 60, 60)))   # doctest: +ELLIPSIS
+            [<object object at 0x...>]
+
+        """
+        if bbox is False:
+            for id in super(RtreeContainer,
+                            self).intersection(coordinates, bbox):
+                yield self._objects[id][1]
+        elif bbox is True:
+            for value in super(RtreeContainer,
+                               self).intersection(coordinates, bbox):
+                value.object = self._objects[value.id][1]
+                value.id = None
+                yield value
+        else:
+            raise ValueError(
+                "valid values for the bbox argument are True and False")
+
+    def nearest(self, coordinates, num_results=1, bbox=False):
+        """Returns the ``k``-nearest objects to the given coordinates.
+
+        :param coordinates: sequence or array
+            This may be an object that satisfies the numpy array
+            protocol, providing the index's dimension * 2 coordinate
+            pairs representing the `mink` and `maxk` coordinates in
+            each dimension defining the bounds of the query window.
+
+        :param num_results: integer
+            The number of results to return nearest to the given coordinates.
+            If two entries are equidistant, *both* are returned.
+            Because of this, the method may return more items than the
+            number requested in :attr:`num_results`.
+
+        :param bbox: True or False
+            If True, the nearest method will return the stored objects, as
+            well as the bounds of the entry.
+
+        Example of finding the three items nearest to this one::
+
+            >>> from rtree import index
+            >>> idx = index.RtreeContainer()
+            >>> idx.insert(object(), (34.37, 26.73, 49.37, 41.73))
+            >>> hits = idx.nearest((0, 0, 10, 10), 3, bbox=True)
+        """
+        if bbox is False:
+            for id in super(RtreeContainer,
+                            self).nearest(coordinates, num_results, bbox):
+                yield self._objects[id][1]
+        elif bbox is True:
+            for value in super(RtreeContainer,
+                               self).nearest(coordinates, num_results, bbox):
+                value.object = self._objects[value.id][1]
+                value.id = None
+                yield value
+        else:
+            raise ValueError(
+                "valid values for the bbox argument are True and False")
+
+    def delete(self, obj, coordinates):
+        """Deletes an item from the container at the specified
+        coordinates.
+
+        :param obj: object
+            Any object.
+
+        :param coordinates: sequence or array
+            Dimension * 2 coordinate pairs, representing the min
+            and max coordinates in each dimension of the item to be
+            deleted from the index. Their ordering will depend on the
+            index's :attr:`interleaved` data member.
+            These are not the coordinates of a space containing the
+            item, but those of the item itself. Together with the
+            obj parameter, they determine which item will be deleted.
+            This may be an object that satisfies the numpy array protocol.
+
+        Example::
+
+            >>> from rtree import index
+            >>> idx = index.RtreeContainer()
+            >>> obj = object()
+            >>> idx.insert(obj, (34.3776829412, 26.7375853734,
+            ...             49.3776829412, 41.7375853734))
+            >>> idx.delete(obj, (34.3776829412, 26.7375853734,
+            ...             49.3776829412, 41.7375853734))
+
+        """
+        try:
+            count = self._objects[id(obj)][0] - 1
+        except KeyError:
+            raise IndexError('object is not in the index')
+        if count == 0:
+            del self._objects[id(obj)]
+        else:
+            self._objects[id(obj)] = (count, obj)
+        return super(RtreeContainer, self).delete(id(obj), coordinates)
+
+    def leaves(self):
+        return [(id, [self._objects[child_id][1]
+                      for child_id in child_ids], bounds)
+                for id, child_ids, bounds
+                in super(RtreeContainer, self).leaves()]
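A short usage sketch of the container class above (the sample data is
invented); plain objects come back from intersection(), and Item wrappers
when bbox=True:

    from rtree import index

    idx = index.RtreeContainer()
    regions = {'a': (0.0, 0.0, 1.0, 1.0), 'b': (5.0, 5.0, 6.0, 6.0)}
    for name, bounds in regions.items():
        idx.insert({'name': name}, bounds)    # any Python object, tracked by identity

    print([obj['name'] for obj in idx.intersection((0.0, 0.0, 2.0, 2.0))])
    print([(item.object['name'], item.bbox)
           for item in idx.intersection((0.0, 0.0, 2.0, 2.0), bbox=True)])
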
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index eb31397..541d394
--- a/setup.py
+++ b/setup.py
@@ -1,47 +1,52 @@
-from glob import glob
-from setuptools import setup
-
-import rtree
-
-# Get text from README.txt
-readme_text = open('docs/source/README.txt', 'r').read()
-
-import os
-
-if os.name == 'nt':
-    data_files=[('Lib/site-packages/rtree',
-                 [r'D:\libspatialindex\bin\spatialindex.dll',
-                  r'D:\libspatialindex\bin\spatialindex_c.dll',]),]
-else:
-    data_files = None
-    
-setup(name          = 'Rtree',
-      version       = rtree.__version__,
-      description   = 'R-Tree spatial index for Python GIS',
-      license       = 'LGPL',
-      keywords      = 'gis spatial index r-tree',
-      author        = 'Sean Gillies',
-      author_email  = 'sean.gillies at gmail.com',
-      maintainer        = 'Howard Butler',
-      maintainer_email  = 'hobu at hobu.net',
-      url   = 'http://toblerity.github.com/rtree/',
-      long_description = readme_text,
-      packages      = ['rtree'],
-      install_requires = ['setuptools'],
-      test_suite = 'tests.test_suite',
-      data_files = data_files,
-      zip_safe = False,
-      classifiers   = [
-        'Development Status :: 5 - Production/Stable',
-        'Intended Audience :: Developers',
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
-        'Operating System :: OS Independent',
-        'Programming Language :: C',
-        'Programming Language :: C++',
-        'Programming Language :: Python',
-        'Topic :: Scientific/Engineering :: GIS',
-        'Topic :: Database',
-        ],
-)
-
+#!/usr/bin/env python
+from setuptools import setup
+
+import rtree
+
+# Get text from README.txt
+with open('docs/source/README.txt', 'r') as fp:
+    readme_text = fp.read()
+
+import os
+
+if os.name == 'nt':
+    data_files = [('Lib/site-packages/rtree',
+                  [os.environ['SPATIALINDEX_LIBRARY']
+                      if 'SPATIALINDEX_LIBRARY' in os.environ else
+                      r'D:\libspatialindex\bin\spatialindex.dll',
+                   os.environ['SPATIALINDEX_C_LIBRARY']
+                      if 'SPATIALINDEX_C_LIBRARY' in os.environ else
+                      r'D:\libspatialindex\bin\spatialindex_c.dll'])]
+else:
+    data_files = None
+
+setup(
+    name          = 'Rtree',
+    version       = rtree.__version__,
+    description   = 'R-Tree spatial index for Python GIS',
+    license       = 'LGPL',
+    keywords      = 'gis spatial index r-tree',
+    author        = 'Sean Gillies',
+    author_email  = 'sean.gillies at gmail.com',
+    maintainer        = 'Howard Butler',
+    maintainer_email  = 'hobu at hobu.net',
+    url   = 'http://toblerity.github.com/rtree/',
+    long_description = readme_text,
+    packages      = ['rtree'],
+    install_requires = ['setuptools'],
+    test_suite = 'tests.test_suite',
+    data_files = data_files,
+    zip_safe = False,
+    classifiers   = [
+      'Development Status :: 5 - Production/Stable',
+      'Intended Audience :: Developers',
+      'Intended Audience :: Science/Research',
+      'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+      'Operating System :: OS Independent',
+      'Programming Language :: C',
+      'Programming Language :: C++',
+      'Programming Language :: Python',
+      'Topic :: Scientific/Engineering :: GIS',
+      'Topic :: Database',
+      ],
+)
diff --git a/tests/benchmarks.py b/tests/benchmarks.py
index d01ca56..97a433f 100644
--- a/tests/benchmarks.py
+++ b/tests/benchmarks.py
@@ -2,27 +2,27 @@
 
 # Stream load:
 # 293710.04 usec/pass
-# 
+#
 # One-at-a-time load:
 # 527883.95 usec/pass
-# 
-# 
+#
+#
 # 30000 points
 # Query box:  (1240000, 1010000, 1400000, 1390000)
-# 
-# 
+#
+#
 # Brute Force:
 # 46 hits
 # 13533.60 usec/pass
-# 
+#
 # Memory-based Rtree Intersection:
 # 46 hits
 # 7516.19 usec/pass
-# 
+#
 # Disk-based Rtree Intersection:
 # 46 hits
 # 7543.00 usec/pass
-# 
+#
 # Disk-based Rtree Intersection without Item() wrapper (objects='raw'):
 # 46 raw hits
 # 347.60 usec/pass
@@ -40,6 +40,7 @@ from rtree import Rtree as _Rtree
 
 TEST_TIMES = 20
 
+
 # a very basic Geometry
 class Point(object):
     def __init__(self, x, y):
@@ -47,7 +48,7 @@ class Point(object):
         self.y = y
 
 # Scatter points randomly in a 1x1 box
-# 
+
 
 class Rtree(_Rtree):
     pickle_protocol = -1
@@ -57,7 +58,13 @@ count = 30000
 points = []
 
 insert_object = None
-insert_object = {'a': list(range(100)), 'b': 10, 'c': object(), 'd': dict(x=1), 'e': Point(2, 3)}
+insert_object = {
+    'a': list(range(100)),
+    'b': 10,
+    'c': object(),
+    'd': dict(x=1),
+    'e': Point(2, 3),
+}
 
 index = Rtree()
 disk_index = Rtree('test', overwrite=1)
@@ -73,23 +80,26 @@ for i in range(count):
     disk_index.add(i, (x, y), insert_object)
     coordinates.append((i, (x, y, x, y), insert_object))
 
-s ="""
+s = """
 bulk = Rtree(coordinates[:2000])
 """
-t = timeit.Timer(stmt=s, setup='from __main__ import coordinates, Rtree, insert_object')
+t = timeit.Timer(
+    stmt=s, setup='from __main__ import coordinates, Rtree, insert_object')
 print("\nStream load:")
 print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
 
-s ="""
+s = """
 idx = Rtree()
 i = 0
 for point in points[:2000]:
     idx.add(i, (point.x, point.y), insert_object)
     i+=1
 """
-t = timeit.Timer(stmt=s, setup='from __main__ import points, Rtree, insert_object')
+t = timeit.Timer(
+    stmt=s, setup='from __main__ import points, Rtree, insert_object')
 print("\nOne-at-a-time load:")
-print("%.2f usec/pass\n\n" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+print("%.2f usec/pass\n\n"
+      % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
 
 
 bbox = (1240000, 1010000, 1400000, 1390000)
@@ -99,11 +109,15 @@ print("")
 
 # Brute force all points within a 0.1x0.1 box
 s = """
-hits = [p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]]
+hits = [p for p in points
+        if p.x >= bbox[0] and p.x <= bbox[2]
+        and p.y >= bbox[1] and p.y <= bbox[3]]
 """
 t = timeit.Timer(stmt=s, setup='from __main__ import points, bbox')
 print("\nBrute Force:")
-print(len([p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]]), "hits")
+print(len([p for p in points
+           if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1]
+           and p.y <= bbox[3]]), "hits")
 print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
 
 # 0.1x0.1 box using intersection
@@ -117,7 +131,8 @@ else:
     hits = [p.object for p in index.intersection(bbox, objects=insert_object)]
     """
 
-t = timeit.Timer(stmt=s, setup='from __main__ import points, index, bbox, insert_object')
+t = timeit.Timer(
+    stmt=s, setup='from __main__ import points, index, bbox, insert_object')
 print("\nMemory-based Rtree Intersection:")
 print(len([points[id] for id in index.intersection(bbox)]), "hits")
 print("%.2f usec/pass" % (1000000 * t.timeit(number=100)/100))
@@ -126,7 +141,9 @@ print("%.2f usec/pass" % (1000000 * t.timeit(number=100)/100))
 # run same test on disk_index.
 s = s.replace("index.", "disk_index.")
 
-t = timeit.Timer(stmt=s, setup='from __main__ import points, disk_index, bbox, insert_object')
+t = timeit.Timer(
+    stmt=s,
+    setup='from __main__ import points, disk_index, bbox, insert_object')
 print("\nDisk-based Rtree Intersection:")
 hits = list(disk_index.intersection(bbox))
 print(len(hits), "hits")
@@ -137,11 +154,15 @@ if insert_object:
     s = """
         hits = disk_index.intersection(bbox, objects="raw")
         """
-    t = timeit.Timer(stmt=s, setup='from __main__ import points, disk_index, bbox, insert_object')
-    print("\nDisk-based Rtree Intersection without Item() wrapper (objects='raw'):")
+    t = timeit.Timer(
+        stmt=s,
+        setup='from __main__ import points, disk_index, bbox, insert_object')
+    print("\nDisk-based Rtree Intersection "
+          "without Item() wrapper (objects='raw'):")
     result = list(disk_index.intersection(bbox, objects="raw"))
     print(len(result), "raw hits")
-    print("%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
+    print("%.2f usec/pass"
+          % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES))
     assert 'a' in result[0], result[0]
 
 import os
diff --git a/tests/data.py b/tests/data.py
index 6601814..d9579d5 100755
--- a/tests/data.py
+++ b/tests/data.py
@@ -15,7 +15,7 @@ for line in f.readlines():
         break
     [left, bottom, right, top] = [float(x) for x in line.split()]
     boxes3.append((left, bottom, right, top))
-                
+
 points = []
 f = open(os.path.join(os.path.dirname(__file__), 'point_clusters.data'), 'r')
 for line in f.readlines():
@@ -24,16 +24,20 @@ for line in f.readlines():
     [left, bottom] = [float(x) for x in line.split()]
     points.append((left, bottom))
 
+
 def draw_data(filename):
     from PIL import Image, ImageDraw
     im = Image.new('RGB', (1440, 720))
     d = ImageDraw.Draw(im)
     for box in boxes15:
-        coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
+        coords = [
+            4.0*(box[0]+180), 4.0*(box[1]+90),
+            4.0*(box[2]+180), 4.0*(box[3]+90)]
         d.rectangle(coords, outline='red')
     for box in boxes3:
-        coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
+        coords = [
+            4.0*(box[0]+180), 4.0*(box[1]+90),
+            4.0*(box[2]+180), 4.0*(box[3]+90)]
         d.rectangle(coords, outline='blue')
 
     im.save(filename)
-    
diff --git a/tests/off/BoundsCheck.txt b/tests/off/BoundsCheck.txt
deleted file mode 100644
index b068e2c..0000000
--- a/tests/off/BoundsCheck.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Bounding Box Checking
-=====================
-
-See http://trac.gispython.org/projects/PCL/ticket/127.
-
-Adding with bogus bounds
-------------------------
-
-  >>> import rtree
-  >>> index = rtree.Rtree()
-  >>> index.add(1, (0.0, 0.0, -1.0, 1.0))
-  Traceback (most recent call last):
-  ...
-  RTreeError: Coordinates must not have minimums more than maximums
-  
-  >>> index.intersection((0.0, 0.0, -1.0, 1.0))
-  Traceback (most recent call last):
-  ...
-  RTreeError: Coordinates must not have minimums more than maximums
-  
-Adding with invalid bounds argument should raise an exception
-
-  >>> index.add(1, 1)
-  Traceback (most recent call last):
-  ...
-  TypeError: Bounds must be a sequence
diff --git a/tests/off/index.txt b/tests/off/index.txt
deleted file mode 100644
index 6c4f9b3..0000000
--- a/tests/off/index.txt
+++ /dev/null
@@ -1,308 +0,0 @@
-.. _index_test:
-
-Examples
-..............................................................................
-
-    >>> from rtree import index
-    >>> from rtree.index import Rtree
-
-Ensure libspatialindex version is >= 1.7.0
-
-    >>> index.__c_api_version__.split('.')[1] >= 7
-    True
-    
-Make an instance, index stored in memory
-    
-    >>> p = index.Property()
-    
-    >>> idx = index.Index(properties=p)
-    >>> idx
-    <rtree.index.Index object at 0x...>
-    
-Add 100 largish boxes randomly distributed over the domain
-    
-    >>> for i, coords in enumerate(boxes15):
-    ...     idx.add(i, coords)
-    
-    >>> 0 in idx.intersection((0, 0, 60, 60))
-    True
-    >>> hits = list(idx.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    10
-    >>> hits
-    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
-
-Insert an object into the index that can be pickled 
-
-    >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
-
-Fetch our straggler that contains a pickled object    
-    >>> hits = idx.intersection((0, 0, 60, 60), objects=True)
-    >>> for i in hits:
-    ...     if i.id == 4321:
-    ...         i.object
-    ...         ['%.10f' % t for t in i.bbox]
-    42
-    ['34.3776829412', '26.7375853734', '49.3776829412', '41.7375853734']
-
-
-Find the three items nearest to this one
-    >>> hits = list(idx.nearest((0,0,10,10), 3))
-    >>> hits
-    [76, 48, 19]
-    >>> len(hits)
-    3
-    
-
-Default order is [xmin, ymin, xmax, ymax]
-    >>> ['%.10f' % t for t in idx.bounds]
-    ['-186.6737892790', '-96.7177218184', '184.7613875560', '96.6043699778']
-
-To get in order [xmin, xmax, ymin, ymax (... for n-d indexes)] use the kwarg:
-    >>> ['%.10f' % t for t in idx.get_bounds(coordinate_interleaved=False)]
-    ['-186.6737892790', '184.7613875560', '-96.7177218184', '96.6043699778']
-
-Delete index members
-
-    >>> for i, coords in enumerate(boxes15):
-    ...     idx.delete(i, coords)
-
-Delete our straggler too
-    >>> idx.delete(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734) )
-
-Check that we have deleted stuff
-
-    >>> hits = 0
-    >>> hits = list(idx.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    0
-    
-Check that nearest returns *all* of the items that are nearby
-
-    >>> idx2 = Rtree()
-    >>> idx2
-    <rtree.index.Index object at 0x...>
-
-    >>> locs = [(14, 10, 14, 10), 
-    ...         (16, 10, 16, 10)]
-    
-    >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
-    ...        idx2.add(i, (minx, miny, maxx, maxy))
-
-    >>> sorted(idx2.nearest((15, 10, 15, 10), 1))
-    [0, 1]
-
-
-Check that nearest returns *all* of the items that are nearby (with objects)
-    >>> idx2 = Rtree()
-    >>> idx2
-    <rtree.index.Index object at 0x...>
-
-    >>> locs = [(14, 10, 14, 10), 
-    ...         (16, 10, 16, 10)]
-    
-    >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
-    ...        idx2.add(i, (minx, miny, maxx, maxy), obj={'a': 42})
-
-    >>> sorted([(i.id, i.object) for i in idx2.nearest((15, 10, 15, 10), 1, objects=True)])
-    [(0, {'a': 42}), (1, {'a': 42})]
-
-
-    >>> idx2 = Rtree()
-    >>> idx2
-    <rtree.index.Index object at 0x...>
-            
-    >>> locs = [(2, 4), (6, 8), (10, 12), (11, 13), (15, 17), (13, 20)]
-    
-    >>> for i, (start, stop) in enumerate(locs):
-    ...        idx2.add(i, (start, 1, stop, 1))
-    
-    >>> sorted(idx2.nearest((13, 0, 20, 2), 1))
-    [3, 4, 5]
-
-Default page size 4096
-
-    >>> idx3 = Rtree("defaultidx")
-    >>> for i, coords in enumerate(boxes15):
-    ...     idx3.add(i, coords)
-    >>> hits = list(idx3.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    10
-
-Make sure to delete the index or the file is not flushed and it 
-will be invalid
-
-    >>> del idx3
-
-Page size 3
-
-    >>> idx4 = Rtree("pagesize3", pagesize=3)
-    >>> for i, coords in enumerate(boxes15):
-    ...     idx4.add(i, coords)
-    >>> hits = list(idx4.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    10
-    
-    >>> idx4.close()
-    >>> del idx4
-    
-Test invalid name
-
-    >>> inv = Rtree("bogus/foo")
-    Traceback (most recent call last):
-    ...
-    IOError: Unable to open file 'bogus/foo.idx' for index storage
-
-Load a persisted index
-
-    >>> import shutil
-    >>> _ = shutil.copy("defaultidx.dat", "testing.dat")
-    >>> _ = shutil.copy("defaultidx.idx", "testing.idx")
-
-    # >>> import pdb;pdb.set_trace()
-
-    >>> idx = Rtree("testing")
-    >>> hits = list(idx.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    10
-
-Make a 3D index
-    >>> p = index.Property()
-    >>> p.dimension = 3
-    
-
-with interleaved=False, the order of input and output is: 
-(xmin, xmax, ymin, ymax, zmin, zmax)
-
-    >>> idx3d = index.Index(properties=p, interleaved=False)
-    >>> idx3d
-    <rtree.index.Index object at 0x...>
-    
-    >>> idx3d.insert(1, (0, 0, 60, 60, 22, 22.0))
-    
-    >>> 1 in idx3d.intersection((-1, 1, 58, 62, 22, 24))
-    True
-
-
-Make a 4D index
-    >>> p = index.Property()
-    >>> p.dimension = 4
-    
-
-with interleaved=False, the order of input and output is: (xmin, xmax, ymin, ymax, zmin, zmax, kmin, kmax)
-
-    >>> idx4d = index.Index(properties=p, interleaved=False)
-    >>> idx4d
-    <rtree.index.Index object at 0x...>
-    
-    >>> idx4d.insert(1, (0, 0, 60, 60, 22, 22.0, 128, 142))
-    
-    >>> 1 in idx4d.intersection((-1, 1, 58, 62, 22, 24, 120, 150))
-    True
-
-Check that we can make an index with custom filename extensions
-
-    >>> p = index.Property()
-    >>> p.dat_extension = 'data'
-    >>> p.idx_extension = 'index'
-    
-    >>> idx_cust = Rtree('custom', properties=p)
-    >>> for i, coords in enumerate(boxes15):
-    ...     idx_cust.add(i, coords)
-    >>> hits = list(idx_cust.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    10
-    
-    >>> del idx_cust
-    
-Reopen the index
-    >>> p2 = index.Property()
-    >>> p2.dat_extension = 'data'
-    >>> p2.idx_extension = 'index'
-    
-    >>> idx_cust2 = Rtree('custom', properties=p2)    
-    >>> hits = list(idx_cust2.intersection((0, 0, 60, 60)))
-    >>> len(hits)
-    10
-    
-    >>> del idx_cust2
-
-Adding the same id twice does not overwrite existing data
-
-    >>> r = Rtree()
-    >>> r.add(1, (2, 2))
-    >>> r.add(1, (3, 3))
-    >>> list(r.intersection((0, 0, 5, 5)))
-    [1, 1]
-
-A stream of data need that needs to be an iterator that will raise a 
-StopIteration. The order depends on the interleaved kwarg sent to the 
-constructor.
-
-The object can be None, but you must put a place holder of 'None' there.
-
-    >>> p = index.Property()
-    >>> def data_gen(interleaved=True):
-    ...    for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
-    ...        if interleaved:
-    ...            yield (i, (minx, miny, maxx, maxy), 42)
-    ...        else:
-    ...            yield (i, (minx, maxx, miny, maxy), 42)
-
-    >>> strm_idx = index.Rtree(data_gen(), properties = p)
-
-    >>> hits = list(strm_idx.intersection((0, 0, 60, 60)))
-
-    >>> len(hits)
-    10
-    
-    
-    >>> sorted(hits)
-    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
-
-    >>> hits = list(strm_idx.intersection((0, 0, 60, 60), objects=True))
-    >>> len(hits)
-    10
-    
-    >>> hits[0].object
-    42
-
-Try streaming against a persisted index without interleaving.
-    >>> strm_idx = index.Rtree('streamed', data_gen(interleaved=False), properties = p, interleaved=False)
-
-Note the arguments to intersection must be xmin, xmax, ymin, ymax for interleaved=False
-    >>> hits = list(strm_idx.intersection((0, 60, 0, 60)))
-    >>> len(hits)
-    10
-    
-    >>> sorted(hits)
-    [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
-
-    >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects=True))
-    >>> len(hits)
-    10
-    
-    >>> hits[0].object
-    42
-
-    >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects='raw'))
-    >>> hits[0]
-    42
-    >>> len(hits)
-    10
-    
-    >>> strm_idx.count((0, 60, 0, 60))
-    10L
-    
-    >>> del strm_idx
-
-    >>> p = index.Property()
-    >>> p.leaf_capacity = 100
-    >>> p.fill_factor = 0.5
-    >>> p.index_capacity = 10
-    >>> p.near_minimum_overlap_factor = 7
-    >>> idx = index.Index(data_gen(interleaved=False), properties = p, interleaved=False)
-
-    >>> leaves = idx.leaves()
-
-    >>> del idx
diff --git a/tests/off/properties.txt b/tests/off/properties.txt
deleted file mode 100644
index 22842ae..0000000
--- a/tests/off/properties.txt
+++ /dev/null
@@ -1,257 +0,0 @@
-Testing rtree properties
-==========================
-
-Make a simple properties object
-
-    >>> from rtree import index
-    >>> p = index.Property()
-
-Test as_dict()
-
-    >>> d = p.as_dict()
-    >>> d['index_id'] is None
-    True
-
-Test creation from kwargs and eval() of its repr()
-
-    >>> q = index.Property(**d)
-    >>> eval(repr(q))['index_id'] is None
-    True
-
-Test pretty printed string
-
-    >>> print q
-    {'buffering_capacity': 10,
-     'custom_storage_callbacks': None,
-     'custom_storage_callbacks_size': 0L,
-     'dat_extension': 'dat',
-     'dimension': 2,
-     'filename': '',
-     'fill_factor': 0...,
-     'idx_extension': 'idx',
-     'index_capacity': 100,
-     'index_id': None,
-     'leaf_capacity': 100,
-     'near_minimum_overlap_factor': 32,
-     'overwrite': True,
-     'pagesize': 4096,
-     'point_pool_capacity': 500,
-     'region_pool_capacity': 1000,
-     'reinsert_factor': 0...,
-     'split_distribution_factor': 0...,
-     'storage': 1,
-     'tight_mbr': True,
-     'tpr_horizon': 20.0,
-     'type': 0,
-     'variant': 2,
-     'writethrough': False}
-
-Test property setting
-
-    >>> p = index.Property()
-    >>> p.type = 0
-    >>> p.type
-    0
-    
-    >>> p.type = 2
-    >>> p.type
-    2
-
-    >>> p.type = 6
-    Traceback (most recent call last):
-    ...
-    RTreeError: LASError in "IndexProperty_SetIndexType": Inputted value is not a valid index type
-
-    >>> p.dimension = 3
-    >>> p.dimension
-    3
-    
-    >>> p.dimension = 2
-    >>> p.dimension
-    2
-    
-    >>> p.dimension = -2
-    Traceback (most recent call last):
-    ...
-    RTreeError: Negative or 0 dimensional indexes are not allowed
-    
-    >>> p.variant = 0
-    >>> p.variant
-    0
-    
-    >>> p.variant = 6
-    Traceback (most recent call last):
-    ...
-    RTreeError: LASError in "IndexProperty_SetIndexVariant": Inputted value is not a valid index variant
-    
-    >>> p.storage = 0
-    >>> p.storage 
-    0
-    
-    >>> p.storage = 1
-    >>> p.storage
-    1
-    
-    >>> p.storage = 3
-    Traceback (most recent call last):
-    ...
-    RTreeError: LASError in "IndexProperty_SetIndexStorage": Inputted value is not a valid index storage type
-    
-    >>> p.index_capacity
-    100
-    
-    >>> p.index_capacity = 300
-    >>> p.index_capacity
-    300
-    
-    >>> p.index_capacity = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: index_capacity must be > 0
-    
-    >>> p.pagesize
-    4096
-    
-    >>> p.pagesize = 8192
-    >>> p.pagesize
-    8192
-    
-    >>> p.pagesize = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: Pagesize must be > 0
-
-    >>> p.leaf_capacity
-    100
-    
-    >>> p.leaf_capacity = 1000
-    >>> p.leaf_capacity
-    1000
-    >>> p.leaf_capacity = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: leaf_capacity must be > 0
-    
-    >>> p.index_pool_capacity
-    100
-    
-    >>> p.index_pool_capacity = 1500
-    >>> p.index_pool_capacity = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: index_pool_capacity must be > 0
-
-    >>> p.point_pool_capacity
-    500
-    
-    >>> p.point_pool_capacity = 1500
-    >>> p.point_pool_capacity = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: point_pool_capacity must be > 0
-
-    >>> p.region_pool_capacity
-    1000
-    
-    >>> p.region_pool_capacity = 1500
-    >>> p.region_pool_capacity
-    1500
-    >>> p.region_pool_capacity = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: region_pool_capacity must be > 0
-
-    >>> p.buffering_capacity
-    10
-    
-    >>> p.buffering_capacity = 100
-    >>> p.buffering_capacity = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: buffering_capacity must be > 0    
-
-    >>> p.tight_mbr
-    True
-    
-    >>> p.tight_mbr = 100
-    >>> p.tight_mbr
-    True
-    
-    >>> p.tight_mbr = False
-    >>> p.tight_mbr
-    False
-
-    >>> p.overwrite
-    True
-    
-    >>> p.overwrite = 100
-    >>> p.overwrite
-    True
-    
-    >>> p.overwrite = False
-    >>> p.overwrite
-    False
-
-    >>> p.near_minimum_overlap_factor
-    32
-    
-    >>> p.near_minimum_overlap_factor = 100
-    >>> p.near_minimum_overlap_factor = -4321
-    Traceback (most recent call last):
-    ...
-    RTreeError: near_minimum_overlap_factor must be > 0  
-
-    >>> p.writethrough
-    False
-    
-    >>> p.writethrough = 100
-    >>> p.writethrough
-    True
-    
-    >>> p.writethrough = False
-    >>> p.writethrough
-    False    
-
-    >>> '%.2f' % p.fill_factor
-    '0.70'
-
-    >>> p.fill_factor = 0.99
-    >>> '%.2f' % p.fill_factor
-    '0.99'
-
-    >>> '%.2f' % p.split_distribution_factor
-    '0.40'
-
-    >>> p.tpr_horizon
-    20.0
-    
-    >>> '%.2f' % p.reinsert_factor
-    '0.30'
-
-    >>> p.filename
-    ''
-    
-    >>> p.filename = 'testing123testing'
-    >>> p.filename
-    'testing123testing'
-    
-    >>> p.dat_extension
-    'dat'
-
-    >>> p.dat_extension = 'data'
-    >>> p.dat_extension
-    'data'
-    
-    >>> p.idx_extension
-    'idx'
-    >>> p.idx_extension = 'index'
-    >>> p.idx_extension
-    'index'
-    
-    >>> p.index_id
-    Traceback (most recent call last):
-    ...
-    RTreeError: Error in "IndexProperty_GetIndexID": Property IndexIdentifier was empty
-    >>> p.index_id = -420
-    >>> int(p.index_id)
-    -420
diff --git a/tests/off/test_customStorage.txt b/tests/off/test_customStorage.txt
deleted file mode 100644
index 75cbd38..0000000
--- a/tests/off/test_customStorage.txt
+++ /dev/null
@@ -1,157 +0,0 @@
-
-Shows how to create custom storage backend.
-
-Derive your custom storage for rtree.index.CustomStorage and override the methods
-shown in this example.
-You can also derive from rtree.index.CustomStorageBase to get at the raw C buffers
-if you need the extra speed and want to avoid translating from/to python strings.
-
-The essential methods are the load/store/deleteByteArray. The rtree library calls
-them whenever it needs to access the data in any way.
-
-Example storage which maps the page (ids) to the page data.
-
-   >>> from rtree.index import Rtree, CustomStorage, Property
-   
-   >>> class DictStorage(CustomStorage):
-   ...     """ A simple storage which saves the pages in a python dictionary """
-   ...     def __init__(self):
-   ...         CustomStorage.__init__( self )
-   ...         self.clear()
-   ... 
-   ...     def create(self, returnError):
-   ...         """ Called when the storage is created on the C side """
-   ... 
-   ...     def destroy(self, returnError):
-   ...         """ Called when the storage is destroyed on the C side """
-   ... 
-   ...     def clear(self):
-   ...         """ Clear all our data """   
-   ...         self.dict = {}
-   ... 
-   ...     def loadByteArray(self, page, returnError):
-   ...         """ Returns the data for page or returns an error """   
-   ...         try:
-   ...             return self.dict[page]
-   ...         except KeyError:
-   ...             returnError.contents.value = self.InvalidPageError
-   ... 
-   ...     def storeByteArray(self, page, data, returnError):
-   ...         """ Stores the data for page """   
-   ...         if page == self.NewPage:
-   ...             newPageId = len(self.dict)
-   ...             self.dict[newPageId] = data
-   ...             return newPageId
-   ...         else:
-   ...             if page not in self.dict:
-   ...                 returnError.value = self.InvalidPageError
-   ...                 return 0
-   ...             self.dict[page] = data
-   ...             return page
-   ... 
-   ...     def deleteByteArray(self, page, returnError):
-   ...         """ Deletes a page """   
-   ...         try:
-   ...             del self.dict[page]
-   ...         except KeyError:
-   ...             returnError.contents.value = self.InvalidPageError
-   ... 
-   ...     hasData = property( lambda self: bool(self.dict) )
-   ...     """ Returns true if we contains some data """   
-
-
-Now let's test drive our custom storage.
-
-First let's define the basic properties we will use for all rtrees:
-
-    >>> settings = Property()
-    >>> settings.writethrough = True
-    >>> settings.buffering_capacity = 1
-
-Notice that there is a small in-memory buffer by default. We effectively disable
-it here so our storage directly receives any load/store/delete calls.
-This is not necessary in general and can hamper performance; we just use it here
-for illustrative and testing purposes.
-
-Let's start with a basic test:
-
-Create the storage and hook it up with a new rtree:
-
-    >>> storage = DictStorage()
-    >>> r = Rtree( storage, properties = settings )
-
-Interestingly enough, if we take a look at the contents of our storage now, we
-can see the Rtree has already written two pages to it. This is for header and
-index.
-
-    >>> state1 = storage.dict.copy()
-    >>> list(state1.keys())
-    [0, 1]
-    
-Let's add an item:
-
-    >>> r.add(123, (0, 0, 1, 1))
-
-Make sure the data in the storage before and after the addition of the new item
-is different:
-
-    >>> state2 = storage.dict.copy()
-    >>> state1 != state2
-    True
-
-Now perform a few queries and assure the tree is still valid:
-
-    >>> item = list(r.nearest((0, 0), 1, objects=True))[0]
-    >>> int(item.id)
-    123
-    >>> r.valid()
-    True
-    
-Check if the stored data is a byte string
-
-    >>> isinstance(list(storage.dict.values())[0], bytes)
-    True
-    
-Delete an item
-
-    >>> r.delete(123, (0, 0, 1, 1))
-    >>> r.valid()
-    True
-    
-Just for reference show how to flush the internal buffers (e.g. when
-properties.buffer_capacity is > 1)
-
-    >>> r.clearBuffer()
-    >>> r.valid()
-    True
-
-Let's get rid of the tree, we're done with it
-    
-    >>> del r
-
-Show how to empty the storage
-    
-    >>> storage.clear()
-    >>> storage.hasData
-    False
-    >>> del storage
-
-    
-Ok, let's create another small test. This time we'll test reopening our custom
-storage. This is useful for persistent storages.
-
-First create a storage and put some data into it:
-
-    >>> storage = DictStorage()
-    >>> r1 = Rtree( storage, properties = settings, overwrite = True )
-    >>> r1.add(555, (2, 2))
-    >>> del r1
-    >>> storage.hasData
-    True
-    
-Then reopen the storage with a new tree and see if the data is still there
-
-    >>> r2 = Rtree( storage, properties = settings, overwrite = False )
-    >>> r2.count( (0,0,10,10) ) == 1
-    True
-    >>> del r2
diff --git a/tests/off/test_misc.txt b/tests/off/test_misc.txt
deleted file mode 100644
index fc02bac..0000000
--- a/tests/off/test_misc.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-
-make sure a file-based index is overwriteable.
-
-    >>> from rtree.index import Rtree
-    >>> r = Rtree('overwriteme')
-    >>> del r
-    >>> r = Rtree('overwriteme', overwrite=True)
-
-
-the default serializer is pickle, can use any by overriding dumps, loads
-
-    >>> r = Rtree()
-    >>> some_data = {"a": 22, "b": [1, "ccc"]}
-    >>> try:
-    ...     import simplejson
-    ...     r.dumps = simplejson.dumps
-    ...     r.loads = simplejson.loads
-    ...     r.add(0, (0, 0, 1, 1), some_data)
-    ...     list(r.nearest((0, 0), 1, objects="raw"))[0] == some_data
-    ... except ImportError:
-    ...     # "no import, failed"
-    ...     True
-    True
-
-
-    >>> r = Rtree()
-    >>> r.add(123, (0, 0, 1, 1))
-    >>> item = list(r.nearest((0, 0), 1, objects=True))[0]
-    >>> item.id
-    123
-
-    >>> r.valid()
-    True
-
-test UTF-8 filenames
-
-    >>> f = u'gilename\u4500abc'
-
-    >>> r = Rtree(f)
-    >>> r.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
-
-    >>> del r
diff --git a/tests/stream-check.py b/tests/stream-check.py
new file mode 100644
index 0000000..367a5dc
--- /dev/null
+++ b/tests/stream-check.py
@@ -0,0 +1,81 @@
+import numpy as np                                                                  
+import rtree                                                                        
+import time
+
+def random_tree_stream(points_count, include_object):
+    properties = rtree.index.Property()                                             
+    properties.dimension = 3                                                        
+
+    points_random = np.random.random((points_count,3,3))                            
+    points_bounds = np.column_stack((points_random.min(axis=1),                     
+                                     points_random.max(axis=1)))
+
+    
+    stacked = zip(np.arange(points_count),                                          
+                  points_bounds,                                                    
+                  np.arange(points_count))                                              
+
+
+    tic = time.time()
+    tree = rtree.index.Index(stacked,                                               
+                             properties = properties)
+    toc = time.time()
+    print 'creation, objects:', include_object, '\tstream method: ', toc-tic
+
+    return tree
+
+def random_tree_insert(points_count, include_object):
+    properties = rtree.index.Property()                                             
+    properties.dimension = 3                                                        
+
+    points_random = np.random.random((points_count,3,3))                            
+    points_bounds = np.column_stack((points_random.min(axis=1),                     
+                                     points_random.max(axis=1)))
+    tree = rtree.index.Index(properties = properties)
+    
+    if include_object:
+        stacked = zip(np.arange(points_count),                                          
+                      points_bounds,                                                    
+                      np.arange(points_count))
+    else:
+        stacked = zip(np.arange(points_count), 
+                                points_bounds)
+    tic = time.time()
+    for arg in stacked:
+        tree.insert(*arg)
+    toc = time.time()
+
+    print 'creation, objects:', include_object, '\tinsert method: ', toc-tic
+
+    return tree
+
+
+def check_tree(tree, count):
+    # tid should intersect every box, 
+    # as our random boxes are all inside [0,0,0,1,1,1]
+    tic = time.time()
+    tid = list(tree.intersection([-1,-1,-1,2,2,2]))
+    toc = time.time()
+    ok = (np.unique(tid) - np.arange(count) == 0).all()
+    print 'intersection, id method:    ', toc-tic, '\t query ok:', ok
+
+    tic = time.time()
+    tid = [i.object for i in tree.intersection([-1,-1,-1,2,2,2], objects=True)]
+    toc = time.time()
+    ok = (np.unique(tid) - np.arange(count) == 0).all()
+    print 'intersection, object method:', toc-tic, '\t query ok:', ok
+
+if __name__ == '__main__':
+    count = 10000
+
+    print '\nChecking stream loading\n---------------'
+    tree = random_tree_stream(count, False)
+    tree = random_tree_stream(count, True)
+    
+    check_tree(tree, count)
+
+    print '\nChecking insert loading\n---------------'
+    tree = random_tree_insert(count, False)
+    tree = random_tree_insert(count, True)
+
+    check_tree(tree, count)
\ No newline at end of file
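
For reference, the script above relies on Python 2 print statements; a minimal
Python 3-compatible sketch of the same stream-versus-insert timing comparison,
assuming only that numpy and rtree are importable (the helper names here are
illustrative, not part of this commit):

    import time

    import numpy as np
    import rtree

    def random_boxes(count):
        # Random 3D boxes inside the unit cube, interleaved order:
        # (xmin, ymin, zmin, xmax, ymax, zmax).
        pts = np.random.random((count, 3, 3))
        return np.column_stack((pts.min(axis=1), pts.max(axis=1)))

    def build_stream(bounds):
        # Bulk-load from a generator of (id, bounds, obj) tuples.
        props = rtree.index.Property()
        props.dimension = 3
        stream = ((i, b, None) for i, b in enumerate(bounds))
        return rtree.index.Index(stream, properties=props)

    def build_insert(bounds):
        # Insert one entry at a time for comparison.
        props = rtree.index.Property()
        props.dimension = 3
        tree = rtree.index.Index(properties=props)
        for i, b in enumerate(bounds):
            tree.insert(i, b)
        return tree

    if __name__ == '__main__':
        bounds = random_boxes(10000)
        for label, builder in (('stream', build_stream), ('insert', build_insert)):
            tic = time.time()
            tree = builder(bounds)
            print('%s load: %.3fs' % (label, time.time() - tic))
            # Every box lies inside the unit cube, so this query must hit all of them.
            assert len(list(tree.intersection([-1, -1, -1, 2, 2, 2]))) == len(bounds)
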
diff --git a/tests/BoundsCheck.txt b/tests/test_bounds.txt
similarity index 71%
rename from tests/BoundsCheck.txt
rename to tests/test_bounds.txt
index b068e2c..34055f7 100644
--- a/tests/BoundsCheck.txt
+++ b/tests/test_bounds.txt
@@ -8,19 +8,19 @@ Adding with bogus bounds
 
   >>> import rtree
   >>> index = rtree.Rtree()
-  >>> index.add(1, (0.0, 0.0, -1.0, 1.0))
+  >>> index.add(1, (0.0, 0.0, -1.0, 1.0))  #doctest: +IGNORE_EXCEPTION_DETAIL
   Traceback (most recent call last):
   ...
   RTreeError: Coordinates must not have minimums more than maximums
-  
-  >>> index.intersection((0.0, 0.0, -1.0, 1.0))
+
+  >>> index.intersection((0.0, 0.0, -1.0, 1.0))  #doctest: +IGNORE_EXCEPTION_DETAIL
   Traceback (most recent call last):
   ...
   RTreeError: Coordinates must not have minimums more than maximums
-  
+
 Adding with invalid bounds argument should raise an exception
 
-  >>> index.add(1, 1)
+  >>> index.add(1, 1)  #doctest: +IGNORE_EXCEPTION_DETAIL
   Traceback (most recent call last):
   ...
   TypeError: Bounds must be a sequence
diff --git a/tests/test_doctests.py b/tests/test_doctests.py
index fc2b53a..0413844 100644
--- a/tests/test_doctests.py
+++ b/tests/test_doctests.py
@@ -3,8 +3,10 @@ import unittest
 import glob
 import os
 
-#from zope.testing import doctest
-from rtree.index import major_version, minor_version, patch_version
+doctest.IGNORE_EXCEPTION_DETAIL
+
+# from zope.testing import doctest
+from rtree.index import major_version, minor_version  # , patch_version
 
 from .data import boxes15, boxes3, points
 
@@ -12,26 +14,30 @@ optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
                doctest.NORMALIZE_WHITESPACE |
                doctest.ELLIPSIS)
 
+
 def list_doctests():
     # Skip the custom storage test unless we have libspatialindex 1.8+.
     return [filename
             for filename
             in glob.glob(os.path.join(os.path.dirname(__file__), '*.txt'))
             if not (
-                filename.endswith('customStorage.txt') 
+                filename.endswith('customStorage.txt')
                 and major_version < 2 and minor_version < 8)]
 
+
 def open_file(filename, mode='r'):
     """Helper function to open files from within the tests package."""
     return open(os.path.join(os.path.dirname(__file__), filename), mode)
 
+
 def setUp(test):
     test.globs.update(dict(
-            open_file = open_file,
-            boxes15=boxes15,
-            boxes3=boxes3,
-            points=points
-            ))
+        open_file=open_file,
+        boxes15=boxes15,
+        boxes3=boxes3,
+        points=points
+        ))
+
 
 def test_suite():
     return unittest.TestSuite(
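
Note that the bare doctest.IGNORE_EXCEPTION_DETAIL expression added near the top
of test_doctests.py has no effect on its own; the per-example
#doctest: +IGNORE_EXCEPTION_DETAIL directives elsewhere in this commit are what
relax the exception matching. If the flag were meant to apply globally, it would
have to be folded into the option flags, roughly as in this sketch (not part of
the commit):

    import doctest

    # Combine the flag with the existing options instead of referencing it bare.
    optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
                   doctest.NORMALIZE_WHITESPACE |
                   doctest.ELLIPSIS |
                   doctest.IGNORE_EXCEPTION_DETAIL)
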
diff --git a/tests/test_index.py b/tests/test_index.py
index faacd16..b442820 100644
--- a/tests/test_index.py
+++ b/tests/test_index.py
@@ -1,19 +1,23 @@
+import unittest
+
 from rtree import index
 
 from .data import boxes15
 
-def boxes15_stream(interleaved=True):
-   for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
-       if interleaved:
-           yield (i, (minx, miny, maxx, maxy), 42)
-       else:
-           yield (i, (minx, maxx, miny, maxy), 42)
 
+class IndexTests(unittest.TestCase):
 
-def test_rtree_constructor_stream_input():
-    p = index.Property()
-    sindex = index.Rtree(boxes15_stream(), properties=p)
+    def test_stream_input(self):
+        p = index.Property()
+        sindex = index.Index(boxes15_stream(), properties=p)
+        bounds = (0, 0, 60, 60)
+        hits = sindex.intersection(bounds)
+        self.assertEqual(sorted(hits), [0, 4, 16, 27, 35, 40, 47, 50, 76, 80])
 
-    bounds = (0, 0, 60, 60)
-    hits = list(sindex.intersection(bounds))
-    assert sorted(hits) == [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
+
+def boxes15_stream(interleaved=True):
+    for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
+        if interleaved:
+            yield (i, (minx, miny, maxx, maxy), 42)
+        else:
+            yield (i, (minx, maxx, miny, maxy), 42)
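
The stream tuples yielded by boxes15_stream() carry 42 as the stored object; a
brief sketch of bulk-loading the same fixture and recovering that object
afterwards, assuming the tests package and its data module are importable:

    from rtree import index
    from tests.data import boxes15

    def boxes15_stream():
        # (id, interleaved bounds, stored object) tuples, as in the test above.
        for i, (minx, miny, maxx, maxy) in enumerate(boxes15):
            yield (i, (minx, miny, maxx, maxy), 42)

    idx = index.Index(boxes15_stream())
    # objects='raw' yields the stored objects directly instead of Item wrappers.
    raw = list(idx.intersection((0, 0, 60, 60), objects='raw'))
    assert raw and all(obj == 42 for obj in raw)
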
diff --git a/tests/index.txt b/tests/test_index_doctests.txt
similarity index 79%
rename from tests/index.txt
rename to tests/test_index_doctests.txt
index 6c4f9b3..aeeab75 100644
--- a/tests/index.txt
+++ b/tests/test_index_doctests.txt
@@ -5,25 +5,29 @@ Examples
 
     >>> from rtree import index
     >>> from rtree.index import Rtree
+    >>> import sys
+    >>> sys.path.insert(0, './tests')
+    >>> from data import boxes15, boxes3, points
+    >>>
 
 Ensure libspatialindex version is >= 1.7.0
 
-    >>> index.__c_api_version__.split('.')[1] >= 7
+    >>> int(index.__c_api_version__.decode('UTF-8').split('.')[1]) >= 7
     True
-    
+
 Make an instance, index stored in memory
-    
+
     >>> p = index.Property()
-    
+
     >>> idx = index.Index(properties=p)
     >>> idx
     <rtree.index.Index object at 0x...>
-    
+
 Add 100 largish boxes randomly distributed over the domain
-    
+
     >>> for i, coords in enumerate(boxes15):
     ...     idx.add(i, coords)
-    
+
     >>> 0 in idx.intersection((0, 0, 60, 60))
     True
     >>> hits = list(idx.intersection((0, 0, 60, 60)))
@@ -32,11 +36,11 @@ Add 100 largish boxes randomly distributed over the domain
     >>> hits
     [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
 
-Insert an object into the index that can be pickled 
+Insert an object into the index that can be pickled
 
     >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
 
-Fetch our straggler that contains a pickled object    
+Fetch our straggler that contains a pickled object
     >>> hits = idx.intersection((0, 0, 60, 60), objects=True)
     >>> for i in hits:
     ...     if i.id == 4321:
@@ -52,7 +56,7 @@ Find the three items nearest to this one
     [76, 48, 19]
     >>> len(hits)
     3
-    
+
 
 Default order is [xmin, ymin, xmax, ymax]
     >>> ['%.10f' % t for t in idx.bounds]
@@ -76,16 +80,16 @@ Check that we have deleted stuff
     >>> hits = list(idx.intersection((0, 0, 60, 60)))
     >>> len(hits)
     0
-    
+
 Check that nearest returns *all* of the items that are nearby
 
     >>> idx2 = Rtree()
     >>> idx2
     <rtree.index.Index object at 0x...>
 
-    >>> locs = [(14, 10, 14, 10), 
+    >>> locs = [(14, 10, 14, 10),
     ...         (16, 10, 16, 10)]
-    
+
     >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
     ...        idx2.add(i, (minx, miny, maxx, maxy))
 
@@ -98,9 +102,9 @@ Check that nearest returns *all* of the items that are nearby (with objects)
     >>> idx2
     <rtree.index.Index object at 0x...>
 
-    >>> locs = [(14, 10, 14, 10), 
+    >>> locs = [(14, 10, 14, 10),
     ...         (16, 10, 16, 10)]
-    
+
     >>> for i, (minx, miny, maxx, maxy) in enumerate(locs):
     ...        idx2.add(i, (minx, miny, maxx, maxy), obj={'a': 42})
 
@@ -111,12 +115,12 @@ Check that nearest returns *all* of the items that are nearby (with objects)
     >>> idx2 = Rtree()
     >>> idx2
     <rtree.index.Index object at 0x...>
-            
+
     >>> locs = [(2, 4), (6, 8), (10, 12), (11, 13), (15, 17), (13, 20)]
-    
+
     >>> for i, (start, stop) in enumerate(locs):
     ...        idx2.add(i, (start, 1, stop, 1))
-    
+
     >>> sorted(idx2.nearest((13, 0, 20, 2), 1))
     [3, 4, 5]
 
@@ -129,7 +133,7 @@ Default page size 4096
     >>> len(hits)
     10
 
-Make sure to delete the index or the file is not flushed and it 
+Make sure to delete the index, or the file will not be flushed and it
 will be invalid
 
     >>> del idx3
@@ -142,16 +146,16 @@ Page size 3
     >>> hits = list(idx4.intersection((0, 0, 60, 60)))
     >>> len(hits)
     10
-    
+
     >>> idx4.close()
     >>> del idx4
-    
+
 Test invalid name
 
-    >>> inv = Rtree("bogus/foo")
+    >>> inv = Rtree("bogus/foo")  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
-    IOError: Unable to open file 'bogus/foo.idx' for index storage
+    OSError: Unable to open file 'bogus/foo.idx' for index storage
 
 Load a persisted index
 
@@ -169,17 +173,17 @@ Load a persisted index
 Make a 3D index
     >>> p = index.Property()
     >>> p.dimension = 3
-    
 
-with interleaved=False, the order of input and output is: 
+
+with interleaved=False, the order of input and output is:
 (xmin, xmax, ymin, ymax, zmin, zmax)
 
     >>> idx3d = index.Index(properties=p, interleaved=False)
     >>> idx3d
     <rtree.index.Index object at 0x...>
-    
+
     >>> idx3d.insert(1, (0, 0, 60, 60, 22, 22.0))
-    
+
     >>> 1 in idx3d.intersection((-1, 1, 58, 62, 22, 24))
     True
 
@@ -187,16 +191,16 @@ with interleaved=False, the order of input and output is:
 Make a 4D index
     >>> p = index.Property()
     >>> p.dimension = 4
-    
+
 
 with interleaved=False, the order of input and output is: (xmin, xmax, ymin, ymax, zmin, zmax, kmin, kmax)
 
     >>> idx4d = index.Index(properties=p, interleaved=False)
     >>> idx4d
     <rtree.index.Index object at 0x...>
-    
+
     >>> idx4d.insert(1, (0, 0, 60, 60, 22, 22.0, 128, 142))
-    
+
     >>> 1 in idx4d.intersection((-1, 1, 58, 62, 22, 24, 120, 150))
     True
 
@@ -205,26 +209,26 @@ Check that we can make an index with custom filename extensions
     >>> p = index.Property()
     >>> p.dat_extension = 'data'
     >>> p.idx_extension = 'index'
-    
+
     >>> idx_cust = Rtree('custom', properties=p)
     >>> for i, coords in enumerate(boxes15):
     ...     idx_cust.add(i, coords)
     >>> hits = list(idx_cust.intersection((0, 0, 60, 60)))
     >>> len(hits)
     10
-    
+
     >>> del idx_cust
-    
+
 Reopen the index
     >>> p2 = index.Property()
     >>> p2.dat_extension = 'data'
     >>> p2.idx_extension = 'index'
-    
-    >>> idx_cust2 = Rtree('custom', properties=p2)    
+
+    >>> idx_cust2 = Rtree('custom', properties=p2)
     >>> hits = list(idx_cust2.intersection((0, 0, 60, 60)))
     >>> len(hits)
     10
-    
+
     >>> del idx_cust2
 
 Adding the same id twice does not overwrite existing data
@@ -235,8 +239,8 @@ Adding the same id twice does not overwrite existing data
     >>> list(r.intersection((0, 0, 5, 5)))
     [1, 1]
 
-A stream of data need that needs to be an iterator that will raise a 
-StopIteration. The order depends on the interleaved kwarg sent to the 
+A stream of data needs to be an iterator that will raise a
+StopIteration. The order depends on the interleaved kwarg sent to the
 constructor.
 
 The object can be None, but you must put a place holder of 'None' there.
@@ -255,15 +259,15 @@ The object can be None, but you must put a place holder of 'None' there.
 
     >>> len(hits)
     10
-    
-    
+
+
     >>> sorted(hits)
     [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
 
     >>> hits = list(strm_idx.intersection((0, 0, 60, 60), objects=True))
     >>> len(hits)
     10
-    
+
     >>> hits[0].object
     42
 
@@ -274,14 +278,17 @@ Note the arguments to intersection must be xmin, xmax, ymin, ymax for interleave
     >>> hits = list(strm_idx.intersection((0, 60, 0, 60)))
     >>> len(hits)
     10
-    
+
     >>> sorted(hits)
     [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]
 
+    >>> strm_idx.leaves()
+    [(0, [2, 92, 51, 55, 26, 95, 7, 81, 38, 22, 58, 89, 91, 83, 98, 37, 70, 31, 49, 34, 11, 6, 13, 3, 23, 57, 9, 96, 84, 36, 5, 45, 77, 78, 44, 12, 42, 73, 93, 41, 71, 17, 39, 54, 88, 72, 97, 60, 62, 48, 19, 25, 76, 59, 66, 64, 79, 94, 40, 32, 46, 47, 15, 68, 10, 0, 80, 56, 50, 30], [-186.673789279, -96.7177218184, 172.392784956, 45.4856075292]), (2, [61, 74, 29, 99, 16, 43, 35, 33, 27, 63, 18, 90, 8, 53, 82, 21, 65, 24, 4, 1, 75, 67, 86, 52, 28, 85, 87, 14, 69, 20], [-174.739939684, 32. [...]
+
     >>> hits = list(strm_idx.intersection((0, 60, 0, 60), objects=True))
     >>> len(hits)
     10
-    
+
     >>> hits[0].object
     42
 
@@ -290,10 +297,10 @@ Note the arguments to intersection must be xmin, xmax, ymin, ymax for interleave
     42
     >>> len(hits)
     10
-    
-    >>> strm_idx.count((0, 60, 0, 60))
-    10L
-    
+
+    >>> int(strm_idx.count((0, 60, 0, 60)))
+    10
+
     >>> del strm_idx
 
     >>> p = index.Property()
@@ -306,3 +313,25 @@ Note the arguments to intersection must be xmin, xmax, ymin, ymax for interleave
     >>> leaves = idx.leaves()
 
     >>> del idx
+
+    >>> import numpy as np
+    >>> import rtree
+
+    >>> properties = rtree.index.Property()
+    >>> properties.dimension = 3
+
+    >>> points_count = 100
+    >>> points_random = np.random.random((points_count, 3,3))
+    >>> points_bounds = np.column_stack((points_random.min(axis=1), points_random.max(axis=1)))
+
+    >>> stacked = zip(np.arange(points_count), points_bounds, [None] * points_count)
+
+    >>> tree = rtree.index.Index(stacked, properties = properties)
+
+    >>> tid = list(tree.intersection([-1,-1,-1,2,2,2]))
+
+    >>> len(tid)
+    100
+
+    >>> (np.array(tid) == 0).all()
+    False
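
As the 3D and 4D examples above note, interleaved=False changes the coordinate
ordering from (xmin, ymin, xmax, ymax, ...) to (xmin, xmax, ymin, ymax, ...); a
minimal sketch of the same 2D box expressed both ways (values are illustrative):

    from rtree import index

    # Default interleaved=True ordering: (xmin, ymin, xmax, ymax).
    idx_i = index.Index(interleaved=True)
    idx_i.insert(0, (0.0, 0.0, 10.0, 5.0))

    # interleaved=False ordering: (xmin, xmax, ymin, ymax) for the same box.
    idx_n = index.Index(interleaved=False)
    idx_n.insert(0, (0.0, 10.0, 0.0, 5.0))

    # Queries follow the same ordering as the index they are issued against.
    assert list(idx_i.intersection((1.0, 1.0, 2.0, 2.0))) == [0]
    assert list(idx_n.intersection((1.0, 2.0, 1.0, 2.0))) == [0]
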
diff --git a/tests/test_pickle.py b/tests/test_pickle.py
new file mode 100644
index 0000000..afb9eb6
--- /dev/null
+++ b/tests/test_pickle.py
@@ -0,0 +1,20 @@
+import pickle
+import unittest
+import rtree.index
+
+
+class TestPickling(unittest.TestCase):
+
+    def test_index(self):
+        idx = rtree.index.Index()
+        unpickled = pickle.loads(pickle.dumps(idx))
+        self.assertNotEqual(idx.handle, unpickled.handle)
+        self.assertEqual(idx.properties.as_dict(),
+                         unpickled.properties.as_dict())
+        self.assertEqual(idx.interleaved, unpickled.interleaved)
+
+    def test_property(self):
+        p = rtree.index.Property()
+        unpickled = pickle.loads(pickle.dumps(p))
+        self.assertNotEqual(p.handle, unpickled.handle)
+        self.assertEqual(p.as_dict(), unpickled.as_dict())
diff --git a/tests/properties.txt b/tests/test_properties.txt
similarity index 72%
rename from tests/properties.txt
rename to tests/test_properties.txt
index 22842ae..567d13a 100644
--- a/tests/properties.txt
+++ b/tests/test_properties.txt
@@ -18,33 +18,6 @@ Test creation from kwargs and eval() of its repr()
     >>> eval(repr(q))['index_id'] is None
     True
 
-Test pretty printed string
-
-    >>> print q
-    {'buffering_capacity': 10,
-     'custom_storage_callbacks': None,
-     'custom_storage_callbacks_size': 0L,
-     'dat_extension': 'dat',
-     'dimension': 2,
-     'filename': '',
-     'fill_factor': 0...,
-     'idx_extension': 'idx',
-     'index_capacity': 100,
-     'index_id': None,
-     'leaf_capacity': 100,
-     'near_minimum_overlap_factor': 32,
-     'overwrite': True,
-     'pagesize': 4096,
-     'point_pool_capacity': 500,
-     'region_pool_capacity': 1000,
-     'reinsert_factor': 0...,
-     'split_distribution_factor': 0...,
-     'storage': 1,
-     'tight_mbr': True,
-     'tpr_horizon': 20.0,
-     'type': 0,
-     'variant': 2,
-     'writethrough': False}
 
 Test property setting
 
@@ -52,12 +25,12 @@ Test property setting
     >>> p.type = 0
     >>> p.type
     0
-    
+
     >>> p.type = 2
     >>> p.type
     2
 
-    >>> p.type = 6
+    >>> p.type = 6  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: LASError in "IndexProperty_SetIndexType": Inputted value is not a valid index type
@@ -65,152 +38,152 @@ Test property setting
     >>> p.dimension = 3
     >>> p.dimension
     3
-    
+
     >>> p.dimension = 2
     >>> p.dimension
     2
-    
-    >>> p.dimension = -2
+
+    >>> p.dimension = -2  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: Negative or 0 dimensional indexes are not allowed
-    
+
     >>> p.variant = 0
     >>> p.variant
     0
-    
-    >>> p.variant = 6
+
+    >>> p.variant = 6  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: LASError in "IndexProperty_SetIndexVariant": Inputted value is not a valid index variant
-    
+
     >>> p.storage = 0
-    >>> p.storage 
+    >>> p.storage
     0
-    
+
     >>> p.storage = 1
     >>> p.storage
     1
-    
-    >>> p.storage = 3
+
+    >>> p.storage = 3  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: LASError in "IndexProperty_SetIndexStorage": Inputted value is not a valid index storage type
-    
+
     >>> p.index_capacity
     100
-    
+
     >>> p.index_capacity = 300
     >>> p.index_capacity
     300
-    
-    >>> p.index_capacity = -4321
+
+    >>> p.index_capacity = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: index_capacity must be > 0
-    
+
     >>> p.pagesize
     4096
-    
+
     >>> p.pagesize = 8192
     >>> p.pagesize
     8192
-    
-    >>> p.pagesize = -4321
+
+    >>> p.pagesize = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: Pagesize must be > 0
 
     >>> p.leaf_capacity
     100
-    
+
     >>> p.leaf_capacity = 1000
     >>> p.leaf_capacity
     1000
-    >>> p.leaf_capacity = -4321
+    >>> p.leaf_capacity = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: leaf_capacity must be > 0
-    
+
     >>> p.index_pool_capacity
     100
-    
+
     >>> p.index_pool_capacity = 1500
-    >>> p.index_pool_capacity = -4321
+    >>> p.index_pool_capacity = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: index_pool_capacity must be > 0
 
     >>> p.point_pool_capacity
     500
-    
+
     >>> p.point_pool_capacity = 1500
-    >>> p.point_pool_capacity = -4321
+    >>> p.point_pool_capacity = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: point_pool_capacity must be > 0
 
     >>> p.region_pool_capacity
     1000
-    
+
     >>> p.region_pool_capacity = 1500
     >>> p.region_pool_capacity
     1500
-    >>> p.region_pool_capacity = -4321
+    >>> p.region_pool_capacity = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: region_pool_capacity must be > 0
 
     >>> p.buffering_capacity
     10
-    
+
     >>> p.buffering_capacity = 100
-    >>> p.buffering_capacity = -4321
+    >>> p.buffering_capacity = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
-    RTreeError: buffering_capacity must be > 0    
+    RTreeError: buffering_capacity must be > 0
 
     >>> p.tight_mbr
     True
-    
+
     >>> p.tight_mbr = 100
     >>> p.tight_mbr
     True
-    
+
     >>> p.tight_mbr = False
     >>> p.tight_mbr
     False
 
     >>> p.overwrite
     True
-    
+
     >>> p.overwrite = 100
     >>> p.overwrite
     True
-    
+
     >>> p.overwrite = False
     >>> p.overwrite
     False
 
     >>> p.near_minimum_overlap_factor
     32
-    
+
     >>> p.near_minimum_overlap_factor = 100
-    >>> p.near_minimum_overlap_factor = -4321
+    >>> p.near_minimum_overlap_factor = -4321  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
-    RTreeError: near_minimum_overlap_factor must be > 0  
+    RTreeError: near_minimum_overlap_factor must be > 0
 
     >>> p.writethrough
     False
-    
+
     >>> p.writethrough = 100
     >>> p.writethrough
     True
-    
+
     >>> p.writethrough = False
     >>> p.writethrough
-    False    
+    False
 
     >>> '%.2f' % p.fill_factor
     '0.70'
@@ -224,31 +197,31 @@ Test property setting
 
     >>> p.tpr_horizon
     20.0
-    
+
     >>> '%.2f' % p.reinsert_factor
     '0.30'
 
     >>> p.filename
     ''
-    
+
     >>> p.filename = 'testing123testing'
     >>> p.filename
     'testing123testing'
-    
+
     >>> p.dat_extension
     'dat'
 
-    >>> p.dat_extension = 'data'
+    >>> p.dat_extension = r'data'
     >>> p.dat_extension
     'data'
-    
+
     >>> p.idx_extension
     'idx'
     >>> p.idx_extension = 'index'
     >>> p.idx_extension
     'index'
-    
-    >>> p.index_id
+
+    >>> p.index_id  #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
     ...
     RTreeError: Error in "IndexProperty_GetIndexID": Property IndexIdentifier was empty
diff --git a/tests/off/z_cleanup.txt b/tests/test_z_cleanup.txt
similarity index 100%
rename from tests/off/z_cleanup.txt
rename to tests/test_z_cleanup.txt
diff --git a/tests/z_cleanup.txt b/tests/z_cleanup.txt
deleted file mode 100644
index 5af62a9..0000000
--- a/tests/z_cleanup.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-    >>> from rtree import core
-    >>> del core.rt
-    >>> files = ['defaultidx.dat','defaultidx.idx',
-    ...          'pagesize3.dat','pagesize3.idx',
-    ...          'testing.dat','testing.idx',
-    ...          'custom.data','custom.index',
-    ...          'streamed.idx','streamed.dat',
-    ...          'gilename䔀abc.dat','gilename䔀abc.idx',
-    ...          'overwriteme.idx', 'overwriteme.dat']
-    >>> import os
-    >>> import time
-    >>> for f in files:
-    ...     try:
-    ...         os.remove(f)
-    ...     except OSError:
-    ...         time.sleep(0.1)
-    ...         os.remove(f)
-

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/python-rtree.git


