[Python-modules-commits] [dbf] 01/05: Import dbf_0.96.005.orig.tar.gz
Sandro Tosi
morph at moszumanska.debian.org
Wed Nov 4 20:48:42 UTC 2015
This is an automated email from the git hooks/post-receive script.
morph pushed a commit to branch master
in repository dbf.
commit dd802aed0f32291054fa1b43d99b7cdf42538c6e
Author: Sandro Tosi <morph at debian.org>
Date: Wed Nov 4 20:27:05 2015 +0000
Import dbf_0.96.005.orig.tar.gz
---
PKG-INFO | 4 +--
dbf/__init__.py | 2 +-
dbf/tests.py | 86 ++++++++++++++++++++++++++++++++++++++++++++++++---------
dbf/ver_2.py | 53 ++++++++++++-----------------------
dbf/ver_32.py | 67 +++++++++++++++++---------------------------
dbf/ver_33.py | 67 ++++++++++++++++----------------------------
setup.py | 4 +--
7 files changed, 145 insertions(+), 138 deletions(-)
diff --git a/PKG-INFO b/PKG-INFO
index cfaf7c2..8426927 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,13 +1,13 @@
Metadata-Version: 1.1
Name: dbf
-Version: 0.96.003
+Version: 0.96.005
Summary: Pure python package for reading/writing dBase, FoxPro, and Visual FoxPro .dbf files (including memos)
Home-page: https://pypi.python.org/pypi/dbf
Author: Ethan Furman
Author-email: ethan at stoneleaf.us
License: BSD License
Description:
- Currently supports dBase III, FoxPro, and Visual FoxPro tables. Text is returned as unicode, and codepage settings in tables are honored. Memos and Null fields are supported. Documentation needs work, but author is very responsive to e-mails.
+ Currently supports dBase III, Clipper, FoxPro, and Visual FoxPro tables. Text is returned as unicode, and codepage settings in tables are honored. Memos and Null fields are supported. Documentation needs work, but author is very responsive to e-mails.
Not supported: index files (but can create tempory non-file indexes), auto-incrementing fields, and Varchar fields.
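
For reference, the description above is the library's own summary of scope. A minimal usage sketch, assuming only the public names exercised by the tests in this commit (Table, Date, the 'C/L/D/M' field-spec syntax, tuple appends):

    import dbf

    # In-memory dBase III table with character, logical, date, and memo fields,
    # using the same field-spec syntax as dbf/tests.py below.
    table = dbf.Table(':memory:', 'name C(25); paid L; hired D; notes M',
                      dbf_type='db3', on_disk=False)
    table.open()
    table.append(('Ethan', True, dbf.Date.today(), 'author of the dbf package'))
    for record in table:
        print(record.name, record.paid, record.notes)   # text comes back as unicode
    table.close()
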
diff --git a/dbf/__init__.py b/dbf/__init__.py
index da51f44..73ca282 100644
--- a/dbf/__init__.py
+++ b/dbf/__init__.py
@@ -1,6 +1,6 @@
import sys as _sys
-version = (0, 96, 3)
+version = (0, 96, 5)
py_ver = _sys.version_info[:2]
if py_ver >= (3, 3):
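
The version tuple above is what tests.py formats into the release string; (0, 96, 5) renders as 0.96.005, matching the imported tarball:

    version = (0, 96, 5)
    print("dbf version %d.%02d.%03d" % version)   # -> dbf version 0.96.005
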
diff --git a/dbf/tests.py b/dbf/tests.py
index 1632925..1981b9a 100644
--- a/dbf/tests.py
+++ b/dbf/tests.py
@@ -15,10 +15,14 @@ from dbf.api import *
if py_ver < (3, 0):
EOF = '\x1a'
+ MISC = ''.join([chr(i) for i in range(256)])
+ PHOTO = ''.join(reversed([chr(i) for i in range(256)]))
else:
unicode = str
xrange = range
dbf.LatinByte.export_to(module)
+ MISC = ''.join([chr(i) for i in range(256)]).encode('latin-1')
+ PHOTO = ''.join(reversed([chr(i) for i in range(256)])).encode('latin-1')
print("\nTesting dbf version %d.%02d.%03d on %s with Python %s\n" % (
@@ -2594,7 +2598,7 @@ class TestDbfCreation(unittest.TestCase):
"Testing table creation..."
def test_db3_memory_tables(self):
"dbf tables in memory"
- fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)']
+ fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)', 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(':memory:', fieldlist, dbf_type='db3', on_disk=False)
@@ -2603,7 +2607,7 @@ class TestDbfCreation(unittest.TestCase):
self.assertTrue(all([type(x) is unicode for x in table.field_names]))
def test_db3_disk_tables(self):
"dbf table on disk"
- fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)']
+ fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)', 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(os.path.join(tempdir, 'temptable'), ';'.join(fieldlist), dbf_type='db3')
@@ -2618,7 +2622,7 @@ class TestDbfCreation(unittest.TestCase):
self.assertEqual(last_byte, EOF)
def test_clp_memory_tables(self):
"clp tables in memory"
- fields = ['name C(10977)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)']
+ fields = ['name C(10977)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)', 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(':memory:', fieldlist, dbf_type='clp', on_disk=False)
@@ -2629,7 +2633,7 @@ class TestDbfCreation(unittest.TestCase):
"clp table on disk"
table = Table(os.path.join(tempdir, 'temptable'), 'name C(377); thesis C(20179)', dbf_type='clp')
self.assertEqual(table.record_length, 20557)
- fields = ['name C(10977)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)']
+ fields = ['name C(10977)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)', 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(os.path.join(tempdir, 'temptable'), ';'.join(fieldlist), dbf_type='clp')
@@ -2645,7 +2649,7 @@ class TestDbfCreation(unittest.TestCase):
def test_fp_memory_tables(self):
"fp tables in memory"
fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)',
- 'litres F(11,5)', 'blob G', 'graphic P']
+ 'litres F(11,5)', 'blob G', 'graphic P', 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(':memory:', ';'.join(fieldlist), dbf_type='vfp', on_disk=False)
@@ -2654,7 +2658,7 @@ class TestDbfCreation(unittest.TestCase):
def test_fp_disk_tables(self):
"fp tables on disk"
fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)',
- 'litres F(11,5)', 'blob G', 'graphic P']
+ 'litres F(11,5)', 'blob G', 'graphic P', 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(os.path.join(tempdir, 'tempfp'), ';'.join(fieldlist), dbf_type='vfp')
@@ -2664,9 +2668,9 @@ class TestDbfCreation(unittest.TestCase):
def test_vfp_memory_tables(self):
"vfp tables in memory"
fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)',
- 'weight B', 'litres F(11,5)', 'int I', 'birth T', 'blob G', 'graphic P',
+ 'mass B', 'litres F(11,5)', 'int I', 'birth T', 'blob G', 'graphic P',
'menu C(50) binary', 'graduated L null', 'fired D null', 'cipher C(50) nocptrans null',
- ]
+ 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(':memory:', ';'.join(fieldlist), dbf_type='vfp', on_disk=False)
@@ -2676,9 +2680,9 @@ class TestDbfCreation(unittest.TestCase):
def test_vfp_disk_tables(self):
"vfp tables on disk"
fields = ['name C(25)', 'hiredate D', 'male L', 'wisdom M', 'qty N(3,0)',
- 'weight B', 'litres F(11,5)', 'int I', 'birth T', 'blob G', 'graphic P',
+ 'mass B', 'litres F(11,5)', 'int I', 'birth T', 'blob G', 'graphic P',
'menu C(50) binary', 'graduated L null', 'fired D null', 'cipher C(50) nocptrans null',
- ]
+ 'weight F(7,3)']
for i in range(1, len(fields)+1):
for fieldlist in combinate(fields, i):
table = Table(os.path.join(tempdir, 'tempvfp'), ';'.join(fieldlist), dbf_type='vfp')
@@ -2747,8 +2751,6 @@ class TestDbfCreation(unittest.TestCase):
class TestDbfRecords(unittest.TestCase):
"Testing records"
def setUp(self):
- #if not os.path.exists(tempdir):
- # os.mkdir(tempdir)
self.dbf_table = Table(
os.path.join(tempdir, 'dbf_table'),
'name C(25); paid L; qty N(11,5); orderdate D; desc M',
@@ -2764,7 +2766,6 @@ class TestDbfRecords(unittest.TestCase):
def tearDown(self):
self.dbf_table.close()
self.vfp_table.close()
- #shutil.rmtree(tempdir)
def test_slicing(self):
table = self.dbf_table
@@ -3203,6 +3204,17 @@ class TestDbfRecords(unittest.TestCase):
self.assertEqual(table[0].binmemo, high_ascii)
table.close()
+ # def test_backup_of_different_codepage(self):
+ # "check backups work when different codepage specified"
+ # table = Table(':memory:', 'string M', dbf_type='db3', on_disk=False, codepage='utf8')
+ # table.open()
+ # weird = ''.join(chr(i) for i in range(256))
+ # if py_ver < (3, 0):
+ # weird = weird.decode('latin-1')
+ # table.append((weird,))
+ # table.codepage = CodePage('cp437')
+ # self.assertRaises(UnicodeDecodeError, table.__getitem__, 0)
+
def test_add_null_field(self):
"adding a null field to an existing table"
table = Table(
@@ -3426,6 +3438,54 @@ class TestDbfRecords(unittest.TestCase):
))
self.assertNotEqual(old_data, dbf.scatter(record))
+class TestDbfRecordTemplates(unittest.TestCase):
+ "Testing records"
+ def setUp(self):
+ self.dbf_table = Table(
+ os.path.join(tempdir, 'dbf_table'),
+ 'name C(25); paid L; qty N(11,5); orderdate D; desc M',
+ dbf_type='db3',
+ )
+ self.vfp_table = Table(
+ os.path.join(tempdir, 'vfp_table'),
+ 'name C(25); paid L; qty N(11,5); orderdate D; desc M; mass B;' +
+ ' weight F(18,3); age I; meeting T; misc G; photo P; price Y',
+ dbf_type='vfp',
+ )
+
+ def tearDown(self):
+ self.dbf_table.close()
+ self.vfp_table.close()
+
+ def test_dbf_storage(self):
+ table = self.dbf_table
+ table.open()
+ record = table.create_template()
+ record.name = 'Stoneleaf'
+ record.paid = True
+ record.qty = 1
+ record.orderdate = Date.today()
+ record.desc = 'some Python dude'
+ table.append(record)
+
+ def test_vfp_storage(self):
+ table = self.vfp_table
+ table.open()
+ record = table.create_template()
+ record.name = 'Stoneleaf'
+ record.paid = True
+ record.qty = 1
+ record.orderdate = Date.today()
+ record.desc = 'some Python dude'
+ record.mass = 251.9287
+ record.weight = 971204.39
+ record.age = 29
+ record.meeting = DateTime.now()
+ record.misc = MISC
+ record.photo = PHOTO
+ record.price = 19.99
+ table.append(record)
+
class TestDbfFunctions(unittest.TestCase):
def setUp(self):
"create a dbf and vfp table"
diff --git a/dbf/ver_2.py b/dbf/ver_2.py
index 547ccf3..6fc2214 100644
--- a/dbf/ver_2.py
+++ b/dbf/ver_2.py
@@ -3007,7 +3007,6 @@ class RecordTemplate(object):
raise FieldMissingError(name)
if name in self._meta.memofields:
self._memos[name] = value
- self._dirty = True
return
index = self._meta.fields.index(name)
try:
@@ -5238,6 +5237,9 @@ class Table(_Navigation):
extra = ('_backup', '_BACKUP')[upper]
new_name = os.path.join(temp_dir or directory, name + extra + ext)
bkup = Table(new_name, self.structure(), codepage=self.codepage.name, dbf_type=self._versionabbr, on_disk=on_disk)
+ # use same encoder/decoder as current table, which may have been overridden
+ bkup._meta.encoder = self._meta.encoder
+ bkup._meta.decoder = self._meta.decoder
bkup.open()
for record in self:
bkup.append(record)
@@ -8131,10 +8133,16 @@ def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fiel
field_names = ['%s M' % fn for fn in field_names]
else:
field_names = ['f0 M']
- mtable = Table(':memory:', [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding, on_disk=False)
- mtable.open()
+ if filename:
+ to_disk = True
+ else:
+ filename = os.path.splitext(csvfile)[0]
+ if to_disk:
+ csv_table = Table(filename, [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding)
+ else:
+ csv_table = Table(':memory:', [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding, on_disk=False)
+ csv_table.open()
fields_so_far = 1
- #for row in reader:
while reader:
try:
row = next(reader)
@@ -8145,38 +8153,13 @@ def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fiel
while fields_so_far < len(row):
if fields_so_far == len(field_names):
field_names.append('f%d M' % fields_so_far)
- mtable.add_fields(field_names[fields_so_far])
+ csv_table.add_fields(field_names[fields_so_far])
fields_so_far += 1
- mtable.append(tuple(row))
- if filename:
- to_disk = True
- if not to_disk:
- if extra_fields:
- mtable.add_fields(extra_fields)
- else:
- if not filename:
- filename = os.path.splitext(csvfile)[0]
- length = [min_field_size] * len(field_names)
- for record in mtable:
- for i in index(mtable.field_names):
- length[i] = max(length[i], len(record[i]))
- fields = mtable.field_names
- fielddef = []
- for i in index(length):
- if length[i] < 255:
- fielddef.append('%s C(%d)' % (fields[i], length[i]))
- else:
- fielddef.append('%s M' % (fields[i]))
- if extra_fields:
- fielddef.extend(extra_fields)
- csvtable = Table(filename, fielddef, dbf_type=dbf_type, codepage=encoding)
- csvtable.open()
- for record in mtable:
- csvtable.append(scatter(record))
- csvtable.close()
- return csvtable
- mtable.close()
- return mtable
+ csv_table.append(tuple(row))
+ if extra_fields:
+ csv_table.add_fields(extra_fields)
+ csv_table.close()
+ return csv_table
def get_fields(table_name):
"""
diff --git a/dbf/ver_32.py b/dbf/ver_32.py
index 6219997..80f9f6f 100644
--- a/dbf/ver_32.py
+++ b/dbf/ver_32.py
@@ -2700,7 +2700,6 @@ class Record(object):
value = None
else:
null_data[byte] &= 0xff ^ 1 << bit
- # null_data = array('B', [chr(n) for n in null_data])
self._data[null_def[START]:null_def[END]] = null_data
if value is not Null:
bytes = array('B', update(value, fielddef, self._meta.memo, self._meta.input_decoder, self._meta.encoder))
@@ -2811,7 +2810,8 @@ class RecordTemplate(object):
if nullable:
byte, bit = divmod(index, 8)
null_def = self._meta['_nullflags']
- null_data = self._data[null_def[START]:null_def[END]]
+ null_data = self._data[null_def[START]:null_def[END]] #.tostring()
+ # null_data = [ord(c) for c in null_data]
if value is Null:
null_data[byte] |= 1 << bit
value = None
@@ -2954,7 +2954,6 @@ class RecordTemplate(object):
raise FieldMissingError(name)
if name in self._meta.memofields:
self._memos[name] = value
- self._dirty = True
return
index = self._meta.fields.index(name)
try:
@@ -4113,7 +4112,7 @@ class Table(_Navigation):
'Type':'Memo', 'Init':add_memo, 'Blank':lambda x: b' ', 'Retrieve':retrieve_memo, 'Update':update_memo,
'Class':str, 'Empty':str, 'flags':tuple(),
},
- NUMERIC: {
+ FLOAT: {
'Type':'Numeric', 'Init':add_numeric, 'Blank':lambda x: b' ' * x, 'Retrieve':retrieve_numeric, 'Update':update_numeric,
'Class':'default', 'Empty':none, 'flags':tuple(),
},
@@ -5210,6 +5209,9 @@ class Table(_Navigation):
extra = ('_backup', '_BACKUP')[upper]
new_name = os.path.join(temp_dir or directory, name + extra + ext)
bkup = Table(new_name, self.structure(), codepage=self.codepage.name, dbf_type=self._versionabbr, on_disk=on_disk)
+ # use same encoder/decoder as current table, which may have been overridden
+ bkup._meta.encoder = self._meta.encoder
+ bkup._meta.decoder = self._meta.decoder
bkup.open()
for record in self:
bkup.append(record)
@@ -5594,7 +5596,7 @@ class Db3Table(Table):
'Type':'Memo', 'Retrieve':retrieve_memo, 'Update':update_memo, 'Blank':lambda x: b' ', 'Init':add_memo,
'Class':str, 'Empty':str, 'flags':tuple(),
},
- NUMERIC: {
+ FLOAT: {
'Type':'Numeric', 'Retrieve':retrieve_numeric, 'Update':update_numeric, 'Blank':lambda x: b' ' * x, 'Init':add_numeric,
'Class':'default', 'Empty':none, 'flags':tuple(),
} }
@@ -5613,7 +5615,7 @@ class Db3Table(Table):
_logical_types = (LOGICAL, )
_memo_types = (MEMO, )
_numeric_types = (NUMERIC, FLOAT)
- _variable_types = (CHAR, NUMERIC)
+ _variable_types = (CHAR, NUMERIC, FLOAT)
_dbfTableHeader = array('B', [0] * 32)
_dbfTableHeader[0] = 3 # version - dBase III w/o memo's
_dbfTableHeader[8:10] = array('B', pack_short_int(33))
@@ -5737,7 +5739,7 @@ class ClpTable(Db3Table):
'Type':'Memo', 'Retrieve':retrieve_memo, 'Update':update_memo, 'Blank':lambda x: b' ', 'Init':add_memo,
'Class':str, 'Empty':str, 'flags':tuple(),
},
- NUMERIC: {
+ FLOAT: {
'Type':'Numeric', 'Retrieve':retrieve_numeric, 'Update':update_numeric, 'Blank':lambda x: b' ' * x, 'Init':add_numeric,
'Class':'default', 'Empty':none, 'flags':tuple(),
} }
@@ -5756,7 +5758,7 @@ class ClpTable(Db3Table):
_logical_types = (LOGICAL, )
_memo_types = (MEMO, )
_numeric_types = (NUMERIC, FLOAT)
- _variable_types = (CHAR, NUMERIC)
+ _variable_types = (CHAR, NUMERIC, FLOAT)
_dbfTableHeader = array('B', [0] * 32)
_dbfTableHeader[0] = 3 # version - dBase III w/o memo's
_dbfTableHeader[8:10] = array('B', pack_short_int(33))
@@ -7558,10 +7560,16 @@ def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fiel
field_names = ['%s M' % fn for fn in field_names]
else:
field_names = ['f0 M']
- mtable = Table(':memory:', [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding, on_disk=False)
- mtable.open()
+ if filename:
+ to_disk = True
+ else:
+ filename = os.path.splitext(csvfile)[0]
+ if to_disk:
+ csv_table = Table(filename, [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding)
+ else:
+ csv_table = Table(':memory:', [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding, on_disk=False)
+ csv_table.open()
fields_so_far = 1
- #for row in reader:
while reader:
try:
row = next(reader)
@@ -7572,38 +7580,13 @@ def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fiel
while fields_so_far < len(row):
if fields_so_far == len(field_names):
field_names.append('f%d M' % fields_so_far)
- mtable.add_fields(field_names[fields_so_far])
+ csv_table.add_fields(field_names[fields_so_far])
fields_so_far += 1
- mtable.append(tuple(row))
- if filename:
- to_disk = True
- if not to_disk:
- if extra_fields:
- mtable.add_fields(extra_fields)
- else:
- if not filename:
- filename = os.path.splitext(csvfile)[0]
- length = [min_field_size] * len(field_names)
- for record in mtable:
- for i in index(mtable.field_names):
- length[i] = max(length[i], len(record[i]))
- fields = mtable.field_names
- fielddef = []
- for i in index(length):
- if length[i] < 255:
- fielddef.append('%s C(%d)' % (fields[i], length[i]))
- else:
- fielddef.append('%s M' % (fields[i]))
- if extra_fields:
- fielddef.extend(extra_fields)
- csvtable = Table(filename, fielddef, dbf_type=dbf_type, codepage=encoding)
- csvtable.open()
- for record in mtable:
- csvtable.append(scatter(record))
- csvtable.close()
- return csvtable
- mtable.close()
- return mtable
+ csv_table.append(tuple(row))
+ if extra_fields:
+ csv_table.add_fields(extra_fields)
+ csv_table.close()
+ return csv_table
def get_fields(table_name):
"""
diff --git a/dbf/ver_33.py b/dbf/ver_33.py
index 95b8236..08edd4d 100644
--- a/dbf/ver_33.py
+++ b/dbf/ver_33.py
@@ -2694,14 +2694,12 @@ class Record(object):
if nullable:
byte, bit = divmod(index, 8)
null_def = self._meta['_nullflags']
- null_data = self._data[null_def[START]:null_def[END]] #.tostring()
- # null_data = [ord(c) for c in null_data]
+ null_data = self._data[null_def[START]:null_def[END]]
if value is Null:
null_data[byte] |= 1 << bit
value = None
else:
null_data[byte] &= 0xff ^ 1 << bit
- # null_data = array('B', [chr(n) for n in null_data])
self._data[null_def[START]:null_def[END]] = null_data
if value is not Null:
bytes = array('B', update(value, fielddef, self._meta.memo, self._meta.input_decoder, self._meta.encoder))
@@ -2956,7 +2954,6 @@ class RecordTemplate(object):
raise FieldMissingError(name)
if name in self._meta.memofields:
self._memos[name] = value
- self._dirty = True
return
index = self._meta.fields.index(name)
try:
@@ -4115,7 +4112,7 @@ class Table(_Navigation):
'Type':'Memo', 'Init':add_memo, 'Blank':lambda x: b' ', 'Retrieve':retrieve_memo, 'Update':update_memo,
'Class':str, 'Empty':str, 'flags':tuple(),
},
- NUMERIC: {
+ FLOAT: {
'Type':'Numeric', 'Init':add_numeric, 'Blank':lambda x: b' ' * x, 'Retrieve':retrieve_numeric, 'Update':update_numeric,
'Class':'default', 'Empty':none, 'flags':tuple(),
},
@@ -5212,6 +5209,9 @@ class Table(_Navigation):
extra = ('_backup', '_BACKUP')[upper]
new_name = os.path.join(temp_dir or directory, name + extra + ext)
bkup = Table(new_name, self.structure(), codepage=self.codepage.name, dbf_type=self._versionabbr, on_disk=on_disk)
+ # use same encoder/decoder as current table, which may have been overridden
+ bkup._meta.encoder = self._meta.encoder
+ bkup._meta.decoder = self._meta.decoder
bkup.open()
for record in self:
bkup.append(record)
@@ -5596,7 +5596,7 @@ class Db3Table(Table):
'Type':'Memo', 'Retrieve':retrieve_memo, 'Update':update_memo, 'Blank':lambda x: b' ', 'Init':add_memo,
'Class':str, 'Empty':str, 'flags':tuple(),
},
- NUMERIC: {
+ FLOAT: {
'Type':'Numeric', 'Retrieve':retrieve_numeric, 'Update':update_numeric, 'Blank':lambda x: b' ' * x, 'Init':add_numeric,
'Class':'default', 'Empty':none, 'flags':tuple(),
} }
@@ -5615,7 +5615,7 @@ class Db3Table(Table):
_logical_types = (LOGICAL, )
_memo_types = (MEMO, )
_numeric_types = (NUMERIC, FLOAT)
- _variable_types = (CHAR, NUMERIC)
+ _variable_types = (CHAR, NUMERIC, FLOAT)
_dbfTableHeader = array('B', [0] * 32)
_dbfTableHeader[0] = 3 # version - dBase III w/o memo's
_dbfTableHeader[8:10] = array('B', pack_short_int(33))
@@ -5739,7 +5739,7 @@ class ClpTable(Db3Table):
'Type':'Memo', 'Retrieve':retrieve_memo, 'Update':update_memo, 'Blank':lambda x: b' ', 'Init':add_memo,
'Class':str, 'Empty':str, 'flags':tuple(),
},
- NUMERIC: {
+ FLOAT: {
'Type':'Numeric', 'Retrieve':retrieve_numeric, 'Update':update_numeric, 'Blank':lambda x: b' ' * x, 'Init':add_numeric,
'Class':'default', 'Empty':none, 'flags':tuple(),
} }
@@ -5758,7 +5758,7 @@ class ClpTable(Db3Table):
_logical_types = (LOGICAL, )
_memo_types = (MEMO, )
_numeric_types = (NUMERIC, FLOAT)
- _variable_types = (CHAR, NUMERIC)
+ _variable_types = (CHAR, NUMERIC, FLOAT)
_dbfTableHeader = array('B', [0] * 32)
_dbfTableHeader[0] = 3 # version - dBase III w/o memo's
_dbfTableHeader[8:10] = array('B', pack_short_int(33))
@@ -7560,10 +7560,16 @@ def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fiel
field_names = ['%s M' % fn for fn in field_names]
else:
field_names = ['f0 M']
- mtable = Table(':memory:', [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding, on_disk=False)
- mtable.open()
+ if filename:
+ to_disk = True
+ else:
+ filename = os.path.splitext(csvfile)[0]
+ if to_disk:
+ csv_table = Table(filename, [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding)
+ else:
+ csv_table = Table(':memory:', [field_names[0]], dbf_type=dbf_type, memo_size=memo_size, codepage=encoding, on_disk=False)
+ csv_table.open()
fields_so_far = 1
- #for row in reader:
while reader:
try:
row = next(reader)
@@ -7574,38 +7580,13 @@ def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fiel
while fields_so_far < len(row):
if fields_so_far == len(field_names):
field_names.append('f%d M' % fields_so_far)
- mtable.add_fields(field_names[fields_so_far])
+ csv_table.add_fields(field_names[fields_so_far])
fields_so_far += 1
- mtable.append(tuple(row))
- if filename:
- to_disk = True
- if not to_disk:
- if extra_fields:
- mtable.add_fields(extra_fields)
- else:
- if not filename:
- filename = os.path.splitext(csvfile)[0]
- length = [min_field_size] * len(field_names)
- for record in mtable:
- for i in index(mtable.field_names):
- length[i] = max(length[i], len(record[i]))
- fields = mtable.field_names
- fielddef = []
- for i in index(length):
- if length[i] < 255:
- fielddef.append('%s C(%d)' % (fields[i], length[i]))
- else:
- fielddef.append('%s M' % (fields[i]))
- if extra_fields:
- fielddef.extend(extra_fields)
- csvtable = Table(filename, fielddef, dbf_type=dbf_type, codepage=encoding)
- csvtable.open()
- for record in mtable:
- csvtable.append(scatter(record))
- csvtable.close()
- return csvtable
- mtable.close()
- return mtable
+ csv_table.append(tuple(row))
+ if extra_fields:
+ csv_table.add_fields(extra_fields)
+ csv_table.close()
+ return csv_table
def get_fields(table_name):
"""
diff --git a/setup.py b/setup.py
index 7b0324e..3a07746 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ import sys
#html_docs = glob('dbf/html/*')
long_desc="""
-Currently supports dBase III, FoxPro, and Visual FoxPro tables. Text is returned as unicode, and codepage settings in tables are honored. Memos and Null fields are supported. Documentation needs work, but author is very responsive to e-mails.
+Currently supports dBase III, Clipper, FoxPro, and Visual FoxPro tables. Text is returned as unicode, and codepage settings in tables are honored. Memos and Null fields are supported. Documentation needs work, but author is very responsive to e-mails.
Not supported: index files (but can create tempory non-file indexes), auto-incrementing fields, and Varchar fields.
@@ -21,7 +21,7 @@ else:
requirements = []
setup( name='dbf',
- version= '0.96.003',
+ version= '0.96.005',
license='BSD License',
description='Pure python package for reading/writing dBase, FoxPro, and Visual FoxPro .dbf files (including memos)',
long_description=long_desc,
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/dbf.git