[med-svn] [Git][med-team/deepnano][master] 2 commits: * Non-maintainer upload.

Thomas Goirand gitlab at salsa.debian.org
Sun Nov 3 17:06:18 GMT 2019



Thomas Goirand pushed to branch master at Debian Med / deepnano


Commits:
76a7928c by Thomas Goirand at 2019-11-03T16:58:31Z
  * Non-maintainer upload.
  * Add py3-compat.patch.

- - - - -
9473e9c3 by Thomas Goirand at 2019-11-03T17:04:03Z
Switched package to Python 3 (Closes: #936381, #943005).

- - - - -


6 changed files:

- debian/changelog
- debian/control
- + debian/patches/py3-compat.patch
- debian/patches/series
- debian/patches/she_bang.patch
- debian/rules


Changes:

=====================================
debian/changelog
=====================================
@@ -1,3 +1,11 @@
+deepnano (0.0+git20170813.e8a621e-3.1) unstable; urgency=medium
+
+  * Non-maintainer upload.
+  * Add py3-compat.patch.
+  * Switched package to Python 3 (Closes: #936381, #943005).
+
+ -- Thomas Goirand <zigo at debian.org>  Sun, 03 Nov 2019 17:22:33 +0100
+
 deepnano (0.0+git20170813.e8a621e-3) unstable; urgency=medium
 
   * Use less data for autopkgtest


=====================================
debian/control
=====================================
@@ -5,10 +5,10 @@ Uploaders: Çağrı ULAŞ <cagriulas at gmail.com>,
 Section: science
 Priority: optional
 Build-Depends: debhelper (>= 12~),
-               python-all,
+               python3-all,
                dh-python,
-               python-markdown,
-               python-theano
+               python3-markdown,
+               python3-theano
 Standards-Version: 4.3.0
 Vcs-Browser: https://salsa.debian.org/med-team/deepnano
 Vcs-Git: https://salsa.debian.org/med-team/deepnano.git
@@ -16,15 +16,15 @@ Homepage: https://bitbucket.org/vboza/deepnano
 
 Package: deepnano
 Architecture: any
-Depends: ${python:Depends},
+Depends: ${python3:Depends},
          ${misc:Depends},
          ${shlibs:Depends},
-         python-h5py,
-         python-numpy,
-         python-dateutil,
-         python-theano,
+         python3-h5py,
+         python3-numpy,
+         python3-dateutil,
+         python3-theano,
          deepnano-data
-Recommends: python-watchdog
+Recommends: python3-watchdog
 Description: alternative basecaller for MinION reads of genomic sequences
  DeepNano is alternative basecaller for Oxford Nanopore MinION reads
  based on deep recurrent neural networks.


=====================================
debian/patches/py3-compat.patch
=====================================
@@ -0,0 +1,1005 @@
+Description: Python 3 compat
+Author: Thomas Goirand <zigo at debian.org>
+Forwarded: no
+Last-Update: 2019-11-03
+
+Index: deepnano/basecall.py
+===================================================================
+--- deepnano.orig/basecall.py
++++ deepnano/basecall.py
+@@ -1,4 +1,5 @@
+ #!/usr/bin/python3
++from __future__ import print_function
+ import argparse
+ from rnn_fin import RnnPredictor
+ import h5py
+@@ -66,7 +67,7 @@ def load_read_data(read_file):
+       ret["2d_events"].append(ev) 
+     ret["2d_events"] = np.array(ret["2d_events"], dtype=np.float32)
+   except Exception as e:
+-    print e
++    print(e)
+     pass
+ 
+   h5.close()
+@@ -100,17 +101,17 @@ assert do_template or do_complement or d
+ assert len(args.reads) != 0 or len(args.directory) != 0, "Nothing to basecall"
+ 
+ if do_template:
+-  print "loading template net"
++  print("loading template net")
+   temp_net = RnnPredictor(args.template_net)
+-  print "done"
++  print("done")
+ if do_complement:
+-  print "loading complement net"
++  print("loading complement net")
+   comp_net = RnnPredictor(args.complement_net)
+-  print "done"
++  print("done")
+ if do_2d:
+-  print "loading 2D net"
++  print("loading 2D net")
+   big_net = RnnPredictor(args.big_net)
+-  print "done"
++  print("done")
+ 
+ chars = "ACGT"
+ mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4}
+@@ -128,24 +129,24 @@ for i, read in enumerate(files):
+   try:
+     data = load_read_data(read)
+   except Exception as e:
+-    print "error at file", read
+-    print e
++    print("error at file", read)
++    print(e)
+     continue
+   if not data:  
+     continue
+-  print "\rcalling read %d/%d %s" % (i, len(files), read),
++  print("\rcalling read %d/%d %s" % (i, len(files), read), end=' ')
+   sys.stdout.flush()
+   if args.output_orig:
+     try:
+       if "called_template" in data:
+-        print >>fo, ">%s_template" % basename
+-        print >>fo, data["called_template"]
++        print(">%s_template" % basename, file=fo)
++        print(data["called_template"], file=fo)
+       if "called_complement" in data:
+-        print >>fo, ">%s_complement" % basename
+-        print >>fo, data["called_complement"]
++        print(">%s_complement" % basename, file=fo)
++        print(data["called_complement"], file=fo)
+       if "called_2d" in data:
+-        print >>fo, ">%s_2d" % basename
+-        print >>fo, data["called_2d"]
++        print(">%s_2d" % basename, file=fo)
++        print(data["called_2d"], file=fo)
+     except:
+       pass
+ 
+@@ -166,18 +167,18 @@ for i, read in enumerate(files):
+ 
+   if args.timing:
+     try:
+-      print "Events: %d/%d" % (len(data["temp_events"]), len(data["comp_events"]))
+-      print "Our times: %f/%f/%f" % (temp_time.total_seconds(), comp_time.total_seconds(),
++      print("Events: %d/%d" % (len(data["temp_events"]), len(data["comp_events"])))
++      print("Our times: %f/%f/%f" % (temp_time.total_seconds(), comp_time.total_seconds(),
+-         time_2d.total_seconds())
++         time_2d.total_seconds()))
+-      print "Our times per base: %f/%f/%f" % (
++      print("Our times per base: %f/%f/%f" % (
+         temp_time.total_seconds() / len(data["temp_events"]),
+         comp_time.total_seconds() / len(data["comp_events"]),
+-        time_2d.total_seconds() / (len(data["comp_events"]) + len(data["temp_events"])))
+-      print "Their times: %f/%f/%f" % (data["temp_time"].total_seconds(), data["comp_time"].total_seconds(), data["2d_time"].total_seconds())
+-      print "Their times per base: %f/%f/%f" % (
++        time_2d.total_seconds() / (len(data["comp_events"]) + len(data["temp_events"]))))
++      print("Their times: %f/%f/%f" % (data["temp_time"].total_seconds(), data["comp_time"].total_seconds(), data["2d_time"].total_seconds()))
++      print("Their times per base: %f/%f/%f" % (
+         data["temp_time"].total_seconds() / len(data["temp_events"]),
+         data["comp_time"].total_seconds() / len(data["comp_events"]),
+-        data["2d_time"].total_seconds() / (len(data["comp_events"]) + len(data["temp_events"])))
++        data["2d_time"].total_seconds() / (len(data["comp_events"]) + len(data["temp_events"]))))
+     except:
+       # Don't let timing throw us out
+       pass
+Index: deepnano/basecall_no_metrichor.py
+===================================================================
+--- deepnano.orig/basecall_no_metrichor.py
++++ deepnano/basecall_no_metrichor.py
+@@ -1,4 +1,5 @@
+ #!/usr/bin/python3
++from __future__ import print_function
+ import argparse
+ from rnn_fin import RnnPredictor
+ import h5py
+@@ -82,7 +83,7 @@ def load_read_data(read_file):
+   h5 = h5py.File(read_file, "r")
+   ret = {}
+ 
+-  read_key = h5["Analyses/EventDetection_000/Reads"].keys()[0]
++  read_key = list(h5["Analyses/EventDetection_000/Reads"].keys())[0]
+   base_events = h5["Analyses/EventDetection_000/Reads"][read_key]["Events"]
+   temp_comp_loc = template_complement_loc(base_events)
+   sampling_rate = h5["UniqueGlobalKey/channel_id"].attrs["sampling_rate"]
+@@ -137,8 +138,8 @@ def basecall(read_file_name, fo):
+   try:
+     data = load_read_data(read_file_name)
+   except Exception as e:
+-    print e
+-    print "error at file", read_file_name
++    print(e)
++    print("error at file", read_file_name)
+     return
+ 
+   if do_template or do_2d:
+@@ -156,22 +157,22 @@ def basecall(read_file_name, fo):
+   if do_2d and "comp_events2" in data and\
+      len(data["comp_events2"]) <= args.max_2d_length and\
+      len(data["temp_events2"]) <= args.max_2d_length:
+-    p = subprocess.Popen("/usr/lib/deepnano/align_2d", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
++    p = subprocess.Popen("./align_2d", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+     f2d = p.stdin
+-    print >>f2d, len(o1)+len(o2)
++    print(len(o1)+len(o2), file=f2d)
+     for a, b in zip(o1, o2):
+-      print >>f2d, " ".join(map(str, a))
+-      print >>f2d, " ".join(map(str, b))
+-    print >>f2d, len(o1c)+len(o2c)
++      print(" ".join(map(str, a)), file=f2d)
++      print(" ".join(map(str, b)), file=f2d)
++    print(len(o1c)+len(o2c), file=f2d)
+     for a, b in zip(o1c, o2c):
+-      print >>f2d, " ".join(map(str, a))
+-      print >>f2d, " ".join(map(str, b))
++      print(" ".join(map(str, a)), file=f2d)
++      print(" ".join(map(str, b)), file=f2d)
+     f2do, f2de = p.communicate()
+     if p.returncode != 0:
+       return
+     lines = f2do.strip().split('\n')
+-    print >>fo, ">%s_2d_rnn_simple" % basename
+-    print >>fo, lines[0].strip()
++    print(">%s_2d_rnn_simple" % basename, file=fo)
++    print(lines[0].strip(), file=fo)
+     events_2d = []
+     for l in lines[1:]:
+       temp_ind, comp_ind = map(int, l.strip().split())
+@@ -218,17 +219,17 @@ assert do_template or do_complement or d
+ assert len(args.reads) != 0 or len(args.directory) != 0 or len(args.watch) != 0, "Nothing to basecall"
+ 
+ if do_template or do_2d:
+-  print "loading template net"
++  print("loading template net")
+   temp_net = RnnPredictor(args.template_net)
+-  print "done"
++  print("done")
+ if do_complement or do_2d:
+-  print "loading complement net"
++  print("loading complement net")
+   comp_net = RnnPredictor(args.complement_net)
+-  print "done"
++  print("done")
+ if do_2d:
+-  print "loading 2D net"
++  print("loading 2D net")
+   big_net = RnnPredictor(args.big_net)
+-  print "done"
++  print("done")
+ 
+ chars = "ACGT"
+ mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4}
+@@ -250,14 +251,14 @@ if len(args.watch) != 0:
+     from watchdog.observers import Observer
+     from watchdog.events import PatternMatchingEventHandler
+   except:
+-    print "Please install watchdog to watch directories"
++    print("Please install watchdog to watch directories")
+     sys.exit()
+ 
+   class Fast5Handler(PatternMatchingEventHandler):
+     """Class for handling creation fo fast5-files"""
+     patterns = ["*.fast5"]
+     def on_created(self, event):
+-      print "Calling", event
++      print("Calling", event)
+       file_name = str(os.path.basename(event.src_path))
+       fasta_file_name = os.path.splitext(event.src_path)[0] + '.fasta'
+       with open(fasta_file_name, "w") as fo:
+Index: deepnano/basecall_no_metrichor_devel.py
+===================================================================
+--- deepnano.orig/basecall_no_metrichor_devel.py
++++ deepnano/basecall_no_metrichor_devel.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import argparse
+ from rnn_fin import RnnPredictor
+ import h5py
+@@ -76,17 +77,17 @@ def load_read_data(read_file):
+   h5 = h5py.File(read_file, "r")
+   ret = {}
+ 
+-  read_key = h5["Analyses/EventDetection_000/Reads"].keys()[0]
++  read_key = list(h5["Analyses/EventDetection_000/Reads"].keys())[0]
+   base_events = h5["Analyses/EventDetection_000/Reads"][read_key]["Events"]
+   temp_comp_loc = template_complement_loc(base_events)
+   if not temp_comp_loc:
+     return None
+ 
+-#  print "temp_comp_loc", temp_comp_loc["temp"], temp_comp_loc["comp"]
+-#  print h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["start_index_temp"],
+-#  print h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["end_index_temp"],
+-#  print h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["start_index_comp"],
+-#  print h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["end_index_comp"]
++#  print("temp_comp_loc", temp_comp_loc["temp"], temp_comp_loc["comp"])
++#  print(h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["start_index_temp"], end=' ')
++#  print(h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["end_index_temp"], end=' ')
++#  print(h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["start_index_comp"], end=' ')
++#  print(h5["Analyses/Basecall_2D_000/Summary/split_hairpin"].attrs["end_index_comp"])
+ 
+   sampling_rate = h5["UniqueGlobalKey/channel_id"].attrs["sampling_rate"]
+ 
+@@ -95,7 +96,7 @@ def load_read_data(read_file):
+     ret["called_complement"] = h5["Analyses/Basecall_2D_000/BaseCalled_complement/Fastq"][()].split('\n')[1]
+     ret["called_2d"] = h5["Analyses/Basecall_2D_000/BaseCalled_2D/Fastq"][()].split('\n')[1]
+   except Exception as e:
+-    print "wat", e 
++    print("wat", e)
+     return None
+   events = base_events[temp_comp_loc["temp"][0]:temp_comp_loc["temp"][1]]
+   tscale2, tscale_sd2, tshift2 = get_scaling_template(events)
+@@ -203,18 +204,18 @@ if "all" in types or "2d" in types:
+ assert do_template or do_complement or do_2d, "Nothing to do"
+ 
+ if do_template or do_2d:
+-  print "loading template net"
++  print("loading template net")
+   temp_net = RnnPredictor(args.template_net)
+-  print "done"
++  print("done")
+ if do_complement or do_2d:
+-  print "loading complement net"
++  print("loading complement net")
+   comp_net = RnnPredictor(args.complement_net)
+-  print "done"
++  print("done")
+ if do_2d:
+-  print "loading 2D net"
++  print("loading 2D net")
+   big_net = RnnPredictor(args.big_net)
+   big_net_orig = RnnPredictor("nets_data/map6-2d-big.npz")
+-  print "done"
++  print("done")
+ 
+ chars = "ACGT"
+ mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4}
+@@ -227,24 +228,24 @@ for i, read in enumerate(args.reads):
+   if True:
+     data = load_read_data(read)
+ #  except Exception as e:
+-#    print e
+-#    print "error at file", read
++#    print(e)
++#    print("error at file", read)
+ #    continue
+   if not data:  
+     continue
+   if args.output_orig:
+-    print >>fo, ">%d_template" % i
+-    print >>fo, data["called_template"]
+-    print >>fo, ">%d_complement" % i
+-    print >>fo, data["called_complement"]
+-    print >>fo, ">%d_2d" % i
+-    print >>fo, data["called_2d"]
++    print(">%d_template" % i, file=fo)
++    print(data["called_template"], file=fo)
++    print(">%d_complement" % i, file=fo)
++    print(data["called_complement"], file=fo)
++    print(">%d_2d" % i, file=fo)
++    print(data["called_2d"], file=fo)
+ 
+   if do_template or do_2d:
+     o1, o2 = temp_net.predict(data["temp_events"]) 
+     o1m = (np.argmax(o1, 1))
+     o2m = (np.argmax(o2, 1))
+-    print >>fo, ">%d_temp_rnn" % i
++    print(">%d_temp_rnn" % i, file=fo)
+     for a, b in zip(o1m, o2m):
+       if a < 4:
+         fo.write(chars[a])
+@@ -255,7 +256,7 @@ for i, read in enumerate(args.reads):
+     o1m = (np.argmax(o1, 1))
+     o2m = (np.argmax(o2, 1))
+     if do_template:
+-      print >>fo, ">%d_temp_rnn2" % i
++      print(">%d_temp_rnn2" % i, file=fo)
+       for a, b in zip(o1m, o2m):
+         if a < 4:
+           fo.write(chars[a])
+@@ -267,7 +268,7 @@ for i, read in enumerate(args.reads):
+     o1c, o2c = comp_net.predict(data["comp_events"]) 
+     o1cm = (np.argmax(o1c, 1))
+     o2cm = (np.argmax(o2c, 1))
+-    print >>fo, ">%d_comp_rnn" % i
++    print(">%d_comp_rnn" % i, file=fo)
+     for a, b in zip(o1cm, o2cm):
+       if a < 4:
+         fo.write(chars[a])
+@@ -278,7 +279,7 @@ for i, read in enumerate(args.reads):
+     o1cm = (np.argmax(o1c, 1))
+     o2cm = (np.argmax(o2c, 1))
+     if do_complement:
+-      print >>fo, ">%d_comp_rnn2" % i
++      print(">%d_comp_rnn2" % i, file=fo)
+       for a, b in zip(o1cm, o2cm):
+         if a < 4:
+           fo.write(chars[a])
+@@ -288,20 +289,20 @@ for i, read in enumerate(args.reads):
+ 
+   if do_2d:
+     f2d = open("2d.in", "w")
+-    print >>f2d, len(o1)+len(o2)
++    print(len(o1)+len(o2), file=f2d)
+     for a, b in zip(o1, o2):
+-      print >>f2d, " ".join(map(str, a))
+-      print >>f2d, " ".join(map(str, b))
+-    print >>f2d, len(o1c)+len(o2c)
++      print(" ".join(map(str, a)), file=f2d)
++      print(" ".join(map(str, b)), file=f2d)
++    print(len(o1c)+len(o2c), file=f2d)
+     for a, b in zip(o1c, o2c):
+-      print >>f2d, " ".join(map(str, a))
+-      print >>f2d, " ".join(map(str, b))
++      print(" ".join(map(str, a)), file=f2d)
++      print(" ".join(map(str, b)), file=f2d)
+     f2d.close()
+-    os.system("/usr/lib/deepnano/align_2d <2d.in >2d.out")
++    os.system("./align_2d <2d.in >2d.out")
+     f2do = open("2d.out")
+-    call2d = f2do.next().strip()
+-    print >>fo, ">%d_2d_rnn_simple" % i
+-    print >>fo, call2d
++    call2d = next(f2do).strip()
++    print(">%d_2d_rnn_simple" % i, file=fo)
++    print(call2d, file=fo)
+ 
+     start_temp_ours = None
+     end_temp_ours = None
+@@ -330,7 +331,7 @@ for i, read in enumerate(args.reads):
+     o1c, o2c = big_net.predict(events_2d) 
+     o1cm = (np.argmax(o1c, 1))
+     o2cm = (np.argmax(o2c, 1))
+-    print >>fo, ">%d_2d_rnn2" % i
++    print(">%d_2d_rnn2" % i, file=fo)
+     for a, b in zip(o1cm, o2cm):
+       if a < 4:
+         fo.write(chars[a])
+@@ -340,7 +341,7 @@ for i, read in enumerate(args.reads):
+     o1c, o2c = big_net.predict(data["2d_events"]) 
+     o1cm = (np.argmax(o1c, 1))
+     o2cm = (np.argmax(o2c, 1))
+-    print >>fo, ">%d_2d_rnn" % i
++    print(">%d_2d_rnn" % i, file=fo)
+     for a, b in zip(o1cm, o2cm):
+       if a < 4:
+         fo.write(chars[a])
+@@ -362,10 +363,10 @@ for i, read in enumerate(args.reads):
+           end_comp_th = a[1]
+         start_comp_th = a[1]
+ 
+-    print "Ours:",
+-    print start_temp_ours, end_temp_ours, start_comp_ours, end_comp_ours,
+-    print 1. * len(events_2d) / (end_temp_ours - start_temp_ours + end_comp_ours - start_comp_ours) 
+-    print "Their:",
+-    print start_temp_th, end_temp_th, start_comp_th, end_comp_th,
+-    print 1. * len(data["al"]) / (end_temp_th - start_temp_th + end_comp_th - start_comp_th) 
+-    print
++    print("Ours:", end=' ')
++    print(start_temp_ours, end_temp_ours, start_comp_ours, end_comp_ours, end=' ')
++    print(1. * len(events_2d) / (end_temp_ours - start_temp_ours + end_comp_ours - start_comp_ours) )
++    print("Their:", end=' ')
++    print(start_temp_th, end_temp_th, start_comp_th, end_comp_th, end=' ')
++    print(1. * len(data["al"]) / (end_temp_th - start_temp_th + end_comp_th - start_comp_th) )
++    print()
+Index: deepnano/helpers.py
+===================================================================
+--- deepnano.orig/helpers.py
++++ deepnano/helpers.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ from rnn_fin import RnnPredictor
+ import h5py
+ import sys
+@@ -22,7 +23,7 @@ def predict_and_write(events, ntwk, fo,
+   if fo:
+     o1m = (np.argmax(o1, 1))
+     o2m = (np.argmax(o2, 1))
+-    print >>fo, ">%s" % read_name
++    print(">%s" % read_name, file=fo)
+     for a, b in zip(o1m, o2m):
+       if a < 4:
+         fo.write(chars[a])
+Index: deepnano/r9/basecall.py
+===================================================================
+--- deepnano.orig/r9/basecall.py
++++ deepnano/r9/basecall.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ from rnnf import Rnn
+ from qrnnf import Rnn as Rnnq
+ import h5py
+@@ -65,12 +66,12 @@ def basecall(filename, output_file):
+     h5 = h5py.File(filename, "r")
+     events = get_events(h5)
+     if events is None:
+-      print "No events in file %s" % filename
++      print("No events in file %s" % filename)
+       h5.close()
+       return 0
+ 
+     if len(events) < 300:
+-      print "Read %s too short, not basecalling" % filename
++      print("Read %s too short, not basecalling" % filename)
+       h5.close()
+       return 0
+ 
+@@ -106,14 +107,14 @@ def basecall(filename, output_file):
+ 
+     om = np.vstack((o1m,o2m)).reshape((-1,),order='F')
+     output = "".join(map(lambda x: alph[x], om)).replace("N", "")
+-    print >>output_file, ">%s_template_deepnano" % filename
+-    print >>output_file, output
++    print(">%s_template_deepnano" % filename, file=output_file)
++    print(output, file=output_file)
+     output_file.flush()
+ 
+     h5.close()
+     return len(events)
+   except Exception as e:
+-    print "Read %s failed with %s" % (filename, e)
++    print("Read %s failed with %s" % (filename, e))
+     return 0
+ 
+ alph = "ACGTN"
+@@ -156,7 +157,7 @@ if len(args.reads) or len(args.directory
+     if args.debug:
+       total_events += current_events
+       time_diff = (datetime.datetime.now() - start_time).total_seconds() + 0.000001
+-      print "Basecalled %d events in %f (%f ev/s)" % (total_events, time_diff, total_events / time_diff)
++      print("Basecalled %d events in %f (%f ev/s)" % (total_events, time_diff, total_events / time_diff))
+ 
+   fo.close()
+ 
+@@ -165,14 +166,14 @@ if len(args.watch) != 0:
+     from watchdog.observers import Observer
+     from watchdog.events import PatternMatchingEventHandler
+   except:
+-    print "Please install watchdog to watch directories"
++    print("Please install watchdog to watch directories")
+     sys.exit()
+ 
+   class Fast5Handler(PatternMatchingEventHandler):
+     """Class for handling creation fo fast5-files"""
+     patterns = ["*.fast5"]
+     def on_created(self, event):
+-      print "Calling", event
++      print("Calling", event)
+       file_name = str(os.path.basename(event.src_path))
+       fasta_file_name = os.path.splitext(event.src_path)[0] + '.fasta'
+       with open(fasta_file_name, "w") as fo:
+Index: deepnano/r9/extract_events.py
+===================================================================
+--- deepnano.orig/r9/extract_events.py
++++ deepnano/r9/extract_events.py
+@@ -18,7 +18,7 @@ defs = {
+ }
+ 
+ def get_raw(h5):
+-  rk = h5["Raw/Reads"].keys()[0]
++  rk = list(h5["Raw/Reads"].keys())[0]
+ 
+   raw = h5["Raw/Reads"][rk]["Signal"]
+   meta = h5["UniqueGlobalKey/channel_id"].attrs
+@@ -69,7 +69,7 @@ def get_tstat(s, s2, wl):
+    
+ 
+ def extract_events(h5, chem):
+-  print "ed"
++  print("ed")
+   raw, sl = get_raw(h5)
+ 
+   events = event_detect(raw, sl, **defs[chem]["ed_params"])
+Index: deepnano/r9/training/realign.py
+===================================================================
+--- deepnano.orig/r9/training/realign.py
++++ deepnano/r9/training/realign.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ from qrnn import BatchNet
+ import pickle
+ import sys
+@@ -53,17 +54,17 @@ def realign(s):
+ #         fo.write(alph[b])
+ #      fo.close()      
+   f = open(base_dir+"tmpb-%s.in" % s, "w")
+-  print >>f, refs[ps]
++  print(refs[ps], file=f)
+   for a, b in zip(o1[0], o2[0]):
+-    print >>f, " ".join(map(str, a))
+-    print >>f, " ".join(map(str, b))
++    print(" ".join(map(str, a)), file=f)
++    print(" ".join(map(str, b)), file=f)
+   f.close()
+ 
+-  print "s", s
++  print("s", s)
+   ret = subprocess.call("ulimit -v 32000000; ./realign <%stmpb-%s.in >%stmpb-%s.out" % (base_dir, s, base_dir, s), shell=True)
+ 
+   if ret != 47:
+-    print "fail", s
++    print("fail", s)
+     return
+ 
+   f = open(base_dir+"tmpb-%s.out" % s)
+@@ -72,9 +73,9 @@ def realign(s):
+     data_y2[ps][i] = mapping[l[1]]
+ 
+   fo = open(names[s] + "r", "w")
+-  print >>fo, refs[s]
++  print(refs[s], file=fo)
+   for x, y, y2 in zip(data_x[s], data_y[s], data_y2[s]):
+-    print >>fo, " ".join(map(str, x)), "%c%c" % (alph[y], alph[y2])
++    print(" ".join(map(str, x)), "%c%c" % (alph[y], alph[y2]), file=fo)
+   fo.close()
+ 
+ if __name__ == '__main__':
+@@ -95,14 +96,14 @@ if __name__ == '__main__':
+     f = open(fn)
+     ref = f.readline()
+     if len(ref) > 30000:
+-      print "out", len(ref)
++      print("out", len(ref))
+       continue
+     refs.append(ref.strip())
+     names.append(fn)
+     X = []
+     Y = []
+     Y2 = []
+-    print "\rfn", fn,
++    print("\rfn", fn, end=' ')
+     sys.stdout.flush()
+     for l in f:
+       its = l.strip().split()
+Index: deepnano/r9/training/theano_toolkit/parameters.py
+===================================================================
+--- deepnano.orig/r9/training/theano_toolkit/parameters.py
++++ deepnano/r9/training/theano_toolkit/parameters.py
+@@ -1,6 +1,7 @@
++from six.moves import cPickle as pickle
++from six.moves import reduce
+ import theano
+ import numpy as np
+-import cPickle as pickle
+ from functools import reduce
+ import inspect
+ 
+@@ -24,7 +25,7 @@ class Parameters():
+                 name=name
+             )
+         else:
+-            print >> sys.stderr, "%s already assigned" % name
++            print("%s already assigned" % name, file=sys.stderr)
+ 
+             params[name].set_value(np.asarray(
+                 array,
+@@ -60,7 +61,7 @@ class Parameters():
+             if k in loaded:
+                 params[k].set_value(loaded[k])
+             else:
+-                print >> sys.stderr, "%s does not exist." % k
++                print("%s does not exist." % k, file=sys.stderr)
+ 
+     def __enter__(self):
+         _, _, _, env_locals = inspect.getargvalues(
+Index: deepnano/r9/training/theano_toolkit/updates.py
+===================================================================
+--- deepnano.orig/r9/training/theano_toolkit/updates.py
++++ deepnano/r9/training/theano_toolkit/updates.py
+@@ -1,4 +1,4 @@
+-from itertools import izip
++import six
+ import theano.tensor as T
+ import numpy as np
+ from parameters import Parameters
+@@ -15,7 +15,7 @@ def nan_shield(parameters, deltas, other
+     delta_sum = sum(T.sum(d) for d in deltas)
+     not_finite = T.isnan(delta_sum) | T.isinf(delta_sum)
+     parameter_updates = [(p, T.switch(not_finite, 0.9 * p, p - d))
+-                         for p, d in izip(parameters, deltas)]
++                         for p, d in six.moves.zip(parameters, deltas)]
+     other_updates = [(p, T.switch(not_finite, p, u))
+                      for p, u in other_updates]
+     return parameter_updates, other_updates
+@@ -51,25 +51,25 @@ def adadelta(parameters, gradients,
+     shapes = get_shapes(parameters)
+ 
+     acc_gradients_sq = [create_param(P, "grad_sq_" + p.name, np.zeros(s))
+-                        for p, s in izip(parameters, shapes)]
++                        for p, s in six.moves.zip(parameters, shapes)]
+     acc_deltas_sq = [create_param(P, "deltas_sq_" + p.name, np.zeros(s))
+-                     for p, s in izip(parameters, shapes)]
++                     for p, s in six.moves.zip(parameters, shapes)]
+ 
+     gradients_sq = [T.sqr(g) for g in gradients]
+     gradients_sq_new = [rho * acc_g_sq + (np.float32(1) - rho) * g_sq
+-                        for acc_g_sq, g_sq in izip(
++                        for acc_g_sq, g_sq in six.moves.zip(
+                             acc_gradients_sq, gradients_sq)]
+     learning_rate_sq = [(d_sq + eps) / (g_sq + eps)
+-                        for d_sq, g_sq in izip(
++                        for d_sq, g_sq in six.moves.zip(
+                             acc_deltas_sq, gradients_sq_new)]
+ 
+     deltas_sq = [lr_sq * g_sq for lr_sq,
+-                 g_sq in izip(learning_rate_sq, gradients_sq)]
++                 g_sq in six.moves.zip(learning_rate_sq, gradients_sq)]
+     deltas_sq_new = [rho * acc_d_sq + (np.float32(1.) - rho) *
+-                     d_sq for acc_d_sq, d_sq in izip(acc_deltas_sq, deltas_sq)]
++                     d_sq for acc_d_sq, d_sq in six.moves.zip(acc_deltas_sq, deltas_sq)]
+ 
+     deltas = [T.sqrt(lr_sq) * g for lr_sq,
+-              g in izip(learning_rate_sq, gradients)]
++              g in six.moves.zip(learning_rate_sq, gradients)]
+ 
+     gradient_sq_updates = zip(acc_gradients_sq, gradients_sq_new)
+     deltas_sq_updates = zip(acc_deltas_sq, deltas_sq_new)
+@@ -81,11 +81,11 @@ def adagrad(parameters, gradients, learn
+     shapes = get_shapes(parameters)
+ 
+     grad_sq = [create_param(P, "acc_sq_" + p.name, np.zeros(s))
+-               for p, s in izip(parameters, shapes)]
++               for p, s in six.moves.zip(parameters, shapes)]
+ 
+-    grad_sq_new = [g_sq + g**2 for g, g_sq in izip(gradients, grad_sq)]
++    grad_sq_new = [g_sq + g**2 for g, g_sq in six.moves.zip(gradients, grad_sq)]
+     deltas = [learning_rate * g / T.sqrt(g_sq + 1e-6)
+-              for g, g_sq in izip(gradients, grad_sq_new)]
++              for g, g_sq in six.moves.zip(gradients, grad_sq_new)]
+     grad_sq_update = zip(grad_sq, grad_sq_new)
+ 
+     return deltas, grad_sq_update
+@@ -99,7 +99,7 @@ def momentum(parameters, gradients, mu=0
+     mu = m * (1 - 3.0 / (P.t + 5)) + (1 - m) * mu
+     shapes = get_shapes(parameters)
+     deltas = [create_param(P, "deltas_" + p.name, np.zeros(s))
+-              for p, s in izip(parameters, shapes)]
++              for p, s in six.moves.zip(parameters, shapes)]
+     delta_nexts = [mu * delta + eps * grad for delta,
+                    grad in zip(deltas, gradients)]
+     delta_updates = [(delta, delta_next)
+@@ -116,24 +116,24 @@ def rmsprop(parameters, gradients,
+             P=None):
+     shapes = get_shapes(parameters)
+     sq_acc = [create_param(P, "sq_acc_" + p.name, np.zeros(s))
+-              for p, s in izip(parameters, shapes)]
++              for p, s in six.moves.zip(parameters, shapes)]
+     acc = [create_param(P, "acc_" + p.name, np.zeros(s))
+-           for p, s in izip(parameters, shapes)]
++           for p, s in six.moves.zip(parameters, shapes)]
+     delta_acc = [create_param(P, "delta_acc_" + p.name, np.zeros(s))
+-                 for p, s in izip(parameters, shapes)]
++                 for p, s in six.moves.zip(parameters, shapes)]
+ 
+     sq_avg = [discount * sq_a + (1 - discount) * (g**2)
+-              for sq_a, g in izip(sq_acc, gradients)]
++              for sq_a, g in six.moves.zip(sq_acc, gradients)]
+     avg = [discount * a + (1 - discount) * g for a,
+-           g in izip(acc, gradients)]
++           g in six.moves.zip(acc, gradients)]
+     scaled_grads = [g / T.sqrt(sq_a - a**2 + epsilon)
+-                    for g, a, sq_a in izip(gradients, acc, sq_acc)]
++                    for g, a, sq_a in six.moves.zip(gradients, acc, sq_acc)]
+     deltas = [momentum * d_a + learning_rate *
+-              s_g for d_a, s_g in izip(delta_acc, scaled_grads)]
++              s_g for d_a, s_g in six.moves.zip(delta_acc, scaled_grads)]
+ 
+-    sq_acc_updates = [(sq_a, sq_aa) for sq_a, sq_aa in izip(sq_acc, sq_avg)]
+-    acc_updates = [(a,    aa) for a,   aa in izip(acc, avg)]
+-    delta_updates = [(d_a, d) for d_a, d in izip(delta_acc, deltas)]
++    sq_acc_updates = [(sq_a, sq_aa) for sq_a, sq_aa in six.moves.zip(sq_acc, sq_avg)]
++    acc_updates = [(a,    aa) for a,   aa in six.moves.zip(acc, avg)]
++    delta_updates = [(d_a, d) for d_a, d in six.moves.zip(delta_acc, deltas)]
+ 
+     return deltas, acc_updates + sq_acc_updates + delta_updates
+ 
+@@ -149,15 +149,15 @@ def adam(parameters, gradients,
+     P.t = np.float32(1)
+ 
+     moment1_acc = [create_param(P, "moment1_" + p.name, np.zeros(s))
+-                   for p, s in izip(parameters, shapes)]
++                   for p, s in six.moves.zip(parameters, shapes)]
+ 
+     moment2_acc = [create_param(P, "moment2_" + p.name, np.zeros(s))
+-                   for p, s in izip(parameters, shapes)]
++                   for p, s in six.moves.zip(parameters, shapes)]
+ 
+     deltas = []
+     updates = []
+     updates.append((P.t, P.t + 1))
+-    for m1, m2, g in izip(moment1_acc, moment2_acc, gradients):
++    for m1, m2, g in six.moves.zip(moment1_acc, moment2_acc, gradients):
+         new_m1 = moment1_decay * m1 + (1 - moment1_decay) * g
+         new_m2 = moment2_decay * m2 + (1 - moment2_decay) * T.sqr(g)
+         bc_m1 = new_m1 / (1 - moment1_decay**P.t)
+Index: deepnano/r9/training/train.py
+===================================================================
+--- deepnano.orig/r9/training/train.py
++++ deepnano/r9/training/train.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ from qrnn import BatchNet
+ import pickle
+ import sys
+@@ -55,16 +56,16 @@ def realign(s):
+ #      fo.close()      
+   f = open(base_dir+"tmpb-%s.in" % s, "w")
+   lc = 0
+-  print >>f, refs[ps]
++  print(refs[ps], file=f)
+   for a, b in zip(o1[0], o2[0]):
+-    print >>f, " ".join(map(str, a))
+-    print >>f, " ".join(map(str, b))
++    print(" ".join(map(str, a)), file=f)
++    print(" ".join(map(str, b)), file=f)
+     lc += 1
+   f.close()
+ 
+-  print "s", s, datetime.datetime.now(), len(refs[ps]), lc, len(data_x[ps])
++  print("s", s, datetime.datetime.now(), len(refs[ps]), lc, len(data_x[ps]))
+   if os.system("./realign %stmpb-%s.in <%stmpb-%s.in >%stmpb-%s.out" % (base_dir, s, base_dir, s, base_dir, s)) != 0:
+-    print "watwat", s
++    print("watwat", s)
+     sys.exit()
+ 
+   f = open(base_dir+"tmpb-%s.out" % s)
+@@ -73,9 +74,9 @@ def realign(s):
+     data_y2[ps][i] = mapping[l[1]]
+ 
+   fo = open(names[s] + "r", "w")
+-  print >>fo, refs[s]
++  print(refs[s], file=fo)
+   for x, y, y2 in zip(data_x[s], data_y[s], data_y2[s]):
+-    print >>fo, " ".join(map(str, x)), "%c%c" % (alph[y], alph[y2])
++    print(" ".join(map(str, x)), "%c%c" % (alph[y], alph[y2]), file=fo)
+   fo.close()
+   return data_y[ps], data_y2[ps]
+ 
+@@ -98,7 +99,7 @@ if __name__ == '__main__':
+     f = open(fn)
+     ref = f.readline()
+     if len(ref) > 30000:
+-      print "out", len(ref)
++      print("out", len(ref))
+       continue
+     X = []
+     Y = []
+@@ -111,12 +112,12 @@ if __name__ == '__main__':
+         Y.append(mapping[its[4][0]])
+         Y2.append(mapping[its[4][1]])
+       except:
+-        print "wat", fn
++        print("wat", fn)
+         good = False
+         break
+     if good:
+       if len(X) > 2000:
+-        print "\rfn", fn, len(X), len(Y), len(Y2), len(ref), len(refs),
++        print("\rfn", fn, len(X), len(Y), len(Y2), len(ref), len(refs), end="")
+         sys.stdout.flush()
+         refs.append(ref.strip())
+         names.append(fn)
+@@ -124,7 +125,7 @@ if __name__ == '__main__':
+         data_y.append(np.array(Y, dtype=np.int32))
+         data_y2.append(np.array(Y2, dtype=np.int32))
+ 
+-  print
++  print()
+   print ("done", sum(len(x) for x in refs), sum(len(x) for x in data_x))
+   sys.stdout.flush()
+ 
+@@ -153,7 +154,7 @@ if __name__ == '__main__':
+   base_dir += "/"
+   batch_size = 8
+-  n_batches = len(data_x) / batch_size + 1
++  n_batches = len(data_x) // batch_size + 1
+-  print len(data_x), batch_size, n_batches, datetime.datetime.now()
++  print(len(data_x), batch_size, n_batches, datetime.datetime.now())
+ 
+   for epoch in range(1000):
+     taken_gc = []
+@@ -226,19 +227,19 @@ if __name__ == '__main__':
+       sys.stdout.write('\r%d %f %f %f' % (s, tc / (s+1), tcs / max(1, ccs), tcb / max(1, ccb)))
+       sys.stdout.flush()
+ 
+-    print
++    print()
+     conf1 = confusion_matrix(y1mm, o1mm)
+     conf2 = confusion_matrix(y2mm, o2mm)
+     good = conf1[0,0] + conf1[1,1] + conf1[2,2] + conf1[3,3]
+     good += conf2[0,0] + conf2[1,1] + conf2[2,2] + conf2[3,3]
+     bad = np.sum(conf1) + np.sum(conf2) - good - conf1[4,4] - conf2[4,4]
+ 
+-    print epoch, tc / n_batches, 1.*tc2 / total_size, 1.*tc3 / total_size, 1.*good / (good + bad), datetime.datetime.now()
++    print(epoch, tc / n_batches, 1.*tc2 / total_size, 1.*tc3 / total_size, 1.*good / (good + bad), datetime.datetime.now())
+     print_stats(o1mm)
+     print_stats(o2mm)
+-    print conf1
+-    print conf2
+-  #  print "out", np.min(out_gc), np.median(out_gc), np.max(out_gc), len(out_gc)
++    print(conf1)
++    print(conf2)
++  #  print("out", np.min(out_gc), np.median(out_gc), np.max(out_gc), len(out_gc))
+     sys.stdout.flush()
+ 
+     if epoch % 20 == 19:
+Index: deepnano/training/prepare_dataset.py
+===================================================================
+--- deepnano.orig/training/prepare_dataset.py
++++ deepnano/training/prepare_dataset.py
+@@ -1,3 +1,5 @@
++from __future__ import print_function
++
+ import argparse
+ import os
+ import h5py
+@@ -18,7 +20,7 @@ for i, l in enumerate(finput):
+   h5 = h5py.File(filename, "r")
+   
+   fo = open(os.path.join(args.output_directory, "%s.txt" % i), "w")
+-  print >>fo, ref
++  print(ref, file=fo)
+   base_loc = get_base_loc(h5)
+   if args.type == 'temp':
+     scale, scale_sd, shift, drift = extract_scaling(h5, "template", base_loc)
+@@ -29,14 +31,14 @@ for i, l in enumerate(finput):
+       mean = (e["mean"] - shift) / scale
+       stdv = e["stdv"] / scale_sd
+       length = e["length"]
+-      print >>fo, " ".join(map(str, preproc_event(mean, stdv, length))),
++      print(" ".join(map(str, preproc_event(mean, stdv, length))), end=" ", file=fo)
+       move = e["move"]
+       if move == 0:
+-        print >>fo, "NN"
++        print("NN", file=fo)
+       if move == 1:
+-        print >>fo, "N%s" % e["model_state"][2]
++        print("N%s" % e["model_state"][2], file=fo)
+       if move == 2:
+-        print >>fo, "%s%s" % (e["model_state"][1], e["model_state"][2])
++        print("%s%s" % (e["model_state"][1], e["model_state"][2]), file=fo)
+   if args.type == 'comp':
+     scale, scale_sd, shift, drift = extract_scaling(h5, "complement", base_loc)
+     events = h5[base_loc+"/BaseCalled_%s/Events" % "complement"]
+@@ -46,14 +48,14 @@ for i, l in enumerate(finput):
+       mean = (e["mean"] - shift) / scale
+       stdv = e["stdv"] / scale_sd
+       length = e["length"]
+-      print >>fo, " ".join(map(str, preproc_event(mean, stdv, length))),
++      print(" ".join(map(str, preproc_event(mean, stdv, length))), end=" ", file=fo)
+       move = e["move"]
+       if move == 0:
+-        print >>fo, "NN"
++        print("NN", file=fo)
+       if move == 1:
+-        print >>fo, "N%s" % e["model_state"][2]
++        print("N%s" % e["model_state"][2], file=fo)
+       if move == 2:
+-        print >>fo, "%s%s" % (e["model_state"][1], e["model_state"][2])
++        print("%s%s" % (e["model_state"][1], e["model_state"][2]), file=fo)
+   if args.type == '2d':
+     tscale, tscale_sd, tshift, tdrift = extract_scaling(h5, "template", base_loc)
+     cscale, cscale_sd, cshift, cdrift = extract_scaling(h5, "complement", base_loc)
+@@ -79,13 +81,13 @@ for i, l in enumerate(finput):
+         stdv = e["stdv"] / cscale_sd
+         length = e["length"]
+         ev += [1] + preproc_event(mean, stdv, length)
+-      print >>fo, " ".join(map(str, ev)),
++      print(" ".join(map(str, ev)), end=" ", file=fo)
+       if prev == a[2]:
+-        print >>fo, "NN"
++        print("NN", file=fo)
+       elif not prev or a[2][:-1] == prev[1:]:
+-        print >>fo, "N%c" % a[2][2]
++        print("N%c" % a[2][2], file=fo)
+       else:
+-        print >>fo, "%c%c" % (a[2][1], a[2][2])
++        print("%c%c" % (a[2][1], a[2][2]), file=fo)
+ 
+ 
+   fo.close()
+Index: deepnano/training/train.py
+===================================================================
+--- deepnano.orig/training/train.py
++++ deepnano/training/train.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ from rnn import Rnn
+ import pickle
+ import sys
+@@ -25,15 +26,15 @@ def realign(s):
+   o2m = (np.argmax(o2, 1))
+   alph = "ACGTN"
+   f = open(base_dir+"tmpb-%s.in" % s, "w")
+-  print >>f, refs[ps]
++  print(refs[ps], file=f)
+   for a, b in zip(o1, o2):
+-    print >>f, " ".join(map(str, a))
+-    print >>f, " ".join(map(str, b))
++    print(" ".join(map(str, a)), file=f)
++    print(" ".join(map(str, b)), file=f)
+   f.close()
+ 
+-  print "s", s
+-  if os.system("/usr/lib/deepnano/realign <%stmpb-%s.in >%stmpb-%s.out" % (base_dir, s, base_dir, s)) != 0:
+-    print "watwat", s
++  print("s", s)
++  if os.system("/usr/lib/deepnano/realign <%stmpb-%s.in >%stmpb-%s.out" % (base_dir, s, base_dir, s)) != 0:
++    print("watwat", s)
+     sys.exit()
+ 
+   f = open(base_dir+"tmpb-%s.out" % s)
+@@ -60,7 +61,7 @@ if __name__ == '__main__':
+     f = open(fn)
+     ref = f.readline()
+     if len(ref) > 30000:
+-      print "out", len(ref)
++      print("out", len(ref))
+       continue
+     refs.append(ref.strip())
+     names.append(fn)
+@@ -76,12 +77,12 @@ if __name__ == '__main__':
+     data_y.append(np.array(Y, dtype=np.int32))
+     data_y2.append(np.array(Y2, dtype=np.int32))
+ 
+-  print ("done", sum(len(x) for x in refs))
++  print("done", sum(len(x) for x in refs))
+   sys.stdout.flush()
+ 
+   ntwk = Rnn(sys.argv[1])
+ 
+-  print ("net rdy")
++  print("net rdy")
+ 
+   s_arr = []
+   p_arr = []
+@@ -101,7 +102,7 @@ if __name__ == '__main__':
+   base_dir += "/"
+   batch_size = 1
+-  n_batches = len(data_x) / batch_size
++  n_batches = len(data_x) // batch_size
+-  print len(data_x), batch_size, n_batches, datetime.datetime.now()
++  print(len(data_x), batch_size, n_batches, datetime.datetime.now())
+ 
+   for epoch in range(1000):
+     if (epoch % 20 == 0 and epoch > 0) or (epoch == 0):
+@@ -151,14 +152,14 @@ if __name__ == '__main__':
+       sys.stdout.write('\r%d' % s)
+       sys.stdout.flush()
+ 
+-    print
++    print()
+ 
+-    print epoch, tc / n_batches, 1.*tc2 / n_batches / batch_size, 1.*tc3 / n_batches / batch_size, datetime.datetime.now()
++    print(epoch, tc / n_batches, 1.*tc2 / n_batches / batch_size, 1.*tc3 / n_batches / batch_size, datetime.datetime.now())
+     print_stats(o1mm)
+     print_stats(o2mm)
+-    print confusion_matrix(y1mm, o1mm)
+-    print confusion_matrix(y2mm, o2mm)
+-  #  print "out", np.min(out_gc), np.median(out_gc), np.max(out_gc), len(out_gc)
++    print(confusion_matrix(y1mm, o1mm))
++    print(confusion_matrix(y2mm, o2mm))
++  #  print("out", np.min(out_gc), np.median(out_gc), np.max(out_gc), len(out_gc))
+     sys.stdout.flush()
+ 
+     if epoch % 20 == 2:


=====================================
debian/patches/series
=====================================
@@ -1,2 +1,3 @@
 she_bang.patch
 path_align_2d.patch
+py3-compat.patch


=====================================
debian/patches/she_bang.patch
=====================================
@@ -5,14 +5,14 @@ Description: Add missing #! line
 --- a/basecall.py
 +++ b/basecall.py
 @@ -1,3 +1,4 @@
-+#!/usr/bin/python
++#!/usr/bin/python3
  import argparse
  from rnn_fin import RnnPredictor
  import h5py
 --- a/basecall_no_metrichor.py
 +++ b/basecall_no_metrichor.py
 @@ -1,3 +1,4 @@
-+#!/usr/bin/python
++#!/usr/bin/python3
  import argparse
  from rnn_fin import RnnPredictor
  import h5py


=====================================
debian/rules
=====================================
@@ -5,11 +5,11 @@
 export DEB_BUILD_MAINT_OPTIONS = hardening=+all
 
 %:
-	dh $@  --with python2
+	dh $@  --with python3
 
 override_dh_auto_build:
 	dh_auto_build
-	markdown_py -f README.html README.md
+	python3 -m markdown -f README.html README.md
 	g++ $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -std=gnu++0x align_2d.cc -o align_2d
 	g++ $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -std=gnu++0x training/realign.cc -o realign
 



View it on GitLab: https://salsa.debian.org/med-team/deepnano/compare/4d2d4a12dd933c03c16e75bf8c76dc56415fe1e4...9473e9c3befe446250b5270aae1065f1cdc2bce8

-- 
View it on GitLab: https://salsa.debian.org/med-team/deepnano/compare/4d2d4a12dd933c03c16e75bf8c76dc56415fe1e4...9473e9c3befe446250b5270aae1065f1cdc2bce8
You're receiving this email because of your account on salsa.debian.org.


-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/debian-med-commit/attachments/20191103/9074a8b7/attachment-0001.html>


More information about the debian-med-commit mailing list