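# The excerpt below reads a FLAGS namespace, a CFG dict (the parsed dataset configuration)
# and the helper classes SemLaserScan / LaserScanComp, all of which are set up earlier in
# the original script and are not shown here. A minimal, hypothetical argparse wiring that
# would provide the attributes the excerpt uses (flag names and defaults are assumptions,
# not necessarily the script's real interface):
import argparse
import os

parser = argparse.ArgumentParser(description="compare two label folders on the same scans")
parser.add_argument("--dataset", required=True, help="dataset root containing the sequences/ folder")
parser.add_argument("--sequence", default="00", help="sequence to visualize")
parser.add_argument("--labels", nargs=2, required=True, help="the two label folder names to compare")
parser.add_argument("--offset", type=int, default=0, help="passed to the visualizer as offset=")
parser.add_argument("--ignore_safety", action="store_true", help="skip the label/scan count check")
parser.add_argument("--color_learning_map", action="store_true", help="color raw labels by their training class")
parser.add_argument("--ignore_images", action="store_true", help="passed (inverted) to the visualizer as images=")
parser.add_argument("--do_instances", action="store_true", help="passed to the visualizer as instances=")
parser.add_argument("--link", action="store_true", help="passed to the visualizer as link=")
FLAGS = parser.parse_args()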
assert len(FLAGS.labels) == 2
labels_a, labels_b = FLAGS.labels[0], FLAGS.labels[1]
label_a_paths = os.path.join(FLAGS.dataset, "sequences", FLAGS.sequence, labels_a)
label_b_paths = os.path.join(FLAGS.dataset, "sequences", FLAGS.sequence, labels_b)
if os.path.isdir(label_a_paths):
    print("Labels folder a exists! Using labels from %s" % label_a_paths)
else:
    print("Labels folder a doesn't exist! Exiting...")
    quit()
if os.path.isdir(label_b_paths):
    print("Labels folder b exists! Using labels from %s" % label_b_paths)
else:
    print("Labels folder b doesn't exist! Exiting...")
    quit()

# populate the pointclouds
label_a_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(label_a_paths)) for f in fn]
label_a_names.sort()
label_b_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(label_b_paths)) for f in fn]
label_b_names.sort()
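
# scan_names is used by the checks and the visualizer below but is not built in this
# excerpt. A minimal sketch of how it could be collected, mirroring the label lists
# above (the "velodyne" folder name is an assumption about the dataset layout):
scan_paths = os.path.join(FLAGS.dataset, "sequences", FLAGS.sequence, "velodyne")
scan_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(scan_paths)) for f in fn]
scan_names.sort()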
# check that there is the same number of labels and scans
if not FLAGS.ignore_safety:
    assert len(label_a_names) == len(scan_names)
    assert len(label_b_names) == len(scan_names)

# create scans
color_dict = CFG["color_map"]
if FLAGS.color_learning_map:
    # remap colors so that every raw label id is shown with the color of its training class
    learning_map_inv = CFG["learning_map_inv"]
    learning_map = CFG["learning_map"]
    color_dict = {key: color_dict[learning_map_inv[learning_map[key]]] for key, value in color_dict.items()}
scan_b = SemLaserScan(color_dict, project=True)
scan_a = SemLaserScan(color_dict, project=True)

# create a visualizer
images = not FLAGS.ignore_images
vis = LaserScanComp(scans=(scan_a, scan_b),
                    scan_names=scan_names,
                    label_names=(label_a_names, label_b_names),
                    offset=FLAGS.offset, images=images, instances=FLAGS.do_instances, link=FLAGS.link)

# print instructions
print("To navigate:")
print("\tb: back (previous scan)")
print("\tn: next (next scan)")
print("\tq: quit (exit program)")

# run the visualizer
vis.run()
# <FILESEP>
import difflib
import logging
import sys


def reconstruct_full_result(result, processed_text):
    """Merge post-processed text back into a word-level recognition result.

    Aligns the original words in result["result"] with the tokens of
    processed_text using difflib, and carries the word-level metadata
    (start times, confidences, etc.) over to the post-processed tokens.
    """
    words_full_postprocessed = []
    words_full = result["result"]
    words = [wi["word"] for wi in words_full]
    words_postprocessed = processed_text.split()
    #print(f"seq matching: {words} --- {words_postprocessed}")
    s = difflib.SequenceMatcher(None, words, words_postprocessed)
    for tag, i1, i2, j1, j2 in s.get_opcodes():
        if tag in ["delete"]:
            # a deletion means the postprocessor dropped words; fall back to the original result
            print("Warning: postprocessor should only replace or insert words (or word blocks), but [%s] detected" % tag, file=sys.stderr)
            words_full_postprocessed = words_full
            break
        else:
            if tag == "equal":
                words_full_postprocessed.extend(words_full[i1:i2])
            elif tag == "insert":
                # inserted words have no timing of their own, so attach them to the previous word
                if len(words_full_postprocessed) > 0:
                    words_full_postprocessed[-1]["word"] += " " + " ".join(words_postprocessed[j1:j2])
            elif tag == "replace":
                new_word = {"word": " ".join(words_postprocessed[j1:j2])}
                new_word["start"] = words_full[i1]["start"]
                for key in words_full[i2-1].keys():
                    if key not in ["word", "start", "phones"]:
                        new_word[key] = words_full[i2-1][key]
                if "word_with_punctuation" in new_word:
                    new_word["word_with_punctuation"] = new_word["word"] + new_word["punctuation"]
                new_word["unnormalized_words"] = words_full[i1:i2]
                if "confidence" in new_word:
                    new_word["confidence"] = min([w["confidence"] for w in words_full[i1:i2]])
                words_full_postprocessed.append(new_word)
    result["result"] = words_full_postprocessed
    result["text"] = " ".join(wi["word"] for wi in result["result"])
    return result
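
# A hypothetical round trip of reconstruct_full_result. The field names mirror those the
# function reads ("result", "word", "start", "confidence"); the sentence and the numbers
# are made up purely for illustration and are not taken from the original test data.
if __name__ == "__main__":
    demo = {
        "final": True,
        "result": [
            {"word": "twenty", "start": 0.10, "confidence": 0.91},
            {"word": "one", "start": 0.42, "confidence": 0.88},
            {"word": "dogs", "start": 0.70, "confidence": 0.95},
        ],
        "text": "twenty one dogs",
    }
    fixed = reconstruct_full_result(demo, "21 dogs")
    # "twenty one" becomes a single replaced token "21" (keeping the earliest start time and
    # the minimum confidence), while "dogs" is passed through as an equal block.
    print(fixed["text"])  # -> "21 dogs"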
def test_reconstruct():
    result = {
        "final": False,