from lxml import etree
from lxml.etree import tostring
+import json
+
list_topic = False
filemakerxml = 'meksme-utf8.xml'
index += 1
#print(d)
words.append(d)
+
+ with open('meksme-utf8.json', 'w') as f:
+ json.dump(words, f)
+
def langsort(lang, e):
    # Sort key: collate entry e by its term in language `lang` using the
    # current locale's collation rules (locale.strxfrm).
    # NOTE(review): the handling of entries that lack `lang` appears to be
    # outside this view — as shown, such entries yield None; confirm the
    # fallback branch in the full file.
    if lang in e:
        return locale.strxfrm(e[lang])
}
)
def word2id(word):
    """Return *word* with characters unsafe in XML id attributes
    ([, ], (, ), /, ' and space) replaced by underscores.

    Used to derive glossentry ``id`` and glosssee ``otherterm`` values
    from glossary terms.
    """
    # One C-level translate pass instead of seven chained str.replace()
    # calls; the mapping is identical.
    return word.translate(str.maketrans("[]()/' ", "_______"))
+
def indexit(entry, wlist, lang=None):
    """Append an indexterm/primary element to *entry* for each non-empty
    comma-separated term in *wlist*.

    Each term is suffixed with "[%s]" % lang (so a lang of None yields
    a literal "[None]" tag, matching the original behavior).
    """
    for term in wlist.split(","):
        if term == "":
            continue
        tagged = "%s[%s]" % (term, lang)
        entry.append(E.indexterm(E.primary(tagged)))
ids = {}
+ redirects = {}
glossary = E.glossary()
for e in sorted(words, key=lambda x: langsort(lang, x)):
ldesc = 'desc-%s' % lang
e['topic'] = topicmap[lang][e['topic']]
if lang in e:
w = e[lang].split(',')
- id = w[0] \
- .replace('[', '_') \
- .replace(']', '_') \
- .replace('(', '_') \
- .replace(')', '_') \
- .replace('/', '_') \
- .replace('\'', '_') \
- .replace(' ', '_')
+ id = word2id(w[0])
while id in ids:
id = id + 'x'
ids[id] = True
- if ldesc not in e:
+
+            # First handle redirections with no extra info
+ if -1 != e[lang].find('>') and ldesc not in e:
+ p = e[lang].split(' > ')
+ if p[0] in redirects: # Skip if already added
+ continue
+ if -1 == p[1].find(','):
+ if '-' == p[1][-1]:
+ print("warning: Skipping dangling reference %s -> %s" %
+ (p[0], p[1]))
+ else:
+ seeentry = E.glossentry()
+ seeentry.append(E.glossterm(p[0]))
+ id = word2id(p[1])
+ seeentry.append(E.glosssee(otherterm=id))
+ glossary.append(seeentry)
+ redirects[p[0]] = id
+ else:
+ print("warning: skipping split refererence %s -> %s" %
+ (p[0], p[1]))
+ if False: # Not allowed in docbook
+ seeentry = E.glossentry()
+ seeentry.append(E.glossterm(p[0]))
+ for s in p[1].split(','):
+ s = s.strip().lstrip()
+ seeentry.append(E.glosssee(otherterm=word2id(s)))
+ glossary.append(seeentry)
+ continue
+
+ # Add See also entries pointing to main entry
+ if 1 < len(w):
+ for t in w[1:]:
+ t = t.strip().lstrip()
+ if t not in redirects:
+ #print("info: Adding see also entry for %s" % t)
+ seeentry = E.glossentry()
+ seeentry.append(E.glossterm(t))
+ seeentry.append(E.glosssee(otherterm=id))
+ glossary.append(seeentry)
+ redirects[t] = id
+ elif ldesc not in e:
print("warning: term %s missing primary language %s description" % (e[lang], lang))
entry = E.glossentry(id=id)
if list_topic and 'topic' in e:
e[codestr]))))
glossary.append(entry)
- # Add See also entries pointing to main entry
- if 1 < len(w):
- for t in w[1:]:
- t = t.strip().lstrip()
- #print("info: Adding see also entry for %s" % t)
- seeentry = E.glossentry()
- seeentry.append(E.glossterm(t))
- seeentry.append(E.glosssee(otherterm=id))
- glossary.append(seeentry)
-
def glosstermlocale(x):
# Look up glossterm (FIXME figure out more robust way)
t = x.getchildren()[0].text