from lxml import etree
from lxml.etree import tostring
# Name of the FileMaker Pro XML export holding the glossary records.
filemakerxml = "meksme-utf8.xml"

# Parse the export once and keep the document root for lookups below.
tree = etree.parse(filemakerxml)
root = tree.getroot()
# Column order of each FileMaker record.  Language columns use ISO 639
# codes; the desc-* columns carry the per-language definitions.
cols = (
    'topic',
    'sme',
    'desc-sme',
    'desc-nb',
    'nb',
    'sv',
    'fi',
    'en',
    'is',
)
# Topic-label translation tables: maps the Northern Sami topic values
# found in the XML export to the matching term in each target language.
# Only bokmål ('nb') is filled in so far; add more languages as needed.
topicmap = {
    'nb': {
        'fáddá': 'tema',
        'ávnnas': 'emne',
        'eanan': 'land',
        'biras': 'miljø',
        'huksen': 'bygg',
        'bohcci': 'rør',
        'data': 'data',
        'hydr': 'hydraulikk',
        'fys': 'fysikk',
        'sveis': 'sveising',
        'mihttu': 'måling',
        'elektro': 'elektro',
        'neavvu': 'verktøy',
        'mohtor': 'motor',
        'mašiidna': 'maskin',
        'fuolahas': 'bearbeiding',
    }
}
+
# Locate the FileMaker result set inside the parsed export.
resultset = root.find("{http://www.filemaker.com/fmpxmlresult}RESULTSET")
# Accumulates one dict per glossary entry, keyed by the names in `cols`.
words = []
# NOTE(review): fragment — the loop that builds each record dict `d`
# from the RESULTSET rows (and initializes/advances `index`) is not
# visible in this chunk; `index` and `d` are defined in the elided code.
index += 1
#print(d)
words.append(d)
def langsort(lang, e):
    """Sort key for a glossary entry: the term in *lang* when present,
    falling back to the Northern Sami ('sme') term otherwise."""
    # EAFP: one lookup on the common path instead of membership test + index.
    try:
        return e[lang]
    except KeyError:
        return e['sme']
def make_glossary_docbook(lang, langcodes):
    """Write a DocBook <glosslist> for *lang* to glossary.xml.

    Every entry in the module-level ``words`` list that has a term in
    *lang* becomes a <glossentry> holding: the term (tagged with its
    translated topic when one exists), index terms for each
    comma-separated word, the translations for the languages in
    *langcodes*, and the *lang* description when present.

    Side effects: rewrites each entry's 'topic' in place (translated via
    ``topicmap``) and overwrites glossary.xml in the current directory.
    """
    import lxml.builder
    E = lxml.builder.ElementMaker(
        nsmap={
            # 'xi': "http://www.w3.org/2001/XInclude",
        }
    )

    def indexit(entry, wlist, lang=None):
        # Index every comma-separated word; tag foreign words with their
        # language code unless the word already carries a [...] tag.
        for w in wlist.split(","):
            if "" != w:
                if lang and '[' not in w:
                    w += "[%s]" % lang
                entry.append(E.indexterm(E.primary(w)))

    glossary = E.glosslist()
    ldesc = 'desc-%s' % lang  # loop-invariant: description column name
    for e in sorted(words, key=lambda x: langsort(lang, x)):
        if 'topic' in e and lang in topicmap:
            # Translate the topic label, leaving it unchanged when no
            # translation is known.  The fallback also makes a second
            # pass over already-translated entries harmless, where a
            # plain topicmap[lang][e['topic']] would raise KeyError.
            e['topic'] = topicmap[lang].get(e['topic'], e['topic'])
        if lang in e:
            entry = E.glossentry()
            if 'topic' in e:
                entry.append(E.glossterm('%s [%s]' % (e[lang], e['topic'])))
            else:
                entry.append(E.glossterm(e[lang]))
            indexit(entry, e[lang])
            lstr = ""
            for l in langcodes:
                if l != lang and l in e:
                    lstr += "%s (%s) " % (e[l], l)
                    # Add foreign words to index, split on comma
                    indexit(entry, e[l], l)
            if "" != lstr:
                entry.append(E.glossdef(E.para(lstr)))
            if ldesc in e:
                entry.append(E.glossdef(E.para(e[ldesc])))
            glossary.append(entry)

    # NOTE: wrapping the list in glossary/glossdiv the way xmlto and
    # lint want it failed earlier; kept for reference:
    #   glossary = E.glossary(E.title("x"),
    #                         E.glossdiv(E.title("y"), glossary))

    content = lxml.etree.tostring(glossary,
                                  pretty_print=True,
                                  xml_declaration=True,
                                  encoding='UTF-8')
    with open('glossary.xml', 'wb') as f:
        f.write(content)
# Which output language to generate.  Alternatives: 'sme', 'sv', 'en'
# ('sv' has no configuration yet and produces no output).
focus = 'nb'

# focus -> (printed heading, languages to list as translations)
_glossary_configs = {
    'nb': ("Norsk/bokmål", ('en', 'sme', 'sv', 'da', 'fi', 'is')),
    'sme': ("Nordsamisk", ('nb', 'en', 'sv', 'da', 'fi', 'is')),
    'en': ("Engelsk", ('en', 'nb', 'sme', 'sv', 'da', 'fi', 'is')),
}

if focus in _glossary_configs:
    heading, codes = _glossary_configs[focus]
    print(heading)
    print()
    make_glossary_docbook(lang=focus, langcodes=codes)