# SPDX-License-Identifier: AGPL-3.0-or-later
#
# Copyright (C) 2023 Leo Gavilieau <xmoo@vern.cc>
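
"""Render GitLab wiki content for the frontend: a categorised sitemap and
individual wiki pages fetched over the GitLab v4 API."""
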
from pycmarkgfm import gfm_to_html as markdown
from api.base import api_call

def get_project_wiki_sitemap(instance, full_reponame):
    recv = api_call('https://%s/api/v4/projects/%s/wikis'
                    % (instance, full_reponame))

    if 'message' in recv:
        return "Could not retrieve pages: " + recv["message"]

    pages_html = "" # Convert JSON to HTML

    categories = {} # For storing links to pages inside categories
    individual = {} # For storing links to individual pages

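    # Each entry returned by the wikis endpoint is expected to carry "slug",
    # "title" and "format" keys; slugs containing "/" are treated as pages
    # nested inside a category.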
    for subdict in recv:
        if "/" in subdict["slug"]:
            slashcount = subdict["slug"].count("/")
            slashlist = subdict["slug"].split("/")

            category = ""
            for i in range(slashcount):
                if i - 1 == slashcount:
                    continue
                category += slashlist[i]

            try:
                tmp = categories[category]
            except:
                tmp = ""

            tmp += '&#9;<a class="project_wiki_category_link" \
                href="/%s/%s/-/wikis/%s">%s</a> (%s)<br>\n' \
                % (instance, full_reponame.replace("%2F", "/"), \
                subdict["slug"], subdict["title"], subdict["format"])

            categories[category] = tmp
        else:
            if subdict["slug"] in categories:
                continue

            individual[subdict["slug"]] = (
                '<a class="project_wiki_link" '
                'href="/%s/%s/-/wikis/%s">%s</a> (%s)<br>\n'
                % (instance, full_reponame.replace("%2F", "/"),
                   subdict["slug"], subdict["title"], subdict["format"]))


    # individual and categories are rendered last so that pages sharing a
    # name with a category can be skipped as redundant entries.
    categories_html = ""
    for key, val in categories.items():
        categories_html += ('<h3><a class="project_wiki_category" '
                            'href="/%s/%s/-/wikis/%s">%s</a></h3>\n%s'
                            % (instance, full_reponame.replace("%2F", "/"),
                               key, key, val))

    for key, val in individual.items():
        if key in categories:
            continue
        pages_html += val

    return pages_html + categories_html

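# A minimal usage sketch for the sitemap above; the instance and the
# URL-encoded repository path ("namespace%2Fproject") are hypothetical
# placeholders:
#
#     sitemap_html = get_project_wiki_sitemap("gitlab.example.org",
#                                             "group%2Fproject")
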
def get_project_wiki_page(instance, full_reponame, wiki=""):
    slug = wiki.replace('/', '%2F')

    # Check whether the user is requesting a real wiki page or the "Pages"
    # wikipage, which is a sitemap served through a separate API.
    recv = api_call('https://%s/api/v4/projects/%s/wikis/%s'
                    % (instance, full_reponame, slug))

    if 'message' in recv:
        # Error detected!
        return "Could not retrieve wiki: %s" % recv["message"]

    if 'content' in recv:
        # Render the page's Markdown content to HTML.
        decode = markdown(recv["content"])
        # Rewrite links to the instance so they point at the frontend instead.
        decode = decode.replace("https://" + instance, "/" + instance)
        decode = decode.replace("http://" + instance, "/" + instance)
        return decode

    return "Could not retrieve content..."