-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathrss_feed.go
125 lines (100 loc) · 3.05 KB
/
rss_feed.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
package andrew
import (
"bytes"
"fmt"
"io/fs"
"net/http"
"path"
"strings"
"time"
)
// ServeRssFeed responds to an HTTP request with the site's RSS feed, built
// from the server's configured site files and channel metadata.
func (a Server) ServeRssFeed(w http.ResponseWriter, r *http.Request) {
	rss := GenerateRssFeed(a.SiteFiles, a.BaseUrl, a.RssTitle, a.RssDescription)

	w.Header().Set("Content-Type", "application/rss+xml; charset=utf-8")
	w.WriteHeader(http.StatusOK)

	// Write the feed bytes directly. A write error here almost always means
	// the client went away, which is not actionable — deliberately ignored
	// rather than panicking and killing the handler goroutine.
	_, _ = w.Write(rss)
}
// The RSS format's pretty simple.
// First we add a constant header identifying the version of the RSS feed.
// Then we add the "channel" information. A "channel" is this RSS document.
// Inside the "channel", we add all of the "items".
// For Andrew, an "item" is synonymous with a page that is not an index.html page.
// Finally, we close the channel.
// It's sort of an anachronistic site to visit, but https://www.rssboard.org/rss-specification is the reference for
// what I'm including in these items and the channel.
//
// All interpolated text (titles, descriptions, URLs) is XML-escaped so that
// characters like '&' and '<' in a page title cannot malform the feed.
func GenerateRssFeed(f fs.FS, baseUrl string, rssChannelTitle string, rssChannelDescription string) []byte {
	buff := new(bytes.Buffer)
	rssUrl := baseUrl + "/rss.xml"

	// escape maps the five XML special characters to their entity forms so
	// arbitrary page content cannot break the document structure.
	escape := strings.NewReplacer(
		"&", "&amp;",
		"<", "&lt;",
		">", "&gt;",
		`"`, "&quot;",
		"'", "&apos;",
	).Replace

	const (
		header = `<?xml version="1.0"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
<channel>
`
		footer = `</channel>
</rss>
`
	)

	pages, err := getPages(f)
	if err != nil {
		// The signature returns only []byte, so an unreadable site is
		// unrecoverable at this layer.
		panic(err)
	}

	fmt.Fprint(buff, header)
	fmt.Fprintf(buff, "\t<title>%s</title>\n"+
		"\t<link>%s</link>\n"+
		"\t<description>%s</description>\n"+
		"\t<generator>Andrew</generator>\n", escape(rssChannelTitle), escape(baseUrl), escape(rssChannelDescription))

	for _, page := range pages {
		fmt.Fprintf(buff, "\t<item>\n"+
			"\t\t<title>%s</title>\n"+
			"\t\t<link>%s</link>\n"+
			"\t\t<pubDate>%s</pubDate>\n"+
			"\t\t<source url=\"%s\">%s</source>\n"+
			"\t</item>\n", escape(page.Title), escape(baseUrl+"/"+page.UrlPath), page.PublishTime.Format(time.RFC1123Z), escape(rssUrl), escape(rssChannelTitle))
	}
	fmt.Fprint(buff, footer)

	return buff.Bytes()
}
// getPages walks siteFiles and builds a Page for every HTML file that is not
// an index.html page, returning the collection sorted by publish date. The
// error from the walk (if any) is returned alongside whatever pages were
// collected before it occurred.
func getPages(siteFiles fs.FS) ([]Page, error) {
	pages := []Page{}
	localContentRoot := path.Dir(".")

	err := fs.WalkDir(siteFiles, localContentRoot, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		// Directories are never pages; skip them so we don't try to read one
		// below (a directory named e.g. "foo.html" would otherwise slip through).
		if d.IsDir() {
			return nil
		}

		// We don't list index files in our collection of siblings and children, because I don't
		// want a link back to a page that contains only links. Compare the base name
		// exactly — a plain substring match would also skip pages like "my-index.html".
		if path.Base(p) == "index.html" {
			return nil
		}

		// If the file we're considering isn't an html file, let's move on with our day.
		// Check the extension exactly — a plain substring match would also accept
		// files such as "notes-about-html.txt".
		if path.Ext(p) != ".html" {
			return nil
		}

		pageContent, err := fs.ReadFile(siteFiles, p)
		if err != nil {
			return err
		}

		title, err := getTitle(p, pageContent)
		if err != nil {
			return err
		}

		publishTime, err := getPublishTime(siteFiles, p, pageContent)
		if err != nil {
			return err
		}

		// links require a URL relative to the page we're discovering siblings from, not from
		// the root of the file system
		pages = append(pages, Page{
			Title:       title,
			UrlPath:     strings.TrimPrefix(p, localContentRoot+"/"),
			Content:     string(pageContent),
			PublishTime: publishTime,
		})
		return nil
	})

	pages = SortPagesByDate(pages)
	return pages, err
}