This repository has been archived on 2025-03-01. You can view files and clone it, but cannot push or open issues or pull requests.
trantor/reader.go

165 lines
3.5 KiB
Go
Raw Normal View History

2012-08-21 18:15:21 +02:00
package main
import (
"git.gitorious.org/go-pkg/epub.git"
2012-08-22 10:33:57 +02:00
"html/template"
2012-08-21 18:15:21 +02:00
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"net/http"
"regexp"
"strings"
2012-08-28 10:38:00 +02:00
"strconv"
2012-08-21 18:15:21 +02:00
)
2012-08-28 10:38:00 +02:00
// chapter is a single table-of-contents entry shown in the reader's
// chapter list (built by chapterList from the epub navmap).
type chapter struct {
	Label  string // chapter title taken from the epub navmap
	Link   string // reader URL for this chapter (built by genLink)
	Depth  int    // nesting depth inside the navmap
	Active bool   // true when this is the chapter currently displayed
	In     []bool // one level in depth: one entry per navmap level entered before this chapter
	Out    []bool // one level out depth: one entry per navmap level left before this chapter
}
2012-08-21 18:15:21 +02:00
// readData is the template context passed to the "read" template.
type readData struct {
	S        Status        // page status data (from GetStatus)
	Book     Book          // the book being read
	Txt      template.HTML // chapter markup; <body> contents extracted by cleanHtml
	Chapters []chapter     // full chapter list from the epub navmap
	Next     string        // URL of the next chapter; "" when there is none
	Prev     string        // URL of the previous chapter; "" when there is none
	Back     string        // back-button target: "/new/" or "/book/<id>"
}
2012-08-21 20:54:57 +02:00
// readUrlExp splits a reader URL into base ("/read.../"), book id, file
// path inside the epub, and file extension. The file part is optional.
// Compiled once at package level instead of on every request.
var readUrlExp = regexp.MustCompile(`^(/read[^/]*/)([^/]*)/?(.*\.([^.]*))?$`)

// parseUrl breaks a reader URL path (e.g. "/read/<id>/<file.ext>") into
// its components: base (e.g. "/read/"), book id, file and extension.
// Components that are absent — or all four when the path does not match
// at all — are returned as empty strings. (The previous version indexed
// a nil slice and panicked on non-matching paths.)
func parseUrl(url string) (string, string, string, string) {
	res := readUrlExp.FindStringSubmatch(url)
	if res == nil {
		return "", "", "", ""
	}
	// res holds the full match plus the four capture groups; optional
	// groups that did not participate are already "".
	return res[1], res[2], res[3], res[4]
}
// cleanHtml extracts the contents of the document's <body> element and
// rewraps it in a <div>, carrying the body tag's attributes over to the
// div. Input without a "<body" marker is returned untouched; a marker
// with no closing ">" yields just the attribute fragment.
func cleanHtml(html string) string {
	pieces := strings.Split(html, "<body")
	if len(pieces) < 2 {
		// No body tag at all: pass the input through unchanged.
		return html
	}
	pieces = strings.SplitN(pieces[1], ">", 2)
	if len(pieces) < 2 {
		// Malformed tag: nothing after "<body…" to extract.
		return pieces[0]
	}
	attrs := pieces[0]
	content := strings.Split(pieces[1], "</body>")[0]
	return "<div " + attrs + ">" + content + "</div>"
}
2012-08-28 10:38:00 +02:00
// genLink builds the reader URL for a chapter resource:
// base + id + "/" + link, e.g. "/read/" + "<id>" + "/" + "ch1.html".
func genLink(id string, base string, link string) string {
	bookRoot := base + id
	return bookRoot + "/" + link
}
// cleanLink percent-decodes an epub-internal link so it can be compared
// with the already-decoded file path taken from the request URL.
// Fixes over the previous version: a truncated trailing escape ("%2")
// no longer panics with an out-of-range slice, an invalid escape
// ("%zz") is kept literally instead of decoding to a NUL byte, and the
// decoded value is written as a raw byte so multi-byte UTF-8 sequences
// ("%C3%A9") reassemble correctly instead of being rune-mangled.
func cleanLink(link string) string {
	var b strings.Builder
	b.Grow(len(link))
	for i := 0; i < len(link); i++ {
		if link[i] == '%' && i+2 < len(link) {
			if c, err := strconv.ParseUint(link[i+1:i+3], 16, 8); err == nil {
				b.WriteByte(byte(c))
				i += 2 // skip the two hex digits just consumed
				continue
			}
		}
		b.WriteByte(link[i])
	}
	return b.String()
}
/* return next and prev urls from document and the list of chapters */
func chapterList(e *epub.Epub, file string, id string, base string) (string, string, []chapter) {
2012-08-28 12:24:11 +02:00
var chapters []chapter
prev := ""
next := ""
2012-08-28 10:38:00 +02:00
tit := e.Titerator(epub.TITERATOR_NAVMAP)
defer tit.Close()
activeIndx := -1
2012-08-28 11:50:08 +02:00
depth := 0
2012-08-28 10:38:00 +02:00
for ; tit.Valid(); tit.Next() {
var c chapter
c.Label = tit.Label()
c.Link = genLink(id, base, tit.Link())
if cleanLink(tit.Link()) == file {
c.Active = true
activeIndx = len(chapters)
}
2012-08-28 11:50:08 +02:00
c.Depth = tit.Depth()
2012-08-28 12:24:11 +02:00
for c.Depth > depth {
c.In = append(c.In, true)
depth++
2012-08-28 11:50:08 +02:00
}
2012-08-28 12:24:11 +02:00
for c.Depth < depth {
c.Out = append(c.Out, true)
depth--
2012-08-28 11:50:08 +02:00
}
2012-08-28 10:38:00 +02:00
chapters = append(chapters, c)
}
/* if is the same chapter check the previous */
i := activeIndx-1
for i > 0 && strings.Contains(chapters[i].Link, "#") {
i--
}
if i > 0 {
prev = chapters[i].Link
}
i = activeIndx+1
for i < len(chapters) && strings.Contains(chapters[i].Link, "#") {
i++
}
if i < len(chapters) {
next = chapters[i].Link
}
return next, prev, chapters
2012-08-28 10:38:00 +02:00
}
2012-08-21 18:15:21 +02:00
func readHandler(coll *mgo.Collection) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
2012-08-21 21:22:56 +02:00
base, id, file, ext := parseUrl(r.URL.Path)
2012-08-21 21:27:19 +02:00
if base == "/readnew/" {
sess := GetSession(r)
if sess.User == "" {
http.NotFound(w, r)
return
}
}
2012-08-21 21:22:56 +02:00
books, _, err := GetBook(coll, bson.M{"_id": bson.ObjectIdHex(id)})
2012-08-21 18:15:21 +02:00
if err != nil || len(books) == 0 {
http.NotFound(w, r)
return
}
book := books[0]
e, _ := epub.Open(book.Path, 0)
defer e.Close()
if file == "" {
it := e.Iterator(epub.EITERATOR_LINEAR)
defer it.Close()
2012-08-22 10:33:57 +02:00
http.Redirect(w, r, base+id+"/"+it.CurrUrl(), 307)
2012-08-21 18:15:21 +02:00
return
}
2012-08-21 18:30:44 +02:00
if ext == "html" || ext == "htm" || ext == "xhtml" || ext == "xml" {
2012-08-21 18:15:21 +02:00
var data readData
data.S = GetStatus(w, r)
data.Book = book
data.Next, data.Prev, data.Chapters = chapterList(e, file, id, base)
2012-08-21 20:54:57 +02:00
if base == "/readnew/" {
data.Back = "/new/"
} else {
2012-08-21 21:22:56 +02:00
data.Back = "/book/" + id
2012-08-21 20:54:57 +02:00
}
2012-08-21 18:15:21 +02:00
page := string(e.Data(file))
data.Txt = template.HTML(cleanHtml(page))
loadTemplate(w, "read", data)
} else {
w.Write(e.Data(file))
}
}
}