started cleaning up for v0.7.0

I've decided instead to release a minimum viable product for v0.7.0 and
get some feedback from the community, and most importantly some
motivation as well to be able to keep working on magnetico, as it
currently feels like a Sisyphean task where the development seems
like it will never end...
This commit is contained in:
Bora Alper 2017-11-02 23:15:13 +00:00
parent 828e4691da
commit ae691ada79
76 changed files with 1568 additions and 3802 deletions

219
.gitignore vendored
View File

@ -1,11 +1,26 @@
src/magneticod/vendor
src/magneticod/Gopkg.lock
src/magneticow/vendor
src/magneticow/Gopkg.lock
src/persistence/vendor
src/persistence/Gopkg.lock
vendor/
.idea/
Gopkg.lock
magneticow/bindata.go
# Created by https://www.gitignore.io/api/linux,python,pycharm
# Created by https://www.gitignore.io/api/go,linux,macos,windows
### Go ###
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
### Linux ###
*~
@ -22,164 +37,52 @@ src/persistence/Gopkg.lock
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### PyCharm ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
### macOS ###
*.DS_Store
.AppleDouble
.LSOverride
.idea/
# Icon must end with two \r
Icon
# User-specific stuff:
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
# Thumbnails
._*
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
## File-based project format:
*.iws
# Folder config file
Desktop.ini
## Plugin-specific files:
# Recycle Bin used on file shares
$RECYCLE.BIN/
# IntelliJ
/out/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# mpeltonen/sbt-idea plugin
.idea_modules/
# Windows shortcuts
*.lnk
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
### PyCharm Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
# *.iml
# modules.xml
# .idea/misc.xml
# *.ipr
# Sonarlint plugin
.idea/sonarlint
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
# End of https://www.gitignore.io/api/linux,python,pycharm
docker-compose.override.yml
.mypy_cache
# End of https://www.gitignore.io/api/go,linux,macos,windows

View File

@ -21,6 +21,14 @@
# version = "2.4.0"
[[constraint]]
branch = "master"
name = "github.com/Wessie/appdirs"
[[constraint]]
branch = "master"
name = "github.com/anacrolix/dht"
[[constraint]]
branch = "master"
name = "github.com/anacrolix/missinggo"
@ -31,7 +39,11 @@
[[constraint]]
branch = "master"
name = "github.com/bradfitz/iter"
name = "github.com/dustin/go-humanize"
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.5.0"
[[constraint]]
name = "github.com/jessevdk/go-flags"
@ -39,8 +51,12 @@
[[constraint]]
name = "github.com/mattn/go-sqlite3"
version = "1.2.0"
version = "1.3.0"
[[constraint]]
name = "github.com/willf/bloom"
version = "2.0.3"
[[constraint]]
name = "go.uber.org/zap"
version = "1.5.0"
version = "1.7.1"

9
Makefile Normal file
View File

@ -0,0 +1,9 @@
all: magneticod magneticow
magneticod:
go install magnetico/magneticod
magneticow:
# TODO: minify files!
go-bindata -o="magneticow/bindata.go" -prefix="magneticow/data/" magneticow/data/...
go install magnetico/magneticow

4
bin/.gitignore vendored
View File

@ -1,4 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

View File

@ -0,0 +1,463 @@
package bittorrent
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"net"
"time"
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/torrent/metainfo"
"go.uber.org/zap"
"magnetico/persistence"
)
const MAX_METADATA_SIZE = 10 * 1024 * 1024
type rootDict struct {
M mDict `bencode:"m"`
MetadataSize int `bencode:"metadata_size"`
}
type mDict struct {
UTMetadata int `bencode:"ut_metadata"`
}
type extDict struct {
MsgType int `bencode:"msg_type"`
Piece int `bencode:"piece"`
}
// awaitMetadata dials the given peer over TCP and tries to fetch the torrent
// metadata (the bencoded "info" dictionary) of infoHash, using the extension
// protocol (BEP 10) together with the metadata-exchange extension
// (BEP 9, ut_metadata).
//
// It is meant to be run as one goroutine per peer: every failure path logs at
// Debug level and returns, since individual peers failing is routine. On
// success the SHA-1-verified metadata is converted into a Metadata value and
// handed to ms.flush().
func (ms *MetadataSink) awaitMetadata(infoHash metainfo.Hash, peer Peer) {
	// Metadata is transferred in pieces of exactly 16 KiB each, except
	// possibly the last one (BEP 9).
	const pieceSize = 16 * 1024

	conn, err := net.DialTCP("tcp", nil, peer.Addr)
	if err != nil {
		zap.L().Debug(
			"awaitMetadata couldn't connect to the peer!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.Error(err),
		)
		return
	}
	defer conn.Close()

	// NOTE(review): panicking on SetNoDelay/SetDeadline failure takes the
	// whole process down for a per-connection, environmental error; kept
	// as-is to preserve existing behaviour, but consider demoting to Debug.
	err = conn.SetNoDelay(true)
	if err != nil {
		zap.L().Panic(
			"Couldn't set NODELAY!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.Error(err),
		)
		return
	}

	// The deadline covers the whole exchange: both handshakes and all pieces.
	err = conn.SetDeadline(time.Now().Add(ms.deadline))
	if err != nil {
		zap.L().Panic(
			"Couldn't set the deadline!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.Error(err),
		)
		return
	}

	// State Variables
	var isExtHandshakeDone, done bool
	var utMetadata, metadataReceived, metadataSize int
	var metadata []byte

	// BitTorrent handshake: pstrlen + "BitTorrent protocol" + 8 reserved
	// bytes (with the extension-protocol bit, 0x10, set) + info-hash + our
	// 20-byte peer ID. Total: 1 + 19 + 8 + 20 + 20 = 68 bytes.
	lHandshake := []byte(fmt.Sprintf(
		"\x13BitTorrent protocol\x00\x00\x00\x00\x00\x10\x00\x01%s%s",
		infoHash[:],
		ms.clientID,
	))
	if len(lHandshake) != 68 {
		zap.L().Panic(
			"Generated BitTorrent handshake is not of length 68!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.Int("len_lHandshake", len(lHandshake)),
		)
	}
	err = writeAll(conn, lHandshake)
	if err != nil {
		zap.L().Debug(
			"Couldn't write BitTorrent handshake!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.Error(err),
		)
		return
	}

	zap.L().Debug("BitTorrent handshake sent, waiting for the remote's...")

	rHandshake, err := readExactly(conn, 68)
	if err != nil {
		zap.L().Debug(
			"Couldn't read remote BitTorrent handshake!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.Error(err),
		)
		return
	}
	if !bytes.HasPrefix(rHandshake, []byte("\x13BitTorrent protocol")) {
		zap.L().Debug(
			"Remote BitTorrent handshake is not what it is supposed to be!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.ByteString("rHandshake[:20]", rHandshake[:20]),
		)
		return
	}

	// __on_bt_handshake
	// ================
	if rHandshake[25] != 16 { // TODO (later): do *not* compare the whole byte, check the bit instead! (0x10)
		zap.L().Debug(
			"Peer does not support the extension protocol!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
		)
		return
	}

	// Our extension handshake: advertise ut_metadata with local ID 1.
	// BUGFIX: the write error used to be silently ignored.
	err = writeAll(conn, []byte("\x00\x00\x00\x1a\x14\x00d1:md11:ut_metadatai1eee"))
	if err != nil {
		zap.L().Debug(
			"Couldn't write the extension handshake!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.String("remotePeerAddr", peer.Addr.String()),
			zap.Error(err),
		)
		return
	}
	zap.L().Debug(
		"Extension handshake sent, waiting for the remote's...",
		zap.ByteString("infoHash", infoHash[:]),
		zap.String("remotePeerAddr", peer.Addr.String()),
	)

	// the loop!
	// =========
	for !done {
		rLengthB, err := readExactly(conn, 4)
		if err != nil {
			zap.L().Debug(
				"Couldn't read the first 4 bytes from the remote peer in the loop!",
				zap.ByteString("infoHash", infoHash[:]),
				zap.String("remotePeerAddr", peer.Addr.String()),
				zap.Error(err),
			)
			return
		}

		rLength := bigEndianToInt(rLengthB)
		// BUGFIX: a zero-length message is a keep-alive; indexing
		// rMessage[0] on it used to panic.
		if rLength == 0 {
			continue
		}
		// Sanity cap so that a malicious peer cannot make us allocate an
		// arbitrarily large buffer. No legitimate message in this exchange
		// can exceed the metadata size plus a little framing overhead.
		if rLength > MAX_METADATA_SIZE+1024 {
			zap.L().Debug(
				"Message is suspiciously long, giving up!",
				zap.ByteString("infoHash", infoHash[:]),
				zap.Int("rLength", rLength),
			)
			return
		}

		rMessage, err := readExactly(conn, rLength)
		if err != nil {
			zap.L().Debug(
				"Couldn't read the rest of the message from the remote peer in the loop!",
				zap.ByteString("infoHash", infoHash[:]),
				zap.String("remotePeerAddr", peer.Addr.String()),
				zap.Error(err),
			)
			return
		}

		// __on_message
		// ------------
		if rMessage[0] != 0x14 { // We are interested only in extension messages, whose first byte is always 0x14
			zap.L().Debug(
				"Ignoring the non-extension message.",
				zap.ByteString("infoHash", infoHash[:]),
			)
			continue
		}
		// BUGFIX: guard before indexing rMessage[1] below; a one-byte
		// extension message used to panic.
		if len(rMessage) < 2 {
			continue
		}

		if rMessage[1] == 0x00 { // Extension Handshake has the Extension Message ID = 0x00
			// __on_ext_handshake_message(message[2:])
			// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
			// TODO: continue editing log messages from here
			if isExtHandshakeDone {
				return
			}

			rRootDict := new(rootDict)
			err := bencode.Unmarshal(rMessage[2:], rRootDict)
			if err != nil {
				zap.L().Debug("Couldn't unmarshal extension handshake!", zap.Error(err))
				return
			}
			if rRootDict.MetadataSize <= 0 || rRootDict.MetadataSize > MAX_METADATA_SIZE {
				zap.L().Debug("Unacceptable metadata size!", zap.Int("metadata_size", rRootDict.MetadataSize))
				return
			}

			utMetadata = rRootDict.M.UTMetadata // Save the ut_metadata code the remote peer uses
			metadataSize = rRootDict.MetadataSize
			metadata = make([]byte, metadataSize)
			isExtHandshakeDone = true

			zap.L().Debug("GOT EXTENSION HANDSHAKE!", zap.Int("ut_metadata", utMetadata), zap.Int("metadata_size", metadataSize))

			// Request all the pieces of metadata
			nPieces := int(math.Ceil(float64(metadataSize) / float64(pieceSize)))
			for piece := 0; piece < nPieces; piece++ {
				// __request_metadata_piece(piece)
				// ...............................
				extDictDump, err := bencode.Marshal(extDict{
					MsgType: 0,
					Piece:   piece,
				})
				if err != nil {
					zap.L().Warn("Couldn't marshal extDictDump!", zap.Error(err))
					return
				}
				// BUGFIX: the write error used to be silently ignored.
				err = writeAll(conn, []byte(fmt.Sprintf(
					"%s\x14%s%s",
					intToBigEndian(2+len(extDictDump), 4),
					intToBigEndian(utMetadata, 1),
					extDictDump,
				)))
				if err != nil {
					zap.L().Debug(
						"Couldn't write the metadata-piece request!",
						zap.ByteString("infoHash", infoHash[:]),
						zap.Error(err),
					)
					return
				}
			}
			zap.L().Warn("requested all metadata pieces!")
		} else if rMessage[1] == 0x01 {
			// __on_ext_message(message[2:])
			// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
			rMessageBuf := bytes.NewBuffer(rMessage[2:])
			rExtDict := new(extDict)
			// TODO: We monkey-patched anacrolix/torrent!
			err := bencode.NewDecoder2(rMessageBuf).Decode(rExtDict)
			if err != nil {
				zap.L().Warn("Couldn't decode extension message in the loop!", zap.Error(err))
				return
			}

			if rExtDict.MsgType == 1 { // data
				// Get the unread bytes!
				metadataPiece := rMessageBuf.Bytes()
				piece := rExtDict.Piece
				offset := piece * pieceSize
				// BUGFIX: bounds-check before copying; a malicious or buggy
				// peer could otherwise make us panic. This also covers data
				// arriving before the extension handshake, when metadata is
				// still nil.
				if piece < 0 || offset+len(metadataPiece) > len(metadata) {
					zap.L().Debug(
						"Out-of-bounds metadata piece!",
						zap.ByteString("infoHash", infoHash[:]),
						zap.Int("piece", piece),
						zap.Int("len_metadataPiece", len(metadataPiece)),
						zap.Int("metadataSize", metadataSize),
					)
					return
				}
				copy(metadata[offset:offset+len(metadataPiece)], metadataPiece)
				metadataReceived += len(metadataPiece)
				done = metadataReceived == metadataSize

				// BEP 9 explicitly states:
				// > If the piece is the last piece of the metadata, it may be less than 16kiB. If
				// > it is not the last piece of the metadata, it MUST be 16kiB.
				//
				// Hence...
				// ... if the length of @metadataPiece is more than 16kiB, we err.
				if len(metadataPiece) > 16*1024 {
					zap.L().Debug(
						"metadataPiece is bigger than 16kiB!",
						zap.Int("len_metadataPiece", len(metadataPiece)),
						zap.Int("metadataReceived", metadataReceived),
						zap.Int("metadataSize", metadataSize),
						zap.Int("metadataPieceIndex", bytes.Index(rMessage, metadataPiece)),
					)
					return
				}
				// ... if the length of @metadataPiece is less than 16kiB AND metadata is NOT
				// complete (!done) then we err.
				if len(metadataPiece) < 16*1024 && !done {
					zap.L().Debug(
						"metadataPiece is less than 16kiB and metadata is incomplete!",
						zap.Int("len_metadataPiece", len(metadataPiece)),
						zap.Int("metadataReceived", metadataReceived),
						zap.Int("metadataSize", metadataSize),
						zap.Int("metadataPieceIndex", bytes.Index(rMessage, metadataPiece)),
					)
					return
				}
				if metadataReceived > metadataSize {
					zap.L().Debug(
						"metadataReceived is greater than metadataSize!",
						zap.Int("len_metadataPiece", len(metadataPiece)),
						zap.Int("metadataReceived", metadataReceived),
						zap.Int("metadataSize", metadataSize),
						zap.Int("metadataPieceIndex", bytes.Index(rMessage, metadataPiece)),
					)
					return
				}

				zap.L().Debug(
					"Fetching...",
					zap.ByteString("infoHash", infoHash[:]),
					zap.String("remotePeerAddr", conn.RemoteAddr().String()),
					zap.Int("metadataReceived", metadataReceived),
					zap.Int("metadataSize", metadataSize),
				)
			} else if rExtDict.MsgType == 2 { // reject
				zap.L().Debug(
					"Remote peer rejected sending metadata!",
					zap.ByteString("infoHash", infoHash[:]),
					zap.String("remotePeerAddr", conn.RemoteAddr().String()),
				)
				return
			}
		} else {
			// Not a ut_metadata message: log (truncated) and keep reading.
			// BUGFIX: rMessage[:100] used to panic on messages shorter than
			// 100 bytes.
			truncAt := len(rMessage)
			if truncAt > 100 {
				truncAt = 100
			}
			zap.L().Debug(
				"Message is not an ut_metadata message! (ignoring)",
				zap.ByteString("msg", rMessage[:truncAt]),
			)
			// no return!
		}
	}

	zap.L().Debug(
		"Metadata is complete, verifying the checksum...",
		zap.ByteString("infoHash", infoHash[:]),
	)

	// The SHA-1 digest of the info dictionary must equal the info-hash.
	sha1Sum := sha1.Sum(metadata)
	if !bytes.Equal(sha1Sum[:], infoHash[:]) {
		zap.L().Debug(
			"Info-hash mismatch!",
			zap.ByteString("expectedInfoHash", infoHash[:]),
			zap.ByteString("actualInfoHash", sha1Sum[:]),
		)
		return
	}

	zap.L().Debug(
		"Checksum verified, checking the info dictionary...",
		zap.ByteString("infoHash", infoHash[:]),
	)

	info := new(metainfo.Info)
	err = bencode.Unmarshal(metadata, info)
	if err != nil {
		zap.L().Debug(
			"Couldn't unmarshal info bytes!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.Error(err),
		)
		return
	}
	err = validateInfo(info)
	if err != nil {
		zap.L().Debug(
			"Bad info dictionary!",
			zap.ByteString("infoHash", infoHash[:]),
			zap.Error(err),
		)
		return
	}

	// Collect the file list, rejecting torrents that claim negative sizes.
	var files []persistence.File
	for _, file := range info.Files {
		if file.Length < 0 {
			zap.L().Debug(
				"File size is less than zero!",
				zap.ByteString("infoHash", infoHash[:]),
				zap.String("filePath", file.DisplayPath(info)),
				zap.Int64("fileSize", file.Length),
			)
			return
		}
		files = append(files, persistence.File{
			Size: file.Length,
			Path: file.DisplayPath(info),
		})
	}

	var totalSize uint64
	for _, file := range files {
		totalSize += uint64(file.Size)
	}

	zap.L().Debug(
		"Flushing metadata...",
		zap.ByteString("infoHash", infoHash[:]),
	)
	ms.flush(Metadata{
		InfoHash:     infoHash[:],
		Name:         info.Name,
		TotalSize:    totalSize,
		DiscoveredOn: time.Now().Unix(),
		Files:        files,
	})
}
// validateInfo applies the same sanity checks that anacrolix/torrent applies
// to a decoded info dictionary: the concatenated piece hashes must be a whole
// number of 20-byte SHA-1 digests, and the declared piece length must be
// consistent with the total length of the files.
//
// COPIED FROM anacrolix/torrent
func validateInfo(info *metainfo.Info) error {
	switch {
	case len(info.Pieces)%20 != 0:
		return errors.New("pieces has invalid length")
	case info.PieceLength == 0:
		// A zero piece length is acceptable only for an empty torrent.
		if info.TotalLength() != 0 {
			return errors.New("zero piece length")
		}
	default:
		// The number of pieces implied by total length / piece length must
		// match the number of piece hashes present.
		expectedPieces := int((info.TotalLength() + info.PieceLength - 1) / info.PieceLength)
		if expectedPieces != info.NumPieces() {
			return errors.New("piece count and file lengths are at odds")
		}
	}
	return nil
}
func writeAll(c *net.TCPConn, b []byte) error {
for len(b) != 0 {
n, err := c.Write(b)
if err != nil {
return err
}
b = b[n:]
}
return nil
}
func readExactly(c *net.TCPConn, n int) ([]byte, error) {
b := make([]byte, n)
_, err := io.ReadFull(c, b)
return b, err
}
// TODO: add bounds checking!
func intToBigEndian(i int, n int) []byte {
b := make([]byte, n)
switch n {
case 1:
b = []byte{byte(i)}
case 2:
binary.BigEndian.PutUint16(b, uint16(i))
case 4:
binary.BigEndian.PutUint32(b, uint32(i))
default:
panic(fmt.Sprintf("n must be 1, 2, or 4!"))
}
if len(b) != n {
panic(fmt.Sprintf("postcondition failed: len(b) != n in intToBigEndian (i %d, n %d, len b %d, b %s)", i, n, len(b), b))
}
return b
}
// bigEndianToInt decodes a big-endian byte slice of width 1, 2, or 4 into an
// int. Any other width is a programmer error and panics.
func bigEndianToInt(b []byte) int {
	var value uint32
	switch len(b) {
	case 1:
		value = uint32(b[0])
	case 2:
		value = uint32(binary.BigEndian.Uint16(b))
	case 4:
		value = binary.BigEndian.Uint32(b)
	default:
		panic(fmt.Sprintf("bigEndianToInt: b is too long! (%d bytes)", len(b)))
	}
	return int(value)
}

View File

@ -0,0 +1,103 @@
package bittorrent
import (
"crypto/rand"
"fmt"
"net"
"strings"
"time"
"go.uber.org/zap"
"magnetico/magneticod/dht/mainline"
"magnetico/persistence"
)
// Metadata is the fully fetched and SHA-1-verified description of a single
// torrent, as assembled from a remote peer by MetadataSink.
type Metadata struct {
	InfoHash []byte
	// Name should be thought of "Title" of the torrent. For single-file torrents, it is the name
	// of the file, and for multi-file torrents, it is the name of the root directory.
	Name      string
	TotalSize uint64
	// DiscoveredOn is a Unix timestamp in seconds (set from time.Now().Unix()
	// when the metadata is flushed).
	DiscoveredOn int64
	// Files must be populated for both single-file and multi-file torrents!
	Files []persistence.File
}

// Peer identifies a remote BitTorrent peer by its TCP address.
type Peer struct {
	Addr *net.TCPAddr
}

// MetadataSink accepts peers discovered by the DHT trawler, spawns one
// goroutine per peer to fetch the torrent metadata, and delivers successful
// results on its drain channel.
type MetadataSink struct {
	clientID    []byte        // 20 random bytes used as our peer ID in BitTorrent handshakes
	deadline    time.Duration // overall per-connection deadline for a fetch attempt
	drain       chan Metadata // successfully fetched metadata is delivered here (see Drain)
	terminated  bool          // set by Terminate(); NOTE(review): read/written without synchronization — confirm callers serialize shutdown
	termination chan interface{} // closed on Terminate() to signal shutdown
}
// NewMetadataSink creates a MetadataSink whose per-peer connections are
// subject to the given overall deadline. It panics (via zap) only if the
// system's random source cannot supply the 20 random bytes used as our
// BitTorrent client/peer ID.
func NewMetadataSink(deadline time.Duration) *MetadataSink {
	ms := new(MetadataSink)

	// crypto/rand.Read fills the whole slice or returns an error, so the
	// old "TODO: remove this" length re-check was dead code and has been
	// removed.
	ms.clientID = make([]byte, 20)
	if _, err := rand.Read(ms.clientID); err != nil {
		zap.L().Panic("sinkMetadata couldn't read 20 random bytes for client ID!", zap.Error(err))
	}

	ms.deadline = deadline
	ms.drain = make(chan Metadata)
	ms.termination = make(chan interface{})
	return ms
}
// Sink receives one result from the DHT trawler and, if the peer's address
// is usable, spawns a goroutine that tries to fetch the torrent's metadata
// from that peer. Calling Sink on a terminated MetadataSink panics (via zap),
// as that is a programming error.
func (ms *MetadataSink) Sink(res mainline.TrawlingResult) {
	if ms.terminated {
		zap.L().Panic("Trying to Sink() an already closed MetadataSink!")
	}

	IPs := res.PeerIP.String()
	var rhostport string
	if IPs == "<nil>" { // net.IP.String() returns "<nil>" for a nil IP
		zap.L().Debug("MetadataSink.Sink: Peer IP is nil!")
		return
	} else if IPs[0] == '?' { // net.IP.String() yields a "?"-prefixed hex form for IPs of invalid length
		zap.L().Debug("MetadataSink.Sink: Peer IP is invalid!")
		return
	} else if strings.ContainsRune(IPs, ':') { // IPv6 — must be bracketed in host:port form
		rhostport = fmt.Sprintf("[%s]:%d", IPs, res.PeerPort)
	} else { // IPv4
		rhostport = fmt.Sprintf("%s:%d", IPs, res.PeerPort)
	}

	raddr, err := net.ResolveTCPAddr("tcp", rhostport)
	if err != nil {
		zap.L().Debug("MetadataSink.Sink: Couldn't resolve peer address!", zap.Error(err))
		return
	}

	// Fetch asynchronously: awaitMetadata logs-and-returns on any failure
	// and calls ms.flush() on success.
	go ms.awaitMetadata(res.InfoHash, Peer{Addr: raddr})
}
// Drain returns the channel on which successfully fetched Metadata values are
// delivered. Calling it on a terminated MetadataSink is a programming error
// and panics (via zap).
func (ms *MetadataSink) Drain() <-chan Metadata {
	if !ms.terminated {
		return ms.drain
	}
	zap.L().Panic("Trying to Drain() an already closed MetadataSink!")
	return nil // unreachable: zap.L().Panic does not return
}
// Terminate marks the sink as closed and closes both of its channels; after
// this, Sink() and Drain() panic if called.
//
// NOTE(review): terminated is written here without synchronization while
// flush() may be reading it from fetcher goroutines; a fetcher that passed
// flush's check just before this close could still send on the now-closed
// drain channel and panic — confirm the intended shutdown ordering.
func (ms *MetadataSink) Terminate() {
	ms.terminated = true
	close(ms.termination)
	close(ms.drain)
}
// flush delivers a successfully fetched Metadata to the Drain() channel,
// silently dropping it if the sink has already been terminated.
//
// NOTE(review): the terminated check and the send are not atomic — a
// concurrent Terminate() in between would make this a send on a closed
// channel (panic); confirm callers serialize shutdown against in-flight
// fetches.
func (ms *MetadataSink) flush(result Metadata) {
	if !ms.terminated {
		ms.drain <- result
	}
}

View File

@ -1,4 +1,3 @@
// TODO: This file, as a whole, needs a little skim-through to clear things up, sprinkle a little
// documentation here and there, and also to make the test coverage 100%.
// It, most importantly, lacks IPv6 support, if it's not altogether messy and unreliable
@ -7,17 +6,16 @@
package mainline
import (
"fmt"
"encoding/binary"
"fmt"
"net"
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/missinggo/iter"
"regexp"
"github.com/anacrolix/torrent/bencode"
"github.com/willf/bloom"
"regexp"
)
type Message struct {
// Query method (one of 4: "ping", "find_node", "get_peers", "announce_peer")
Q string `bencode:"q,omitempty"`
@ -33,7 +31,6 @@ type Message struct {
E Error `bencode:"e,omitempty"`
}
type QueryArguments struct {
// ID of the querying Node
ID []byte `bencode:"id"`
@ -64,7 +61,6 @@ type QueryArguments struct {
Scrape int `bencode:"noseed,omitempty"`
}
type ResponseValues struct {
// ID of the querying node
ID []byte `bencode:"id"`
@ -85,32 +81,26 @@ type ResponseValues struct {
// TODO: write marshallers for those fields above ^^
}
type Error struct {
Code int
Code int
Message []byte
}
// Represents peer address in either IPv6 or IPv4 form.
type CompactPeer struct {
IP net.IP
Port int
}
type CompactPeers []CompactPeer
type CompactNodeInfo struct {
ID []byte
Addr net.UDPAddr
}
type CompactNodeInfos []CompactNodeInfo
// This allows bencode.Unmarshal to do better than a string or []byte.
func (cps *CompactPeers) UnmarshalBencode(b []byte) (err error) {
var bb []byte
@ -131,7 +121,6 @@ func (cps CompactPeers) MarshalBinary() (ret []byte, err error) {
return
}
func (cp CompactPeer) MarshalBencode() (ret []byte, err error) {
ip := cp.IP
if ip4 := ip.To4(); ip4 != nil {
@ -143,7 +132,6 @@ func (cp CompactPeer) MarshalBencode() (ret []byte, err error) {
return bencode.Marshal(ret)
}
func (cp *CompactPeer) UnmarshalBinary(b []byte) error {
switch len(b) {
case 18:
@ -159,7 +147,6 @@ func (cp *CompactPeer) UnmarshalBinary(b []byte) error {
return nil
}
func (cp *CompactPeer) UnmarshalBencode(b []byte) (err error) {
var _b []byte
err = bencode.Unmarshal(b, &_b)
@ -169,7 +156,6 @@ func (cp *CompactPeer) UnmarshalBencode(b []byte) (err error) {
return cp.UnmarshalBinary(_b)
}
func UnmarshalCompactPeers(b []byte) (ret []CompactPeer, err error) {
num := len(b) / 6
ret = make([]CompactPeer, num)
@ -183,7 +169,6 @@ func UnmarshalCompactPeers(b []byte) (ret []CompactPeer, err error) {
return
}
// This allows bencode.Unmarshal to do better than a string or []byte.
func (cnis *CompactNodeInfos) UnmarshalBencode(b []byte) (err error) {
var bb []byte
@ -195,9 +180,8 @@ func (cnis *CompactNodeInfos) UnmarshalBencode(b []byte) (err error) {
return
}
func UnmarshalCompactNodeInfos(b []byte) (ret []CompactNodeInfo, err error) {
if len(b) % 26 != 0 {
if len(b)%26 != 0 {
err = fmt.Errorf("compact node is not a multiple of 26")
return
}
@ -215,7 +199,6 @@ func UnmarshalCompactNodeInfos(b []byte) (ret []CompactNodeInfo, err error) {
return
}
func (cni *CompactNodeInfo) UnmarshalBinary(b []byte) error {
copy(cni.ID[:], b)
b = b[len(cni.ID):]
@ -227,10 +210,13 @@ func (cni *CompactNodeInfo) UnmarshalBinary(b []byte) error {
return nil
}
func (cnis CompactNodeInfos) MarshalBencode() ([]byte, error) {
var ret []byte
if len(cnis) == 0 {
return []byte("0:"), nil
}
for _, cni := range cnis {
ret = append(ret, cni.MarshalBinary()...)
}
@ -238,7 +224,6 @@ func (cnis CompactNodeInfos) MarshalBencode() ([]byte, error) {
return bencode.Marshal(ret)
}
func (cni CompactNodeInfo) MarshalBinary() []byte {
ret := make([]byte, 20)
@ -252,12 +237,10 @@ func (cni CompactNodeInfo) MarshalBinary() []byte {
return ret
}
func (e Error) MarshalBencode() ([]byte, error) {
return []byte(fmt.Sprintf("li%de%d:%se", e.Code, len(e.Message), e.Message)), nil
}
func (e *Error) UnmarshalBencode(b []byte) (err error) {
var code, msgLen int

View File

@ -9,8 +9,7 @@ import (
"github.com/anacrolix/torrent/bencode"
)
var codecTest_validInstances = []struct{
var codecTest_validInstances = []struct {
data []byte
msg Message
}{
@ -46,7 +45,7 @@ var codecTest_validInstances = []struct{
Y: "q",
Q: "find_node",
A: QueryArguments{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
Target: []byte("mnopqrstuvwxyz123456"),
},
},
@ -58,7 +57,7 @@ var codecTest_validInstances = []struct{
T: []byte("aa"),
Y: "r",
R: ResponseValues{
ID: []byte("0123456789abcdefghij"),
ID: []byte("0123456789abcdefghij"),
Nodes: []CompactNodeInfo{},
},
},
@ -73,7 +72,7 @@ var codecTest_validInstances = []struct{
ID: []byte("0123456789abcdefghij"),
Nodes: []CompactNodeInfo{
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
},
@ -90,35 +89,35 @@ var codecTest_validInstances = []struct{
ID: []byte("0123456789abcdefghij"),
Nodes: []CompactNodeInfo{
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("zyxwvutsrqponmlkjihg"),
ID: []byte("zyxwvutsrqponmlkjihg"),
Addr: net.UDPAddr{IP: []byte("\xf5\x8e\x82\x8b"), Port: 6931, Zone: ""},
},
},
@ -133,7 +132,7 @@ var codecTest_validInstances = []struct{
Y: "q",
Q: "get_peers",
A: QueryArguments{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
InfoHash: []byte("mnopqrstuvwxyz123456"),
},
},
@ -145,7 +144,7 @@ var codecTest_validInstances = []struct{
T: []byte("aa"),
Y: "r",
R: ResponseValues{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
Token: []byte("aoeusnth"),
Values: []CompactPeer{
{IP: []byte("axje"), Port: 11893},
@ -161,15 +160,15 @@ var codecTest_validInstances = []struct{
T: []byte("aa"),
Y: "r",
R: ResponseValues{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
Token: []byte("aoeusnth"),
Nodes: []CompactNodeInfo{
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("zyxwvutsrqponmlkjihg"),
ID: []byte("zyxwvutsrqponmlkjihg"),
Addr: net.UDPAddr{IP: []byte("\xf5\x8e\x82\x8b"), Port: 6931, Zone: ""},
},
},
@ -184,10 +183,10 @@ var codecTest_validInstances = []struct{
Y: "q",
Q: "announce_peer",
A: QueryArguments{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
InfoHash: []byte("mnopqrstuvwxyz123456"),
Port: 6881,
Token: []byte("aoeusnth"),
Port: 6881,
Token: []byte("aoeusnth"),
},
},
},
@ -203,32 +202,30 @@ var codecTest_validInstances = []struct{
// TODO: Add announce_peer Query with optional `implied_port` argument.
}
func TestUnmarshal(t *testing.T) {
for i, instance := range codecTest_validInstances {
msg := Message{}
err := bencode.Unmarshal(instance.data, &msg)
if err != nil {
t.Errorf("Error while unmarshalling valid data #%d: %v", i + 1, err)
t.Errorf("Error while unmarshalling valid data #%d: %v", i+1, err)
continue
}
if reflect.DeepEqual(msg, instance.msg) != true {
t.Errorf("Valid data #%d unmarshalled wrong!\n\tGot : %+v\n\tExpected: %+v",
i + 1, msg, instance.msg)
i+1, msg, instance.msg)
}
}
}
func TestMarshal(t *testing.T) {
for i, instance := range codecTest_validInstances {
data, err := bencode.Marshal(instance.msg)
if err != nil {
t.Errorf("Error while marshalling valid msg #%d: %v", i + 1, err)
t.Errorf("Error while marshalling valid msg #%d: %v", i+1, err)
}
if bytes.Equal(data, instance.data) != true {
t.Errorf("Valid msg #%d marshalled wrong!\n\tGot : %q\n\tExpected: %q",
i + 1, data, instance.data)
i+1, data, instance.data)
}
}
}

View File

@ -10,16 +10,14 @@ import (
"go.uber.org/zap"
)
type Protocol struct {
previousTokenSecret, currentTokenSecret []byte
tokenLock sync.Mutex
transport *Transport
eventHandlers ProtocolEventHandlers
started bool
tokenLock sync.Mutex
transport *Transport
eventHandlers ProtocolEventHandlers
started bool
}
type ProtocolEventHandlers struct {
OnPingQuery func(*Message, net.Addr)
OnFindNodeQuery func(*Message, net.Addr)
@ -30,7 +28,6 @@ type ProtocolEventHandlers struct {
OnPingORAnnouncePeerResponse func(*Message, net.Addr)
}
func NewProtocol(laddr string, eventHandlers ProtocolEventHandlers) (p *Protocol) {
p = new(Protocol)
p.transport = NewTransport(laddr, p.onMessage)
@ -46,7 +43,6 @@ func NewProtocol(laddr string, eventHandlers ProtocolEventHandlers) (p *Protocol
return
}
func (p *Protocol) Start() {
if p.started {
zap.L().Panic("Attempting to Start() a mainline/Transport that has been already started! (Programmer error.)")
@ -57,12 +53,10 @@ func (p *Protocol) Start() {
go p.updateTokenSecret()
}
func (p *Protocol) Terminate() {
p.transport.Terminate()
}
func (p *Protocol) onMessage(msg *Message, addr net.Addr) {
switch msg.Y {
case "q":
@ -115,23 +109,23 @@ func (p *Protocol) onMessage(msg *Message, addr net.Addr) {
}
case "r":
// get_peers > find_node > ping / announce_peer
if len(msg.R.Token) != 0 { // The message should be a get_peers response.
if len(msg.R.Token) != 0 { // The message should be a get_peers response.
if !validateGetPeersResponseMessage(msg) {
zap.L().Debug("An invalid get_peers response received!")
return
}
if p.eventHandlers.OnGetPeersResponse != nil{
if p.eventHandlers.OnGetPeersResponse != nil {
p.eventHandlers.OnGetPeersResponse(msg, addr)
}
} else if len(msg.R.Nodes) != 0 { // The message should be a find_node response.
} else if len(msg.R.Nodes) != 0 { // The message should be a find_node response.
if !validateFindNodeResponseMessage(msg) {
zap.L().Debug("An invalid find_node response received!")
return
}
if p.eventHandlers.OnFindNodeResponse != nil{
if p.eventHandlers.OnFindNodeResponse != nil {
p.eventHandlers.OnFindNodeResponse(msg, addr)
}
} else { // The message should be a ping or an announce_peer response.
} else { // The message should be a ping or an announce_peer response.
if !validatePingORannouncePeerResponseMessage(msg) {
zap.L().Debug("An invalid ping OR announce_peer response received!")
return
@ -141,50 +135,47 @@ func (p *Protocol) onMessage(msg *Message, addr net.Addr) {
}
}
case "e":
zap.L().Sugar().Debugf("Protocol error received: `%s` (%d)", msg.E.Message, msg.E.Code)
// TODO: currently ignoring Server Error 202
if msg.E.Code != 202 {
zap.L().Sugar().Debugf("Protocol error received: `%s` (%d)", msg.E.Message, msg.E.Code)
}
default:
/* zap.L().Debug("A KRPC message of an unknown type received!",
zap.String("type", msg.Y))
zap.String("type", msg.Y))
*/
}
}
func (p *Protocol) SendMessage(msg *Message, addr net.Addr) {
p.transport.WriteMessages(msg, addr)
}
func NewPingQuery(id []byte) *Message {
panic("Not implemented yet!")
}
func NewFindNodeQuery(id []byte, target []byte) *Message {
return &Message{
Y: "q",
T: []byte("aa"),
Q: "find_node",
A: QueryArguments{
ID: id,
ID: id,
Target: target,
},
}
}
func NewGetPeersQuery(id []byte, info_hash []byte) *Message {
panic("Not implemented yet!")
}
func NewAnnouncePeerQuery(id []byte, implied_port bool, info_hash []byte, port uint16,
token []byte) *Message {
panic("Not implemented yet!")
}
func NewPingResponse(t []byte, id []byte) *Message {
return &Message{
Y: "r",
@ -195,36 +186,31 @@ func NewPingResponse(t []byte, id []byte) *Message {
}
}
func NewFindNodeResponse(t []byte, id []byte, nodes []CompactNodeInfo) *Message {
panic("Not implemented yet!")
}
func NewGetPeersResponseWithValues(t []byte, id []byte, token []byte, values []CompactPeer) *Message {
panic("Not implemented yet!")
}
func NewGetPeersResponseWithNodes(t []byte, id []byte, token []byte, nodes []CompactNodeInfo) *Message {
return &Message{
Y: "r",
T: t,
R: ResponseValues{
ID: id,
ID: id,
Token: token,
Nodes: nodes,
},
}
}
func NewAnnouncePeerResponse(t []byte, id []byte) *Message {
// Because they are indistinguishable.
return NewPingResponse(t, id)
}
func (p *Protocol) CalculateToken(address net.IP) []byte {
p.tokenLock.Lock()
defer p.tokenLock.Unlock()
@ -232,7 +218,6 @@ func (p *Protocol) CalculateToken(address net.IP) []byte {
return sum[:]
}
func (p *Protocol) VerifyToken(address net.IP, token []byte) bool {
p.tokenLock.Lock()
defer p.tokenLock.Unlock()
@ -241,7 +226,6 @@ func (p *Protocol) VerifyToken(address net.IP, token []byte) bool {
return false
}
func (p *Protocol) updateTokenSecret() {
for range time.Tick(10 * time.Minute) {
p.tokenLock.Lock()
@ -255,24 +239,20 @@ func (p *Protocol) updateTokenSecret() {
}
}
func validatePingQueryMessage(msg *Message) bool {
return len(msg.A.ID) == 20
}
func validateFindNodeQueryMessage(msg *Message) bool {
return len(msg.A.ID) == 20 &&
len(msg.A.Target) == 20
}
func validateGetPeersQueryMessage(msg *Message) bool {
return len(msg.A.ID) == 20 &&
len(msg.A.InfoHash) == 20
}
func validateAnnouncePeerQueryMessage(msg *Message) bool {
return len(msg.A.ID) == 20 &&
len(msg.A.InfoHash) == 20 &&
@ -280,7 +260,6 @@ func validateAnnouncePeerQueryMessage(msg *Message) bool {
len(msg.A.Token) > 0
}
func validatePingORannouncePeerResponseMessage(msg *Message) bool {
return len(msg.R.ID) == 20
}
@ -295,7 +274,6 @@ func validateFindNodeResponseMessage(msg *Message) bool {
return true
}
func validateGetPeersResponseMessage(msg *Message) bool {
return len(msg.R.ID) == 20 &&
len(msg.R.Token) > 0

View File

@ -1,15 +1,14 @@
package mainline
import (
"testing"
"net"
"testing"
)
var protocolTest_validInstances = []struct {
validator func(*Message) bool
msg Message
} {
}{
// ping Query:
{
validator: validatePingQueryMessage,
@ -42,7 +41,7 @@ var protocolTest_validInstances = []struct {
Y: "q",
Q: "find_node",
A: QueryArguments{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
Target: []byte("mnopqrstuvwxyz123456"),
},
},
@ -54,7 +53,7 @@ var protocolTest_validInstances = []struct {
T: []byte("aa"),
Y: "r",
R: ResponseValues{
ID: []byte("0123456789abcdefghij"),
ID: []byte("0123456789abcdefghij"),
Nodes: []CompactNodeInfo{},
},
},
@ -69,7 +68,7 @@ var protocolTest_validInstances = []struct {
ID: []byte("0123456789abcdefghij"),
Nodes: []CompactNodeInfo{
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
},
@ -86,35 +85,35 @@ var protocolTest_validInstances = []struct {
ID: []byte("0123456789abcdefghij"),
Nodes: []CompactNodeInfo{
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("zyxwvutsrqponmlkjihg"),
ID: []byte("zyxwvutsrqponmlkjihg"),
Addr: net.UDPAddr{IP: []byte("\xf5\x8e\x82\x8b"), Port: 6931, Zone: ""},
},
},
@ -129,7 +128,7 @@ var protocolTest_validInstances = []struct {
Y: "q",
Q: "get_peers",
A: QueryArguments{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
InfoHash: []byte("mnopqrstuvwxyz123456"),
},
},
@ -141,7 +140,7 @@ var protocolTest_validInstances = []struct {
T: []byte("aa"),
Y: "r",
R: ResponseValues{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
Token: []byte("aoeusnth"),
Values: []CompactPeer{
{IP: []byte("axje"), Port: 11893},
@ -157,15 +156,15 @@ var protocolTest_validInstances = []struct {
T: []byte("aa"),
Y: "r",
R: ResponseValues{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
Token: []byte("aoeusnth"),
Nodes: []CompactNodeInfo{
{
ID: []byte("abcdefghijklmnopqrst"),
ID: []byte("abcdefghijklmnopqrst"),
Addr: net.UDPAddr{IP: []byte("\x8b\x82\x8e\xf5"), Port: 3169, Zone: ""},
},
{
ID: []byte("zyxwvutsrqponmlkjihg"),
ID: []byte("zyxwvutsrqponmlkjihg"),
Addr: net.UDPAddr{IP: []byte("\xf5\x8e\x82\x8b"), Port: 6931, Zone: ""},
},
},
@ -180,42 +179,38 @@ var protocolTest_validInstances = []struct {
Y: "q",
Q: "announce_peer",
A: QueryArguments{
ID: []byte("abcdefghij0123456789"),
ID: []byte("abcdefghij0123456789"),
InfoHash: []byte("mnopqrstuvwxyz123456"),
Port: 6881,
Token: []byte("aoeusnth"),
Port: 6881,
Token: []byte("aoeusnth"),
},
},
},
// TODO: Add announce_peer Query with optional `implied_port` argument.
}
func TestValidators(t *testing.T) {
for i, instance := range protocolTest_validInstances {
if isValid := instance.validator(&instance.msg); !isValid {
t.Errorf("False-positive for valid msg #%d!", i + 1)
t.Errorf("False-positive for valid msg #%d!", i+1)
}
}
}
func TestNewFindNodeQuery(t *testing.T) {
if !validateFindNodeQueryMessage(NewFindNodeQuery([]byte("qwertyuopasdfghjklzx"), []byte("xzlkjhgfdsapouytrewq"))) {
t.Errorf("NewFindNodeQuery returned an invalid message!")
}
}
func TestNewPingResponse(t *testing.T) {
if !validatePingORannouncePeerResponseMessage(NewPingResponse([]byte("tt"), []byte("qwertyuopasdfghjklzx"))) {
t.Errorf("NewPingResponse returned an invalid message!")
}
}
func TestNewGetPeersResponseWithNodes(t *testing.T) {
if !validateGetPeersResponseMessage(NewGetPeersResponseWithNodes([]byte("tt"), []byte("qwertyuopasdfghjklzx"), []byte("token"), []CompactNodeInfo{})) {
t.Errorf("NewGetPeersResponseWithNodes returned an invalid message!")
}
}
}

View File

@ -1,7 +1,7 @@
package mainline
import (
"crypto/rand"
"math/rand"
"net"
"sync"
"time"
@ -13,14 +13,15 @@ import (
type TrawlingResult struct {
InfoHash metainfo.Hash
Peer torrent.Peer
Peer torrent.Peer
PeerIP net.IP
PeerPort int
}
type TrawlingService struct {
// Private
protocol *Protocol
started bool
protocol *Protocol
started bool
eventHandlers TrawlingServiceEventHandlers
trueNodeID []byte
@ -28,16 +29,14 @@ type TrawlingService struct {
// understandably) slices cannot be used as keys (since they are not hashable), and using arrays
// (or even the conversion between each other) is a pain; hence map[string]net.UDPAddr
// ^~~~~~
routingTable map[string]net.Addr
routingTable map[string]net.Addr
routingTableMutex *sync.Mutex
}
type TrawlingServiceEventHandlers struct {
OnResult func(TrawlingResult)
}
func NewTrawlingService(laddr string, eventHandlers TrawlingServiceEventHandlers) *TrawlingService {
service := new(TrawlingService)
service.protocol = NewProtocol(
@ -61,7 +60,6 @@ func NewTrawlingService(laddr string, eventHandlers TrawlingServiceEventHandlers
return service
}
func (s *TrawlingService) Start() {
if s.started {
zap.L().Panic("Attempting to Start() a mainline/TrawlingService that has been already started! (Programmer error.)")
@ -74,19 +72,17 @@ func (s *TrawlingService) Start() {
zap.L().Info("Trawling Service started!")
}
func (s *TrawlingService) Terminate() {
s.protocol.Terminate()
}
func (s *TrawlingService) trawl() {
for range time.Tick(1 * time.Second) {
s.routingTableMutex.Lock()
if len(s.routingTable) == 0 {
s.bootstrap()
} else {
zap.L().Info("Latest status:", zap.Int("n", len(s.routingTable)))
zap.L().Debug("Latest status:", zap.Int("n", len(s.routingTable)))
s.findNeighbors()
s.routingTable = make(map[string]net.Addr)
}
@ -94,7 +90,6 @@ func (s *TrawlingService) trawl() {
}
}
func (s *TrawlingService) bootstrap() {
bootstrappingNodes := []string{
"router.bittorrent.com:6881",
@ -119,7 +114,6 @@ func (s *TrawlingService) bootstrap() {
}
}
func (s *TrawlingService) findNeighbors() {
target := make([]byte, 20)
for nodeID, addr := range s.routingTable {
@ -135,7 +129,6 @@ func (s *TrawlingService) findNeighbors() {
}
}
func (s *TrawlingService) onGetPeersQuery(query *Message, addr net.Addr) {
s.protocol.SendMessage(
NewGetPeersResponseWithNodes(
@ -148,7 +141,6 @@ func (s *TrawlingService) onGetPeersQuery(query *Message, addr net.Addr) {
)
}
func (s *TrawlingService) onAnnouncePeerQuery(query *Message, addr net.Addr) {
var peerPort int
if query.A.ImpliedPort != 0 {
@ -166,8 +158,8 @@ func (s *TrawlingService) onAnnouncePeerQuery(query *Message, addr net.Addr) {
InfoHash: infoHash,
Peer: torrent.Peer{
// As we don't know the ID of the remote peer, set it empty.
Id: peerId,
IP: addr.(*net.UDPAddr).IP,
Id: peerId,
IP: addr.(*net.UDPAddr).IP,
Port: peerPort,
// "Ha" indicates that we discovered the peer through DHT Announce Peer (query); not
// sure how anacrolix/torrent utilizes that information though.
@ -176,6 +168,8 @@ func (s *TrawlingService) onAnnouncePeerQuery(query *Message, addr net.Addr) {
// that it doesn't.
SupportsEncryption: false,
},
PeerIP: addr.(*net.UDPAddr).IP,
PeerPort: peerPort,
})
s.protocol.SendMessage(
@ -187,14 +181,13 @@ func (s *TrawlingService) onAnnouncePeerQuery(query *Message, addr net.Addr) {
)
}
func (s *TrawlingService) onFindNodeResponse(response *Message, addr net.Addr) {
s.routingTableMutex.Lock()
defer s.routingTableMutex.Unlock()
for _, node := range response.R.Nodes {
if node.Addr.Port != 0 { // Ignore nodes who "use" port 0.
if len(s.routingTable) < 10000 {
if node.Addr.Port != 0 { // Ignore nodes who "use" port 0.
if len(s.routingTable) < 8000 {
s.routingTable[string(node.ID)] = &node.Addr
}
}

View File

@ -3,12 +3,11 @@ package mainline
import (
"net"
"go.uber.org/zap"
"github.com/anacrolix/torrent/bencode"
"go.uber.org/zap"
"strings"
)
type Transport struct {
conn *net.UDPConn
laddr *net.UDPAddr
@ -20,11 +19,11 @@ type Transport struct {
onMessage func(*Message, net.Addr)
}
func NewTransport(laddr string, onMessage func(*Message, net.Addr)) (*Transport) {
func NewTransport(laddr string, onMessage func(*Message, net.Addr)) *Transport {
transport := new(Transport)
transport.onMessage = onMessage
var err error; transport.laddr, err = net.ResolveUDPAddr("udp", laddr)
var err error
transport.laddr, err = net.ResolveUDPAddr("udp", laddr)
if err != nil {
zap.L().Panic("Could not resolve the UDP address for the trawler!", zap.Error(err))
}
@ -32,7 +31,6 @@ func NewTransport(laddr string, onMessage func(*Message, net.Addr)) (*Transport)
return transport
}
func (t *Transport) Start() {
// Why check whether the Transport `t` started or not, here and not -for instance- in
// t.Terminate()?
@ -56,12 +54,10 @@ func (t *Transport) Start() {
go t.readMessages()
}
func (t *Transport) Terminate() {
t.conn.Close()
}
// readMessages is a goroutine!
func (t *Transport) readMessages() {
buffer := make([]byte, 65536)
@ -87,7 +83,6 @@ func (t *Transport) readMessages() {
}
}
func (t *Transport) WriteMessages(msg *Message, addr net.Addr) {
data, err := bencode.Marshal(msg)
if err != nil {

View File

@ -6,7 +6,6 @@ import (
"testing"
)
func TestReadFromOnClosedConn(t *testing.T) {
// Initialization
laddr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
@ -31,7 +30,6 @@ func TestReadFromOnClosedConn(t *testing.T) {
}
}
func TestWriteToOnClosedConn(t *testing.T) {
// Initialization
laddr, err := net.ResolveUDPAddr("udp", "0.0.0.0:0")
@ -52,4 +50,4 @@ func TestWriteToOnClosedConn(t *testing.T) {
if !(err != nil && strings.HasSuffix(err.Error(), "use of closed network connection")) {
t.Fatalf("Unexpected suffix in the error message!")
}
}
}

View File

@ -1,15 +1,13 @@
package dht
import "magneticod/dht/mainline"
import "magnetico/magneticod/dht/mainline"
type TrawlingManager struct {
// private
output chan mainline.TrawlingResult
output chan mainline.TrawlingResult
services []*mainline.TrawlingService
}
func NewTrawlingManager(mlAddrs []string) *TrawlingManager {
manager := new(TrawlingManager)
manager.output = make(chan mainline.TrawlingResult)
@ -33,17 +31,14 @@ func NewTrawlingManager(mlAddrs []string) *TrawlingManager {
return manager
}
func (m *TrawlingManager) onResult(res mainline.TrawlingResult) {
m.output <- res
}
func (m *TrawlingManager) Output() <-chan mainline.TrawlingResult {
return m.output
}
func (m *TrawlingManager) Terminate() {
for _, service := range m.services {
service.Terminate()

157
magneticod/main.go Normal file
View File

@ -0,0 +1,157 @@
package main
import (
"fmt"
"net"
"os"
"os/signal"
"time"
"github.com/jessevdk/go-flags"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"magnetico/magneticod/bittorrent"
"magnetico/magneticod/dht"
"magnetico/persistence"
)
type cmdFlags struct {
DatabaseURL string `long:"database" description:"URL of the database." required:"yeah"`
TrawlerMlAddrs []string `long:"trawler-ml-addr" description:"Address(es) to be used by trawling DHT (Mainline) nodes." default:"0.0.0.0:0"`
TrawlerMlInterval uint `long:"trawler-ml-interval" description:"Trawling interval in integer deciseconds (one tenth of a second)."`
Verbose []bool `short:"v" long:"verbose" description:"Increases verbosity."`
}
type opFlags struct {
DatabaseURL string
TrawlerMlAddrs []string
TrawlerMlInterval time.Duration
Verbosity int
}
func main() {
loggerLevel := zap.NewAtomicLevel()
// Logging levels: ("debug", "info", "warn", "error", "dpanic", "panic", and "fatal").
logger := zap.New(zapcore.NewCore(
zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
zapcore.Lock(os.Stderr),
loggerLevel,
))
defer logger.Sync()
zap.ReplaceGlobals(logger)
// opFlags is the "operational flags"
opFlags, err := parseFlags()
if err != nil {
// Do not print any error messages as jessevdk/go-flags already did.
return
}
zap.L().Info("magneticod v0.7.0 has been started.")
zap.L().Info("Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>.")
zap.L().Info("Dedicated to Cemile Binay, in whose hands I thrived.")
switch opFlags.Verbosity {
case 0:
loggerLevel.SetLevel(zap.WarnLevel)
case 1:
loggerLevel.SetLevel(zap.InfoLevel)
default: // Default: i.e. in case of 2 or more.
// TODO: print the caller (function)'s name and line number!
loggerLevel.SetLevel(zap.DebugLevel)
}
zap.ReplaceGlobals(logger)
// Handle Ctrl-C gracefully.
interruptChan := make(chan os.Signal)
signal.Notify(interruptChan, os.Interrupt)
database, err := persistence.MakeDatabase(opFlags.DatabaseURL, false, logger)
if err != nil {
logger.Sugar().Fatalf("Could not open the database at `%s`: %s", opFlags.DatabaseURL, err.Error())
}
trawlingManager := dht.NewTrawlingManager(opFlags.TrawlerMlAddrs)
metadataSink := bittorrent.NewMetadataSink(2 * time.Minute)
// The Event Loop
for stopped := false; !stopped; {
select {
case result := <-trawlingManager.Output():
logger.Info("result: ", zap.String("hash", result.InfoHash.String()))
exists, err := database.DoesTorrentExist(result.InfoHash[:])
if err != nil {
zap.L().Fatal("Could not check whether torrent exists!", zap.Error(err))
} else if !exists {
metadataSink.Sink(result)
}
case metadata := <-metadataSink.Drain():
if err := database.AddNewTorrent(metadata.InfoHash, metadata.Name, metadata.Files); err != nil {
logger.Sugar().Fatalf("Could not add new torrent %x to the database: %s",
metadata.InfoHash, err.Error())
}
logger.Sugar().Infof("D I S C O V E R E D: `%s` %x", metadata.Name, metadata.InfoHash)
case <-interruptChan:
trawlingManager.Terminate()
stopped = true
}
}
if err = database.Close(); err != nil {
zap.L().Error("Could not close database!", zap.Error(err))
}
}
func parseFlags() (*opFlags, error) {
opF := new(opFlags)
cmdF := new(cmdFlags)
_, err := flags.Parse(cmdF)
if err != nil {
return nil, err
}
if cmdF.DatabaseURL == "" {
zap.S().Fatal("database")
} else {
opF.DatabaseURL = cmdF.DatabaseURL
}
if err = checkAddrs(cmdF.TrawlerMlAddrs); err != nil {
zap.S().Fatalf("Of argument (list) `trawler-ml-addr` %s", err.Error())
} else {
opF.TrawlerMlAddrs = cmdF.TrawlerMlAddrs
}
// 1 decisecond = 100 milliseconds = 0.1 seconds
if cmdF.TrawlerMlInterval == 0 {
opF.TrawlerMlInterval = time.Duration(1) * 100 * time.Millisecond
} else {
opF.TrawlerMlInterval = time.Duration(cmdF.TrawlerMlInterval) * 100 * time.Millisecond
}
opF.Verbosity = len(cmdF.Verbose)
return opF, nil
}
func checkAddrs(addrs []string) error {
for i, addr := range addrs {
// We are using ResolveUDPAddr but it works equally well for checking TCPAddr(esses) as
// well.
_, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return fmt.Errorf("with %d(th) address `%s`: %s", i+1, addr, err.Error())
}
}
return nil
}

View File

@ -21,4 +21,3 @@ func TestAppdirs(t *testing.T) {
t.Errorf("UserCacheDir returned an unexpected value! `%s`", returned)
}
}

View File

@ -1,2 +0,0 @@
Dockerfile
.dockerignore

View File

@ -1,10 +0,0 @@
FROM python:3.6
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
COPY . .
RUN pip install -e .
EXPOSE 8080
CMD ["python", "-mmagneticow", "--port", "8080", "--user", "user", "password"]

View File

@ -1,2 +0,0 @@
recursive-include magneticow/static *
recursive-include magneticow/templates *

View File

@ -1,205 +0,0 @@
==========
magneticow
==========
*Lightweight web interface for magnetico.*
**magneticow** is a lightweight web interface to search and to browse the torrents that its counterpart (**magneticod**)
discovered. It allows fast full text search of the names of the torrents, by correctly parsing them into their elements.
Installation
============
**magneticow** uses `gevent <http://www.gevent.org/>`_ as a "standalone WSGI container" (you can think of it as an
embedded HTTP server), and connects to the same SQLite 3 database that **magneticod** writes. Hence, **root or sudo
access is NOT required at any stage, during or after the installation process.**
Requirements
------------
- Python 3.5 or above.
Instructions
------------
**WARNING:**
**magnetico** currently does NOT have any filtering system NOR it allows individual torrents to be removed from the
database, and BitTorrent DHT network is full of the materials that are considered illegal in many countries
(violence, pornography, copyright infringing content, and even child-pornography). If you are afraid of the legal
consequences, or simply morally against (indirectly) assisting those content to spread around, follow the
**magneticow** installation instructions carefully to password-protect the web-interface from others.
\
**WARNING:**
**magneticow** is *NOT* designed to scale, and will fail miserably if you try to use it like a public torrent
website. This is a *deliberate* technical decision, not a bug or something to be fixed; another web interface with
more features to support such use cases and scalability *might* be developed, but **magneticow** will NEVER be the
case.
1. Download the latest version of **magneticow** from PyPI: ::
pip3 install magneticow --user
2. Add installation path to the ``$PATH``; append the following line to your ``~/.profile`` if you are using bash
*(you can skip to step 4 if you installed magneticod first as advised)* ::
export PATH=$PATH:~/.local/bin
**or if you are on macOS**, (assuming that you are using Python 3.5): ::
export PATH="${PATH}:${HOME}/Library/Python/3.5/bin/"
3. Activate the changes to ``$PATH`` (again, if you are using bash): ::
source ~/.profile
4. Confirm that it is running: ::
magneticow --port 8080 --user username_1 password_1 --user username_2 password_2
Do not forget to actually visit the website, and run a search without any keywords (i.e. simply press the enter
button); this should return a list of most recently discovered torrents. If **magneticod** has not been running long
enough, database might be completely empty and you might see no results (5 minutes should suffice to discover more
than a dozen torrents).
5. *(only for systemd users, skip the rest of the steps and proceed to the* `Using`_ *section if you are not a systemd
user or want to use a different solution)*
Download the magneticow systemd service file (at
`magneticow/systemd/magneticow.service <systemd/magneticow.service>`_) and expand the tilde symbol with the path of
your home directory. Also, choose a port (> 1024) for **magneticow** to listen on, and supply username(s) and
password(s).
For example, if my home directory is ``/home/bora``, and I want to create two users named ``bora`` and ``tolga`` with
passwords ``staatsangehörigkeit`` and ``bürgerschaft``, and then **magneticow** to listen on port 8080, this line ::
ExecStart=~/.local/bin/magneticow --port PORT --user USERNAME PASSWORD
should become this: ::
ExecStart=/home/bora/.local/bin/magneticow --port 8080 --user bora staatsangehörigkeit --user tolga bürgerschaft
Run ``echo ~`` to see the path of your own home directory, if you do not already know.
**WARNING:**
**At least one username and password MUST be supplied.** This is to protect the privacy of your **magneticow**
installation, although **beware that this does NOT encrypt the communication between your browser and the
server!**
6. Copy the magneticow systemd service file to your local systemd configuration directory: ::
cp magneticow.service ~/.config/systemd/user/
7. Start **magneticow**: ::
systemctl --user enable magneticow --now
**magneticow** should now be running under the supervision of systemd and it should also be automatically started
whenever you boot your machine.
You can check its status and most recent log entries using the following command: ::
systemctl --user status magneticow
To stop **magneticow**, issue the following: ::
systemctl --user stop magneticow
Using
=====
**magneticow** does not require user interference to operate, once it starts running. Hence, there is no "user manual",
although you should beware of these points:
1. **Resource Usage:**
To repeat it for the last time, **magneticow** is a lightweight web interface for magnetico and is not suitable for
handling many users simultaneously. Misusing **magneticow** will likely to lead high processor usage and increased
response times. If that is the case, you might consider lowering the priority of **magneticow** using ``renice``
command.
2. **Pre-Alpha Bugs:**
**magneticow** is *supposed* to work "just fine", but as being at pre-alpha stage, it's likely that you might find
some bugs. It will be much appreciated if you can report those bugs, so that **magneticow** can be improved. See the
next sub-section for how to mitigate the issue if you are *not* using systemd.
Automatic Restarting
--------------------
Due to minor bugs at this stage of its development, **magneticow** should be supervised by another program to be ensured
that it's running, and should be restarted if not. systemd service file supplied by **magneticow** implements that,
although (if you wish) you can also use a much more primitive approach using GNU screen (which comes pre-installed in
many GNU/Linux distributions):
1. Start screen session named ``magneticow``: ::
screen -S magneticow
2. Run **magneticow** forever: ::
until magneticow; do echo "restarting..."; sleep 5; done;
This will keep restarting **magneticow** after five seconds in case if it fails.
3. Detach the session by pressing Ctrl+A and after Ctrl+D.
4. If you wish to see the logs, or to kill **magneticow**, ``screen -r magneticow`` will attach the original screen
session back. **magneticow** will exit gracefully upon keyboard interrupt (Ctrl+C) [SIGINT].
Searching
---------
* Only the **titles** of the torrents are being searched.
* Search is case-insensitive.
* Titles that includes terms that are separated by space are returned from the search:
Example: ``king bad`` returns ``Stephen King - The Bazaar of Bad Dreams``
* If you would like terms to appear in the exact order you wrote them, enclose them in double quotes:
Example: ``"king bad"`` returns ``George Benson - Good King Bad``
* Use asteriks (``*``) to denote prefixes:
Example: ``The Godf*`` returns ``Francis Ford Coppola - The Godfather``
Asteriks works inside the double quotes too!
* Use caret (``^``) to indicate that the term it prefixes must be the first word in the title:
Example: ``linux`` returns ``Arch Linux`` while ``^linux`` would return ``Linux Mint``
* Caret works **inside** the double quotes **but not outside**:
Right: ``"^ubuntu linux"``
Wrong: ``^"ubuntu linux"``
* You can use ``AND``, ``OR`` and ``NOT`` and also parentheses for more complex queries:
Example: ``programming NOT (windows OR "os x" OR macos)``
Beware that the terms are all-caps and MUST be so.
======================= =======================================
Operator Enhanced Query Syntax Precedence
======================= =======================================
NOT Highest precedence (tightest grouping).
AND
OR Lowest precedence (loosest grouping).
======================= =======================================
REST-ful HTTP API
=================
**magneticow** offers a REST-ful HTTP API for 3rd-party applications to interact with **magnetico** setups. Examples
would be an Android app for searching torrents **magnetico** discovered and queueing them on your seedbox, or a
custom data analysis/statistics application developed for a research project on BitTorrent network. Nevertheless, it
is up to you what to do with it at the end of the day.
See `API documentation <./docs/API/README.md>`_ for more details.
License
=======
All the code is licensed under AGPLv3, unless otherwise stated in the source specific source. See ``COPYING`` file
in ``magnetico`` directory for the full license text.
----
Dedicated to Cemile Binay, in whose hands I thrived.
Bora M. ALPER <bora@boramalper.org>

View File

Before

Width:  |  Height:  |  Size: 531 B

After

Width:  |  Height:  |  Size: 531 B

View File

Before

Width:  |  Height:  |  Size: 148 B

After

Width:  |  Height:  |  Size: 148 B

View File

@ -39,7 +39,7 @@ html {
}
pre {
font-family: 'Noto Mono', monospace;
font-family: 'Noto Mono';
line-height: 1.2em;
}
@ -49,12 +49,6 @@ body {
line-height: 1.45;
}
@media (max-width: 616px) {
body {
padding: 1em 8px 1em 8px;
}
}
b {
font-weight: bold;
}

View File

@ -0,0 +1,27 @@
main {
display: flex;
align-items: center;
align-content: center;
height: calc(100vh - 2 * 3em);
width: calc(100vw - 2 * 3em);
}
main form {
max-width: 600px;
width: 100%;
margin-left: 0.5em;
}
main form input {
width: 100%;
}
main > div {
margin-right: 0.5em;
}
footer {
margin-top: 0.833em;
}

View File

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0">
<channel>
<title>{{ .Title }}</title>
{{ range .Items }}
<item>
<title>{{ .Title }}</title>
<guid>{{ .InfoHash }}</guid>
<enclosure url="magnet:?xt=urn:btih:{{ .InfoHash }}&amp;dn={{ .Title }}" type="application/x-bittorrent" />
</item>
{{ end }}
</channel>
</rss>

View File

@ -2,7 +2,6 @@
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>magneticow</title>
<link rel="stylesheet" href="static/styles/reset.css">
<link rel="stylesheet" href="static/styles/essential.css">
@ -11,14 +10,14 @@
</head>
<body>
<main>
<div id="magneticow"><b>magnetico<sup>w</sup></b>&#8203;<sub>(pre-alpha)</sub></div>
<div><b>magnetico<sup>w</sup></b>&#8203;<sub>(pre-alpha)</sub></div>
<form action="/torrents" method="get" autocomplete="off" role="search">
<input type="search" name="search" placeholder="Search the BitTorrent DHT" autofocus>
</form>
</main>
<footer>
~{{ . }} torrents available (see the <a href="/statistics">statistics</a>).
~{{ "{:,}".format(n_torrents) }} torrents available (see the <a href="/statistics">statistics</a>).
</footer>
</body>
</html>

View File

@ -3,11 +3,11 @@
<head>
<meta charset="utf-8">
<title>Statistics - magneticow</title>
<link rel="stylesheet" href=" {{ url_for('static', filename='styles/reset.css') }} ">
<link rel="stylesheet" href=" {{ url_for('static', filename='styles/essential.css') }} ">
<link rel="stylesheet" href=" {{ url_for('static', filename='styles/statistics.css') }} ">
<script defer src=" {{ url_for('static', filename='scripts/plotly-v1.26.1.min.js') }} "></script>
<script defer src=" {{ url_for('static', filename='scripts/statistics.js') }} "></script>
<link rel="stylesheet" href="static/styles/reset.css">
<link rel="stylesheet" href="static/styles/essential.css">
<link rel="stylesheet" href="static/styles/statistics.css">
<script defer src="static/scripts/plotly-v1.26.1.min.js"></script>
<script defer src="static/scripts/statistics.js"></script>
</head>
<body>
<header>

View File

@ -3,10 +3,10 @@
<head>
<meta charset="utf-8">
<title>{{ torrent.name }} - magnetico</title>
<link rel="stylesheet" href="{{ url_for('static', filename='styles/reset.css') }}">
<link rel="stylesheet" href="{{ url_for('static', filename='styles/essential.css') }}">
<link rel="stylesheet" href="{{ url_for('static', filename='styles/torrent.css') }}">
<script defer src="{{ url_for('static', filename='scripts/torrent.js') }}"></script>
<link rel="stylesheet" href="static/styles/reset.css">
<link rel="stylesheet" href="static/styles/essential.css">
<link rel="stylesheet" href="static/styles/torrent.css">
<script defer src="static/scripts/torrent.js"></script>
</head>
<body>
<header>
@ -19,7 +19,7 @@
<div id="title">
<h2>{{ torrent.name }}</h2>
<a href="magnet:?xt=urn:btih:{{ torrent.info_hash }}&dn={{ torrent.name }}">
<img src="{{ url_for('static', filename='assets/magnet.gif') }}" alt="Magnet link"
<img src="static/assets/magnet.gif" alt="Magnet link"
title="Download this torrent using magnet" />
<small>{{ torrent.info_hash }}</small>
</a>

View File

@ -0,0 +1,90 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{% if search %}"{{search}}"{% else %}Most recent torrents{% endif %} - magneticow</title>
<link rel="stylesheet" href="static/styles/reset.css">
<link rel="stylesheet" href="static/styles/essential.css">
<link rel="stylesheet" href="static/styles/torrents.css">
<!-- <script src="script.js"></script> -->
</head>
<body>
<header>
<div><a href="/"><b>magnetico<sup>w</sup></b></a>&#8203;<sub>(pre-alpha)</sub></div>
<form action="/torrents" method="get" autocomplete="off" role="search">
<input type="search" name="search" placeholder="Search the BitTorrent DHT" value="{{ search }}">
</form>
<div>
<a href="{{ subscription_url }}"><img src="static/assets/feed.png"
alt="feed icon" title="subscribe" /> subscribe</a>
</div>
</header>
<main>
<table>
<thead>
<tr>
<th><!-- Magnet link --></th>
<th>
{% if sorted_by == "name ASC" %}
<a href="/torrents/?search={{ search }}&sort_by=name+DESC">Name ▲</a>
{% elif sorted_by == "name DESC" %}
<a href="/torrents/?search={{ search }}&sort_by=name+ASC">Name ▼</a>
{% else %}
<a href="/torrents/?search={{ search }}&sort_by=name+ASC">Name</a>
{% endif %}
</th>
<th>
{% if sorted_by == "total_size ASC" %}
<a href="/torrents/?search={{ search }}&sort_by=total_size+DESC">Size ▲</a>
{% elif sorted_by == "total_size DESC" %}
<a href="/torrents/?search={{ search }}&sort_by=total_size+ASC">Size ▼</a>
{% else %}
<a href="/torrents/?search={{ search }}&sort_by=total_size+ASC">Size</a>
{% endif %}
</th>
<th>
{% if sorted_by == "discovered_on ASC" %}
<a href="/torrents/?search={{ search }}&sort_by=discovered_on+DESC">Discovered on ▲</a>
{% elif sorted_by == "discovered_on DESC" %}
<a href="/torrents/?search={{ search }}&sort_by=discovered_on+ASC">Discovered on ▼</a>
{% else %}
<a href="/torrents/?search={{ search }}&sort_by=discovered_on+DESC">Discovered on</a>
{% endif %}
</th>
</tr>
</thead>
<tbody>
{% for torrent in torrents %}
<tr>
<td><a href="magnet:?xt=urn:btih:{{ torrent.info_hash }}&dn={{ torrent.name }}">
<img src="static/assets/magnet.gif') }}" alt="Magnet link"
title="Download this torrent using magnet" /></a></td>
<td><a href="/torrents/{{ torrent.info_hash }}/{{ torrent.name }}">{{ torrent.name }}</a></td>
<td>{{ torrent.size }}</td>
<td>{{ torrent.discovered_on }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</main>
<footer>
<form action="/torrents" method="get">
<button {% if page == 0 %}disabled{% endif %}>Previous</button>
<input type="text" name="search" value="{{ search }}" hidden>
{% if sorted_by %}
<input type="text" name="sort_by" value="{{ sorted_by }}" hidden>
{% endif %}
<input type="number" name="page" value="{{ page - 1 }}" hidden>
</form>
<form action="/torrents" method="get">
<button {% if not next_page_exists %}disabled{% endif %}>Next</button>
<input type="text" name="search" value="{{ search }}" hidden>
{% if sorted_by %}
<input type="text" name="sort_by" value="{{ sorted_by }}" hidden>
{% endif %}
<input type="number" name="page" value="{{ page + 1 }}" hidden>
</form>
</footer>
</body>
</html>

View File

@ -1,33 +0,0 @@
# magnetico<sup>w</sup> API Documentation
**magneticow** offers a REST-ful HTTP API for 3rd-party applications to interact with **magnetico** setups. Examples
would be an Android app for searching torrents **magnetico** discovered and queueing them on your seedbox, or a custom
data analysis/statistics application developed for a research project on BitTorrent network. Nevertheless, it is up to
you what to do with it at the end of the day.
The rules stated below apply to the API as a whole and across all versions:
* The API root is `/api`.
* Right after the API root MUST come the API version in the format `vX` (*e.g.* `/api/v1`).
* Different API versions MAY be backwards-incompatible, but any changes within the same version of the API MUST NOT
break the backwards-compatibility.
* Version 0 (zero) of the API is considered to be experimental and MAY be backwards-incompatible.
* API documentation MUST be considered as a contract between the developers of **magnetico** and **magneticow**, and of
3rd party application developers, and MUST be respected as such.
The documentation for the API is organised as described below:
* Each version of the API MUST be documented in a separate document named `vX.md`. Everything (*i.e.* each
functionality, status codes, etc.) MUST be clearly indicated when they are introduced.
* Each document MUST clearly indicate at the beginning whether it is *finalized* or not. Not-finalised documents (called
*Draft*) CAN be changed and later finalised, but once finalised, documents MUST NOT be modified afterwards.
* Documentation for the version 0 (zero) of the API MUST be considered free from the rules above, and always considered
a *draft*.
* Each document MUST be self-standing, that is, MUST be completely understandable and unambiguous without requiring to
refer another document.
* Hence, use quotations when necessary and reference them.
Remarks:
* Use British English, and serial comma.
* Documents should be formatted in GitHub Flavoured Markdown.

View File

@ -1,93 +0,0 @@
# magnetico<sup>w</sup> API v0 Documentation
__Status:__ Draft (NOT Finalised)
__Last Updated:__ 13 June 2017 _by_ Bora M. Alper.
## General remarks
* All requests MUST be encoded in UTF-8 and same applies for responses too.
* Clients MUST set `Content-type` header to ` application/json; charset=utf-8` for all their requests.
* All dates MUST be in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format, same applies for responses too.
## Collections
* `/torrents`, representing all the torrents in the database.
* `/statistics`, representing all the statistical data about the database.
## Methods defined on `/torrents` collection
### GET
__Parameters:__
* **`query`:** string to be queried in the torrent titles. If absent, all torrents will be returned.
* __ATTENTION:__ `query` MUST NOT be an empty string, if supplied.
* __Remark:__ The format of the `query` is to be clarified! [TODO]
* **`limit`:** number of results to be returned per page. If absent, it's by default 20.
* **`sort_by`:** string enum, MUST be one of the strings `discovered_on`, `relevance` (if `query` is non-empty), `size`,
separated by space, and followed by one of the strings `ASC` or `DESC`, for ascending and descending, respectively.
* __ATTENTION:__ If `sort_by` is `relevance`, `query` MUST be supplied.
__Response:__
* __Status code:__ `200 OK`
```json
[
{
"info_hash": "ABCDEFABCDEFABCDEFAB",
"title": "EXAMPLE TITLE",
"discovered_on": "2017-06-13T14:06:01Z",
"files": [
{"path": "file.ext", "size": 1024},
{"path": "directory/file_2.ext", "size": 2048},
{"path": "directory/file_3.ext", "size": 4096},
...
]
},
...
]
```
* `info_hash` is a hex-encoded info hash of the torrent.
* `discovered_on` is the date the torrent is discovered on.
* __ATTENTION:__ Due to ambiguities about `time()` function in the C standard library, the effect of leap seconds, and
(being pedantic) even the epoch is **platform-dependent**. (The epoch is quite often 1970-01-01T00:00:00Z.)
* `files` is a list of files denoted by their relative-paths. `/` character (U+002F) is used as a path separator, hence
it can be safely assumed that none of the directory or file name can contain it. `\0` (U+0000) is also prohibited to
appear _anywhere_ in the path.
* __Remark:__ These restrictions are not in the BitTorrent specifications. So how does **magnetico** enforce them? Well,
**magneticod** simply ignores torrents with *illegal* file names!
## Methods defined on `/statistics` collection
### GET
__Parameters:__
* **`group_by`:** is how data-points are grouped by; MUST be one of the strings `hour`, `day`, `week`, `month`, or
`year`.
* **`period`:** is two dates, separated by a space character (U+0020), denoting start and end, both inclusive.
* __ATTENTION:__ Depending on the `group_by`, `datetime` WILL be in one of the following formats:
- `yyyy` for `year` (_e.g._ `2017`)
- `yyyy-mm` for `month` (_e.g._ `2017-06`)
- `yyyy-Ww` for `week` (_e.g._ `2017-W25`)
- `yyyy-mm-dd` for `day` (_e.g._ `2017-06-04`)
- `yyyy-mm-ddThh` for `hour` (_e.g._ `2017-06-04T02`)
__Response:__
* __Status code:__ `200 OK`
```json
[
{
"datetime": "2017-06",
"new_torrents": 2591558
},
{
"datetime": "2017-07",
"new_torrents": 3448754
},
...
]
```
* `datetime` is the date (and if applicable, time) of a data-point.
* __Remark:__ Depending on the `group_by`, `datetime` WILL have the *same* format with `period`.

View File

@ -1,14 +0,0 @@
# magneticow - Lightweight web interface for magnetico.
# Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>
# Dedicated to Cemile Binay, in whose hands I thrived.
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.

View File

@ -1,93 +0,0 @@
# magneticow - Lightweight web interface for magnetico.
# Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>
# Dedicated to Cemile Binay, in whose hands I thrived.
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import argparse
import logging
import sys
import textwrap
import gevent.wsgi
from magneticow import magneticow
def main() -> int:
    """Entry point: configure logging, start the gevent WSGI server, and
    serve until interrupted.

    Returns 0 on a clean Ctrl-C shutdown, 1 if serve_forever() returned for
    any other reason. The database handle is always released on the way out.
    """
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s")

    arguments = parse_args()
    # Monkey-patch the parsed arguments onto the Flask app so request handlers
    # can reach them through flask.current_app.arguments.
    magneticow.app.arguments = arguments

    server = gevent.wsgi.WSGIServer((arguments.host, arguments.port), magneticow.app)

    magneticow.initialize_magneticod_db()

    exit_code = 1
    try:
        logging.info("magneticow is ready to serve!")
        server.serve_forever()
    except KeyboardInterrupt:
        exit_code = 0
    finally:
        # Always close the database, whatever stopped the server.
        magneticow.close_db()
    return exit_code
def parse_args() -> argparse.Namespace:
    """Build the command-line interface and parse sys.argv.

    Returns a Namespace with `host` (default ""), `port` (required), and the
    mutually exclusive `noauth` flag / `user` list of [USERNAME, PASSWORD]
    pairs (exactly one of the two must be supplied).
    """
    epilogue = textwrap.dedent("""\
        Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>
        Dedicated to Cemile Binay, in whose hands I thrived.
        This program is free software: you can redistribute it and/or modify it under
        the terms of the GNU Affero General Public License as published by the Free
        Software Foundation, either version 3 of the License, or (at your option) any
        later version.
        This program is distributed in the hope that it will be useful, but WITHOUT ANY
        WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
        PARTICULAR PURPOSE. See the GNU Affero General Public License for more
        details.
        You should have received a copy of the GNU Affero General Public License along
        with this program. If not, see <http://www.gnu.org/licenses/>.
    """)

    parser = argparse.ArgumentParser(
        description="Lightweight web interface for magnetico.",
        epilog=epilogue,
        allow_abbrev=False,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    # Network binding.
    parser.add_argument(
        "--host", action="store", type=str, required=False, default="",
        help="the host address magneticow web server should listen on"
    )
    parser.add_argument(
        "--port", action="store", type=int, required=True,
        help="the port number magneticow web server should listen on"
    )

    # Authentication: exactly one of --no-auth / --user is required.
    auth_group = parser.add_mutually_exclusive_group(required=True)
    auth_group.add_argument(
        "--no-auth", dest="noauth", action="store_true", default=False,
        help="make the web interface available without authentication"
    )
    auth_group.add_argument(
        "--user", action="append", nargs=2, metavar=("USERNAME", "PASSWORD"), type=str,
        help="the pair(s) of username and password for basic HTTP authentication"
    )

    return parser.parse_args(sys.argv[1:])
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,61 +0,0 @@
# magneticow - Lightweight web interface for magnetico.
# Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>
# Dedicated to Cemile Binay, in whose hands I thrived.
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import functools
import hashlib
import flask
# Adapted from: http://flask.pocoo.org/snippets/8/
# (c) Copyright 2010 - 2017 by Armin Ronacher
# BEGINNING OF THE 3RD PARTY COPYRIGHTED CONTENT
def is_authorized(supplied_username, supplied_password):
    """Return True iff the supplied username/password pair matches one of the
    --user pairs given on the command line.

    Uses hmac.compare_digest so each comparison runs in constant time,
    preventing a timing side-channel from leaking how many leading characters
    of a credential are correct (plain `==` short-circuits on the first
    mismatching character).
    """
    import hmac

    # Because we do monkey-patch! [in magneticow.__main__.py:main()]
    app = flask.current_app
    for username, password in app.arguments.user:  # pylint: disable=maybe-no-member
        # Encode to bytes: compare_digest rejects non-ASCII str operands.
        username_ok = hmac.compare_digest(supplied_username.encode("utf-8"), username.encode("utf-8"))
        password_ok = hmac.compare_digest(supplied_password.encode("utf-8"), password.encode("utf-8"))
        # Evaluate both before combining so the password check is not skipped
        # on a username mismatch (keeps the timing uniform).
        if username_ok and password_ok:
            return True
    return False
def authenticate():
    """Build a 401 response that makes the browser prompt for HTTP Basic auth."""
    body = (
        "Could not verify your access level for that URL.\n"
        "You have to login with proper credentials"
    )
    headers = {"WWW-Authenticate": 'Basic realm="Login Required"'}
    return flask.Response(body, 401, headers)
def requires_auth(f):
    """Decorator: answer with a 401 challenge unless the request carries valid
    HTTP Basic credentials, or authentication is disabled via --no-auth."""
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        if flask.current_app.arguments.noauth:
            return f(*args, **kwargs)
        credentials = flask.request.authorization
        if credentials and is_authorized(credentials.username, credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
# END OF THE 3RD PARTY COPYRIGHTED CONTENT
def generate_feed_hash(username: str, password: str, filter_: str) -> str:
    """
    Deterministically generates the feed hash from given username, password, and filter.
    Hash is the hex encoding of the SHA256 sum.

    The three fields are joined with NUL (U+0000) separators before hashing.
    """
    material = "\0".join((username, password, filter_))
    return hashlib.sha256(material.encode()).hexdigest()

View File

@ -1,295 +0,0 @@
# magneticow - Lightweight web interface for magnetico.
# Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>
# Dedicated to Cemile Binay, in whose hands I thrived.
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import collections
import datetime as dt
from datetime import datetime
import logging
import sqlite3
import os
import appdirs
import flask
from magneticow import utils
from magneticow.authorization import requires_auth, generate_feed_hash
# Lightweight record types handed to the Jinja templates.
File = collections.namedtuple("file", ["path", "size"])
Torrent = collections.namedtuple("torrent", ["info_hash", "name", "size", "discovered_on", "files"])
# The WSGI application object; served by gevent (see __main__.py:main()).
app = flask.Flask(__name__)
app.config.from_object(__name__)
# Module-level handle to magneticod's SQLite database; opened by
# initialize_magneticod_db() and released by close_db().
# TODO: We should have been able to use flask.g but it does NOT persist across different requests so we resort back to
# this. Investigate the cause and fix it (I suspect of Gevent).
magneticod_db = None
@app.route("/")
@requires_auth
def home_page():
    """Render the landing page with the (approximate) number of torrents."""
    with magneticod_db:
        # COUNT(ROWID) is much more inefficient since it scans the whole table, so use MAX(ROWID)
        cursor = magneticod_db.execute("SELECT MAX(ROWID) FROM torrents ;")
        n_torrents = cursor.fetchone()[0] or 0
    return flask.render_template("homepage.html", n_torrents=n_torrents)
@app.route("/torrents/")
@requires_auth
def torrents():
    """Render a paginated, optionally filtered and sorted, list of torrents.

    Query parameters:
      * search:  full-text query against torrent names (optional).
      * page:    zero-based page number (optional, default 0).
      * sort_by: "<column> <ASC|DESC>" where column is one of name,
                 total_size, discovered_on (optional).
    """
    search = flask.request.args.get("search")
    page = int(flask.request.args.get("page", 0))

    context = {
        "search": search,
        "page": page
    }

    SQL_query = """
        SELECT
            info_hash,
            name,
            total_size,
            discovered_on
        FROM torrents
    """
    if search:
        SQL_query += """
        INNER JOIN (
            SELECT docid AS id, rank(matchinfo(fts_torrents, 'pcnxal')) AS rank
            FROM fts_torrents
            WHERE name MATCH ?
        ) AS ranktable USING(id)
        """
    SQL_query += """
        ORDER BY {}
        LIMIT 20 OFFSET ?
    """

    sort_by = flask.request.args.get("sort_by")
    allowed_sorts = [
        None,
        "name ASC",
        "name DESC",
        "total_size ASC",
        "total_size DESC",
        "discovered_on ASC",
        "discovered_on DESC"
    ]
    # `sort_by` is interpolated into the SQL below, so this whitelist check is
    # what keeps the query free of SQL injection -- do not relax it.
    if sort_by not in allowed_sorts:
        # BUGFIX: the error message had an unbalanced backtick around sort_by.
        return flask.Response("Invalid value for `sort_by`! (Allowed values are %s)" % (allowed_sorts, ), 400)

    # Break ties in the requested ordering by FTS relevance (searches) or by
    # insertion order (listings).
    if search:
        SQL_query = SQL_query.format(sort_by + ", " + "rank ASC" if sort_by else "rank ASC")
    else:
        SQL_query = SQL_query.format(sort_by + ", " + "id DESC" if sort_by else "id DESC")

    with magneticod_db:
        if search:
            cur = magneticod_db.execute(SQL_query, (search, 20 * page))
        else:
            cur = magneticod_db.execute(SQL_query, (20 * page, ))
        context["torrents"] = [Torrent(t[0].hex(), t[1], utils.to_human_size(t[2]),
                                       datetime.fromtimestamp(t[3]).strftime("%d/%m/%Y"), [])
                               for t in cur.fetchall()]

    # A full page implies there may be more results on the next one.
    context["next_page_exists"] = len(context["torrents"]) >= 20

    if app.arguments.noauth:
        # BUGFIX: the `=` after `filter` was missing ("?filter%s"), producing
        # subscription URLs like "/feed/?filterubuntu".
        context["subscription_url"] = "/feed/?filter=%s" % search
    else:
        username, password = flask.request.authorization.username, flask.request.authorization.password
        context["subscription_url"] = "/feed?filter=%s&hash=%s" % (
            search, generate_feed_hash(username, password, search))

    if sort_by:
        context["sorted_by"] = sort_by

    return flask.render_template("torrents.html", **context)
@app.route("/torrents/<info_hash>/", defaults={"name": None})
@requires_auth
def torrent_redirect(**kwargs):
    """Permanently redirect /torrents/<hash>/ to /torrents/<hash>/<name>."""
    try:
        info_hash = bytes.fromhex(kwargs["info_hash"])
        assert len(info_hash) == 20
    except (AssertionError, ValueError):  # not a proper hex-encoded 20-byte info hash
        return flask.abort(400)

    with magneticod_db:
        cur = magneticod_db.execute("SELECT name FROM torrents WHERE info_hash=? LIMIT 1;", (info_hash,))
        row = cur.fetchone()
    if row is None:  # no torrent with this info hash
        return flask.abort(404)

    return flask.redirect("/torrents/%s/%s" % (kwargs["info_hash"], row[0]), code=301)
@app.route("/torrents/<info_hash>/<name>")
@requires_auth
def torrent(**kwargs):
    """Render the details page (name, size, file listing) of one torrent."""
    try:
        info_hash = bytes.fromhex(kwargs["info_hash"])
        assert len(info_hash) == 20
    except (AssertionError, ValueError):  # not a proper hex-encoded 20-byte info hash
        return flask.abort(400)

    with magneticod_db:
        cur = magneticod_db.execute("SELECT id, name, discovered_on FROM torrents WHERE info_hash=? LIMIT 1;",
                                    (info_hash,))
        row = cur.fetchone()
        if row is None:  # no torrent with this info hash
            return flask.abort(404)
        torrent_id, name, discovered_on = row

        cur = magneticod_db.execute("SELECT path, size FROM files WHERE torrent_id=?;", (torrent_id,))
        raw_files = cur.fetchall()

    total_size = sum(size for _, size in raw_files)
    files = [File(path, utils.to_human_size(size)) for path, size in raw_files]

    context = {
        "torrent": Torrent(info_hash.hex(), name, utils.to_human_size(total_size),
                           datetime.fromtimestamp(discovered_on).strftime("%d/%m/%Y"), files)
    }
    return flask.render_template("torrent.html", **context)
@app.route("/statistics")
@requires_auth
def statistics():
    """Render a chart of the number of torrents discovered per day, over the
    past 30 days."""
    # Ahhh...
    # Time is hard, really. magneticod used time.time() to save when a torrent is discovered, unaware that none of the
    # specifications say anything about the timezones (or their irrelevance to the UNIX time) and about leap seconds in
    # a year.
    # Nevertheless, we still use it. In future, before v1.0.0, we may change it as we wish, offering a migration
    # solution for the current users. But in the meanwhile, be aware that all your calculations will be a bit lousy,
    # though within tolerable limits for a torrent search engine.
    with magneticod_db:
        # latest_today is the latest UNIX timestamp of today, the very last second.
        # BUGFIX: this used strftime("%s"), which is a glibc extension rather
        # than standard C -- it is unsupported on e.g. Windows. Compute the
        # (local-time) timestamp portably instead.
        midnight_tomorrow = dt.datetime.combine(dt.date.today() + dt.timedelta(days=1), dt.time.min)
        latest_today = int(midnight_tomorrow.timestamp()) - 1

        # Retrieve all the torrents discovered in the past 30 days (30 days * 24 hours * 60 minutes * 60 seconds...)
        # Also, see http://www.sqlite.org/lang_datefunc.html for details of `date()`.
        #   Function    Equivalent strftime()
        #   date(...)   strftime('%Y-%m-%d', ...)
        cur = magneticod_db.execute(
            "SELECT date(discovered_on, 'unixepoch') AS day, count() FROM torrents WHERE discovered_on >= ? "
            "GROUP BY day;",
            (latest_today - 30 * 24 * 60 * 60, )
        )
        results = cur.fetchall()  # for instance, [('2017-04-01', 17428), ('2017-04-02', 28342)]

    return flask.render_template("statistics.html", **{
        # We directly substitute them in the JavaScript code.
        "dates": str([t[0] for t in results]),
        "amounts": str([t[1] for t in results])
    })
@app.route("/feed")
def feed():
    """Serve an RSS feed of either the newest torrents (empty filter) or a
    search's results.

    When authentication is enabled, the request must carry a `hash` parameter
    matching generate_feed_hash() for some configured user, so feed readers
    never need the actual credentials.
    """
    filter_ = flask.request.args["filter"]

    # Check for all possible users who might be requesting.
    # pylint disabled: because we do monkey-patch! [in magneticow.__main__.py:main()]
    if not app.arguments.noauth:
        hash_ = flask.request.args["hash"]
        authorized = any(
            generate_feed_hash(username, password, filter_) == hash_
            for username, password in app.arguments.user  # pylint: disable=maybe-no-member
        )
        if not authorized:
            return flask.Response(
                "Could not verify your access level for that URL (wrong hash).\n",
                401
            )

    context = {}
    if filter_:
        context["title"] = "`%s` - magneticow" % (filter_,)
        with magneticod_db:
            cur = magneticod_db.execute(
                "SELECT "
                "    name, "
                "    info_hash "
                "FROM torrents "
                "INNER JOIN ("
                "    SELECT docid AS id, rank(matchinfo(fts_torrents, 'pcnxal')) AS rank "
                "    FROM fts_torrents "
                "    WHERE name MATCH ? "
                "    ORDER BY rank ASC"
                "    LIMIT 50"
                ") AS ranktable USING(id);",
                (filter_, )
            )
            context["items"] = [{"title": r[0], "info_hash": r[1].hex()} for r in cur]
    else:
        context["title"] = "The Newest Torrents - magneticow"
        with magneticod_db:
            cur = magneticod_db.execute(
                "SELECT "
                "    name, "
                "    info_hash "
                "FROM torrents "
                "ORDER BY id DESC LIMIT 50"
            )
            context["items"] = [{"title": r[0], "info_hash": r[1].hex()} for r in cur]

    return flask.render_template("feed.xml", **context), 200, {"Content-Type": "application/rss+xml; charset=utf-8"}
def initialize_magneticod_db() -> None:
    """Open magneticod's SQLite database and prepare it for serving.

    Creates the persistent indices magneticow relies on, a temporary FTS4
    table over torrent names, and a temporary trigger that keeps the FTS
    table in sync with inserts made while magneticow is running.
    """
    global magneticod_db

    logging.info("Connecting to magneticod's database...")
    magneticod_db_path = os.path.join(appdirs.user_data_dir("magneticod"), "database.sqlite3")
    magneticod_db = sqlite3.connect(magneticod_db_path, isolation_level=None)

    logging.info("Preparing for the full-text search (this might take a while)...")
    with magneticod_db:
        statements = (
            "PRAGMA journal_mode=WAL;",
            "CREATE INDEX IF NOT EXISTS discovered_on_index ON torrents (discovered_on);",
            "CREATE INDEX IF NOT EXISTS info_hash_index ON torrents (info_hash);",
            "CREATE INDEX IF NOT EXISTS file_info_hash_index ON files (torrent_id);",
            "CREATE VIRTUAL TABLE temp.fts_torrents USING fts4(name);",
            "INSERT INTO fts_torrents (docid, name) SELECT id, name FROM torrents;",
            "INSERT INTO fts_torrents (fts_torrents) VALUES ('optimize');",
            "CREATE TEMPORARY TRIGGER on_torrents_insert AFTER INSERT ON torrents FOR EACH ROW BEGIN"
            "    INSERT INTO fts_torrents (docid, name) VALUES (NEW.id, NEW.name);"
            "END;",
        )
        for statement in statements:
            magneticod_db.execute(statement)

    # rank() is called from the search queries; see utils.rank.
    magneticod_db.create_function("rank", 1, utils.rank)
def close_db() -> None:
    """Close the connection to magneticod's database, if one was opened."""
    logging.info("Closing magneticod database...")
    if magneticod_db is None:
        return
    magneticod_db.close()

View File

@ -1,69 +0,0 @@
# magneticow - Lightweight web interface for magnetico.
# Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>
# Dedicated to Cemile Binay, in whose hands I thrived.
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from math import log10
from struct import unpack_from
# Source: http://stackoverflow.com/a/1094933
# (primarily: https://web.archive.org/web/20111010015624/http://blogmag.net/blog/read/38/Print_human_readable_file_size)
def to_human_size(num, suffix='B'):
    """Format a byte count with binary (IEC, 1024-based) unit prefixes.

    E.g. 1536 -> "1.5 KiB". Values of a yobibyte and beyond are all rendered
    with the 'Yi' prefix.
    """
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024:
            return "%3.1f %s%s" % (value, prefix, suffix)
        value /= 1024
    return "%.1f %s%s" % (value, 'Yi', suffix)
def rank(blob):
    """SQLite rank() callback: score an FTS4 matchinfo('pcnxal') blob.

    Parses the matchinfo binary format (little-endian uint32s) and scores the
    match with Okapi BM25, negated so that ORDER BY ... ASC yields the best
    match first.
    """
    # TODO: is there a way to futher optimize this?
    # Header: p = number of phrases, c = number of columns, n = number of rows.
    p, c, n = unpack_from("=LLL", blob, 0)

    hit_stats = []  # list of (hits_this_row, hits_all_rows, rows_with_hit) tuples
    for offset in range(12, 12 + 3 * c * p * 4, 3 * 4):
        x0, x1, x2 = unpack_from("=LLL", blob, offset)
        if x1 != 0:  # skip if it's index column
            hit_stats.append((x0, x1, x2))

    # Ignore the first column (torrent_id)
    avgdl = unpack_from("=L", blob, 12 + 3 * c * p * 4)[0]
    # Ignore the first column (torrent_id)
    doc_length = unpack_from("=L", blob, (12 + 3 * c * p * 4) + 4 * c)[0]

    # Multiply by -1 so that sorting in the ASC order would yield the best match first
    return -1 * okapi_bm25(term_frequencies=[hit[0] for hit in hit_stats], dl=doc_length,
                           avgdl=avgdl, N=n, nq=[hit[2] for hit in hit_stats])
# TODO: check if I got it right =)
def okapi_bm25(term_frequencies, dl, avgdl, N, nq, k1=1.2, b=0.75):
    """
    Okapi BM25 relevance score of a single document for a query.

    :param term_frequencies: List of frequencies of each term in the document.
    :param dl: Length of the document in words.
    :param avgdl: Average document length in the collection.
    :param N: Total number of documents in the collection.
    :param nq: List of each numbers of documents containing term[i] for each term.
    :param k1: Adjustable constant; = 1.2 in FTS5 extension of SQLite3.
    :param b: Adjustable constant; = 0.75 in FTS5 extension of SQLite3.
    :return: The BM25 score; higher means more relevant.
    """
    # The document-length normalisation factor is the same for every term.
    length_norm = k1 * (1 - b + b * dl / avgdl)
    score = 0.0
    for tf, n_docs in zip(term_frequencies, nq):
        idf = log10((N - n_docs + 0.5) / (n_docs + 0.5))
        score += idf * (tf * (k1 + 1)) / (tf + length_norm)
    return score

304
magneticow/main.go Normal file
View File

@ -0,0 +1,304 @@
package main
import (
"html/template"
"log"
"net/http"
"os"
"strings"
"github.com/dustin/go-humanize"
"github.com/gorilla/mux"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"encoding/hex"
"magnetico/persistence"
"strconv"
"time"
"unsafe"
)
// N_TORRENTS is the number of torrents listed per page.
const N_TORRENTS = 20
// Parsed (bindata-embedded) templates, keyed by page name; populated in main().
var templates map[string]*template.Template
// Handle to magneticod's database; opened in main().
var database persistence.Database
// ========= TD: TemplateData =========
// HomepageTD is the template data of the homepage.
type HomepageTD struct {
Count uint
}
// TorrentsTD is the template data of the torrent listing page.
type TorrentsTD struct {
Search string
SubscriptionURL string
Torrents []persistence.TorrentMetadata
Before int64
After int64
SortedBy string
NextPageExists bool
}
// TorrentTD is the template data of the single-torrent page (not used yet).
type TorrentTD struct {
}
// FeedTD is the template data of the RSS feed (not used yet).
type FeedTD struct {
}
// StatisticsTD is the template data of the statistics page (not used yet).
type StatisticsTD struct {
}
// main wires up logging, the HTTP routes, the parsed templates, and the
// database connection, then serves on :8080 until an error occurs.
func main() {
	loggerLevel := zap.NewAtomicLevel()
	// Logging levels: ("debug", "info", "warn", "error", "dpanic", "panic", and "fatal").
	logger := zap.New(zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stderr),
		loggerLevel,
	))
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	zap.L().Info("magneticow v0.7.0 has been started.")
	zap.L().Info("Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>.")
	zap.L().Info("Dedicated to Cemile Binay, in whose hands I thrived.")

	router := mux.NewRouter()
	router.HandleFunc("/", rootHandler)
	router.HandleFunc("/torrents", torrentsHandler)
	router.HandleFunc("/torrents/{infohash:[a-z0-9]{40}}", torrentsInfohashHandler)
	router.HandleFunc("/statistics", statisticsHandler)
	router.PathPrefix("/static").HandlerFunc(staticHandler)
	router.HandleFunc("/feed", feedHandler)

	// Helper functions available inside the HTML templates.
	templateFunctions := template.FuncMap{
		"add": func(augend int, addends int) int {
			return augend + addends
		},

		"subtract": func(minuend int, subtrahend int) int {
			return minuend - subtrahend
		},

		"bytesToHex": func(bytes []byte) string {
			return hex.EncodeToString(bytes)
		},

		"unixTimeToYearMonthDay": func(s int64) string {
			tm := time.Unix(s, 0)
			// Go layouts use the reference time Mon Jan 2 15:04:05 MST 2006;
			// "02/01/2006" therefore renders as DD/MM/YYYY.
			return tm.Format("02/01/2006")
		},

		"humanizeSize": func(s uint64) string {
			return humanize.IBytes(s)
		},
	}

	templates = make(map[string]*template.Template)
	templates["feed"] = template.Must(template.New("feed").Parse(string(mustAsset("templates/feed.xml"))))
	templates["homepage"] = template.Must(template.New("homepage").Parse(string(mustAsset("templates/homepage.html"))))
	templates["statistics"] = template.Must(template.New("statistics").Parse(string(mustAsset("templates/statistics.html"))))
	templates["torrent"] = template.Must(template.New("torrent").Funcs(templateFunctions).Parse(string(mustAsset("templates/torrent.html"))))
	templates["torrents"] = template.Must(template.New("torrents").Funcs(templateFunctions).Parse(string(mustAsset("templates/torrents.html"))))

	var err error
	// TODO: the database URL is hard-coded to a developer's home directory;
	// it must come from a command-line flag before release.
	database, err = persistence.MakeDatabase("sqlite3:///home/bora/.local/share/magneticod/database.sqlite3", unsafe.Pointer(logger))
	if err != nil {
		panic(err.Error())
	}

	zap.L().Info("magneticow is ready to serve!")
	// BUGFIX: the error from ListenAndServe was previously discarded, so e.g.
	// a port that is already in use made the process exit silently with status 0.
	if err = http.ListenAndServe(":8080", router); err != nil {
		zap.L().Fatal("ListenAndServe error!", zap.Error(err))
	}
}
// rootHandler renders the homepage with the total number of torrents.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	count, err := database.GetNumberOfTorrents()
	if err != nil {
		panic(err.Error())
	}
	// BUGFIX: the error returned by Execute was previously ignored; a broken
	// template or a mid-write client disconnect is now logged instead of
	// vanishing silently.
	if err = templates["homepage"].Execute(w, HomepageTD{
		Count: count,
	}); err != nil {
		log.Printf("while executing homepage template: %v", err)
	}
}
// torrentsHandler lists torrents discovered before `before` (or after
// `after`), optionally filtered by the `search` query parameter.
//
// Parses `before` and `after` parameters in the URL query following the conditions below:
//   - `before` and `after` cannot be both supplied at the same time.
//   - `before` -if supplied- cannot be less than or equal to zero.
//   - `after` -if supplied- cannot be greater than the current Unix time.
//   - if `before` is not supplied, it is set to the current Unix time.
func torrentsHandler(w http.ResponseWriter, r *http.Request) {
	queryValues := r.URL.Query()

	// BUGFIX: this case is now detected up front; previously the check sat
	// inside the `else if` branch (where qBefore is always -1) and could
	// never trigger, so `after` was silently ignored when both were given.
	if queryValues.Get("before") != "" && queryValues.Get("after") != "" {
		panic("both before and after supplied")
	}

	qBefore, qAfter := (int64)(-1), (int64)(-1)
	var err error
	if queryValues.Get("before") != "" {
		qBefore, err = strconv.ParseInt(queryValues.Get("before"), 10, 64)
		if err != nil {
			panic(err.Error())
		}
		if qBefore <= 0 {
			panic("before parameter is less than or equal to zero!")
		}
	} else if queryValues.Get("after") != "" {
		qAfter, err = strconv.ParseInt(queryValues.Get("after"), 10, 64)
		if err != nil {
			panic(err.Error())
		}
		if qAfter > time.Now().Unix() {
			panic("after parameter is greater than the current Unix time!")
		}
	} else {
		qBefore = time.Now().Unix()
	}

	var torrents []persistence.TorrentMetadata
	if qBefore != -1 {
		torrents, err = database.GetNewestTorrents(N_TORRENTS, qBefore)
	} else {
		torrents, err = database.QueryTorrents(
			queryValues.Get("search"),
			persistence.BY_DISCOVERED_ON,
			true,
			false,
			N_TORRENTS,
			qAfter,
			true,
		)
	}
	if err != nil {
		panic(err.Error())
	}

	// BUGFIX: removed the `torrents[2].HasReadme = true` testing leftover
	// (marked "REMOVE"; it panicked whenever fewer than three torrents were
	// returned), and guard the pagination bounds against an empty result set,
	// which previously caused an index-out-of-range panic on torrents[...].
	var before, after int64
	if len(torrents) > 0 {
		before = torrents[len(torrents)-1].DiscoveredOn
		after = torrents[0].DiscoveredOn
	}

	templates["torrents"].Execute(w, TorrentsTD{
		Search:          "",
		SubscriptionURL: "borabora",
		Torrents:        torrents,
		Before:          before,
		After:           after,
		SortedBy:        "anan",
		NextPageExists:  true,
	})
}
// newestTorrentsHandler lists the newest torrents, paginated with the
// `before`/`after` Unix-time query parameters.
//
// NOTE(review): this handler is not registered on the router in main() and
// largely duplicates torrentsHandler -- candidates for merging.
func newestTorrentsHandler(w http.ResponseWriter, r *http.Request) {
	queryValues := r.URL.Query()

	// BUGFIX: detected up front; the old check inside the `else if` branch
	// (where qBefore is always -1) could never trigger.
	if queryValues.Get("before") != "" && queryValues.Get("after") != "" {
		panic("both before and after supplied")
	}

	qBefore, qAfter := (int64)(-1), (int64)(-1)
	var err error
	if queryValues.Get("before") != "" {
		qBefore, err = strconv.ParseInt(queryValues.Get("before"), 10, 64)
		if err != nil {
			panic(err.Error())
		}
	} else if queryValues.Get("after") != "" {
		qAfter, err = strconv.ParseInt(queryValues.Get("after"), 10, 64)
		if err != nil {
			panic(err.Error())
		}
	} else {
		qBefore = time.Now().Unix()
	}

	var torrents []persistence.TorrentMetadata
	if qBefore != -1 {
		torrents, err = database.QueryTorrents(
			"",
			persistence.BY_DISCOVERED_ON,
			true,
			false,
			N_TORRENTS,
			qBefore,
			false,
		)
	} else {
		torrents, err = database.QueryTorrents(
			"",
			persistence.BY_DISCOVERED_ON,
			false,
			false,
			N_TORRENTS,
			qAfter,
			true,
		)
	}
	if err != nil {
		panic(err.Error())
	}

	// BUGFIX: guard against an empty result set, which previously caused an
	// index-out-of-range panic on torrents[len(torrents)-1] / torrents[0].
	var before, after int64
	if len(torrents) > 0 {
		before = torrents[len(torrents)-1].DiscoveredOn
		after = torrents[0].DiscoveredOn
	}

	templates["torrents"].Execute(w, TorrentsTD{
		Search:          "",
		SubscriptionURL: "borabora",
		Torrents:        torrents,
		Before:          before,
		After:           after,
		SortedBy:        "anan",
		NextPageExists:  true,
	})
}
// torrentsInfohashHandler renders the details page of a single torrent,
// identified by the hex-encoded {infohash} route variable.
func torrentsInfohashHandler(w http.ResponseWriter, r *http.Request) {
	// The route pattern restricts {infohash} to 40 lowercase hex digits, but
	// the decode error is still checked rather than assumed away.
	infoHashHex := mux.Vars(r)["infohash"]
	infoHash, err := hex.DecodeString(infoHashHex)
	if err != nil {
		panic(err.Error())
	}

	metadata, err := database.GetTorrent(infoHash)
	if err != nil {
		panic(err.Error())
	}

	templates["torrent"].Execute(w, metadata)
}
// statisticsHandler will render the statistics page; not implemented yet
// (v0.7.0 stub -- currently responds with an empty 200).
func statisticsHandler(w http.ResponseWriter, r *http.Request) {
}
// feedHandler will serve the RSS feed; not implemented yet
// (v0.7.0 stub -- currently responds with an empty 200).
func feedHandler(w http.ResponseWriter, r *http.Request) {
}
// staticHandler serves the embedded (bindata) assets under /static.
func staticHandler(w http.ResponseWriter, r *http.Request) {
	// Drop the leading '/' so the URL path matches the bindata asset names.
	data, err := Asset(r.URL.Path[1:])
	if err != nil {
		http.NotFound(w, r)
		return
	}

	// DetectContentType does not recognise CSS, so special-case the
	// extension; everything else is sniffed from the content (fallback).
	contentType := http.DetectContentType(data)
	if strings.HasSuffix(r.URL.Path, ".css") {
		contentType = "text/css; charset=utf-8"
	}

	w.Header().Set("Content-Type", contentType)
	w.Write(data)
}
// mustAsset returns the embedded asset with the given name, panicking (via
// log.Panicf) when it does not exist -- a missing compiled-in asset is a
// build-time bug, not a runtime condition.
func mustAsset(name string) []byte {
	data, err := Asset(name)
	if err == nil {
		return data
	}
	log.Panicf("Could NOT access the requested resource `%s`: %s (please inform us, this is a BUG!)", name, err.Error())
	return nil // unreachable; Panicf never returns
}

View File

@ -1,41 +0,0 @@
from setuptools import setup
def read_file(path):
    """Return the entire contents of the text file at ``path`` as a string."""
    with open(path) as fp:
        return fp.read()
# Packaging metadata for magneticow, built with setuptools.
setup(
    name="magneticow",
    version="0.6.0",
    description="Lightweight web interface for magnetico.",
    # The long description is read from README.rst at build time.
    long_description=read_file("README.rst"),
    url="http://magnetico.org",
    author="Mert Bora ALPER",
    author_email="bora@boramalper.org",
    license="GNU Affero General Public License v3 or later (AGPLv3+)",
    packages=["magneticow"],
    # Ship the package's data files (e.g. templates/static assets) with it.
    include_package_data=True,
    zip_safe=False,
    entry_points={
        # Installs the `magneticow` console command.
        "console_scripts": ["magneticow=magneticow.__main__:main"]
    },
    install_requires=[
        "appdirs >= 1.4.3",
        "flask >= 0.12.1",
        "gevent >= 1.2.1"
    ],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Environment :: Web Environment",
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: Implementation :: CPython"
    ]
)

View File

@ -1,10 +0,0 @@
[Unit]
Description=magneticow: lightweight web interface for magnetico
[Service]
ExecStart=%h/.local/bin/magneticow --port PORT --user USERNAME PASSWORD
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

117
persistence/interface.go Normal file
View File

@ -0,0 +1,117 @@
package persistence
import (
"fmt"
"go.uber.org/zap"
"net/url"
)
// Database is the storage abstraction implemented by each supported database
// engine (see MakeDatabase for the supported engines).
type Database interface {
	// Engine identifies the underlying database engine.
	Engine() databaseEngine
	// DoesTorrentExist reports whether a torrent with the given infoHash is
	// already stored in the database.
	DoesTorrentExist(infoHash []byte) (bool, error)
	// AddNewTorrent stores the torrent and its file listing in the database.
	AddNewTorrent(infoHash []byte, name string, files []File) error
	// Close releases the underlying database connection.
	Close() error
	// GetNumberOfTorrents returns the number of torrents saved in the database. Might be an
	// approximation.
	GetNumberOfTorrents() (uint, error)
	// QueryTorrents returns up to @n torrents
	//   * that are discovered before @timePoint if @when is BEFORE, else that
	//     are discovered after @timePoint,
	//   * that match @query if it's not empty,
	// ordered by @orderBy in ascending order if @ord is ASCENDING, else in
	// descending order.
	QueryTorrents(query string, orderBy orderingCriteria, ord order, n uint, when presence, timePoint int64) ([]TorrentMetadata, error)
	// GetTorrent returns the TorrentMetadata for the torrent of the given infoHash. Might return
	// nil, nil if the torrent does not exist in the database.
	GetTorrent(infoHash []byte) (*TorrentMetadata, error)
	// GetFiles returns the file listing of the torrent with the given infoHash.
	GetFiles(infoHash []byte) ([]File, error)
	// GetStatistics returns discovery statistics starting at @from and
	// spanning @period buckets.
	GetStatistics(from ISO8601, period uint) (*Statistics, error)
}
// orderingCriteria enumerates the criteria torrents can be sorted by in
// QueryTorrents.
type orderingCriteria uint8

// Using iota (rather than explicit `= 2`, `= 3`, ... on untyped specs) makes
// every constant in the group carry the declared type; previously only the
// first constant of each group was typed and the rest were untyped ints.
// The numeric values are unchanged.
const (
	BY_RELEVANCE orderingCriteria = iota + 1
	BY_SIZE
	BY_DISCOVERED_ON
	BY_N_FILES
)

// order is the sort direction used by QueryTorrents.
type order uint8

const (
	ASCENDING order = iota + 1
	DESCENDING
)

// presence tells QueryTorrents whether @timePoint bounds the results from
// above (BEFORE) or from below (AFTER).
type presence uint8

const (
	BEFORE presence = iota + 1
	AFTER
)

// statisticsGranularity is the bucket size of the time series returned by
// GetStatistics.
type statisticsGranularity uint8

// ISO8601 is a timestamp formatted as an ISO 8601 string.
type ISO8601 string

const (
	HOURLY_STATISTICS statisticsGranularity = iota + 1
	DAILY_STATISTICS
	WEEKLY_STATISTICS
	MONTHLY_STATISTICS
	YEARLY_STATISTICS
)

// databaseEngine identifies the underlying storage backend.
type databaseEngine uint8

const (
	SQLITE3_ENGINE databaseEngine = iota + 1
)
// Statistics is a fixed-granularity time series of discovery counts,
// starting at From and spanning Period buckets.
type Statistics struct {
	Granularity statisticsGranularity
	From        ISO8601
	Period      uint

	// All these slices below have the exact length equal to the Period.
	NTorrentsDiscovered []uint
	NFilesDiscovered    []uint
}

// File is a single file within a torrent.
type File struct {
	Size int64 // size in bytes
	Path string
}

// TorrentMetadata is the summary of a torrent as stored in the database; the
// file listing is retrieved separately via Database.GetFiles.
type TorrentMetadata struct {
	InfoHash     []byte
	Name         string
	Size         uint64 // total size of all files, in bytes
	DiscoveredOn int64  // Unix timestamp of when the torrent was discovered
	NFiles       uint
}
// MakeDatabase opens (or creates) the database identified by rawURL and
// returns a Database backed by the engine named in the URL scheme. Currently
// only "sqlite3" is supported; "postgresql" and "mysql" are recognised but
// not implemented yet. If logger is non-nil, it replaces zap's global logger
// as a side effect.
func MakeDatabase(rawURL string, enableFTS bool, logger *zap.Logger) (Database, error) {
	if logger != nil {
		zap.ReplaceGlobals(logger)
	}

	dbURL, err := url.Parse(rawURL)
	if err != nil {
		return nil, fmt.Errorf("url.Parse: %s", err.Error())
	}

	switch dbURL.Scheme {
	case "sqlite3":
		return makeSqlite3Database(dbURL, enableFTS)
	case "postgresql":
		return nil, fmt.Errorf("postgresql is not yet supported")
	case "mysql":
		return nil, fmt.Errorf("mysql is not yet supported")
	default:
		return nil, fmt.Errorf("unknown URI scheme (database engine) %q", dbURL.Scheme)
	}
}

View File

@ -1,15 +1,16 @@
package persistence
import (
"net/url"
"path"
"os"
"fmt"
"database/sql"
"regexp"
"go.uber.org/zap"
"fmt"
"net/url"
"os"
"path"
"time"
_ "github.com/mattn/go-sqlite3"
"go.uber.org/zap"
"math"
)
type sqlite3Database struct {
@ -20,7 +21,7 @@ func (db *sqlite3Database) Engine() databaseEngine {
return SQLITE3_ENGINE
}
func makeSqlite3Database(url_ *url.URL) (Database, error) {
func makeSqlite3Database(url_ *url.URL, enableFTS bool) (Database, error) {
db := new(sqlite3Database)
dbDir, _ := path.Split(url_.Path)
@ -31,18 +32,18 @@ func makeSqlite3Database(url_ *url.URL) (Database, error) {
var err error
db.conn, err = sql.Open("sqlite3", url_.Path)
if err != nil {
return nil, err
return nil, fmt.Errorf("sql.Open: %s", err.Error())
}
// > Open may just validate its arguments without creating a connection to the database. To
// > verify that the data source name is valid, call Ping.
// > verify that the data source Name is valid, call Ping.
// https://golang.org/pkg/database/sql/#Open
if err = db.conn.Ping(); err != nil {
return nil, err
return nil, fmt.Errorf("sql.DB.Ping: %s", err.Error())
}
if err := db.setupDatabase(); err != nil {
return nil, err
return nil, fmt.Errorf("setupDatabase: %s", err.Error())
}
return db, nil
@ -51,7 +52,7 @@ func makeSqlite3Database(url_ *url.URL) (Database, error) {
func (db *sqlite3Database) DoesTorrentExist(infoHash []byte) (bool, error) {
rows, err := db.conn.Query("SELECT 1 FROM torrents WHERE info_hash = ?;", infoHash)
if err != nil {
return false, err;
return false, err
}
// If rows.Next() returns true, meaning that the torrent is in the database, return true; else
@ -59,37 +60,10 @@ func (db *sqlite3Database) DoesTorrentExist(infoHash []byte) (bool, error) {
exists := rows.Next()
if err = rows.Close(); err != nil {
return false, err;
return false, err
}
return exists, nil;
}
func (db *sqlite3Database) GiveAnIncompleteTorrent(pathRegex *regexp.Regexp, maxSize uint) (infoHash []byte, path string, err error) {
rows, err := db.conn.Query("SELECT info_hash FROM torrents WHERE has_readme = 0;")
if err != nil {
return nil, "", err
}
if rows.Next() != true {
return nil, "", nil
}
if err = rows.Scan(&infoHash); err != nil {
return nil, "", err
}
if err = rows.Close(); err != nil {
return nil, "", err
}
// TODO
return infoHash, "", nil
}
func (db *sqlite3Database) GiveAStaleTorrent() (infoHash []byte, err error) {
// TODO
return nil, nil
return exists, nil
}
func (db *sqlite3Database) AddNewTorrent(infoHash []byte, name string, files []File) error {
@ -103,9 +77,9 @@ func (db *sqlite3Database) AddNewTorrent(infoHash []byte, name string, files []F
// add it.
exists, err := db.DoesTorrentExist(infoHash)
if err != nil {
return err;
return err
} else if exists {
return nil;
return nil
}
tx, err := db.conn.Begin()
@ -123,15 +97,19 @@ func (db *sqlite3Database) AddNewTorrent(infoHash []byte, name string, files []F
total_size += file.Size
}
// This is a workaround for a bug: the database will not accept total_size to be zero.
if total_size == 0 {
return nil
}
res, err := tx.Exec(`
INSERT INTO torrents (
info_hash,
name,
total_size,
discovered_on,
n_files,
) VALUES (?, ?, ?, ?, ?, ?, ?, ?);
`, infoHash, name, total_size, time.Now().Unix(), len(files))
discovered_on
) VALUES (?, ?, ?, ?);
`, infoHash, name, total_size, time.Now().Unix())
if err != nil {
return err
}
@ -142,7 +120,7 @@ func (db *sqlite3Database) AddNewTorrent(infoHash []byte, name string, files []F
}
for _, file := range files {
_, err = tx.Exec("INSERT INTO files (torrent_id, size, path) VALUES (?, ?, ?);",
_, err = tx.Exec("INSERT INTO files (torrent_id, Size, path) VALUES (?, ?, ?);",
lastInsertId, file.Size, file.Path,
)
if err != nil {
@ -158,25 +136,13 @@ func (db *sqlite3Database) AddNewTorrent(infoHash []byte, name string, files []F
return nil
}
func (db *sqlite3Database) AddReadme(infoHash []byte, path string, content string) error {
_, err := db.conn.Exec(
`UPDATE files SET is_readme = 1, content = ?
WHERE path = ? AND (SELECT id FROM torrents WHERE info_hash = ?) = torrent_id;`,
content, path, infoHash,
)
if err != nil {
return err
}
return nil
}
func (db *sqlite3Database) Close() error {
return db.conn.Close()
}
func (db *sqlite3Database) GetNumberOfTorrents() (uint, error) {
// COUNT(ROWID) is much more inefficient since it scans the whole table, so use MAX(ROWID)
// COUNT(1) is much more inefficient since it scans the whole table, so use MAX(ROWID)
rows, err := db.conn.Query("SELECT MAX(ROWID) FROM torrents;")
if err != nil {
return 0, err
@ -198,45 +164,21 @@ func (db *sqlite3Database) GetNumberOfTorrents() (uint, error) {
return n, nil
}
func (db *sqlite3Database) NewestTorrents(n uint) ([]TorrentMetadata, error) {
rows, err := db.conn.Query(`
SELECT
info_hash,
name,
total_size,
discovered_on,
has_readme,
n_files,
n_seeders,
n_leechers,
updated_on
FROM torrents
ORDER BY discovered_on DESC LIMIT ?;
`, n,
)
if err != nil {
return nil, err
func (db *sqlite3Database) QueryTorrents(query string, orderBy orderingCriteria, ord order, n uint, when presence, timePoint int64) ([]TorrentMetadata, error) {
if query == "" && orderBy == BY_RELEVANCE {
return nil, fmt.Errorf("torrents cannot be ordered by \"relevance\" when the query is empty")
}
var torrents []TorrentMetadata
for rows.Next() {
tm := new(TorrentMetadata)
rows.Scan(
&tm.infoHash, &tm.name, &tm.discoveredOn, &tm.hasReadme, &tm.nFiles, &tm.nSeeders,
&tm.nLeechers, &tm.updatedOn,
)
torrents = append(torrents, *tm)
if timePoint == 0 && when == BEFORE {
return nil, fmt.Errorf("nothing can come \"before\" time 0")
}
if err = rows.Close(); err != nil {
return nil, err
if timePoint == math.MaxInt64 && when == AFTER {
return nil, fmt.Errorf("nothing can come \"after\" time %d", math.MaxInt64)
}
return torrents, nil
}
// TODO
func (db *sqlite3Database) SearchTorrents(query string, orderBy orderingCriteria, descending bool, mustHaveReadme bool) ([]TorrentMetadata, error) { // TODO
// TODO:
return nil, nil
}
@ -245,13 +187,9 @@ func (db *sqlite3Database) GetTorrent(infoHash []byte) (*TorrentMetadata, error)
`SELECT
info_hash,
name,
size,
total_size,
discovered_on,
has_readme,
n_files,
n_seeders,
n_leechers,
updated_on
(SELECT COUNT(1) FROM files WHERE torrent_id = torrents.id) AS n_files
FROM torrents
WHERE info_hash = ?`,
infoHash,
@ -261,32 +199,34 @@ func (db *sqlite3Database) GetTorrent(infoHash []byte) (*TorrentMetadata, error)
}
if rows.Next() != true {
zap.L().Warn("torrent not found amk")
return nil, nil
}
tm := new(TorrentMetadata)
rows.Scan(
&tm.infoHash, &tm.name, &tm.discoveredOn, &tm.hasReadme, &tm.nFiles, &tm.nSeeders,
&tm.nLeechers, &tm.updatedOn,
)
var tm TorrentMetadata
rows.Scan(&tm.InfoHash, &tm.Name, &tm.Size, &tm.DiscoveredOn, &tm.NFiles)
if err = rows.Close(); err != nil {
return nil, err
}
return tm, nil
return &tm, nil
}
func (db *sqlite3Database) GetFiles(infoHash []byte) ([]File, error) {
// TODO
return nil, nil
}
rows, err := db.conn.Query("SELECT size, path FROM files WHERE torrent_id = ?;", infoHash)
if err != nil {
return nil, err
}
func (db *sqlite3Database) GetReadme(infoHash []byte) (string, error) {
// TODO
return "", nil
}
var files []File
for rows.Next() {
var file File
rows.Scan(&file.Size, &file.Path)
files = append(files, file)
}
return files, nil
}
func (db *sqlite3Database) GetStatistics(from ISO8601, period uint) (*Statistics, error) {
// TODO
@ -294,6 +234,7 @@ func (db *sqlite3Database) GetStatistics(from ISO8601, period uint) (*Statistics
}
func (db *sqlite3Database) commitQueuedTorrents() error {
// TODO
return nil
}
@ -317,14 +258,15 @@ func (db *sqlite3Database) setupDatabase() error {
PRAGMA journal_mode=WAL;
PRAGMA temp_store=1;
PRAGMA foreign_keys=ON;
PRAGMA encoding="UTF-8";
`)
if err != nil {
return err
return fmt.Errorf("sql.DB.Exec (PRAGMAs): %s", err.Error())
}
tx, err := db.conn.Begin()
if err != nil {
return err
return fmt.Errorf("sql.DB.Begin: %s", err.Error())
}
// If everything goes as planned and no error occurs, we will commit the transaction before
// returning from the function so the tx.Rollback() call will fail, trying to rollback a
@ -350,46 +292,54 @@ func (db *sqlite3Database) setupDatabase() error {
);
`)
if err != nil {
return err
return fmt.Errorf("sql.Tx.Exec (v0): %s", err.Error())
}
// Get the user_version:
res, err := tx.Query("PRAGMA user_version;")
rows, err := tx.Query("PRAGMA user_version;")
if err != nil {
return err
return fmt.Errorf("sql.Tx.Query (user_version): %s", err.Error())
}
var userVersion int;
if res.Next() != true {
return fmt.Errorf("PRAGMA user_version did not return any rows!")
var userVersion int
if rows.Next() != true {
return fmt.Errorf("sql.Rows.Next (user_version): PRAGMA user_version did not return any rows!")
}
if err = res.Scan(&userVersion); err != nil {
return err
if err = rows.Scan(&userVersion); err != nil {
return fmt.Errorf("sql.Rows.Scan (user_version): %s", err.Error())
}
// Close your rows lest you get "database table is locked" error(s)!
// See https://github.com/mattn/go-sqlite3/issues/2741
if err = rows.Close(); err != nil {
return fmt.Errorf("sql.Rows.Close (user_version): %s", err.Error())
}
switch userVersion {
// Upgrade from user_version 0 to 1
// The Change:
// * `info_hash_index` is recreated as UNIQUE.
case 0:
zap.S().Warnf("Updating database schema from 0 to 1... (this might take a while)")
// Upgrade from user_version 0 to 1
// Changes:
// * `info_hash_index` is recreated as UNIQUE.
zap.L().Warn("Updating database schema from 0 to 1... (this might take a while)")
_, err = tx.Exec(`
DROP INDEX info_hash_index;
CREATE UNIQUE INDEX info_hash_index ON torrents (info_hash);
PRAGMA user_version = 1;
`)
if err != nil {
return err
return fmt.Errorf("sql.Tx.Exec (v0 -> v1): %s", err.Error())
}
fallthrough
// Upgrade from user_version 1 to 2
// The Change:
// * Added `is_readme` and `content` columns to the `files` table, and the constraints & the
// the indices they entail.
// * Added unique index `readme_index` on `files` table.
case 1:
zap.S().Warnf("Updating database schema from 1 to 2... (this might take a while)")
// We introduce two new columns here: content BLOB, and is_readme INTEGER which we treat as
// a bool (hence the CHECK).
// Upgrade from user_version 1 to 2
// Changes:
// * Added `n_seeders`, `n_leechers`, and `updated_on` columns to the `torrents` table, and
// the constraints they entail.
// * Added `is_readme` and `content` columns to the `files` table, and the constraints & the
// the indices they entail.
// * Added unique index `readme_index` on `files` table.
zap.L().Warn("Updating database schema from 1 to 2... (this might take a while)")
// We introduce two new columns in `files`: content BLOB, and is_readme INTEGER which we
// treat as a bool (NULL for false, and 1 for true; see the CHECK statement).
// The reason for the change is that as we introduce the new "readme" feature which
// downloads a readme file as a torrent descriptor, we needed to store it somewhere in the
// database with the following conditions:
@ -402,26 +352,31 @@ func (db *sqlite3Database) setupDatabase() error {
//
// Regarding the implementation details, following constraints arise:
//
// 1. The column is_readme is either NULL or 1, and if it is 1, then content column cannot
// be NULL (but might be an empty BLOB). Vice versa, if content column of a row is,
// NULL then is_readme must be NULL.
// 1. The column is_readme is either NULL or 1, and if it is 1, then column content cannot
// be NULL (but might be an empty BLOB). Vice versa, if column content of a row is,
// NULL then column is_readme must be NULL.
//
// This is to prevent unused content fields filling up the database, and to catch
// programmers' errors.
_, err = tx.Exec(`
ALTER TABLE torrents ADD COLUMN updated_on INTEGER CHECK (updated_on > 0) DEFAULT NULL;
ALTER TABLE torrents ADD COLUMN n_seeders INTEGER CHECK ((updated_on IS NOT NULL AND n_seeders >= 0) OR (updated_on IS NULL AND n_seeders IS NULL)) DEFAULT NULL;
ALTER TABLE torrents ADD COLUMN n_leechers INTEGER CHECK ((updated_on IS NOT NULL AND n_leechers >= 0) OR (updated_on IS NULL AND n_leechers IS NULL)) DEFAULT NULL;
ALTER TABLE files ADD COLUMN is_readme INTEGER CHECK (is_readme IS NULL OR is_readme=1) DEFAULT NULL;
ALTER TABLE files ADD COLUMN content BLOB CHECK((content IS NULL AND is_readme IS NULL) OR (content IS NOT NULL AND is_readme=1)) DEFAULT NULL;
ALTER TABLE files ADD COLUMN content TEXT CHECK ((content IS NULL AND is_readme IS NULL) OR (content IS NOT NULL AND is_readme=1)) DEFAULT NULL;
CREATE UNIQUE INDEX readme_index ON files (torrent_id, is_readme);
PRAGMA user_version = 2;
`)
if err != nil {
return err
return fmt.Errorf("sql.Tx.Exec (v1 -> v2): %s", err.Error())
}
}
if err = tx.Commit(); err != nil {
return err
return fmt.Errorf("sql.Tx.Commit: %s", err.Error())
}
return nil
}
}

4
pkg/.gitignore vendored
View File

@ -1,4 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

407
pylintrc
View File

@ -1,407 +0,0 @@
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=.git
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Pickle collected data for later comparisons.
persistent=no
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=4
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=
# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality. This option is deprecated
# and it will be removed in Pylint 2.0.
optimize-ast=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=INFERENCE
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=range-builtin-not-iterating,coerce-builtin,old-ne-operator,reduce-builtin,suppressed-message,parameter-unpacking,unichr-builtin,round-builtin,hex-method,dict-iter-method,basestring-builtin,no-absolute-import,using-cmp-argument,buffer-builtin,raw_input-builtin,delslice-method,filter-builtin-not-iterating,setslice-method,nonzero-method,import-star-module-level,useless-suppression,map-builtin-not-iterating,raising-string,file-builtin,dict-view-method,standarderror-builtin,long-suffix,print-statement,xrange-builtin,intern-builtin,input-builtin,metaclass-assignment,cmp-method,unpacking-in-except,cmp-builtin,next-method-called,coerce-method,apply-builtin,long-builtin,getslice-method,zip-builtin-not-iterating,backtick,execfile-builtin,unicode-builtin,old-division,indexing-exception,old-raise-syntax,oct-method,reload-builtin,old-octal-literal
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=colorized
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]". This option is deprecated
# and it will be removed in Pylint 2.0.
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,future.builtins
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
[BASIC]
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=1000
[ELIF]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

View File

@ -1,185 +0,0 @@
package bittorrent
import (
"time"
"strings"
"github.com/anacrolix/missinggo"
"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/metainfo"
"go.uber.org/zap"
"os"
"path"
"persistence"
)
// awaitMetadata waits for the metadata (the "info" dictionary) of the torrent
// with the given infoHash to be fetched, using peer as a bootstrap peer, and
// flushes a Metadata value into the sink on success. It gives up silently
// after a 5-minute timeout or when the sink is terminated.
func (ms *MetadataSink) awaitMetadata(infoHash metainfo.Hash, peer torrent.Peer) {
	t, isNew := ms.client.AddTorrentInfoHash(infoHash)
	// If the infoHash we added was not new (i.e. it's already being downloaded by the client)
	// then t is the handle of the (old) torrent. We add the (presumably new) peer to the torrent
	// so we can increase the chance of operation being successful, or that the metadata might be
	// fetched.
	t.AddPeers([]torrent.Peer{peer})
	if !isNew {
		// Return immediately if we are trying to await on an ongoing metadata-fetching operation.
		// Each ongoing operation should have one and only one "await*" function waiting on it.
		return
	}
	// Drop the torrent from the client on every exit path. Previously Drop
	// was called only on the success branch, so a timeout or termination
	// leaked the torrent handle inside the client.
	defer t.Drop()

	// Wait for the torrent client to receive the metadata for the torrent, meanwhile allowing
	// termination to be handled gracefully.
	var info *metainfo.Info
	select {
	case <-t.GotInfo():
		info = t.Info()
	case <-time.After(5 * time.Minute):
		zap.L().Sugar().Debugf("Fetcher timeout! %x", infoHash)
		return
	case <-ms.termination:
		return
	}

	var files []persistence.File
	for _, file := range info.Files {
		files = append(files, persistence.File{
			Size: file.Length,
			Path: file.DisplayPath(info),
		})
	}
	// NOTE(review): info.Files appears to be empty for single-file torrents,
	// which would leave `files` empty and totalSize zero here — confirm
	// whether single-file torrents need special-casing.

	var totalSize uint64
	for _, file := range files {
		if file.Size < 0 {
			// All files' sizes must be greater than or equal to zero, otherwise treat them as
			// illegal and ignore.
			zap.L().Sugar().Debugf("!!!! file size zero or less! \"%s\" (%d)", file.Path, file.Size)
			return
		}
		totalSize += uint64(file.Size)
	}

	ms.flush(Metadata{
		InfoHash:     infoHash[:],
		Name:         info.Name,
		TotalSize:    totalSize,
		DiscoveredOn: time.Now().Unix(),
		Files:        files,
		Peers:        nil,
	})
}
// awaitFile downloads the single file req.Path of the torrent identified by
// req.InfoHash and, on success, passes the file contents to fs.flush(). It
// returns early on duplicate requests, on timeout, or if the requested path
// does not exist in the torrent. Meant to run in its own goroutine.
func (fs *FileSink) awaitFile(req *FileRequest) {
	// Remove the download directory of the torrent after the operation is completed.
	// TODO: what if RemoveAll() returns error, do we care, and if we do, how to handle it?
	defer os.RemoveAll(path.Join(fs.baseDownloadDir, string(req.InfoHash)))

	var infoHash [20]byte
	copy(infoHash[:], req.InfoHash)

	t, isNew := fs.client.AddTorrentInfoHash(infoHash)
	if len(req.Peers) > 0 {
		t.AddPeers(req.Peers)
	}
	if !isNew {
		// Return immediately if we are trying to await on an ongoing file-downloading operation.
		// Each ongoing operation should have one and only one "await*" function waiting on it.
		return
	}

	// Setup & start the timeout timer.
	timeout := time.After(fs.timeoutDuration)

	// Once we return from this function, drop the torrent from the client.
	// TODO: Check if dropping a torrent also cancels any outstanding read operations?
	defer t.Drop()

	select {
	case <-t.GotInfo():
	case <-timeout:
		return
	}

	// Find the requested file and cancel the download of every other file.
	var match *torrent.File
	files := t.Files()
	for i := range files {
		if files[i].Path() == req.Path {
			// BUGFIX: take the address of the slice element, not of the loop
			// variable — the loop variable's address is reused across
			// iterations, so the original could end up pointing at the wrong
			// (last-visited) file.
			match = &files[i]
		} else {
			files[i].Cancel()
		}
	}
	if match == nil {
		var filePaths []string
		for i := range files {
			filePaths = append(filePaths, files[i].Path())
		}
		zap.L().Warn(
			"The leech (FileSink) has been requested to download a file which does not exist!",
			zap.ByteString("torrent", req.InfoHash),
			zap.String("requestedFile", req.Path),
			zap.Strings("allFiles", filePaths),
		)
		// BUGFIX: the original fell through here and dereferenced the nil
		// match below, causing a nil-pointer panic.
		return
	}

	reader := t.NewReader()
	defer reader.Close()

	fileDataChan := make(chan []byte)
	go downloadFile(*match, reader, fileDataChan)

	select {
	case fileData := <-fileDataChan:
		if fileData != nil {
			fs.flush(FileResult{
				Request:  req,
				FileData: fileData,
			})
		}

	case <-timeout:
		zap.L().Debug(
			"Timeout while downloading a file!",
			zap.ByteString("torrent", req.InfoHash),
			zap.String("file", req.Path),
		)
	}
}
// downloadFile reads the whole of the given file out of the torrent through
// reader and sends its contents over fileDataChan; on failure it sends nil
// instead, so the receiver always gets exactly one message.
func downloadFile(file torrent.File, reader *torrent.Reader, fileDataChan chan<- []byte) {
	readSeeker := missinggo.NewSectionReadSeeker(reader, file.Offset(), file.Length())

	fileData := make([]byte, file.Length())
	// BUGFIX: a single Read() may legally return fewer bytes than requested
	// without an error, and the original also checked n before err (masking
	// the underlying error on short reads). io.ReadFull keeps reading until
	// the buffer is full or an error occurs, so err != nil now covers both
	// the short-read and the I/O-error cases.
	n, err := io.ReadFull(readSeeker, fileData)
	if err != nil {
		infoHash := file.Torrent().InfoHash()
		zap.L().Debug(
			"Error while downloading a file!",
			zap.Error(err),
			zap.ByteString("torrent", infoHash[:]),
			zap.String("file", file.Path()),
			zap.Int64("fileLength", file.Length()),
			zap.Int("n", n),
		)
		fileDataChan <- nil
		return
	}

	fileDataChan <- fileData
}

View File

@ -1,117 +0,0 @@
package bittorrent
import (
"net"
"path"
"time"
"github.com/anacrolix/dht"
"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/storage"
"github.com/Wessie/appdirs"
"go.uber.org/zap"
)
// FileRequest describes a single file to be downloaded out of a torrent.
type FileRequest struct {
	// InfoHash of the torrent that contains the file.
	InfoHash []byte
	// Path of the file within the torrent.
	Path string
	// Peers to bootstrap the download with; may be empty.
	Peers []torrent.Peer
}

// FileResult is the outcome of a successfully completed FileRequest.
type FileResult struct {
	// Request field is the original Request
	Request  *FileRequest
	FileData []byte
}

// FileSink downloads individual files out of torrents and delivers the
// results through its Drain() channel.
type FileSink struct {
	baseDownloadDir string
	client          *torrent.Client
	drain           chan FileResult
	// terminated marks the sink as closed.
	// NOTE(review): read/written from multiple goroutines without
	// synchronisation — confirm whether this needs a mutex/atomic.
	terminated      bool
	termination     chan interface{}
	timeoutDuration time.Duration
}
// NewFileSink creates a new FileSink.
//
// cAddr : client address
// mlAddr: mainline DHT node address
//
// On any setup failure it logs at Fatal level — which exits the process — so
// the `return nil` statements after those calls are unreachable in practice.
func NewFileSink(cAddr, mlAddr string, timeoutDuration time.Duration) *FileSink {
	fs := new(FileSink)

	mlUDPAddr, err := net.ResolveUDPAddr("udp", mlAddr)
	if err != nil {
		zap.L().Fatal("Could NOT resolve UDP addr!", zap.Error(err))
		return nil
	}

	// Make sure to close the mlUDPConn before returning from this function in case of an error.
	mlUDPConn, err := net.ListenUDP("udp", mlUDPAddr)
	if err != nil {
		zap.L().Fatal("Could NOT listen UDP (file sink)!", zap.Error(err))
		return nil
	}

	// Downloaded payloads go under the user's cache directory,
	// e.g. ~/.cache/magneticod/downloads on GNU/Linux.
	fs.baseDownloadDir = path.Join(
		appdirs.UserCacheDir("magneticod", "", "", true),
		"downloads",
	)

	// The torrent client runs the supplied mainline DHT connection passively
	// and stores incoming file data under baseDownloadDir, keyed by info-hash.
	fs.client, err = torrent.NewClient(&torrent.Config{
		ListenAddr:      cAddr,
		DisableTrackers: true,
		DHTConfig: dht.ServerConfig{
			Conn:       mlUDPConn,
			Passive:    true,
			NoSecurity: true,
		},
		DefaultStorage: storage.NewFileByInfoHash(fs.baseDownloadDir),
	})
	if err != nil {
		zap.L().Fatal("Leech could NOT create a new torrent client!", zap.Error(err))
		mlUDPConn.Close()
		return nil
	}

	fs.drain = make(chan FileResult)
	fs.termination = make(chan interface{})
	fs.timeoutDuration = timeoutDuration

	return fs
}
// Sink schedules the download of the file at the given path within the
// torrent identified by infoHash. The peers argument is optional and may be
// nil. Panics if the sink has already been terminated.
func (fs *FileSink) Sink(infoHash []byte, path string, peers []torrent.Peer) {
	if fs.terminated {
		zap.L().Panic("Trying to Sink() an already closed FileSink!")
	}

	request := &FileRequest{
		InfoHash: infoHash,
		Path:     path,
		Peers:    peers,
	}
	go fs.awaitFile(request)
}
// Drain returns the channel over which completed FileResults are delivered.
// Panics if the sink has already been terminated.
func (fs *FileSink) Drain() <-chan FileResult {
	if fs.terminated {
		zap.L().Panic("Trying to Drain() an already closed FileSink!")
	}
	return fs.drain
}
// Terminate closes the sink: it stops the torrent client, signals all pending
// await operations to give up, and closes the drain channel. Must be called
// at most once; Sink() and Drain() panic afterwards.
func (fs *FileSink) Terminate() {
	fs.terminated = true // set before closing channels so flush() stops sending
	close(fs.termination)
	fs.client.Close()
	close(fs.drain)
}

// flush delivers a result to the drain channel, unless the sink has been
// terminated — in which case the result is silently dropped to avoid sending
// on a closed channel.
func (fs *FileSink) flush(result FileResult) {
	if !fs.terminated {
		fs.drain <- result
	}
}

View File

@ -1,89 +0,0 @@
package bittorrent
import (
"go.uber.org/zap"
"github.com/anacrolix/torrent"
"magneticod/dht/mainline"
"persistence"
)
// Metadata is the fetched metadata ("info" dictionary) of a torrent, as
// produced by a MetadataSink.
type Metadata struct {
	InfoHash []byte
	// Name should be thought of "Title" of the torrent. For single-file torrents, it is the name
	// of the file, and for multi-file torrents, it is the name of the root directory.
	Name         string
	TotalSize    uint64
	DiscoveredOn int64
	// Files must be populated for both single-file and multi-file torrents!
	Files []persistence.File
	// Peers is the list of the "active" peers at the time of fetching metadata. Currently, it's
	// always nil as anacrolix/torrent does not support returning list of peers for a given torrent,
	// but in the future, this information can be useful for the CompletingCoordinator which can use
	// those Peers to download the README file (if any found).
	Peers []torrent.Peer
}

// MetadataSink fetches torrent metadata for the info-hashes it is given and
// delivers the results through its Drain() channel.
type MetadataSink struct {
	client *torrent.Client
	drain  chan Metadata
	// terminated marks the sink as closed.
	// NOTE(review): read/written from multiple goroutines without
	// synchronisation — confirm whether this needs a mutex/atomic.
	terminated  bool
	termination chan interface{}
}
// NewMetadataSink creates a new MetadataSink whose torrent client listens on
// laddr. Logs at Fatal level (exiting the process) if the client cannot be
// created.
func NewMetadataSink(laddr string) *MetadataSink {
	ms := new(MetadataSink)
	var err error
	// The client is used purely to fetch metadata from the peers we supply,
	// so trackers, PEX, DHT, and seeding are all disabled.
	ms.client, err = torrent.NewClient(&torrent.Config{
		ListenAddr:      laddr,
		DisableTrackers: true,
		DisablePEX:      true,
		// TODO: Should we disable DHT to force the client to use the peers we supplied only, or not?
		NoDHT: true,
		Seed:  false,
	})
	if err != nil {
		zap.L().Fatal("Fetcher could NOT create a new torrent client!", zap.Error(err))
	}
	ms.drain = make(chan Metadata)
	ms.termination = make(chan interface{})
	return ms
}
// Sink schedules the fetching of metadata for the torrent described by the
// trawling result. Panics if the sink has already been terminated.
func (ms *MetadataSink) Sink(res mainline.TrawlingResult) {
	if ms.terminated {
		zap.L().Panic("Trying to Sink() an already closed MetadataSink!")
	}
	go ms.awaitMetadata(res.InfoHash, res.Peer)
}

// Drain returns the channel over which fetched Metadata is delivered.
// Panics if the sink has already been terminated.
func (ms *MetadataSink) Drain() <-chan Metadata {
	if ms.terminated {
		zap.L().Panic("Trying to Drain() an already closed MetadataSink!")
	}
	return ms.drain
}

// Terminate closes the sink: it stops the torrent client, signals all pending
// await operations to give up, and closes the drain channel. Must be called
// at most once; Sink() and Drain() panic afterwards.
func (ms *MetadataSink) Terminate() {
	ms.terminated = true // set before closing channels so flush() stops sending
	close(ms.termination)
	ms.client.Close()
	close(ms.drain)
}

// flush delivers a result to the drain channel, unless the sink has been
// terminated — in which case the result is silently dropped.
func (ms *MetadataSink) flush(result Metadata) {
	if !ms.terminated {
		ms.drain <- result
	}
}

View File

@ -1,111 +0,0 @@
package main
import (
"regexp"
"sync"
"time"
"github.com/anacrolix/torrent"
"persistence"
"magneticod/bittorrent"
)
// completionRequest is an internal, queued request to download a single file
// of a torrent.
type completionRequest struct {
	infoHash []byte
	path     string
	peers    []torrent.Peer
	// time the request was enqueued; used by complete() to discard stale requests.
	time time.Time
}

// completionResult is the successfully downloaded content of a requested file.
type completionResult struct {
	InfoHash []byte
	Path     string
	Data     []byte
}

// CompletingCoordinator downloads README files for known torrents through a
// bittorrent.FileSink and reports the results over Output().
type CompletingCoordinator struct {
	database      persistence.Database
	maxReadmeSize uint
	sink          *bittorrent.FileSink
	queue         chan completionRequest
	// queueMutex serialises Request() calls; see the comment in Request().
	queueMutex  sync.Mutex
	outputChan  chan completionResult
	readmeRegex *regexp.Regexp
	terminated  bool
	termination chan interface{}
}

// CompletingCoordinatorOpFlags bundles the configuration for
// NewCompletingCoordinator.
type CompletingCoordinatorOpFlags struct {
	LeechClAddr   string
	LeechMlAddr   string
	LeechTimeout  time.Duration
	ReadmeMaxSize uint
	ReadmeRegex   *regexp.Regexp
}
// NewCompletingCoordinator creates a new CompletingCoordinator that downloads
// README files for torrents recorded in the given database, using the leech
// addresses/timeout supplied in opFlags.
func NewCompletingCoordinator(database persistence.Database, opFlags CompletingCoordinatorOpFlags) (cc *CompletingCoordinator) {
	cc = new(CompletingCoordinator)
	cc.database = database
	cc.maxReadmeSize = opFlags.ReadmeMaxSize
	cc.sink = bittorrent.NewFileSink(opFlags.LeechClAddr, opFlags.LeechMlAddr, opFlags.LeechTimeout)
	// The queue is bounded; Request() discards the oldest entry when full.
	cc.queue = make(chan completionRequest, 100)
	cc.readmeRegex = opFlags.ReadmeRegex
	// BUGFIX: outputChan was never initialised, so Output() returned a nil
	// channel and any receive on it would block forever.
	cc.outputChan = make(chan completionResult)
	cc.termination = make(chan interface{})
	return
}
// Request enqueues a completion (README download) request for the given
// torrent/file. If the queue is full, the oldest request is discarded to make
// room, as it is the most likely to be outdated.
func (cc *CompletingCoordinator) Request(infoHash []byte, path string, peers []torrent.Peer) {
	cc.queueMutex.Lock()
	defer cc.queueMutex.Unlock()

	// If queue is full discard the oldest request as it is more likely to be outdated.
	if len(cc.queue) == cap(cc.queue) {
		<-cc.queue
	}

	// Imagine, if this function [Request()] was called by another goroutine right when we were
	// here: the moment where we removed the oldest entry in the queue to free a single space for
	// the newest one. Imagine, now, that the second Request() call manages to add its own entry
	// to the queue, making the current goroutine wait until the cc.queue channel is available.
	//
	// Hence to prevent that we use cc.queueMutex
	cc.queue <- completionRequest{
		infoHash: infoHash,
		path:     path,
		peers:    peers,
		time:     time.Now(),
	}
}
// Start launches the coordinator's event loop (complete) in its own goroutine.
func (cc *CompletingCoordinator) Start() {
	go cc.complete()
}

// Output returns the channel over which completed downloads are delivered.
func (cc *CompletingCoordinator) Output() <-chan completionResult {
	return cc.outputChan
}
// complete is the coordinator's main loop: it drains queued completion
// requests into the file sink, discarding stale ones, until Terminate is
// signalled via cc.termination.
func (cc *CompletingCoordinator) complete() {
	for {
		select {
		case request := <-cc.queue:
			// Discard requests older than 2 minutes.
			// TODO: Instead of settling on 2 minutes as an arbitrary value, do some research to
			//       learn average peer lifetime in the BitTorrent network.
			if time.Since(request.time) > 2*time.Minute {
				continue
			}
			cc.sink.Sink(request.infoHash, request.path, request.peers)

		case <-cc.termination:
			// BUGFIX: the original used a bare `break` here, which only broke
			// out of the select — not the for loop — so the goroutine never
			// actually terminated.
			return

		default:
			// NOTE(review): this branch runs in a tight loop whenever the
			// queue is empty, busy-spinning on the database (and its result is
			// discarded) — consider blocking or sleeping here instead.
			cc.database.FindAnIncompleteTorrent(cc.readmeRegex, cc.maxReadmeSize)
		}
	}
}

View File

@ -1,59 +0,0 @@
package mainline
import (
"net"
"go.uber.org/zap"
"crypto/sha1"
"math/bits"
"math"
)
const (
	// k is the number of indices derived per IP (i.e. hash functions), per BEP 33.
	k uint32 = 2
	// m is the size of the bloom filter in bits (256 bytes), per BEP 33.
	m uint32 = 256 * 8
)

// BloomFilter is a BEP 33 DHT-scrape bloom filter used to estimate the number
// of distinct IP addresses observed.
type BloomFilter struct {
	filter [m / 8]byte
}
// InsertIP inserts the given IP address (IPv4 or IPv6) into the bloom filter,
// following the construction specified by BEP 33. Panics if the IP has an
// invalid length.
func (bf *BloomFilter) InsertIP(ip net.IP) {
	isV4 := len(ip) == net.IPv4len
	isV6 := len(ip) == net.IPv6len
	if !isV4 && !isV6 {
		zap.S().Panicf("Attempted to insert an invalid IP to the bloom filter! %d", len(ip))
	}

	digest := sha1.Sum(ip)

	// Build the two bit indices from the first four bytes of the digest and
	// truncate each to the filter size m (11 bits required).
	i1 := (uint32(digest[0]) | uint32(digest[1])<<8) % m
	i2 := (uint32(digest[2]) | uint32(digest[3])<<8) % m

	// Set the bits at both indices.
	bf.filter[i1/8] |= 0x01 << (i1 % 8)
	bf.filter[i2/8] |= 0x01 << (i2 % 8)
}
// Estimate returns an estimate of the number of distinct IPs inserted into
// the filter, using the formula from BEP 33:
//
//	estimate = log(c / m) / (k * log(1 - 1/m))
//
// where c is the count of zero bits, clamped to at most m - 1.
func (bf *BloomFilter) Estimate() float64 {
	// TODO: make it faster?

	// Count the zero bits across the whole filter.
	var nZeroes uint32 = 0
	for _, b := range bf.filter {
		nZeroes += 8 - uint32(bits.OnesCount8(uint8(b)))
	}

	// Clamp the zero count to m - 1 (per BEP 33), which also avoids log(0)
	// for a completely empty filter.
	var c uint32
	if m-1 < nZeroes {
		c = m - 1
	} else {
		c = nZeroes
	}
	return math.Log(float64(c)/float64(m)) / (float64(k) * math.Log(1-1/float64(m)))
}
// Filter returns a copy of the raw filter contents.
func (bf *BloomFilter) Filter() (filterCopy [m / 8]byte) {
	// Array assignment copies the whole array.
	filterCopy = bf.filter
	return
}

View File

@ -1,64 +0,0 @@
package mainline
import (
"bytes"
"testing"
"encoding/hex"
"strings"
"fmt"
)
// TestBEP33Filter checks the filter contents against the reference vector
// published in BEP 33 for the populateForBEP33 data set.
func TestBEP33Filter(t *testing.T) {
	bf := new(BloomFilter)
	populateForBEP33(bf)

	resultingFilter := bf.Filter()
	var expectedFilter [256]byte
	// BUGFIX: the hex.Decode error was silently ignored; a corrupted reference
	// string would have made the test compare against garbage.
	if _, err := hex.Decode(expectedFilter[:], []byte(strings.Replace(
		"F6C3F5EA A07FFD91 BDE89F77 7F26FB2B FF37BDB8 FB2BBAA2 FD3DDDE7 BACFFF75 EE7CCBAE"+
			"FE5EEDB1 FBFAFF67 F6ABFF5E 43DDBCA3 FD9B9FFD F4FFD3E9 DFF12D1B DF59DB53 DBE9FA5B"+
			"7FF3B8FD FCDE1AFB 8BEDD7BE 2F3EE71E BBBFE93B CDEEFE14 8246C2BC 5DBFF7E7 EFDCF24F"+
			"D8DC7ADF FD8FFFDF DDFFF7A4 BBEEDF5C B95CE81F C7FCFF1F F4FFFFDF E5F7FDCB B7FD79B3"+
			"FA1FC77B FE07FFF9 05B7B7FF C7FEFEFF E0B8370B B0CD3F5B 7F2BD93F EB4386CF DD6F7FD5"+
			"BFAF2E9E BFFFFEEC D67ADBF7 C67F17EF D5D75EBA 6FFEBA7F FF47A91E B1BFBB53 E8ABFB57"+
			"62ABE8FF 237279BF EFBFEEF5 FFC5FEBF DFE5ADFF ADFEE1FB 737FFFFB FD9F6AEF FEEE76B6"+
			"FD8F72EF",
		" ", "", -1))); err != nil {
		t.Fatalf("could not decode the expected filter: %s", err.Error())
	}
	if !bytes.Equal(resultingFilter[:], expectedFilter[:]) {
		t.Fail()
	}
}
// TestBEP33Estimation checks Estimate() against the reference estimate
// (~1224.93) for the BEP 33 example data set inserted by populateForBEP33.
func TestBEP33Estimation(t *testing.T) {
	bf := new(BloomFilter)
	populateForBEP33(bf)

	// Because Go lacks a truncate function for floats...
	// (format to 5 decimals, then keep the first 9 characters: "1224.9308")
	if fmt.Sprintf("%.5f", bf.Estimate())[:9] != "1224.9308" {
		t.Errorf("Expected 1224.9308 got %f instead!", bf.Estimate())
	}
}
// populateForBEP33 inserts the reference IP ranges from the BEP 33 example
// into the given bloom filter: 256 IPv4 addresses and 1000 IPv6 addresses.
func populateForBEP33(bf *BloomFilter) {
	// 192.0.2.0 to 192.0.2.255 (both ranges inclusive)
	addr := []byte{192, 0, 2, 0}
	for i := 0; i <= 255; i++ {
		addr[3] = uint8(i)
		bf.InsertIP(addr)
	}

	// 2001:DB8:: to 2001:DB8::3E7 (both ranges inclusive)
	addr = []byte{32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	// Low 16 bits 0x000 through 0x2FF...
	for i := 0; i <= 2; i++ {
		addr[14] = uint8(i)
		for e := 0; e <= 255; e++ {
			addr[15] = uint8(e)
			bf.InsertIP(addr)
		}
	}
	// ...and 0x300 through 0x3E7.
	addr[14] = 3
	for e := 0; e <= 231; e++ {
		addr[15] = uint8(e)
		bf.InsertIP(addr)
	}
}

View File

@ -1,284 +0,0 @@
package main
import (
"fmt"
"net"
"os"
"os/signal"
"regexp"
"time"
"github.com/jessevdk/go-flags"
"github.com/pkg/profile"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"persistence"
"magneticod/bittorrent"
"magneticod/dht"
"github.com/anacrolix/torrent/metainfo"
)
// cmdFlags is the raw command-line flag set as parsed by go-flags. See
// opFlags (and parseFlags) for the validated, operational form.
type cmdFlags struct {
	DatabaseURL string `long:"database" description:"URL of the database."`

	TrawlerMlAddrs    []string `long:"trawler-ml-addr" description:"Address(es) to be used by trawling DHT (Mainline) nodes." default:"0.0.0.0:0"`
	TrawlerMlInterval uint     `long:"trawler-ml-interval" description:"Trawling interval in integer deciseconds (one tenth of a second)."`

	// TODO: is this even supported by anacrolix/torrent?
	FetcherAddr    string `long:"fetcher-addr" description:"Address(es) to be used by ephemeral peers fetching torrent metadata." default:"0.0.0.0:0"`
	FetcherTimeout uint   `long:"fetcher-timeout" description:"Number of integer seconds before a fetcher timeouts."`

	StatistMlAddrs   []string `long:"statist-ml-addr" description:"Address(es) to be used by ephemeral nodes fetching latest statistics about individual torrents." default:"0.0.0.0:0"`
	StatistMlTimeout uint     `long:"statist-ml-timeout" description:"Number of integer seconds before a statist timeouts."`

	// TODO: is this even supported by anacrolix/torrent?
	LeechClAddr string `long:"leech-cl-addr" description:"Address to be used by the peer fetching README files." default:"0.0.0.0:0"`
	// BUGFIX: the struct-tag key was misspelled `descrition`, so go-flags
	// never displayed this flag's help text.
	LeechMlAddr  string `long:"leech-ml-addr" description:"Address to be used by the mainline DHT node for fetching README files." default:"0.0.0.0:0"`
	LeechTimeout uint   `long:"leech-timeout" description:"Number of integer seconds to pass before a leech timeouts." default:"300"`

	ReadmeMaxSize uint   `long:"readme-max-size" description:"Maximum size -which must be greater than zero- of a description file in bytes." default:"20480"`
	ReadmeRegex   string `long:"readme-regex" description:"Regular expression(s) which will be tested against the name of the README files, in the supplied order."`

	Verbose []bool `short:"v" long:"verbose" description:"Increases verbosity."`
	Profile string `long:"profile" description:"Enable profiling." default:""`

	// ==== OLD Flags ====
	// DatabaseFile is akin to Database flag, except that it was used when SQLite was the only
	// persistence backend ever conceived, so it's the path* to the database file, which was -by
	// default- located in wherever appdata module on Python said:
	//   On GNU/Linux      : `/home/<USER>/.local/share/magneticod/database.sqlite3`
	//   On Windows        : TODO?
	//   On MacOS (OS X)   : TODO?
	//   On BSDs?          : TODO?
	//   On anywhere else  : TODO?
	// TODO: Is the path* absolute or can be relative as well?
}
// Profiling modes.
// BUGFIX: in the original const block only PROFILE_BLOCK had an explicit
// value (1); the following identifiers implicitly repeated that expression,
// so every constant was equal to 1. `iota + 1` gives each constant a distinct
// value while keeping PROFILE_BLOCK == 1.
const (
	PROFILE_BLOCK = iota + 1
	PROFILE_CPU
	PROFILE_MEM
	PROFILE_MUTEX
	PROFILE_A
)
// opFlags is the validated, "operational" form of cmdFlags, with intervals
// and timeouts converted to time.Duration and the README regex compiled.
// Produced by parseFlags().
type opFlags struct {
	DatabaseURL string

	TrawlerMlAddrs    []string
	TrawlerMlInterval time.Duration

	FetcherAddr    string
	FetcherTimeout time.Duration

	StatistMlAddrs   []string
	StatistMlTimeout time.Duration

	LeechClAddr  string
	LeechMlAddr  string
	LeechTimeout time.Duration

	ReadmeMaxSize uint
	ReadmeRegex   *regexp.Regexp

	// Verbosity is the count of -v flags supplied.
	Verbosity int

	Profile string
}
// main wires together the trawling manager, metadata sink, and completing
// coordinator, then runs the event loop until interrupted (Ctrl-C).
func main() {
	loggerLevel := zap.NewAtomicLevel()
	// Logging levels: ("debug", "info", "warn", "error", "dpanic", "panic", and "fatal").
	logger := zap.New(zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stderr),
		loggerLevel,
	))
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	// NOTE(review): CPU profiling is started unconditionally here even though
	// a `--profile` flag exists; consider honouring opFlags.Profile instead.
	defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()

	zap.L().Info("magneticod v0.7.0 has been started.")
	zap.L().Info("Copyright (C) 2017 Mert Bora ALPER <bora@boramalper.org>.")
	zap.L().Info("Dedicated to Cemile Binay, in whose hands I thrived.")

	// opFlags is the "operational flags"
	opFlags := parseFlags()

	switch opFlags.Verbosity {
	case 0:
		loggerLevel.SetLevel(zap.WarnLevel)
	case 1:
		loggerLevel.SetLevel(zap.InfoLevel)
	// Default: i.e. in case of 2 or more.
	default:
		loggerLevel.SetLevel(zap.DebugLevel)
	}

	zap.ReplaceGlobals(logger)

	// Handle Ctrl-C gracefully.
	// BUGFIX: the channel must be buffered — signal.Notify does not block when
	// sending, so a signal arriving while main is not receiving would be lost
	// on an unbuffered channel.
	interruptChan := make(chan os.Signal, 1)
	signal.Notify(interruptChan, os.Interrupt)

	database, err := persistence.MakeDatabase(opFlags.DatabaseURL)
	if err != nil {
		logger.Sugar().Fatalf("Could not open the database at `%s`: %s", opFlags.DatabaseURL, err.Error())
	}

	trawlingManager := dht.NewTrawlingManager(opFlags.TrawlerMlAddrs)
	metadataSink := bittorrent.NewMetadataSink(opFlags.FetcherAddr)
	completingCoordinator := NewCompletingCoordinator(database, CompletingCoordinatorOpFlags{
		LeechClAddr:   opFlags.LeechClAddr,
		LeechMlAddr:   opFlags.LeechMlAddr,
		LeechTimeout:  opFlags.LeechTimeout,
		ReadmeMaxSize: opFlags.ReadmeMaxSize,
		ReadmeRegex:   opFlags.ReadmeRegex,
	})

	/*
		refreshingCoordinator := NewRefreshingCoordinator(database, RefreshingCoordinatorOpFlags{

		})
	*/

	// The main event loop: route trawling results into the metadata sink,
	// persist fetched metadata, and hand README candidates over to the
	// completing coordinator, until an interrupt arrives.
mainLoop:
	for {
		select {
		case result := <-trawlingManager.Output():
			logger.Debug("result: ", zap.String("hash", result.InfoHash.String()))
			exists, err := database.DoesTorrentExist(result.InfoHash[:])
			if err != nil {
				zap.L().Fatal("Could not check whether torrent exists!", zap.Error(err))
			} else if !exists {
				metadataSink.Sink(result)
			}

		case metadata := <-metadataSink.Drain():
			if err := database.AddNewTorrent(metadata.InfoHash, metadata.Name, metadata.Files); err != nil {
				logger.Sugar().Fatalf("Could not add new torrent %x to the database: %s",
					metadata.InfoHash, err.Error())
			}
			logger.Sugar().Infof("D I S C O V E R E D: `%s` %x", metadata.Name, metadata.InfoHash)

			if readmePath := findReadme(opFlags.ReadmeRegex, metadata.Files); readmePath != nil {
				completingCoordinator.Request(metadata.InfoHash, *readmePath, metadata.Peers)
			}

		case result := <-completingCoordinator.Output():
			database.AddReadme(result.InfoHash, result.Path, result.Data)

		case <-interruptChan:
			trawlingManager.Terminate()
			// BUGFIX: the original used a bare `break`, which only exits the
			// select — not the for loop — so Ctrl-C never actually stopped
			// the event loop.
			break mainLoop
		}
	}
}
// parseFlags parses and validates the command-line flags into their
// "operational" form (durations, compiled regex, etc.). Any invalid argument
// is reported at Fatal level, which terminates the process.
func parseFlags() (opF opFlags) {
	var cmdF cmdFlags

	_, err := flags.Parse(&cmdF)
	if err != nil {
		zap.S().Fatalf("Could not parse command-line flags! %s", err.Error())
	}

	// TODO: Check Database URL here
	opF.DatabaseURL = cmdF.DatabaseURL

	if err = checkAddrs(cmdF.TrawlerMlAddrs); err != nil {
		zap.S().Fatalf("Of argument (list) `trawler-ml-addr` %s", err.Error())
	} else {
		opF.TrawlerMlAddrs = cmdF.TrawlerMlAddrs
	}

	if cmdF.TrawlerMlInterval <= 0 {
		zap.L().Fatal("Argument `trawler-ml-interval` must be greater than zero, if supplied.")
	} else {
		// 1 decisecond = 100 milliseconds = 0.1 seconds
		opF.TrawlerMlInterval = time.Duration(cmdF.TrawlerMlInterval) * 100 * time.Millisecond
	}

	if err = checkAddrs([]string{cmdF.FetcherAddr}); err != nil {
		zap.S().Fatalf("Of argument `fetcher-addr` %s", err.Error())
	} else {
		opF.FetcherAddr = cmdF.FetcherAddr
	}

	if cmdF.FetcherTimeout <= 0 {
		zap.L().Fatal("Argument `fetcher-timeout` must be greater than zero, if supplied.")
	} else {
		opF.FetcherTimeout = time.Duration(cmdF.FetcherTimeout) * time.Second
	}

	if err = checkAddrs(cmdF.StatistMlAddrs); err != nil {
		zap.S().Fatalf("Of argument (list) `statist-ml-addr` %s", err.Error())
	} else {
		opF.StatistMlAddrs = cmdF.StatistMlAddrs
	}

	if cmdF.StatistMlTimeout <= 0 {
		zap.L().Fatal("Argument `statist-ml-timeout` must be greater than zero, if supplied.")
	} else {
		opF.StatistMlTimeout = time.Duration(cmdF.StatistMlTimeout) * time.Second
	}

	if err = checkAddrs([]string{cmdF.LeechClAddr}); err != nil {
		zap.S().Fatalf("Of argument `leech-cl-addr` %s", err.Error())
	} else {
		opF.LeechClAddr = cmdF.LeechClAddr
	}

	if err = checkAddrs([]string{cmdF.LeechMlAddr}); err != nil {
		zap.S().Fatalf("Of argument `leech-ml-addr` %s", err.Error())
	} else {
		opF.LeechMlAddr = cmdF.LeechMlAddr
	}

	if cmdF.LeechTimeout <= 0 {
		zap.L().Fatal("Argument `leech-timeout` must be greater than zero, if supplied.")
	} else {
		opF.LeechTimeout = time.Duration(cmdF.LeechTimeout) * time.Second
	}

	if cmdF.ReadmeMaxSize <= 0 {
		zap.L().Fatal("Argument `readme-max-size` must be greater than zero, if supplied.")
	} else {
		opF.ReadmeMaxSize = cmdF.ReadmeMaxSize
	}

	opF.ReadmeRegex, err = regexp.Compile(cmdF.ReadmeRegex)
	if err != nil {
		zap.S().Fatalf("Argument `readme-regex` is not a valid regex: %s", err.Error())
	}

	// Verbosity is simply the count of -v flags supplied.
	opF.Verbosity = len(cmdF.Verbose)

	opF.Profile = cmdF.Profile

	return
}
// checkAddrs returns an error if any of the given strings is not a valid
// host:port address, identifying the offending entry by its 1-based position.
func checkAddrs(addrs []string) error {
	// We are using ResolveUDPAddr but it works equally well for checking TCPAddr(esses) as
	// well.
	for i, addr := range addrs {
		if _, err := net.ResolveUDPAddr("udp", addr); err != nil {
			return fmt.Errorf("with %d(th) address `%s`: %s", i+1, addr, err.Error())
		}
	}
	return nil
}
// findReadme looks for a possible Readme file whose path is matched by the pathRegex.
// If there are multiple matches, the first one is returned.
// If there are no matches, nil returned.
func findReadme(pathRegex *regexp.Regexp, files []persistence.File) *string {
	for i := range files {
		if !pathRegex.MatchString(files[i].Path) {
			continue
		}
		match := files[i].Path
		return &match
	}
	return nil
}

View File

@ -1,385 +0,0 @@
package persistence
import (
"bytes"
"database/sql"
"fmt"
"net/url"
"path"
"os"
_ "github.com/go-sql-driver/mysql"
_ "github.com/mattn/go-sqlite3"
"go.uber.org/zap"
"magneticod/bittorrent"
"regexp"
)
// engineType identifies which database backend a Database instance uses.
type engineType uint8

const (
	// Idiom fix: in the original, only SQLITE was typed engineType — the
	// explicit `= 1` / `= 2` made POSTGRESQL and MYSQL untyped int constants.
	// iota preserves the exact same values (0, 1, 2) while giving every
	// constant the engineType type.
	SQLITE engineType = iota
	POSTGRESQL
	MYSQL
)
// Database is a persistence backend (SQLite, PostgreSQL, or MySQL) that
// buffers newly discovered torrents in memory and flushes them in batches.
type Database struct {
	database *sql.DB
	engine   engineType
	// newTorrents is the in-memory queue of torrents awaiting commitNewTorrents().
	newTorrents [] bittorrent.Metadata
}
// NewDatabase creates a new Database.
//
// url either starts with "sqlite:" or "postgresql:"
// (a "mysql:" scheme is also handled below). It opens the connection, pings
// it, and sets up the schema before returning.
func NewDatabase(rawurl string) (*Database, error) {
	db := Database{}

	dbURL, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}

	switch dbURL.Scheme {
	case "sqlite":
		db.engine = SQLITE
		// Make sure the directory of the database file exists before opening.
		dbDir, _ := path.Split(dbURL.Path)
		if err := os.MkdirAll(dbDir, 0755); err != nil {
			return nil, fmt.Errorf("for directory `%s`: %s", dbDir, err.Error())
		}
		db.database, err = sql.Open("sqlite3", dbURL.Path)

	case "postgresql":
		db.engine = POSTGRESQL
		// NOTE(review): no PostgreSQL driver is imported in this file, and
		// "postgresql" does not appear to be a registered driver name (lib/pq
		// registers itself as "postgres") — this branch will likely always
		// fail at sql.Open; confirm.
		db.database, err = sql.Open("postgresql", rawurl)

	case "mysql":
		db.engine = MYSQL
		db.database, err = sql.Open("mysql", rawurl)

	default:
		return nil, fmt.Errorf("unknown URI scheme (or malformed URI)!")
	}

	// Check for errors from sql.Open()
	if err != nil {
		return nil, fmt.Errorf("error in sql.Open(): %s", err.Error())
	}

	// sql.Open does not actually connect; Ping verifies the database is reachable.
	if err = db.database.Ping(); err != nil {
		return nil, fmt.Errorf("error in DB.Ping(): %s", err.Error())
	}

	if err := db.setupDatabase(); err != nil {
		return nil, fmt.Errorf("error in setupDatabase(): %s", err.Error())
	}

	return &db, nil
}
// DoesTorrentExist reports whether the torrent with the given info hash is
// already known, checking the in-memory (not-yet-flushed) queue first and
// then the database. Logs at Fatal level — exiting the process — if the
// database query itself fails.
func (db *Database) DoesTorrentExist(infoHash []byte) bool {
	// Check the pending queue first: a torrent may have been added but not
	// committed to the database yet.
	for _, torrent := range db.newTorrents {
		if bytes.Equal(infoHash, torrent.InfoHash) {
			return true // gofmt fix: dropped the stray trailing semicolon
		}
	}

	rows, err := db.database.Query("SELECT info_hash FROM torrents WHERE info_hash = ?;", infoHash)
	if err != nil {
		zap.L().Sugar().Fatalf("Could not query whether a torrent exists in the database! %s", err.Error())
	}
	defer rows.Close()

	// If rows.Next() returns true, meaning that the torrent is in the database, return true; else
	// return false.
	return rows.Next()
}
// FindAnIncompleteTorrent looks for a torrent that does not yet have its
// README downloaded, dispatching to the engine-specific implementation.
// NOTE(review): only SQLite is implemented; any other engine is a Fatal
// error (which exits the process).
func (db *Database) FindAnIncompleteTorrent(pathRegex *regexp.Regexp, maxSize uint) error {
	switch db.engine {
	case SQLITE:
		return db.findAnIncompleteTorrent_SQLite(pathRegex, maxSize)

	default:
		zap.L().Fatal("Unknown database engine!", zap.Uint8("engine", uint8(db.engine)))
		return nil
	}
}
// findAnIncompleteTorrent_SQLite is the SQLite implementation of
// FindAnIncompleteTorrent.
//
// NOTE(review): the result set is currently discarded and maxSize is unused;
// the function only reports query errors. Returning the selected torrent is
// still a TODO.
func (db *Database) findAnIncompleteTorrent_SQLite(pathRegex *regexp.Regexp, maxSize uint) error {
	// TODO: Prefer torrents with most seeders & leechs (i.e. most popular)
	// BUGFIX: the original query put the WHERE clause before the INNER JOIN
	// (an SQL syntax error), never bound the `?` placeholder, and leaked the
	// returned *sql.Rows.
	rows, err := db.database.Query(`
		SELECT torrents.info_hash, files.path
		FROM files
		INNER JOIN torrents ON files.torrent_id = torrents.id
		WHERE files.path REGEXP ?
		LIMIT 1;
	`, pathRegex.String())
	if err != nil {
		return err
	}
	return rows.Close()
}
// AddNewTorrent adds a new torrent to the *queue* to be flushed to the persistent database.
// The queue is committed automatically once it holds 10 or more torrents.
func (db *Database) AddNewTorrent(torrent bittorrent.Metadata) error {
	// Although we check whether the torrent exists in the database before asking MetadataSink to
	// fetch its metadata, the torrent can also exists in the Sink before that. Now, if a torrent in
	// the sink is still being fetched, that's still not a problem as we just add the new peer for
	// the torrent and exit, but if the torrent is complete (i.e. its metadata) and if its waiting
	// in the channel to be received, a race condition arises when we query the database and seeing
	// that it doesn't exists there, add it to the sink.
	// Hence check for the last time whether the torrent exists in the database, and only if not,
	// add it.
	if db.DoesTorrentExist(torrent.InfoHash) {
		return nil // gofmt fix: dropped the stray trailing semicolon
	}

	db.newTorrents = append(db.newTorrents, torrent)

	if len(db.newTorrents) >= 10 {
		zap.L().Sugar().Debugf("newTorrents queue is full, attempting to commit %d torrents...",
			len(db.newTorrents))
		if err := db.commitNewTorrents(); err != nil {
			return err
		}
	}

	return nil
}
// AddReadme stores the downloaded README content for the given torrent/file.
// Not implemented yet: currently a no-op that always returns nil.
func (db *Database) AddReadme(infoHash []byte, path string, data []byte) error {
	// TODO
	return nil
}
// commitNewTorrents flushes the in-memory newTorrents queue to the database
// inside a single transaction, clearing the queue on success. On any INSERT
// failure the transaction is rolled back and the error is returned.
func (db *Database) commitNewTorrents() error {
	tx, err := db.database.Begin()
	if err != nil {
		return fmt.Errorf("sql.DB.Begin()! %s", err.Error())
	}

	var nTorrents, nFiles uint
	nTorrents = uint(len(db.newTorrents))
	for i, torrent := range db.newTorrents {
		zap.L().Sugar().Debugf("Flushing torrent %d of %d: `%s` (%x)...",
			i+1, len(db.newTorrents), torrent.Name, torrent.InfoHash)
		res, err := tx.Exec("INSERT INTO torrents (info_hash, name, total_size, discovered_on) VALUES (?, ?, ?, ?);",
			torrent.InfoHash, torrent.Name, torrent.TotalSize, torrent.DiscoveredOn)
		if err != nil {
			ourError := fmt.Errorf("error while INSERTing INTO torrent: %s", err.Error())
			if err := tx.Rollback(); err != nil {
				return fmt.Errorf("%s\tmeanwhile, could not rollback the current transaction either! %s", ourError.Error(), err.Error())
			}
			return ourError
		}
		// The files rows reference the torrent row via its auto-generated id.
		var lastInsertId int64
		if lastInsertId, err = res.LastInsertId(); err != nil {
			return fmt.Errorf("sql.Result.LastInsertId()! %s", err.Error())
		}
		for _, file := range torrent.Files {
			zap.L().Sugar().Debugf("Flushing file `%s` (of torrent %x)", path.Join(file.Path...), torrent.InfoHash)
			_, err := tx.Exec("INSERT INTO files (torrent_id, size, path) VALUES(?, ?, ?);",
				lastInsertId, file.Length, path.Join(file.Path...))
			if err != nil {
				ourError := fmt.Errorf("error while INSERTing INTO files: %s", err.Error())
				if err := tx.Rollback(); err != nil {
					return fmt.Errorf("%s\tmeanwhile, could not rollback the current transaction either! %s", ourError.Error(), err.Error())
				}
				return ourError
			}
		}
		nFiles += uint(len(torrent.Files))
	}

	if err = tx.Commit(); err != nil {
		return fmt.Errorf("sql.Tx.Commit()! %s", err.Error())
	}

	// Clear the queue
	db.newTorrents = nil

	zap.L().Sugar().Infof("%d torrents (%d files) are flushed to the database successfully.",
		nTorrents, nFiles)
	return nil
}
// Close closes the underlying database connection. Any error from closing is
// deliberately ignored.
func (db *Database) Close() {
	// Be careful to not to get into an infinite loop. =)
	db.database.Close()
}
// setupDatabase dispatches to the engine-specific schema setup.
// NOTE(review): PostgreSQL is not implemented and logs at Fatal level (which
// exits the process), as does an unknown engine value.
func (db *Database) setupDatabase() error {
	switch db.engine {
	case SQLITE:
		return setupSqliteDatabase(db.database)

	case POSTGRESQL:
		zap.L().Fatal("setupDatabase() is not implemented for PostgreSQL yet!")

	case MYSQL:
		return setupMySQLDatabase(db.database)

	default:
		zap.L().Sugar().Fatalf("Unknown database engine value %d! (programmer error)", db.engine)
	}

	return nil
}
// setupSqliteDatabase prepares the SQLite connection (WAL journaling, on-disk
// temporary storage, foreign keys) and then creates/migrates the schema,
// versioned through the `user_version` pragma, inside a single transaction.
func setupSqliteDatabase(database *sql.DB) error {
	// Enable Write-Ahead Logging for SQLite as "WAL provides more concurrency as readers do not
	// block writers and a writer does not block readers. Reading and writing can proceed
	// concurrently."
	// Caveats:
	//   * Might be unsupported by OSes other than Windows and UNIXes.
	//   * Does not work over a network filesystem.
	//   * Transactions that involve changes against multiple ATTACHed databases are not atomic
	//     across all databases as a set.
	// See: https://www.sqlite.org/wal.html
	//
	// Force SQLite to use disk, instead of memory, for all temporary files to reduce the memory
	// footprint.
	//
	// Enable foreign key constraints in SQLite which are crucial to prevent programmer errors on
	// our side.
	_, err := database.Exec(`
		PRAGMA journal_mode=WAL;
		PRAGMA temp_store=1;
		PRAGMA foreign_keys=ON;
	`)
	if err != nil {
		return err
	}

	tx, err := database.Begin()
	if err != nil {
		return err
	}

	// Essential, and valid for all user_version`s:
	// TODO: "torrent_id" column of the "files" table can be NULL, how can we fix this in a new schema?
	_, err = tx.Exec(`
		CREATE TABLE IF NOT EXISTS torrents (
			id INTEGER PRIMARY KEY,
			info_hash BLOB NOT NULL UNIQUE,
			name TEXT NOT NULL,
			total_size INTEGER NOT NULL CHECK(total_size > 0),
			discovered_on INTEGER NOT NULL CHECK(discovered_on > 0)
		);
		CREATE TABLE IF NOT EXISTS files (
			id INTEGER PRIMARY KEY,
			torrent_id INTEGER REFERENCES torrents ON DELETE CASCADE ON UPDATE RESTRICT,
			size INTEGER NOT NULL,
			path TEXT NOT NULL
		);
	`)
	if err != nil {
		return err
	}

	// Get the user_version:
	// NOTE(review): the *sql.Rows returned here is never Close()d before the
	// subsequent tx.Exec calls, and the Scan error is ignored — confirm
	// whether this can block or mask failures.
	res, err := tx.Query(`PRAGMA user_version;`)
	if err != nil {
		return err
	}
	var userVersion int;
	res.Next()
	res.Scan(&userVersion)

	switch userVersion {
	// Upgrade from user_version 0 to 1
	// The Change:
	//   * `info_hash_index` is recreated as UNIQUE.
	case 0:
		// NOTE(review): on a freshly created database (user_version 0, schema
		// created just above) there is no `info_hash_index` to drop, so this
		// DROP INDEX will fail — confirm against the upgrade path this
		// migration was written for.
		zap.S().Warnf("Updating database schema from 0 to 1... (this might take a while)")
		_, err = tx.Exec(`
			DROP INDEX info_hash_index;
			CREATE UNIQUE INDEX info_hash_index ON torrents (info_hash);
			PRAGMA user_version = 1;
		`)
		if err != nil {
			return err
		}
		fallthrough

	// Upgrade from user_version 1 to 2
	// The Change:
	//   * Added `is_readme` and `content` columns to the `files` table, and the constraints & the
	//     the indices they entail.
	//   * Added unique index `readme_index` on `files` table.
	case 1:
		zap.S().Warnf("Updating database schema from 1 to 2... (this might take a while)")
		// We introduce two new columns here: content BLOB, and is_readme INTEGER which we treat as
		// a bool (hence the CHECK).
		// The reason for the change is that as we introduce the new "readme" feature which
		// downloads a readme file as a torrent descriptor, we needed to store it somewhere in the
		// database with the following conditions:
		//
		//   1. There can be one and only one readme (content) for a given torrent; hence the
		//      UNIQUE INDEX on (torrent_id, is_description) (remember that SQLite treats each NULL
		//      value as distinct [UNIQUE], see https://sqlite.org/nulls.html).
		//   2. We would like to keep the readme (content) associated with the file it came from;
		//      hence we modify the files table instead of the torrents table.
		//
		// Regarding the implementation details, following constraints arise:
		//
		//   1. The column is_readme is either NULL or 1, and if it is 1, then content column cannot
		//      be NULL (but might be an empty BLOB). Vice versa, if content column of a row is,
		//      NULL then is_readme must be NULL.
		//
		//      This is to prevent unused content fields filling up the database, and to catch
		//      programmers' errors.
		_, err = tx.Exec(`
			ALTER TABLE files ADD COLUMN is_readme INTEGER CHECK (is_readme IS NULL OR is_readme=1);
			ALTER TABLE files ADD COLUMN content BLOB CHECK((content IS NULL AND is_readme IS NULL) OR (content IS NOT NULL AND is_readme=1));
			CREATE UNIQUE INDEX readme_index ON files (torrent_id, is_readme);
			PRAGMA user_version = 2;
		`)
		if err != nil {
			return err
		}
	}

	if err = tx.Commit(); err != nil {
		return err
	}

	return nil
}
// setupMySQLDatabase initialises the MySQL schema: it enables strict mode and
// creates the torrents and files tables (plus the info_hash index) if they do
// not already exist.
func setupMySQLDatabase(database *sql.DB) error {
	// Set strict mode to prevent silent truncation
	_, err := database.Exec(`SET SESSION SQL_MODE = 'STRICT_ALL_TABLES';`)
	if err != nil {
		return err
	}

	// BUGFIX: the original statement contained a stray double-quote right
	// after `torrents (`, making the CREATE TABLE an SQL syntax error. The
	// statements are also executed one at a time now, since the MySQL driver
	// does not run multiple statements per Exec() by default.
	_, err = database.Exec(`
		CREATE TABLE IF NOT EXISTS torrents (
			id INTEGER PRIMARY KEY AUTO_INCREMENT,
			info_hash BINARY(20) NOT NULL UNIQUE,
			name VARCHAR(1024) NOT NULL,
			total_size BIGINT UNSIGNED NOT NULL,
			discovered_on INTEGER UNSIGNED NOT NULL
		);`,
	)
	if err != nil {
		return err
	}

	// NOTE(review): ADD INDEX is not idempotent — this will fail on a second
	// run against the same database; confirm the intended upgrade path.
	_, err = database.Exec(`ALTER TABLE torrents ADD INDEX info_hash_index (info_hash);`)
	if err != nil {
		return err
	}

	_, err = database.Exec(`
		CREATE TABLE IF NOT EXISTS files (
			id INTEGER PRIMARY KEY AUTO_INCREMENT,
			torrent_id INTEGER REFERENCES torrents (id) ON DELETE CASCADE ON UPDATE RESTRICT,
			size BIGINT NOT NULL,
			path TEXT NOT NULL
		);`,
	)
	if err != nil {
		return err
	}

	return nil
}

View File

@ -1,20 +0,0 @@
package persistence
import (
"path"
"testing"
)
// TestPathJoin verifies the assumption made in flushNewTorrents() that the
// path separator produced by path.Join is `/` (slash), never `\` (backslash,
// as used by Windows).
//
// As a rule of thumb in secure programming, check ONLY for the valid case
// and reject everything else (i.e. check for slashes rather than checking
// for backslashes).
func TestPathJoin(t *testing.T) {
	const want = "a/b/c"
	if got := path.Join("a", "b", "c"); got != want {
		t.Errorf("path.Join uses a different character than `/` (slash) character as path separator! (path: `%s`)",
			got)
	}
}

View File

@ -1,26 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.4.0"

View File

@ -1,262 +0,0 @@
package main
import (
"fmt"
"io/ioutil"
)
// bindata_read loads the file at path from disk and returns its contents.
// On failure, the returned error mentions both the asset name and the path
// it was read from.
func bindata_read(path, name string) ([]byte, error) {
	buf, err := ioutil.ReadFile(path)
	if err == nil {
		return buf, nil
	}
	return buf, fmt.Errorf("Error reading asset %s at %s: %v", name, path, err)
}
// templates_torrent_html reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func templates_torrent_html() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/templates/torrent.html",
		"templates/torrent.html",
	)
}

// templates_feed_xml reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func templates_feed_xml() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/templates/feed.xml",
		"templates/feed.xml",
	)
}

// templates_homepage_html reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func templates_homepage_html() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/templates/homepage.html",
		"templates/homepage.html",
	)
}

// templates_statistics_html reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func templates_statistics_html() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/templates/statistics.html",
		"templates/statistics.html",
	)
}

// templates_torrents_html reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func templates_torrents_html() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/templates/torrents.html",
		"templates/torrents.html",
	)
}

// static_scripts_plotly_v1_26_1_min_js reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_scripts_plotly_v1_26_1_min_js() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/scripts/plotly-v1.26.1.min.js",
		"static/scripts/plotly-v1.26.1.min.js",
	)
}

// static_scripts_statistics_js reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_scripts_statistics_js() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/scripts/statistics.js",
		"static/scripts/statistics.js",
	)
}

// static_scripts_torrent_js reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_scripts_torrent_js() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/scripts/torrent.js",
		"static/scripts/torrent.js",
	)
}

// static_styles_reset_css reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_styles_reset_css() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/styles/reset.css",
		"static/styles/reset.css",
	)
}

// static_styles_statistics_css reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_styles_statistics_css() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/styles/statistics.css",
		"static/styles/statistics.css",
	)
}

// static_styles_torrent_css reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_styles_torrent_css() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/styles/torrent.css",
		"static/styles/torrent.css",
	)
}

// static_styles_torrents_css reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_styles_torrents_css() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/styles/torrents.css",
		"static/styles/torrents.css",
	)
}

// static_styles_homepage_css reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_styles_homepage_css() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/styles/homepage.css",
		"static/styles/homepage.css",
	)
}

// static_styles_essential_css reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_styles_essential_css() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/styles/essential.css",
		"static/styles/essential.css",
	)
}

// static_assets_magnet_gif reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_assets_magnet_gif() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/assets/magnet.gif",
		"static/assets/magnet.gif",
	)
}

// static_assets_feed_png reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_assets_feed_png() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/assets/feed.png",
		"static/assets/feed.png",
	)
}

// static_fonts_notomono_license_ofl_txt reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notomono_license_ofl_txt() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoMono/LICENSE_OFL.txt",
		"static/fonts/NotoMono/LICENSE_OFL.txt",
	)
}

// static_fonts_notomono_regular_ttf reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notomono_regular_ttf() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoMono/Regular.ttf",
		"static/fonts/NotoMono/Regular.ttf",
	)
}

// static_fonts_notosansui_license_ofl_txt reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notosansui_license_ofl_txt() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/LICENSE_OFL.txt",
		"static/fonts/NotoSansUI/LICENSE_OFL.txt",
	)
}

// static_fonts_notosansui_bold_ttf reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notosansui_bold_ttf() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/Bold.ttf",
		"static/fonts/NotoSansUI/Bold.ttf",
	)
}

// static_fonts_notosansui_bolditalic_ttf reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notosansui_bolditalic_ttf() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/BoldItalic.ttf",
		"static/fonts/NotoSansUI/BoldItalic.ttf",
	)
}

// static_fonts_notosansui_italic_ttf reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notosansui_italic_ttf() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/Italic.ttf",
		"static/fonts/NotoSansUI/Italic.ttf",
	)
}

// static_fonts_notosansui_regular_ttf reads file data from disk.
// It returns an error if the read fails (it does NOT panic).
func static_fonts_notosansui_regular_ttf() ([]byte, error) {
	return bindata_read(
		"/home/bora/labs/magnetico/src/magneticow/data/static/fonts/NotoSansUI/Regular.ttf",
		"static/fonts/NotoSansUI/Regular.ttf",
	)
}
// Asset loads and returns the asset registered under the given name.
// An error is returned when the name is unknown or the underlying file
// cannot be loaded.
func Asset(name string) ([]byte, error) {
	loader, found := _bindata[name]
	if !found {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	return loader()
}
// _bindata is a table holding each asset's generator function, keyed by the
// asset's name; Asset consults this table to resolve names.
var _bindata = map[string]func() ([]byte, error){
	"templates/torrent.html":                   templates_torrent_html,
	"templates/feed.xml":                       templates_feed_xml,
	"templates/homepage.html":                  templates_homepage_html,
	"templates/statistics.html":                templates_statistics_html,
	"templates/torrents.html":                  templates_torrents_html,
	"static/scripts/plotly-v1.26.1.min.js":     static_scripts_plotly_v1_26_1_min_js,
	"static/scripts/statistics.js":             static_scripts_statistics_js,
	"static/scripts/torrent.js":                static_scripts_torrent_js,
	"static/styles/reset.css":                  static_styles_reset_css,
	"static/styles/statistics.css":             static_styles_statistics_css,
	"static/styles/torrent.css":                static_styles_torrent_css,
	"static/styles/torrents.css":               static_styles_torrents_css,
	"static/styles/homepage.css":               static_styles_homepage_css,
	"static/styles/essential.css":              static_styles_essential_css,
	"static/assets/magnet.gif":                 static_assets_magnet_gif,
	"static/assets/feed.png":                   static_assets_feed_png,
	"static/fonts/NotoMono/LICENSE_OFL.txt":    static_fonts_notomono_license_ofl_txt,
	"static/fonts/NotoMono/Regular.ttf":        static_fonts_notomono_regular_ttf,
	"static/fonts/NotoSansUI/LICENSE_OFL.txt":  static_fonts_notosansui_license_ofl_txt,
	"static/fonts/NotoSansUI/Bold.ttf":         static_fonts_notosansui_bold_ttf,
	"static/fonts/NotoSansUI/BoldItalic.ttf":   static_fonts_notosansui_bolditalic_ttf,
	"static/fonts/NotoSansUI/Italic.ttf":       static_fonts_notosansui_italic_ttf,
	"static/fonts/NotoSansUI/Regular.ttf":      static_fonts_notosansui_regular_ttf,
}

View File

@ -1,38 +0,0 @@
main {
display: flex;
align-items: center;
align-content: center;
height: calc(100vh - 2*16px - 0.833em - 23px); /* 100vh - body's padding(s) - footer margin - footer height */
width: 100%;
}
@media (max-width: 616px) {
main {
flex-direction: column;
justify-content: center;
align-items: flex-start;
}
}
main div#magneticow {
white-space: nowrap;
margin: 0 0.5em 0.5em 0;
}
main form {
max-width: 600px;
width: 100%;
}
main form input {
width: 100%;
}
main > div {
margin-right: 0.5em;
}
footer {
margin-top: 0.833em;
}

View File

@ -1,16 +0,0 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0">
<channel>
<title>{{ .Title }}</title>
{% for item in items %}
<item>
<title>{{ item.title }}</title>
<pubDate>{{ item.DiscoveredOn }}</pubDate>
<guid>{{ item.info_hash }}</guid>
<enclosure url="magnet:?xt=urn:btih:{{ item.info_hash }}&amp;dn={{ item.title }}" type="application/x-bittorrent" />
<description><![CDATA[Seeders: {{ item.NSeeders }} - Leechers: {{ item.NLeechers }}]]></description>
</item>
{% endfor %}
</channel>
</rss>

View File

@ -1,66 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{% if .search %}"{{.search}}"{% else %}Most recent torrents{% endif %} - magneticow</title>
<link rel="stylesheet" href="static/styles/reset.css">
<link rel="stylesheet" href="static/styles/essential.css">
<link rel="stylesheet" href="static/styles/torrents.css">
<!-- <script src="script.js"></script> -->
</head>
<body>
<header>
<div><a href="/"><b>magnetico<sup>w</sup></b></a>&#8203;<sub>(pre-alpha)</sub></div>
<form action="/torrents" method="get" autocomplete="off" role="search">
<input type="search" name="search" placeholder="Search the BitTorrent DHT" value="{{ .search }}">
</form>
<div>
<a href="{{ .subscription_url }}"><img src="static/assets/feed.png"
alt="feed icon" title="subscribe" /> subscribe</a>
</div>
</header>
<main>
<table>
<thead>
<tr>
<th><!-- Magnet link --></th>
<th>Name</th>
<th>Size</th>
<th>Discovered on</th>
</tr>
</thead>
<tbody>
{% for torrent in torrents %}
<tr>
<td><a href="magnet:?xt=urn:btih:{{ .torrent.info_hash }}&dn={{ .torrent.name }}">
<img src="static/assets/magnet.gif" alt="Magnet link"
title="Download this torrent using magnet" /></a></td>
<td><a href="/torrents/{{ .torrent.info_hash }}/{{ .torrent.name }}">{{ torrent.name }}</a></td>
<td>{{ torrent.size }}</td>
<td>{{ torrent.discovered_on }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</main>
<footer>
<form action="/torrents" method="get">
<button {% if page == 0 %}disabled{% endif %}>Previous</button>
<input type="text" name="search" value="{{ search }}" hidden>
{% if sorted_by %}
<input type="text" name="sort_by" value="{{ sorted_by }}" hidden>
{% endif %}
<input type="number" name="page" value="{{ page - 1 }}" hidden>
</form>
<form action="/torrents" method="get">
<button {% if not next_page_exists %}disabled{% endif %}>Next</button>
<input type="text" name="search" value="{{ search }}" hidden>
{% if sorted_by %}
<input type="text" name="sort_by" value="{{ sorted_by }}" hidden>
{% endif %}
<input type="number" name="page" value="{{ page + 1 }}" hidden>
</form>
</footer>
</body>
</html>

View File

@ -1,108 +0,0 @@
package main
import (
"html/template"
"log"
"net/http"
"strings"
"github.com/gorilla/mux"
"persistence"
)
const N_TORRENTS = 20
var templates map[string]*template.Template
var database persistence.Database
// main wires up the HTTP routes, parses the embedded templates, opens the
// persistence database, and serves the web interface on :8080.
func main() {
	router := mux.NewRouter()
	router.HandleFunc("/", rootHandler)
	router.HandleFunc("/torrents", torrentsHandler)
	router.HandleFunc("/torrents/{infohash}", torrentsInfohashHandler)
	router.HandleFunc("/torrents/{infohash}/{name}", torrentsInfohashNameHandler)
	router.HandleFunc("/statistics", statisticsHandler)
	router.PathPrefix("/static").HandlerFunc(staticHandler)
	router.HandleFunc("/feed", feedHandler)

	// Parse every embedded template once at start-up. template.Must panics on
	// parse errors, which is acceptable here: a broken template is a
	// programmer error and the process cannot usefully continue.
	templates = make(map[string]*template.Template)
	templates["feed"] = template.Must(template.New("feed").Parse(string(mustAsset("templates/feed.xml"))))
	templates["homepage"] = template.Must(template.New("homepage").Parse(string(mustAsset("templates/homepage.html"))))
	templates["statistics"] = template.Must(template.New("statistics").Parse(string(mustAsset("templates/statistics.html"))))
	templates["torrent"] = template.Must(template.New("torrent").Parse(string(mustAsset("templates/torrent.html"))))
	templates["torrents"] = template.Must(template.New("torrents").Parse(string(mustAsset("templates/torrents.html"))))

	// TODO(review): the database URL is hard-coded to a developer machine's
	// home directory; it should come from a flag or configuration file.
	var err error
	database, err = persistence.MakeDatabase("sqlite3:///home/bora/.local/share/magneticod/database.sqlite3")
	if err != nil {
		log.Fatalf("could not open the database: %s", err.Error())
	}

	// BUGFIX: the original discarded ListenAndServe's return value, so bind
	// failures (e.g. the port being in use) went completely unreported.
	// ListenAndServe always returns a non-nil error.
	log.Fatal(http.ListenAndServe(":8080", router))
}
// rootHandler renders the homepage, passing it the total number of torrents
// in the database.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	count, err := database.GetNumberOfTorrents()
	if err != nil {
		// BUGFIX: the original panicked on a database error, tearing down the
		// whole request via net/http's recover; report a 500 instead.
		log.Printf("could not get the number of torrents: %s", err.Error())
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	// BUGFIX: the original silently ignored Execute's error; log it (the
	// response may already be partially written, so a 500 cannot be sent).
	if err = templates["homepage"].Execute(w, count); err != nil {
		log.Printf("could not render the homepage template: %s", err.Error())
	}
}
// torrentsHandler serves the torrent listing / search results page.
// TODO(review): not implemented yet — the handler writes nothing; the
// intended implementation is sketched in the comment below.
func torrentsHandler(w http.ResponseWriter, r *http.Request) {
	/*
		newestTorrents, err := database.NewestTorrents(N_TORRENTS)
		if err != nil {
			panic(err.Error())
		}
		templates["torrents"].Execute(w, nil)
	*/
}
// torrentsInfohashHandler is intended to redirect /torrents/{infohash} to the
// canonical /torrents/{infohash}/{name} URL.
// TODO(review): not implemented yet — the handler writes nothing.
func torrentsInfohashHandler(w http.ResponseWriter, r *http.Request) {
	// redirect to torrents/{infohash}/name
}

// torrentsInfohashNameHandler serves the detail page of a single torrent.
// TODO(review): not implemented yet — the handler writes nothing.
func torrentsInfohashNameHandler(w http.ResponseWriter, r *http.Request) {
}

// statisticsHandler serves the statistics page.
// TODO(review): not implemented yet — the handler writes nothing.
func statisticsHandler(w http.ResponseWriter, r *http.Request) {
}

// feedHandler serves the feed (presumably the RSS feed rendered from
// templates/feed.xml — confirm once implemented).
// TODO(review): not implemented yet — the handler writes nothing.
func feedHandler(w http.ResponseWriter, r *http.Request) {
}
// staticHandler serves the embedded static assets (styles, scripts, fonts,
// images) addressed by the request path, responding 404 when no such asset
// exists.
func staticHandler(w http.ResponseWriter, r *http.Request) {
	data, err := Asset(r.URL.Path[1:])
	if err != nil {
		http.NotFound(w, r)
		return
	}

	// http.DetectContentType cannot recognise CSS or JavaScript (it reports
	// them as text/plain), so special-case the extensions we serve.
	// BUGFIX: the original only special-cased .css, so .js assets were served
	// as text/plain, which nosniff-enforcing browsers refuse to execute.
	var contentType string
	switch {
	case strings.HasSuffix(r.URL.Path, ".css"):
		contentType = "text/css; charset=utf-8"
	case strings.HasSuffix(r.URL.Path, ".js"):
		contentType = "application/javascript; charset=utf-8"
	default: // fallback option
		contentType = http.DetectContentType(data)
	}
	w.Header().Set("Content-Type", contentType)
	w.Write(data)
}
// mustAsset returns the embedded asset with the given name, panicking (via
// log.Panicf) if it cannot be loaded. Intended for start-up-time use only,
// where a missing asset is a programmer error.
func mustAsset(name string) []byte {
	data, err := Asset(name)
	if err == nil {
		return data
	}
	log.Panicf("Could NOT access the requested resource `%s`: %s", name, err.Error())
	return nil // unreachable: log.Panicf never returns
}

View File

@ -1,26 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "go.uber.org/zap"
version = "1.6.0"

View File

@ -1,116 +0,0 @@
package persistence
import (
"fmt"
"regexp"
"net/url"
)
// Database is the abstraction over the storage backends magnetico can use.
type Database interface {
	// Engine identifies which storage backend this Database is built on.
	Engine() databaseEngine

	DoesTorrentExist(infoHash []byte) (bool, error)

	// GiveAnIncompleteTorrent returns (*gives*) an incomplete -i.e. one that
	// doesn't have a readme downloaded yet- torrent from the database.
	// It might return a nil infoHash, an empty path, and a nil err, meaning
	// that no incomplete torrent could be found in the database (congrats!).
	GiveAnIncompleteTorrent(pathRegex *regexp.Regexp, maxSize uint) (infoHash []byte, path string, err error)

	GiveAStaleTorrent() (infoHash []byte, err error)

	AddNewTorrent(infoHash []byte, name string, files []File) error
	AddReadme(infoHash []byte, path string, content string) error
	Close() error

	// GetNumberOfTorrents returns the number of torrents saved in the
	// database. Might be an approximation.
	GetNumberOfTorrents() (uint, error)

	NewestTorrents(n uint) ([]TorrentMetadata, error)
	SearchTorrents(query string, orderBy orderingCriteria, descending bool, mustHaveReadme bool) ([]TorrentMetadata, error)

	// GetTorrent returns the TorrentMetadata for the torrent of the given
	// infoHash. Might return nil, nil if the torrent does not exist in the
	// database.
	GetTorrent(infoHash []byte) (*TorrentMetadata, error)

	GetFiles(infoHash []byte) ([]File, error)
	GetReadme(infoHash []byte) (string, error)
	GetStatistics(from ISO8601, period uint) (*Statistics, error)
}
// orderingCriteria enumerates the sort orders SearchTorrents accepts.
type orderingCriteria uint8

// BUGFIX: in the original const block only BY_NAME carried the
// orderingCriteria type; the rest were plain untyped integer constants,
// silently defeating the type's purpose. Using iota gives every constant the
// orderingCriteria type while preserving the original values 1 through 9.
const (
	BY_NAME orderingCriteria = iota + 1
	BY_SIZE
	BY_DISCOVERED_ON
	BY_N_FILES
	BY_N_SEEDERS
	BY_N_LEECHERS
	BY_UPDATED_ON
	BY_N_SEEDERS_TO_N_LEECHERS_RATIO
	BY_N_SEEDERS_PLUS_N_LEECHERS
)
// statisticsGranularity enumerates the time resolutions statistics can be
// aggregated at.
type statisticsGranularity uint8

// ISO8601 is a timestamp string, presumably in ISO 8601 format as the name
// suggests — confirm against the implementations of GetStatistics.
type ISO8601 string

// BUGFIX: in the original const block only MINUTELY_STATISTICS carried the
// statisticsGranularity type; the rest were plain untyped integer constants.
// Using iota gives every constant the statisticsGranularity type while
// preserving the original values 1 through 6.
const (
	MINUTELY_STATISTICS statisticsGranularity = iota + 1
	HOURLY_STATISTICS
	DAILY_STATISTICS
	WEEKLY_STATISTICS
	MONTHLY_STATISTICS
	YEARLY_STATISTICS
)
// databaseEngine identifies the storage backend a Database is built on.
type databaseEngine uint8

const (
	// SQLITE3_ENGINE is currently the only supported backend (see
	// MakeDatabase).
	SQLITE3_ENGINE databaseEngine = 1
)
// Statistics holds time-series counters aggregated at the given Granularity,
// starting at From and spanning Period buckets.
type Statistics struct {
	Granularity statisticsGranularity
	From        ISO8601
	Period      uint

	// All these slices below have the exact length equal to the Period.
	NTorrentsDiscovered []uint
	NFilesDiscovered    []uint
	NReadmesDownloaded  []uint
	NTorrentsUpdated    []uint
}
// File describes a single file within a torrent.
type File struct {
	Size int64  // size in bytes — presumably; confirm against the metadata source
	Path string // path of the file within the torrent
}
// TorrentMetadata is the summary of a torrent as stored in the database,
// returned by the query methods of Database.
type TorrentMetadata struct {
	infoHash     []byte
	name         string
	size         uint64
	discoveredOn int64
	hasReadme    bool
	nFiles       uint
	// values below 0 indicates that no data is available:
	nSeeders  int
	nLeechers int
	updatedOn int
}
// MakeDatabase opens the database identified by rawURL and returns a Database
// backed by the engine the URL scheme names. Only the sqlite3 scheme is
// supported at the moment; postgresql and mysql are recognised but rejected.
func MakeDatabase(rawURL string) (Database, error) {
	url_, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}

	switch url_.Scheme {
	case "sqlite3":
		return makeSqlite3Database(url_)
	case "postgresql":
		// Error strings are lowercase without trailing punctuation, per Go
		// convention (the originals ended with "!").
		return nil, fmt.Errorf("postgresql is not yet supported")
	case "mysql":
		return nil, fmt.Errorf("mysql is not yet supported")
	}
	// Include the offending scheme so the caller can tell what was rejected.
	return nil, fmt.Errorf("unknown URI scheme (database engine) %q", url_.Scheme)
}

View File

@ -1 +0,0 @@
package persistence

View File

@ -1 +0,0 @@
package persistence