dwelling-upload/internal/handlers/handlers.go

package handlers

import (
	"crypto/sha256"
	"dwelling-upload/internal/configuration"
	"dwelling-upload/pkg/logging"
	"dwelling-upload/pkg/server"
	"dwelling-upload/pkg/utils"
	"dwelling-upload/web"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path"
	"strings"
	"time"
)

type UploadHandlers struct {
	conf          *configuration.Configuration
	logErr        *logging.Logger
	logUpload     *logging.Logger
	logDownload   *logging.Logger
	uploadDirSize *int64
}

func NewUploadHandlers(conf *configuration.Configuration, lErr, lUp, lDown *logging.Logger, uploadDirSize *int64) *UploadHandlers {
	return &UploadHandlers{
		conf:          conf,
		logErr:        lErr,
		logUpload:     lUp,
		logDownload:   lDown,
		uploadDirSize: uploadDirSize,
	}
}

// AssetsFS returns the file system holding the web assets.
func (*UploadHandlers) AssetsFS() http.FileSystem {
	return web.Assets()
}

// Index renders the landing page with the current storage usage and limits.
func (h *UploadHandlers) Index(w http.ResponseWriter, r *http.Request) {
	var storCapacity int64 = h.conf.Uploads.Limits.Storage << 20
	var fMaxSize int64 = h.conf.Uploads.Limits.FileSize << 20

	_, _, capStr := utils.ConvertFileSize(storCapacity)
	_, _, usedStr := utils.ConvertFileSize(*h.uploadDirSize)
	_, _, availStr := utils.ConvertFileSize(storCapacity - *h.uploadDirSize)
	_, _, fMaxSzStr := utils.ConvertFileSize(fMaxSize)

	web.Index(utils.MainSite(r.Host), storCapacity, *h.uploadDirSize, h.conf.Uploads.Limits.KeepForHours, fMaxSzStr, usedStr, capStr, availStr, w)
}

// Upload stores an incoming multipart file under a salted-hash name and
// replies with its download URL.
func (h *UploadHandlers) Upload(w http.ResponseWriter, r *http.Request) {
	var fMaxSizeBytes int64 = h.conf.Uploads.Limits.FileSize << 20
	var storCapacity int64 = h.conf.Uploads.Limits.Storage << 20

	r.Body = http.MaxBytesReader(w, r.Body, fMaxSizeBytes)
	if err := r.ParseMultipartForm(fMaxSizeBytes); err != nil {
		h.logErr.Println("failed to parse form:", err)
		http.Error(w, err.Error(), http.StatusExpectationFailed)
		return
	}

	f, fHandler, err := r.FormFile("file")
	if err != nil {
		h.logErr.Println("failed to open incoming file:", err)
		http.Error(w, "cannot read incoming file", http.StatusInternalServerError)
		return
	}
	defer func() {
		os.Remove(fHandler.Filename)
		f.Close()
	}()

	var leftSpace int64 = storCapacity - *h.uploadDirSize
	if leftSpace < fHandler.Size {
		h.logErr.Println("not enough space left in storage, only", leftSpace>>20, "MiB left")
		w.WriteHeader(http.StatusInternalServerError)
		web.ErrorNoSpace(utils.MainSite(r.Host), w)
		return
	}

	s256 := sha256.New()
	if _, err := io.Copy(s256, f); err != nil {
		h.logErr.Println("failed to compute SHA-256 hash:", err)
		http.Error(w, "cannot compute hash for a file", http.StatusInternalServerError)
		return
	}
	// fHash is the plain content hash used for logging; appending the salt to
	// the hash state yields fSaltedHash, which becomes the on-disk name and
	// the URL path segment.
	fHash := hex.EncodeToString(s256.Sum(nil))
	s256.Write([]byte(h.conf.HashSalt))
	fSaltedHash := base64.RawURLEncoding.EncodeToString(s256.Sum(nil))
	f.Seek(0, io.SeekStart)

	fPath := path.Join(h.conf.Uploads.Directory, fSaltedHash)
	_, err = os.Stat(fPath)
	if os.IsNotExist(err) {
		fDst, err := os.Create(fPath)
		if err != nil {
			h.logErr.Println("failed to open file for writing:", err)
			http.Error(w, "cannot create your file", http.StatusInternalServerError)
			return
		}
		defer fDst.Close()

		// We initially grow the destination file to the uploaded file's size,
		// which creates a sparse file. This lets the inotify watcher get by
		// with a relatively small buffer, and it makes a noticeable difference
		// in practice.
		fDst.Seek(fHandler.Size-1, io.SeekStart)
		fDst.Write([]byte{0})
		fDst.Seek(0, io.SeekStart)

		_, err = io.Copy(fDst, f)
		if err != nil {
			h.logErr.Println("failed to copy uploaded file to destination:", err)
			http.Error(w, "cannot copy file's content", http.StatusInternalServerError)
			return
		}

		typ, _ := utils.NetworkType(r.Host)
		h.logUpload.Printf("| %s | %s | %s | SHA256 %s | %s | %d | %s", r.Header.Get("X-Real-IP"), typ,
			fHandler.Filename, fHash, fSaltedHash, fHandler.Size, r.UserAgent())
		w.WriteHeader(http.StatusCreated)
	} else {
		// The file is already stored; just refresh its timestamps instead of
		// writing it again.
		os.Chtimes(fPath, time.Now(), time.Now())
		w.WriteHeader(http.StatusFound)
	}

	downloadURL := path.Join("/f", fSaltedHash, fHandler.Filename)
	downloadURLParsed, _ := url.Parse(downloadURL)
	if strings.Contains(r.UserAgent(), "curl") {
		_, scheme := utils.NetworkType(r.Host)
		w.Write([]byte(scheme + "://" + r.Host + downloadURLParsed.String() + "\r\n"))
		return
	}
	web.Uploaded(utils.MainSite(r.Host), downloadURLParsed.String(), h.conf.Uploads.Limits.KeepForHours, w)
}

// Download streams a stored file identified by the salted hash URL parameter.
func (h *UploadHandlers) Download(w http.ResponseWriter, r *http.Request) {
	saltedHash := server.GetURLParam(r, "hash")
	filePath := path.Join(h.conf.Uploads.Directory, saltedHash)
	stat, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		h.NotFound(w, r)
		return
	}

	name := server.GetURLParam(r, "name")
	w.Header().Add("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", name))

	fd, err := os.Open(filePath)
	if err != nil {
		h.logErr.Println("failed to open file to read:", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer fd.Close()

	netTyp, _ := utils.NetworkType(r.Host)
	h.logDownload.Printf("| %s | %s | %s | %s | %s", r.Header.Get("X-Real-IP"), netTyp, name, saltedHash, r.UserAgent())
	http.ServeContent(w, r, filePath, stat.ModTime(), fd)
}

// NotFound renders the 404 page.
func (h *UploadHandlers) NotFound(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNotFound)
	web.Error404(utils.MainSite(r.Host), w)
}