func main() {
rand.Seed(time.Now().UnixNano())
- ctx, cancelRoot := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // TODO: implement graceful shutdown by closing context
+ // c := make(chan os.Signal, 1)
+ // signal.Notify(c, os.Interrupt)
+ // defer func() {
+ // signal.Stop(c)
+ // cancel()
+ // }()
+ // go func() {
+ // select {
+ // case <-c:
+ // cancel()
+ // case <-ctx.Done():
+ // }
+ // }()
basePort := flag.Int("bind", defaultPort, "Bind address and port")
statsPort := flag.String("stats-port", "", "Enable stats HTTP endpoint on address and port")
logger.Fatalw("Configuration directory not found", "path", configDir)
}
- srv, err := hotline.NewServer(*configDir, "", *basePort, logger, &hotline.OSFileStore{})
+ srv, err := hotline.NewServer(*configDir, *basePort, logger, &hotline.OSFileStore{})
if err != nil {
logger.Fatal(err)
}
}
// Serve Hotline requests until program exit
- logger.Fatal(srv.ListenAndServe(ctx, cancelRoot))
+ logger.Fatal(srv.ListenAndServe(ctx, cancel))
}
type statHandler struct {
MaxDownloads: 0
MaxDownloadsPerClient: 0
MaxConnectionsPerIP: 0
+PreserveResourceForks: true
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/ryboe/q v1.0.16 h1:BIRQwmpdD/YE4HTI8ZDIaRTx+m7Qk3WCVlEcDCHQ5U0=
+github.com/ryboe/q v1.0.16/go.mod h1:27Qxobs9LgGMjiUKOnVMUT6IlHKsUwjjh0HeXrsY3Kg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
)
const (
- accessAlwaysAllow = -1 // Some transactions are always allowed
-
// File System Maintenance
accessDeleteFile = 0
accessUploadFile = 1
accessUploadAnywhere = 25
// accessAnyName = 26
// accessNoAgreement = 27
- // accessSetFileComment = 28
- // accessSetFolderComment = 29
- accessViewDropBoxes = 30
- accessMakeAlias = 31
- accessBroadcast = 32
- accessNewsDeleteArt = 33
- accessNewsCreateCat = 34
+ accessSetFileComment = 28
+ accessSetFolderComment = 29
+ accessViewDropBoxes = 30
+ accessMakeAlias = 31
+ accessBroadcast = 32
+ accessNewsDeleteArt = 33
+ accessNewsCreateCat = 34
// accessNewsDeleteCat = 35
accessNewsCreateFldr = 36
// accessNewsDeleteFldr = 37
// authorize checks if 64 bit access slice contain has accessBit set
// TODO: refactor to use accessBitmap type
func authorize(access *[]byte, accessBit int) bool {
- if accessBit == accessAlwaysAllow {
- return true
- }
bits := big.NewInt(int64(binary.BigEndian.Uint64(*access)))
return bits.Bit(63-accessBit) == 1
return db.TextView.Write(p)
}
// Sync is a no-op that exists to satisfy the zapcore.WriteSyncer interface.
func (db *DebugBuffer) Sync() error {
	return nil
}
"golang.org/x/crypto/bcrypt"
"io"
"math/big"
+ "sort"
)
type byClientID []*ClientConn
return true
}
- accessBitmap := big.NewInt(int64(binary.BigEndian.Uint64(*cc.Account.Access)))
+ i := big.NewInt(int64(binary.BigEndian.Uint64(*cc.Account.Access)))
- return accessBitmap.Bit(63-access) == 1
+ return i.Bit(63-access) == 1
}
// Disconnect notifies other clients that a client has disconnected
},
}
}
+
+// sortedClients is a utility function that takes a map of *ClientConn and returns a sorted slice of the values.
+// The purpose of this is to ensure that the ordering of client connections is deterministic so that test assertions work.
+func sortedClients(unsortedClients map[uint16]*ClientConn) (clients []*ClientConn) {
+ for _, c := range unsortedClients {
+ clients = append(clients, c)
+ }
+ sort.Sort(byClientID(clients))
+ return clients
+}
MaxDownloads int `yaml:"MaxDownloads"` // Global simultaneous download limit
MaxDownloadsPerClient int `yaml:"MaxDownloadsPerClient"` // Per client simultaneous download limit
MaxConnectionsPerIP int `yaml:"MaxConnectionsPerIP"` // Max connections per IP
+ PreserveResourceForks bool `yaml:"PreserveResourceForks"` // Enable preservation of file info and resource forks in sidecar files
}
// fileNameWithInfoHeader contains the fixed length fields of FileNameWithInfo
type fileNameWithInfoHeader struct {
- Type [4]byte // file type code
+ Type [4]byte // File type code
Creator [4]byte // File creator code
FileSize [4]byte // File Size in bytes
RSVD [4]byte
return err
}
-
return path.Join(out...)
}
-func ReadFilePath(filePathFieldData []byte) string {
- var fp FilePath
- err := fp.UnmarshalBinary(filePathFieldData)
- if err != nil {
- // TODO
- }
- return fp.String()
-}
-
func readPath(fileRoot string, filePath, fileName []byte) (fullPath string, err error) {
var fp FilePath
if filePath != nil {
"github.com/stretchr/testify/mock"
"io/fs"
"os"
+ "time"
)
type FileStore interface {
+ Create(name string) (*os.File, error)
Mkdir(name string, perm os.FileMode) error
- Stat(name string) (os.FileInfo, error)
Open(name string) (*os.File, error)
- Symlink(oldname, newname string) error
+ OpenFile(name string, flag int, perm fs.FileMode) (*os.File, error)
Remove(name string) error
- Create(name string) (*os.File, error)
+ RemoveAll(path string) error
+ Rename(oldpath string, newpath string) error
+ Stat(name string) (fs.FileInfo, error)
+ Symlink(oldname, newname string) error
WriteFile(name string, data []byte, perm fs.FileMode) error
- // TODO: implement these
- // Rename(oldpath string, newpath string) error
- // RemoveAll(path string) error
+ ReadFile(name string) ([]byte, error)
}
type OSFileStore struct{}
return os.Symlink(oldname, newname)
}
+func (fs *OSFileStore) RemoveAll(name string) error {
+ return os.RemoveAll(name)
+}
+
func (fs *OSFileStore) Remove(name string) error {
return os.Remove(name)
}
return os.WriteFile(name, data, perm)
}
+func (fs *OSFileStore) Rename(oldpath string, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func (fs *OSFileStore) ReadFile(name string) ([]byte, error) {
+ return os.ReadFile(name)
+}
+
+func (fs *OSFileStore) OpenFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
type MockFileStore struct {
mock.Mock
}
return args.Get(0).(*os.File), args.Error(1)
}
+func (mfs *MockFileStore) OpenFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+ args := mfs.Called(name, flag, perm)
+ return args.Get(0).(*os.File), args.Error(1)
+}
+
func (mfs *MockFileStore) Symlink(oldname, newname string) error {
args := mfs.Called(oldname, newname)
return args.Error(0)
}
+func (mfs *MockFileStore) RemoveAll(name string) error {
+ args := mfs.Called(name)
+ return args.Error(0)
+}
+
func (mfs *MockFileStore) Remove(name string) error {
args := mfs.Called(name)
return args.Error(0)
args := mfs.Called(name, data, perm)
return args.Error(0)
}
+
+func (mfs *MockFileStore) Rename(oldpath, newpath string) error {
+ args := mfs.Called(oldpath, newpath)
+ return args.Error(0)
+}
+
+func (mfs *MockFileStore) ReadFile(name string) ([]byte, error) {
+ args := mfs.Called(name)
+ return args.Get(0).([]byte), args.Error(1)
+}
+
+type MockFileInfo struct {
+ mock.Mock
+}
+
+func (mfi *MockFileInfo) Name() string {
+ args := mfi.Called()
+ return args.String(0)
+}
+
+func (mfi *MockFileInfo) Size() int64 {
+ args := mfi.Called()
+ return args.Get(0).(int64)
+}
+
+func (mfi *MockFileInfo) Mode() fs.FileMode {
+ args := mfi.Called()
+ return args.Get(0).(fs.FileMode)
+}
+
+func (mfi *MockFileInfo) ModTime() time.Time {
+ _ = mfi.Called()
+ return time.Now()
+}
+
+func (mfi *MockFileInfo) IsDir() bool {
+ args := mfi.Called()
+ return args.Bool(0)
+}
+
+func (mfi *MockFileInfo) Sys() interface{} {
+ _ = mfi.Called()
+ return nil
+}
// friendlyCreatorNames maps a small number of type codes that the GetInfo
// window displays with a human-readable name instead of the 4-letter code.
var friendlyCreatorNames = map[string]string{
	"APPL": "Application Program",
	"flda": "Folder Alias",
	"fldr": "Folder",
	"HTbm": "Hotline Bookmark",
	"HTft": "Incomplete File",
	"HTLC": "Hotline",
	"SIT!": "StuffIt Archive",
	"TEXT": "Text File",
}
--- /dev/null
+package hotline
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "strings"
+)
+
const (
	incompleteFileSuffix = ".incomplete"  // suffix given to partially transferred files
	infoForkNameTemplate = "%s.info_%s"   // template string for info fork filenames
	rsrcForkNameTemplate = "%s.rsrc_%s"   // template string for resource fork filenames
)
+
// fileWrapper encapsulates the data, info, and resource forks of a Hotline file and provides methods to manage the files.
type fileWrapper struct {
	fs             FileStore // storage backend used for all file operations
	name           string    // name of the file
	path           string    // path to file directory
	dataPath       string    // path to the file data fork
	dataOffset     int64     // byte offset into the data fork; subtracted from reported sizes (e.g. resumed transfers)
	rsrcPath       string    // path to the file resource fork
	infoPath       string    // path to the file information fork
	incompletePath string    // path to partially transferred temp file
	saveMetaData   bool      // if true, enables saving of info and resource forks in sidecar files
	infoFork       *FlatFileInformationFork // parsed info fork, when one has been loaded
	ffo            *flattenedFileObject     // assembled flattened file object metadata
}
+
+func newFileWrapper(fs FileStore, path string, dataOffset int64) (*fileWrapper, error) {
+ pathSegs := strings.Split(path, pathSeparator)
+ dir := strings.Join(pathSegs[:len(pathSegs)-1], pathSeparator)
+ fName := pathSegs[len(pathSegs)-1]
+ f := fileWrapper{
+ fs: fs,
+ name: fName,
+ path: dir,
+ dataPath: path,
+ dataOffset: dataOffset,
+ rsrcPath: fmt.Sprintf(rsrcForkNameTemplate, dir+"/", fName),
+ infoPath: fmt.Sprintf(infoForkNameTemplate, dir+"/", fName),
+ incompletePath: dir + "/" + fName + incompleteFileSuffix,
+ ffo: &flattenedFileObject{},
+ }
+
+ var err error
+ f.ffo, err = f.flattenedFileObject()
+ if err != nil {
+ return nil, err
+ }
+
+ return &f, nil
+}
+
+func (f *fileWrapper) totalSize() []byte {
+ var s int64
+ size := make([]byte, 4)
+
+ info, err := f.fs.Stat(f.dataPath)
+ if err == nil {
+ s += info.Size() - f.dataOffset
+ }
+
+ info, err = f.fs.Stat(f.rsrcPath)
+ if err == nil {
+ s += info.Size()
+ }
+
+ binary.BigEndian.PutUint32(size, uint32(s))
+
+ return size
+}
+
+func (f *fileWrapper) rsrcForkSize() (s [4]byte) {
+ info, err := f.fs.Stat(f.rsrcPath)
+ if err != nil {
+ return s
+ }
+
+ binary.BigEndian.PutUint32(s[:], uint32(info.Size()))
+ return s
+}
+
+func (f *fileWrapper) rsrcForkHeader() FlatFileForkHeader {
+ return FlatFileForkHeader{
+ ForkType: [4]byte{0x4D, 0x41, 0x43, 0x52}, // "MACR"
+ CompressionType: [4]byte{},
+ RSVD: [4]byte{},
+ DataSize: f.rsrcForkSize(),
+ }
+}
+
+func (f *fileWrapper) incompleteDataName() string {
+ return f.name + incompleteFileSuffix
+}
+
+func (f *fileWrapper) rsrcForkName() string {
+ return fmt.Sprintf(rsrcForkNameTemplate, "", f.name)
+}
+
+func (f *fileWrapper) infoForkName() string {
+ return fmt.Sprintf(infoForkNameTemplate, "", f.name)
+}
+
+func (f *fileWrapper) creatorCode() []byte {
+ if f.ffo.FlatFileInformationFork.CreatorSignature != nil {
+ return f.infoFork.CreatorSignature
+ }
+ return []byte(fileTypeFromFilename(f.name).CreatorCode)
+}
+
+func (f *fileWrapper) typeCode() []byte {
+ if f.infoFork != nil {
+ return f.infoFork.TypeSignature
+ }
+ return []byte(fileTypeFromFilename(f.name).TypeCode)
+}
+
+func (f *fileWrapper) rsrcForkWriter() (io.Writer, error) {
+ file, err := os.OpenFile(f.rsrcPath, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, err
+ }
+
+ return file, nil
+}
+
+func (f *fileWrapper) infoForkWriter() (io.Writer, error) {
+ file, err := os.OpenFile(f.infoPath, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, err
+ }
+
+ return file, nil
+}
+
+func (f *fileWrapper) incFileWriter() (io.Writer, error) {
+ file, err := os.OpenFile(f.incompletePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, err
+ }
+
+ return file, nil
+}
+
+func (f *fileWrapper) dataForkReader() (io.Reader, error) {
+ return f.fs.Open(f.dataPath)
+}
+
+func (f *fileWrapper) rsrcForkFile() (*os.File, error) {
+ return f.fs.Open(f.rsrcPath)
+}
+
+func (f *fileWrapper) dataFile() (os.FileInfo, error) {
+ if fi, err := f.fs.Stat(f.dataPath); err == nil {
+ return fi, nil
+ }
+ if fi, err := f.fs.Stat(f.incompletePath); err == nil {
+ return fi, nil
+ }
+
+ return nil, errors.New("file or directory not found")
+}
+
+// move a fileWrapper and its associated metadata files to newPath
+func (f *fileWrapper) move(newPath string) error {
+ err := f.fs.Rename(f.dataPath, path.Join(newPath, f.name))
+ if err != nil {
+ // TODO
+ }
+
+ err = f.fs.Rename(f.incompletePath, path.Join(newPath, f.incompleteDataName()))
+ if err != nil {
+ // TODO
+ }
+
+ err = f.fs.Rename(f.rsrcPath, path.Join(newPath, f.rsrcForkName()))
+ if err != nil {
+ // TODO
+ }
+
+ err = f.fs.Rename(f.infoPath, path.Join(newPath, f.infoForkName()))
+ if err != nil {
+ // TODO
+ }
+
+ return nil
+}
+
+// delete a fileWrapper and its associated metadata files if they exist
+func (f *fileWrapper) delete() error {
+ err := f.fs.RemoveAll(f.dataPath)
+ if err != nil {
+ // TODO
+ }
+
+ err = f.fs.Remove(f.incompletePath)
+ if err != nil {
+ // TODO
+ }
+
+ err = f.fs.Remove(f.rsrcPath)
+ if err != nil {
+ // TODO
+ }
+
+ err = f.fs.Remove(f.infoPath)
+ if err != nil {
+ // TODO
+ }
+
+ return nil
+}
+
+func (f *fileWrapper) flattenedFileObject() (*flattenedFileObject, error) {
+ dataSize := make([]byte, 4)
+ mTime := make([]byte, 8)
+
+ ft := defaultFileType
+
+ fileInfo, err := f.fs.Stat(f.dataPath)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return nil, err
+ }
+ if errors.Is(err, fs.ErrNotExist) {
+ fileInfo, err = f.fs.Stat(f.incompletePath)
+ if err == nil {
+ mTime = toHotlineTime(fileInfo.ModTime())
+ binary.BigEndian.PutUint32(dataSize, uint32(fileInfo.Size()-f.dataOffset))
+ ft, _ = fileTypeFromInfo(fileInfo)
+ }
+ } else {
+ mTime = toHotlineTime(fileInfo.ModTime())
+ binary.BigEndian.PutUint32(dataSize, uint32(fileInfo.Size()-f.dataOffset))
+ ft, _ = fileTypeFromInfo(fileInfo)
+ }
+
+ f.ffo.FlatFileHeader = FlatFileHeader{
+ Format: [4]byte{0x46, 0x49, 0x4c, 0x50}, // "FILP"
+ Version: [2]byte{0, 1},
+ RSVD: [16]byte{},
+ ForkCount: [2]byte{0, 2},
+ }
+
+ _, err = f.fs.Stat(f.infoPath)
+ if err == nil {
+ b, err := f.fs.ReadFile(f.infoPath)
+ if err != nil {
+ return nil, err
+ }
+
+ f.ffo.FlatFileHeader.ForkCount[1] = 3
+
+ if err := f.ffo.FlatFileInformationFork.UnmarshalBinary(b); err != nil {
+ return nil, err
+ }
+ } else {
+ f.ffo.FlatFileInformationFork = FlatFileInformationFork{
+ Platform: []byte("AMAC"), // TODO: Remove hardcode to support "AWIN" Platform (maybe?)
+ TypeSignature: []byte(ft.TypeCode),
+ CreatorSignature: []byte(ft.CreatorCode),
+ Flags: []byte{0, 0, 0, 0},
+ PlatformFlags: []byte{0, 0, 1, 0}, // TODO: What is this?
+ RSVD: make([]byte, 32),
+ CreateDate: mTime, // some filesystems don't support createTime
+ ModifyDate: mTime,
+ NameScript: []byte{0, 0},
+ Name: []byte(f.name),
+ NameSize: []byte{0, 0},
+ CommentSize: []byte{0, 0},
+ Comment: []byte{},
+ }
+ binary.BigEndian.PutUint16(f.ffo.FlatFileInformationFork.NameSize, uint16(len(f.name)))
+ }
+
+ f.ffo.FlatFileInformationForkHeader = FlatFileForkHeader{
+ ForkType: [4]byte{0x49, 0x4E, 0x46, 0x4F}, // "INFO"
+ CompressionType: [4]byte{},
+ RSVD: [4]byte{},
+ DataSize: f.ffo.FlatFileInformationFork.Size(),
+ }
+
+ f.ffo.FlatFileDataForkHeader = FlatFileForkHeader{
+ ForkType: [4]byte{0x44, 0x41, 0x54, 0x41}, // "DATA"
+ CompressionType: [4]byte{},
+ RSVD: [4]byte{},
+ DataSize: [4]byte{dataSize[0], dataSize[1], dataSize[2], dataSize[3]},
+ }
+ f.ffo.FlatFileResForkHeader = f.rsrcForkHeader()
+
+ return f.ffo, nil
+}
"strings"
)
-const incompleteFileSuffix = ".incomplete"
-
func downcaseFileExtension(filename string) string {
splitStr := strings.Split(filename, ".")
ext := strings.ToLower(
return defaultFileType
}
-func fileTypeFromInfo(info os.FileInfo) (ft fileType, err error) {
+func fileTypeFromInfo(info fs.FileInfo) (ft fileType, err error) {
if info.IsDir() {
ft.CreatorCode = "n/a "
ft.TypeCode = "fldr"
}
func getFileNameList(filePath string) (fields []Field, err error) {
- files, err := ioutil.ReadDir(filePath)
+ files, err := os.ReadDir(filePath)
if err != nil {
return fields, nil
}
for _, file := range files {
var fnwi FileNameWithInfo
+ if strings.HasPrefix(file.Name(), ".") {
+ continue
+ }
+
fileCreator := make([]byte, 4)
- if file.Mode()&os.ModeSymlink != 0 {
+ fileInfo, err := file.Info()
+ if err != nil {
+ return fields, err
+ }
+
+ if fileInfo.Mode()&os.ModeSymlink != 0 {
resolvedPath, err := os.Readlink(filePath + "/" + file.Name())
if err != nil {
return fields, err
if err != nil {
return fields, err
}
- binary.BigEndian.PutUint32(fnwi.FileSize[:], uint32(len(dir)))
+
+ var c uint32
+ for _, f := range dir {
+ if !strings.HasPrefix(f.Name(), ".") {
+ c += 1
+ }
+ }
+
+ binary.BigEndian.PutUint32(fnwi.FileSize[:], c)
copy(fnwi.Type[:], []byte("fldr")[:])
copy(fnwi.Creator[:], fileCreator[:])
} else {
if err != nil {
return fields, err
}
- binary.BigEndian.PutUint32(fnwi.FileSize[:], uint32(len(dir)))
+
+ var c uint32
+ for _, f := range dir {
+ if !strings.HasPrefix(f.Name(), ".") {
+ c += 1
+ }
+ }
+
+ binary.BigEndian.PutUint32(fnwi.FileSize[:], c)
copy(fnwi.Type[:], []byte("fldr")[:])
copy(fnwi.Creator[:], fileCreator[:])
} else {
- // the Hotline protocol does not support file sizes > 4GiB due to the 4 byte field size, so skip them
- if file.Size() > 4294967296 {
+		// the Hotline protocol does not support file sizes > 4GiB due to the 4 byte field size, so skip them
+ if fileInfo.Size() > 4294967296 {
continue
}
- binary.BigEndian.PutUint32(fnwi.FileSize[:], uint32(file.Size()))
- copy(fnwi.Type[:], []byte(fileTypeFromFilename(file.Name()).TypeCode)[:])
- copy(fnwi.Creator[:], []byte(fileTypeFromFilename(file.Name()).CreatorCode)[:])
+
+ hlFile, err := newFileWrapper(&OSFileStore{}, filePath+"/"+file.Name(), 0)
+ if err != nil {
+ return nil, err
+ }
+
+ copy(fnwi.FileSize[:], hlFile.totalSize()[:])
+ copy(fnwi.Type[:], hlFile.ffo.FlatFileInformationFork.TypeSignature[:])
+ copy(fnwi.Creator[:], hlFile.ffo.FlatFileInformationFork.CreatorSignature[:])
}
strippedName := strings.Replace(file.Name(), ".incomplete", "", -1)
func CalcItemCount(filePath string) ([]byte, error) {
var itemcount uint16
err := filepath.Walk(filePath, func(path string, info os.FileInfo, err error) error {
- itemcount += 1
-
if err != nil {
return err
}
+ if !strings.HasPrefix(info.Name(), ".") {
+ itemcount += 1
+ }
+
return nil
})
if err != nil {
return bytes
}
-
-// effectiveFile wraps os.Open to check for the presence of a partial file transfer as a fallback
-func effectiveFile(filePath string) (*os.File, error) {
- file, err := os.Open(filePath)
- if err != nil && !errors.Is(err, fs.ErrNotExist) {
- return nil, err
- }
-
- if errors.Is(err, fs.ErrNotExist) {
- file, err = os.OpenFile(filePath+incompleteFileSuffix, os.O_APPEND|os.O_WRONLY, 0644)
- if err != nil {
- return nil, err
- }
- }
- return file, nil
-}
import (
	"encoding/binary"
	"errors"
	"io"
)
type flattenedFileObject struct {
FlatFileHeader FlatFileHeader
- FlatFileInformationForkHeader FlatFileInformationForkHeader
+ FlatFileInformationForkHeader FlatFileForkHeader
FlatFileInformationFork FlatFileInformationFork
- FlatFileDataForkHeader FlatFileDataForkHeader
- FileData []byte
+ FlatFileDataForkHeader FlatFileForkHeader
+ FlatFileResForkHeader FlatFileForkHeader
}
// FlatFileHeader is the first section of a "Flattened File Object". All fields have static values.
Format [4]byte // Always "FILP"
Version [2]byte // Always 1
RSVD [16]byte // Always empty zeros
- ForkCount [2]byte // Number of forks
-}
-
-// NewFlatFileHeader returns a FlatFileHeader struct
-func NewFlatFileHeader() FlatFileHeader {
- return FlatFileHeader{
- Format: [4]byte{0x46, 0x49, 0x4c, 0x50}, // FILP
- Version: [2]byte{0, 1},
- RSVD: [16]byte{},
- ForkCount: [2]byte{0, 2},
- }
-}
-
-// FlatFileInformationForkHeader is the second section of a "Flattened File Object"
-type FlatFileInformationForkHeader struct {
- ForkType [4]byte // Always "INFO"
- CompressionType [4]byte // Always 0; Compression was never implemented in the Hotline protocol
- RSVD [4]byte // Always zeros
- DataSize [4]byte // Size of the flat file information fork
+ ForkCount [2]byte // Number of forks, either 2 or 3 if there is a resource fork
}
type FlatFileInformationFork struct {
RSVD []byte
CreateDate []byte
ModifyDate []byte
- NameScript []byte // TODO: what is this?
+ NameScript []byte
NameSize []byte // Length of file name (Maximum 128 characters)
Name []byte // File name
- CommentSize []byte // Length of file comment
+ CommentSize []byte // Length of the comment
Comment []byte // File comment
}
}
func (ffif *FlatFileInformationFork) friendlyType() []byte {
-
if name, ok := friendlyCreatorNames[string(ffif.TypeSignature)]; ok {
return []byte(name)
}
+ return ffif.TypeSignature
+}
+
+func (ffif *FlatFileInformationFork) friendlyCreator() []byte {
+ if name, ok := friendlyCreatorNames[string(ffif.CreatorSignature)]; ok {
+ return []byte(name)
+ }
return ffif.CreatorSignature
}
+func (ffif *FlatFileInformationFork) setComment(comment []byte) error {
+ ffif.Comment = comment
+ binary.BigEndian.PutUint16(ffif.CommentSize, uint16(len(comment)))
+
+ // TODO: return err if comment is too long
+ return nil
+}
+
// DataSize returns the size of the flat file information fork as a 4-byte
// big-endian value: 74 bytes of fixed-length fields plus the variable-length
// Name and Comment.
func (ffif *FlatFileInformationFork) DataSize() []byte {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, uint32(74+len(ffif.Name)+len(ffif.Comment)))
	return out
}
-func (ffo *flattenedFileObject) TransferSize() []byte {
+func (ffif *FlatFileInformationFork) Size() [4]byte {
+ size := [4]byte{}
+
+ dataSize := len(ffif.Name) + len(ffif.Comment) + 74 // 74 = len of fixed size headers
+
+ binary.BigEndian.PutUint32(size[:], uint32(dataSize))
+
+ return size
+}
+
+func (ffo *flattenedFileObject) TransferSize(offset int64) []byte {
+ // get length of the flattenedFileObject, including the info fork
payloadSize := len(ffo.BinaryMarshal())
+
+ // length of data fork
dataSize := binary.BigEndian.Uint32(ffo.FlatFileDataForkHeader.DataSize[:])
- transferSize := make([]byte, 4)
- binary.BigEndian.PutUint32(transferSize, dataSize+uint32(payloadSize))
+ // length of resource fork
+ resForkSize := binary.BigEndian.Uint32(ffo.FlatFileResForkHeader.DataSize[:])
+
+ size := make([]byte, 4)
+ binary.BigEndian.PutUint32(size[:], dataSize+resForkSize+uint32(payloadSize)-uint32(offset))
- return transferSize
+ return size
}
func (ffif *FlatFileInformationFork) ReadNameSize() []byte {
return size
}
// FlatFileForkHeader describes one fork within a Flattened File Object.
// The same layout is used for the INFO, DATA, and MACR (resource) forks.
type FlatFileForkHeader struct {
	ForkType        [4]byte // Either INFO, DATA or MACR
	CompressionType [4]byte
	RSVD            [4]byte
	DataSize        [4]byte // Size of the fork payload in bytes (big-endian)
}
+func (ffif *FlatFileInformationFork) MarshalBinary() []byte {
+ var b []byte
+ b = append(b, ffif.Platform...)
+ b = append(b, ffif.TypeSignature...)
+ b = append(b, ffif.CreatorSignature...)
+ b = append(b, ffif.Flags...)
+ b = append(b, ffif.PlatformFlags...)
+ b = append(b, ffif.RSVD...)
+ b = append(b, ffif.CreateDate...)
+ b = append(b, ffif.ModifyDate...)
+ b = append(b, ffif.NameScript...)
+ b = append(b, ffif.ReadNameSize()...)
+ b = append(b, ffif.Name...)
+ b = append(b, ffif.CommentSize...)
+ b = append(b, ffif.Comment...)
+
+ return b
+}
+
func (ffif *FlatFileInformationFork) UnmarshalBinary(b []byte) error {
nameSize := b[70:72]
bs := binary.BigEndian.Uint16(nameSize)
return nil
}
-func (f *flattenedFileObject) BinaryMarshal() []byte {
+func (ffo *flattenedFileObject) BinaryMarshal() []byte {
var out []byte
- out = append(out, f.FlatFileHeader.Format[:]...)
- out = append(out, f.FlatFileHeader.Version[:]...)
- out = append(out, f.FlatFileHeader.RSVD[:]...)
- out = append(out, f.FlatFileHeader.ForkCount[:]...)
+ out = append(out, ffo.FlatFileHeader.Format[:]...)
+ out = append(out, ffo.FlatFileHeader.Version[:]...)
+ out = append(out, ffo.FlatFileHeader.RSVD[:]...)
+ out = append(out, ffo.FlatFileHeader.ForkCount[:]...)
out = append(out, []byte("INFO")...)
out = append(out, []byte{0, 0, 0, 0}...)
out = append(out, make([]byte, 4)...)
- out = append(out, f.FlatFileInformationFork.DataSize()...)
-
- out = append(out, f.FlatFileInformationFork.Platform...)
- out = append(out, f.FlatFileInformationFork.TypeSignature...)
- out = append(out, f.FlatFileInformationFork.CreatorSignature...)
- out = append(out, f.FlatFileInformationFork.Flags...)
- out = append(out, f.FlatFileInformationFork.PlatformFlags...)
- out = append(out, f.FlatFileInformationFork.RSVD...)
- out = append(out, f.FlatFileInformationFork.CreateDate...)
- out = append(out, f.FlatFileInformationFork.ModifyDate...)
- out = append(out, f.FlatFileInformationFork.NameScript...)
- out = append(out, f.FlatFileInformationFork.ReadNameSize()...)
- out = append(out, f.FlatFileInformationFork.Name...)
- out = append(out, f.FlatFileInformationFork.CommentSize...)
- out = append(out, f.FlatFileInformationFork.Comment...)
-
- out = append(out, f.FlatFileDataForkHeader.ForkType[:]...)
- out = append(out, f.FlatFileDataForkHeader.CompressionType[:]...)
- out = append(out, f.FlatFileDataForkHeader.RSVD[:]...)
- out = append(out, f.FlatFileDataForkHeader.DataSize[:]...)
+ out = append(out, ffo.FlatFileInformationFork.DataSize()...)
+
+ out = append(out, ffo.FlatFileInformationFork.Platform...)
+ out = append(out, ffo.FlatFileInformationFork.TypeSignature...)
+ out = append(out, ffo.FlatFileInformationFork.CreatorSignature...)
+ out = append(out, ffo.FlatFileInformationFork.Flags...)
+ out = append(out, ffo.FlatFileInformationFork.PlatformFlags...)
+ out = append(out, ffo.FlatFileInformationFork.RSVD...)
+ out = append(out, ffo.FlatFileInformationFork.CreateDate...)
+ out = append(out, ffo.FlatFileInformationFork.ModifyDate...)
+ out = append(out, ffo.FlatFileInformationFork.NameScript...)
+ out = append(out, ffo.FlatFileInformationFork.ReadNameSize()...)
+ out = append(out, ffo.FlatFileInformationFork.Name...)
+ out = append(out, ffo.FlatFileInformationFork.CommentSize...)
+ out = append(out, ffo.FlatFileInformationFork.Comment...)
+
+ out = append(out, ffo.FlatFileDataForkHeader.ForkType[:]...)
+ out = append(out, ffo.FlatFileDataForkHeader.CompressionType[:]...)
+ out = append(out, ffo.FlatFileDataForkHeader.RSVD[:]...)
+ out = append(out, ffo.FlatFileDataForkHeader.DataSize[:]...)
return out
}
-func NewFlattenedFileObject(fileRoot string, filePath, fileName []byte, dataOffset int64) (*flattenedFileObject, error) {
- fullFilePath, err := readPath(fileRoot, filePath, fileName)
- if err != nil {
- return nil, err
+func (ffo *flattenedFileObject) ReadFrom(r io.Reader) (int, error) {
+ var n int
+
+ if err := binary.Read(r, binary.BigEndian, &ffo.FlatFileHeader); err != nil {
+ return n, err
}
- file, err := effectiveFile(fullFilePath)
- if err != nil {
- return nil, err
+
+ if err := binary.Read(r, binary.BigEndian, &ffo.FlatFileInformationForkHeader); err != nil {
+ return n, err
}
- defer func(file *os.File) { _ = file.Close() }(file)
+ dataLen := binary.BigEndian.Uint32(ffo.FlatFileInformationForkHeader.DataSize[:])
+ ffifBuf := make([]byte, dataLen)
+ if _, err := io.ReadFull(r, ffifBuf); err != nil {
+ return n, err
+ }
- fileInfo, err := file.Stat()
- if err != nil {
- return nil, err
+ if err := ffo.FlatFileInformationFork.UnmarshalBinary(ffifBuf); err != nil {
+ return n, err
}
- dataSize := make([]byte, 4)
- binary.BigEndian.PutUint32(dataSize, uint32(fileInfo.Size()-dataOffset))
+ if err := binary.Read(r, binary.BigEndian, &ffo.FlatFileDataForkHeader); err != nil {
+ return n, err
+ }
- mTime := toHotlineTime(fileInfo.ModTime())
+ return n, nil
+}
- ft, _ := fileTypeFromInfo(fileInfo)
+func (ffo *flattenedFileObject) dataSize() int64 {
+ return int64(binary.BigEndian.Uint32(ffo.FlatFileDataForkHeader.DataSize[:]))
+}
- return &flattenedFileObject{
- FlatFileHeader: NewFlatFileHeader(),
- FlatFileInformationFork: NewFlatFileInformationFork(string(fileName), mTime, ft.TypeCode, ft.CreatorCode),
- FlatFileDataForkHeader: FlatFileDataForkHeader{
- ForkType: [4]byte{0x44, 0x41, 0x54, 0x41}, // "DATA"
- CompressionType: [4]byte{},
- RSVD: [4]byte{},
- DataSize: [4]byte{dataSize[0], dataSize[1], dataSize[2], dataSize[3]},
- },
- }, nil
+func (ffo *flattenedFileObject) rsrcSize() int64 {
+ return int64(binary.BigEndian.Uint32(ffo.FlatFileResForkHeader.DataSize[:]))
}
import (
"fmt"
"github.com/stretchr/testify/assert"
- "os"
"testing"
)
-func TestNewFlattenedFileObject(t *testing.T) {
- type args struct {
- fileRoot string
- filePath []byte
- fileName []byte
- }
- tests := []struct {
- name string
- args args
- want *flattenedFileObject
- wantErr assert.ErrorAssertionFunc
- }{
- {
- name: "with valid file",
- args: args{
- fileRoot: func() string { path, _ := os.Getwd(); return path + "/test/config/Files" }(),
- fileName: []byte("testfile.txt"),
- filePath: []byte{0, 0},
- },
- want: &flattenedFileObject{
- FlatFileHeader: NewFlatFileHeader(),
- FlatFileInformationForkHeader: FlatFileInformationForkHeader{},
- FlatFileInformationFork: NewFlatFileInformationFork("testfile.txt", make([]byte, 8), "", ""),
- FlatFileDataForkHeader: FlatFileDataForkHeader{
- ForkType: [4]byte{0x4d, 0x41, 0x43, 0x52}, // DATA
- CompressionType: [4]byte{0, 0, 0, 0},
- RSVD: [4]byte{0, 0, 0, 0},
- DataSize: [4]byte{0x00, 0x00, 0x00, 0x17},
- },
- FileData: nil,
- },
- wantErr: assert.NoError,
- },
- {
- name: "when file path is invalid",
- args: args{
- fileRoot: func() string { path, _ := os.Getwd(); return path + "/test/config/Files" }(),
- fileName: []byte("nope.txt"),
- },
- want: nil,
- wantErr: assert.Error,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := NewFlattenedFileObject(tt.args.fileRoot, tt.args.filePath, tt.args.fileName, 0)
- if tt.wantErr(t, err, fmt.Sprintf("NewFlattenedFileObject(%v, %v, %v)", tt.args.fileRoot, tt.args.filePath, tt.args.fileName)) {
- return
- }
-
- // Clear the file timestamp fields to work around problems running the tests in multiple timezones
- // TODO: revisit how to test this by mocking the stat calls
- got.FlatFileInformationFork.CreateDate = make([]byte, 8)
- got.FlatFileInformationFork.ModifyDate = make([]byte, 8)
- assert.Equalf(t, tt.want, got, "NewFlattenedFileObject(%v, %v, %v)", tt.args.fileRoot, tt.args.filePath, tt.args.fileName)
- })
- }
-}
-
func TestFlatFileInformationFork_UnmarshalBinary(t *testing.T) {
type args struct {
b []byte
SubVersion [2]byte
}
+var trtp = [4]byte{0x54, 0x52, 0x54, 0x50}
+
// Handshake
// After establishing TCP connection, both client and server start the handshake process
// in order to confirm that each of them comply with requirements of the other.
// Description Size Data Note
// Protocol ID 4 TRTP
// Error code 4 Error code returned by the server (0 = no error)
-func Handshake(conn io.ReadWriter) error {
+func Handshake(rw io.ReadWriter) error {
handshakeBuf := make([]byte, 12)
- if _, err := io.ReadFull(conn, handshakeBuf); err != nil {
+ if _, err := io.ReadFull(rw, handshakeBuf); err != nil {
return err
}
return err
}
- if h.Protocol != [4]byte{0x54, 0x52, 0x54, 0x50} {
+ if h.Protocol != trtp {
return errors.New("invalid handshake")
}
- _, err := conn.Write([]byte{84, 82, 84, 80, 0, 0, 0, 0})
+ _, err := rw.Write([]byte{84, 82, 84, 80, 0, 0, 0, 0})
return err
}
package hotline
import (
+ "bufio"
"bytes"
"context"
"encoding/binary"
"fmt"
"github.com/go-playground/validator/v10"
"go.uber.org/zap"
+ "gopkg.in/yaml.v3"
"io"
"io/fs"
"io/ioutil"
"path"
"path/filepath"
"runtime/debug"
- "sort"
"strings"
"sync"
"time"
-
- "gopkg.in/yaml.v3"
)
+// contextKey is an unexported key type for context values set by this package,
+// preventing collisions with keys defined in other packages.
+type contextKey string
+
+// contextKeyReq is the context key under which a requestCtx is stored.
+var contextKeyReq = contextKey("req")
+
+// requestCtx carries per-connection request metadata through a context.Context.
+type requestCtx struct {
+	remoteAddr string // client's remote address ("host:port"), set when the connection is accepted
+	login      string // NOTE(review): not populated anywhere in the visible code — confirm intended usage
+	name       string // NOTE(review): not populated anywhere in the visible code — confirm intended usage
+}
+
const (
userIdleSeconds = 300 // time in seconds before an inactive user is marked idle
idleCheckInterval = 10 // time in seconds to check for idle users
Accounts map[string]*Account
Agreement []byte
Clients map[uint16]*ClientConn
- FlatNews []byte
ThreadedNews *ThreadedNews
FileTransfers map[uint32]*FileTransfer
Config *Config
TrackerPassID [4]byte
Stats *Stats
- FS FileStore
-
- // newsReader io.Reader
- // newsWriter io.WriteCloser
+ FS FileStore // Storage backend to use for File storage
outbox chan Transaction
+ mux sync.Mutex
- mux sync.Mutex
flatNewsMux sync.Mutex
+ FlatNews []byte
}
type PrivateChat struct {
s.Logger.Fatal(err)
}
- s.Logger.Fatal(s.Serve(ctx, cancelRoot, ln))
+ s.Logger.Fatal(s.Serve(ctx, ln))
}()
wg.Add(1)
}
- s.Logger.Fatal(s.ServeFileTransfers(ln))
+ s.Logger.Fatal(s.ServeFileTransfers(ctx, ln))
}()
wg.Wait()
return nil
}
-func (s *Server) ServeFileTransfers(ln net.Listener) error {
+func (s *Server) ServeFileTransfers(ctx context.Context, ln net.Listener) error {
for {
conn, err := ln.Accept()
if err != nil {
}
go func() {
- if err := s.handleFileTransfer(conn); err != nil {
+ defer func() { _ = conn.Close() }()
+
+ err = s.handleFileTransfer(
+ context.WithValue(ctx, contextKeyReq, requestCtx{
+ remoteAddr: conn.RemoteAddr().String(),
+ }),
+ conn,
+ )
+
+ if err != nil {
s.Logger.Errorw("file transfer error", "reason", err)
}
}()
return nil
}
-func (s *Server) Serve(ctx context.Context, cancelRoot context.CancelFunc, ln net.Listener) error {
-
+func (s *Server) Serve(ctx context.Context, ln net.Listener) error {
for {
conn, err := ln.Accept()
if err != nil {
}
}()
go func() {
- if err := s.handleNewConnection(conn, conn.RemoteAddr().String()); err != nil {
+ if err := s.handleNewConnection(ctx, conn, conn.RemoteAddr().String()); err != nil {
+ s.Logger.Infow("New client connection established", "RemoteAddr", conn.RemoteAddr())
if err == io.EOF {
s.Logger.Infow("Client disconnected", "RemoteAddr", conn.RemoteAddr())
} else {
)
// NewServer constructs a new Server from a config dir
-func NewServer(configDir, netInterface string, netPort int, logger *zap.SugaredLogger, FS FileStore) (*Server, error) {
+func NewServer(configDir string, netPort int, logger *zap.SugaredLogger, FS FileStore) (*Server, error) {
server := Server{
Port: netPort,
Accounts: make(map[string]*Account),
}
// handleNewConnection takes a new net.Conn and performs the initial login sequence
-func (s *Server) handleNewConnection(conn net.Conn, remoteAddr string) error {
+func (s *Server) handleNewConnection(ctx context.Context, conn net.Conn, remoteAddr string) error {
defer dontPanic(s.Logger)
if err := Handshake(conn); err != nil {
c.Server.Logger.Errorw("Error handling transaction", "err", err)
}
- // iterate over all of the transactions that were parsed from the byte slice and handle them
+ // iterate over all the transactions that were parsed from the byte slice and handle them
for _, t := range transactions {
if err := c.handleTransaction(&t); err != nil {
c.Server.Logger.Errorw("Error handling transaction", "err", err)
}
// NewTransactionRef generates a random ID for the file transfer. The Hotline client includes this ID
-// in the file transfer request payload, and the file transfer server will use it to map the request
+// in the transfer request payload, and the file transfer server will use it to map the request
// to a transfer
func (s *Server) NewTransactionRef() []byte {
transactionRef := make([]byte, 4)
const dlFldrActionNextFile = 3
// handleFileTransfer receives a client net.Conn from the file transfer server, performs the requested transfer type, then closes the connection
-func (s *Server) handleFileTransfer(conn io.ReadWriteCloser) error {
- defer func() {
-
- if err := conn.Close(); err != nil {
- s.Logger.Errorw("error closing connection", "error", err)
- }
- }()
-
+func (s *Server) handleFileTransfer(ctx context.Context, rwc io.ReadWriter) error {
defer dontPanic(s.Logger)
txBuf := make([]byte, 16)
- if _, err := io.ReadFull(conn, txBuf); err != nil {
+ if _, err := io.ReadFull(rwc, txBuf); err != nil {
return err
}
return errors.New("invalid transaction ID")
}
+ rLogger := s.Logger.With(
+ "remoteAddr", ctx.Value(contextKeyReq).(requestCtx).remoteAddr,
+ "xferID", transferRefNum,
+ )
+
switch fileTransfer.Type {
case FileDownload:
s.Stats.DownloadCounter += 1
dataOffset = int64(binary.BigEndian.Uint32(fileTransfer.fileResumeData.ForkInfoList[0].DataSize[:]))
}
- ffo, err := NewFlattenedFileObject(s.Config.FileRoot, fileTransfer.FilePath, fileTransfer.FileName, dataOffset)
+ fw, err := newFileWrapper(s.FS, fullFilePath, 0)
if err != nil {
return err
}
- s.Logger.Infow("File download started", "filePath", fullFilePath, "transactionRef", fileTransfer.ReferenceNumber)
+ rLogger.Infow("File download started", "filePath", fullFilePath, "transactionRef", fileTransfer.ReferenceNumber)
+ wr := bufio.NewWriterSize(rwc, 1460)
+
+ // if file transfer options are included, that means this is a "quick preview" request from a 1.5+ client
if fileTransfer.options == nil {
// Start by sending flat file object to client
- if _, err := conn.Write(ffo.BinaryMarshal()); err != nil {
+ if _, err := wr.Write(fw.ffo.BinaryMarshal()); err != nil {
return err
}
}
- file, err := s.FS.Open(fullFilePath)
+ file, err := fw.dataForkReader()
if err != nil {
return err
}
- sendBuffer := make([]byte, 1048576)
- var totalSent int64
- for {
- var bytesRead int
- if bytesRead, err = file.ReadAt(sendBuffer, dataOffset+totalSent); err == io.EOF {
- if _, err := conn.Write(sendBuffer[:bytesRead]); err != nil {
- return err
- }
- break
- }
+ if err := sendFile(wr, file, int(dataOffset)); err != nil {
+ return err
+ }
+
+ if err := wr.Flush(); err != nil {
+ return err
+ }
+
+ // if the client requested to resume transfer, do not send the resource fork, or it will be appended into the fileWrapper data
+ if fileTransfer.fileResumeData == nil {
+ err = binary.Write(wr, binary.BigEndian, fw.rsrcForkHeader())
if err != nil {
return err
}
- totalSent += int64(bytesRead)
-
- fileTransfer.BytesSent += bytesRead
-
- if _, err := conn.Write(sendBuffer[:bytesRead]); err != nil {
+ if err := wr.Flush(); err != nil {
return err
}
}
+
+	rFile, err := fw.rsrcForkFile()
+	if err != nil {
+		// NOTE(review): a missing/unreadable resource fork ends the transfer with
+		// success here; confirm that swallowing this error is intentional, since the
+		// resource fork header has already been written to the client above.
+		return nil
+	}
+
+	// Propagate the sendFile error. Previously the result was assigned to err and
+	// then silently discarded, because the following `if err := wr.Flush()` declared
+	// a new, shadowing err — so a failed resource-fork send went unreported.
+	if err = sendFile(wr, rFile, int(dataOffset)); err != nil {
+		return err
+	}
+
+	if err := wr.Flush(); err != nil {
+		return err
+	}
+
case FileUpload:
s.Stats.UploadCounter += 1
- destinationFile := s.Config.FileRoot + ReadFilePath(fileTransfer.FilePath) + "/" + string(fileTransfer.FileName)
+ destinationFile, err := readPath(s.Config.FileRoot, fileTransfer.FilePath, fileTransfer.FileName)
+ if err != nil {
+ return err
+ }
var file *os.File
// 1) Upload a new file
// 2) Resume a partially transferred file
// 3) Replace a fully uploaded file
- // Unfortunately we have to infer which case applies by inspecting what is already on the file system
+ // We have to infer which case applies by inspecting what is already on the filesystem
// 1) Check for existing file:
- _, err := os.Stat(destinationFile)
+ _, err = os.Stat(destinationFile)
if err == nil {
// If found, that means this upload is intended to replace the file
if err = os.Remove(destinationFile); err != nil {
file, err = os.Create(destinationFile + incompleteFileSuffix)
}
if errors.Is(err, fs.ErrNotExist) {
- // If not found, open or create a new incomplete file
+ // If not found, open or create a new .incomplete file
file, err = os.OpenFile(destinationFile+incompleteFileSuffix, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return err
}
}
+ f, err := newFileWrapper(s.FS, destinationFile, 0)
+ if err != nil {
+ return err
+ }
+
defer func() { _ = file.Close() }()
s.Logger.Infow("File upload started", "transactionRef", fileTransfer.ReferenceNumber, "dstFile", destinationFile)
- // TODO: replace io.Discard with a real file when ready to implement storing of resource fork data
- if err := receiveFile(conn, file, io.Discard); err != nil {
+ rForkWriter := io.Discard
+ iForkWriter := io.Discard
+ if s.Config.PreserveResourceForks {
+ rForkWriter, err = f.rsrcForkWriter()
+ if err != nil {
+ return err
+ }
+
+ iForkWriter, err = f.infoForkWriter()
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := receiveFile(rwc, file, rForkWriter, iForkWriter); err != nil {
return err
}
- if err := os.Rename(destinationFile+".incomplete", destinationFile); err != nil {
+ if err := s.FS.Rename(destinationFile+".incomplete", destinationFile); err != nil {
return err
}
// Folder Download flow:
// 1. Get filePath from the transfer
// 2. Iterate over files
- // 3. For each file:
- // Send file header to client
+ // 3. For each fileWrapper:
+ // Send fileWrapper header to client
// The client can reply in 3 ways:
//
- // 1. If type is an odd number (unknown type?), or file download for the current file is completed:
- // client sends []byte{0x00, 0x03} to tell the server to continue to the next file
+ // 1. If type is an odd number (unknown type?), or fileWrapper download for the current fileWrapper is completed:
+ // client sends []byte{0x00, 0x03} to tell the server to continue to the next fileWrapper
//
- // 2. If download of a file is to be resumed:
+ // 2. If download of a fileWrapper is to be resumed:
// client sends:
// []byte{0x00, 0x02} // download folder action
// [2]byte // Resume data size
- // []byte file resume data (see myField_FileResumeData)
+ // []byte fileWrapper resume data (see myField_FileResumeData)
//
- // 3. Otherwise, download of the file is requested and client sends []byte{0x00, 0x01}
+ // 3. Otherwise, download of the fileWrapper is requested and client sends []byte{0x00, 0x01}
//
// When download is requested (case 2 or 3), server replies with:
- // [4]byte - file size
+ // [4]byte - fileWrapper size
// []byte - Flattened File Object
//
- // After every file download, client could request next file with:
+ // After every fileWrapper download, client could request next fileWrapper with:
// []byte{0x00, 0x03}
//
// This notifies the server to send the next item header
s.Logger.Infow("Start folder download", "path", fullFilePath, "ReferenceNumber", fileTransfer.ReferenceNumber)
nextAction := make([]byte, 2)
- if _, err := io.ReadFull(conn, nextAction); err != nil {
+ if _, err := io.ReadFull(rwc, nextAction); err != nil {
return err
}
i := 0
err = filepath.Walk(fullFilePath+"/", func(path string, info os.FileInfo, err error) error {
s.Stats.DownloadCounter += 1
+ i += 1
if err != nil {
return err
}
- i += 1
+
+ // skip dot files
+ if strings.HasPrefix(info.Name(), ".") {
+ return nil
+ }
+
+ hlFile, err := newFileWrapper(s.FS, path, 0)
+ if err != nil {
+ return err
+ }
+
subPath := path[basePathLen+1:]
s.Logger.Infow("Sending fileheader", "i", i, "path", path, "fullFilePath", fullFilePath, "subPath", subPath, "IsDir", info.IsDir())
fileHeader := NewFileHeader(subPath, info.IsDir())
- // Send the file header to client
- if _, err := conn.Write(fileHeader.Payload()); err != nil {
+ // Send the fileWrapper header to client
+ if _, err := rwc.Write(fileHeader.Payload()); err != nil {
s.Logger.Errorf("error sending file header: %v", err)
return err
}
// Read the client's Next Action request
- if _, err := io.ReadFull(conn, nextAction); err != nil {
+ if _, err := io.ReadFull(rwc, nextAction); err != nil {
return err
}
switch nextAction[1] {
case dlFldrActionResumeFile:
- // client asked to resume this file
- var frd FileResumeData
// get size of resumeData
- if _, err := io.ReadFull(conn, nextAction); err != nil {
+ resumeDataByteLen := make([]byte, 2)
+ if _, err := io.ReadFull(rwc, resumeDataByteLen); err != nil {
return err
}
- resumeDataLen := binary.BigEndian.Uint16(nextAction)
+ resumeDataLen := binary.BigEndian.Uint16(resumeDataByteLen)
resumeDataBytes := make([]byte, resumeDataLen)
- if _, err := io.ReadFull(conn, resumeDataBytes); err != nil {
+ if _, err := io.ReadFull(rwc, resumeDataBytes); err != nil {
return err
}
+ var frd FileResumeData
if err := frd.UnmarshalBinary(resumeDataBytes); err != nil {
return err
}
return nil
}
- splitPath := strings.Split(path, "/")
-
- ffo, err := NewFlattenedFileObject(strings.Join(splitPath[:len(splitPath)-1], "/"), nil, []byte(info.Name()), dataOffset)
- if err != nil {
- return err
- }
s.Logger.Infow("File download started",
"fileName", info.Name(),
"transactionRef", fileTransfer.ReferenceNumber,
- "TransferSize", fmt.Sprintf("%x", ffo.TransferSize()),
+ "TransferSize", fmt.Sprintf("%x", hlFile.ffo.TransferSize(dataOffset)),
)
// Send file size to client
- if _, err := conn.Write(ffo.TransferSize()); err != nil {
+ if _, err := rwc.Write(hlFile.ffo.TransferSize(dataOffset)); err != nil {
s.Logger.Error(err)
return err
}
// Send ffo bytes to client
- if _, err := conn.Write(ffo.BinaryMarshal()); err != nil {
+ if _, err := rwc.Write(hlFile.ffo.BinaryMarshal()); err != nil {
s.Logger.Error(err)
return err
}
return err
}
- // // Copy N bytes from file to connection
- // _, err = io.CopyN(conn, file, int64(binary.BigEndian.Uint32(ffo.FlatFileDataForkHeader.DataSize[:])))
- // if err != nil {
- // return err
- // }
- // file.Close()
- sendBuffer := make([]byte, 1048576)
- var totalSent int64
- for {
- var bytesRead int
- if bytesRead, err = file.ReadAt(sendBuffer, dataOffset+totalSent); err == io.EOF {
- if _, err := conn.Write(sendBuffer[:bytesRead]); err != nil {
- return err
- }
- break
- }
+ // wr := bufio.NewWriterSize(rwc, 1460)
+ err = sendFile(rwc, file, int(dataOffset))
+ if err != nil {
+ return err
+ }
+
+ if nextAction[1] != 2 && hlFile.ffo.FlatFileHeader.ForkCount[1] == 3 {
+ err = binary.Write(rwc, binary.BigEndian, hlFile.rsrcForkHeader())
if err != nil {
- panic(err)
+ return err
}
- totalSent += int64(bytesRead)
- fileTransfer.BytesSent += bytesRead
+ rFile, err := hlFile.rsrcForkFile()
+ if err != nil {
+ return err
+ }
- if _, err := conn.Write(sendBuffer[:bytesRead]); err != nil {
+ err = sendFile(rwc, rFile, int(dataOffset))
+ if err != nil {
return err
}
}
- // TODO: optionally send resource fork header and resource fork data
-
// Read the client's Next Action request. This is always 3, I think?
- if _, err := io.ReadFull(conn, nextAction); err != nil {
+ if _, err := io.ReadFull(rwc, nextAction); err != nil {
return err
}
if err != nil {
return err
}
+
s.Logger.Infow(
"Folder upload started",
"transactionRef", fileTransfer.ReferenceNumber,
}
// Begin the folder upload flow by sending the "next file action" to client
- if _, err := conn.Write([]byte{0, dlFldrActionNextFile}); err != nil {
+ if _, err := rwc.Write([]byte{0, dlFldrActionNextFile}); err != nil {
return err
}
s.Stats.UploadCounter += 1
var fu folderUpload
- if _, err := io.ReadFull(conn, fu.DataSize[:]); err != nil {
+ if _, err := io.ReadFull(rwc, fu.DataSize[:]); err != nil {
return err
}
-
- if _, err := io.ReadFull(conn, fu.IsFolder[:]); err != nil {
+ if _, err := io.ReadFull(rwc, fu.IsFolder[:]); err != nil {
return err
}
- if _, err := io.ReadFull(conn, fu.PathItemCount[:]); err != nil {
+ if _, err := io.ReadFull(rwc, fu.PathItemCount[:]); err != nil {
return err
}
- fu.FileNamePath = make([]byte, binary.BigEndian.Uint16(fu.DataSize[:])-4)
- if _, err := io.ReadFull(conn, fu.FileNamePath); err != nil {
+ fu.FileNamePath = make([]byte, binary.BigEndian.Uint16(fu.DataSize[:])-4) // -4 to subtract the path separator bytes
+
+ if _, err := io.ReadFull(rwc, fu.FileNamePath); err != nil {
return err
}
}
// Tell client to send next file
- if _, err := conn.Write([]byte{0, dlFldrActionNextFile}); err != nil {
+ if _, err := rwc.Write([]byte{0, dlFldrActionNextFile}); err != nil {
return err
}
} else {
nextAction := dlFldrActionSendFile
// Check if we have the full file already. If so, send dlFldrAction_NextFile to client to skip.
- _, err := os.Stat(dstPath + "/" + fu.FormattedPath())
+ _, err = os.Stat(dstPath + "/" + fu.FormattedPath())
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
}
// Check if we have a partial file already. If so, send dlFldrAction_ResumeFile to client to resume upload.
- inccompleteFile, err := os.Stat(dstPath + "/" + fu.FormattedPath() + incompleteFileSuffix)
+ incompleteFile, err := os.Stat(dstPath + "/" + fu.FormattedPath() + incompleteFileSuffix)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
nextAction = dlFldrActionResumeFile
}
- if _, err := conn.Write([]byte{0, uint8(nextAction)}); err != nil {
+ if _, err := rwc.Write([]byte{0, uint8(nextAction)}); err != nil {
return err
}
continue
case dlFldrActionResumeFile:
offset := make([]byte, 4)
- binary.BigEndian.PutUint32(offset, uint32(inccompleteFile.Size()))
+ binary.BigEndian.PutUint32(offset, uint32(incompleteFile.Size()))
file, err := os.OpenFile(dstPath+"/"+fu.FormattedPath()+incompleteFileSuffix, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
- fileResumeData := NewFileResumeData([]ForkInfoList{
- *NewForkInfoList(offset),
- })
+ fileResumeData := NewFileResumeData([]ForkInfoList{*NewForkInfoList(offset)})
b, _ := fileResumeData.BinaryMarshal()
bs := make([]byte, 2)
binary.BigEndian.PutUint16(bs, uint16(len(b)))
- if _, err := conn.Write(append(bs, b...)); err != nil {
+ if _, err := rwc.Write(append(bs, b...)); err != nil {
return err
}
- if _, err := io.ReadFull(conn, fileSize); err != nil {
+ if _, err := io.ReadFull(rwc, fileSize); err != nil {
return err
}
- if err := receiveFile(conn, file, ioutil.Discard); err != nil {
+ if err := receiveFile(rwc, file, ioutil.Discard, ioutil.Discard); err != nil {
s.Logger.Error(err)
}
}
case dlFldrActionSendFile:
- if _, err := io.ReadFull(conn, fileSize); err != nil {
+ if _, err := io.ReadFull(rwc, fileSize); err != nil {
return err
}
filePath := dstPath + "/" + fu.FormattedPath()
- s.Logger.Infow("Starting file transfer", "path", filePath, "fileNum", i+1, "totalFiles", "zz", "fileSize", binary.BigEndian.Uint32(fileSize))
- newFile, err := s.FS.Create(filePath + ".incomplete")
+ hlFile, err := newFileWrapper(s.FS, filePath, 0)
if err != nil {
return err
}
- if err := receiveFile(conn, newFile, ioutil.Discard); err != nil {
- s.Logger.Error(err)
+ s.Logger.Infow("Starting file transfer", "path", filePath, "fileNum", i+1, "fileSize", binary.BigEndian.Uint32(fileSize))
+
+ incWriter, err := hlFile.incFileWriter()
+ if err != nil {
+ return err
+ }
+
+ rForkWriter := io.Discard
+ iForkWriter := io.Discard
+ if s.Config.PreserveResourceForks {
+ iForkWriter, err = hlFile.infoForkWriter()
+ if err != nil {
+ return err
+ }
+
+ rForkWriter, err = hlFile.rsrcForkWriter()
+ if err != nil {
+ return err
+ }
}
- _ = newFile.Close()
+	if err := receiveFile(rwc, incWriter, rForkWriter, iForkWriter); err != nil {
+		return err
+	}
+	// NOTE(review): the old code explicitly closed the destination file before the
+	// rename below; confirm incWriter needs no Close/flush here before os.Rename,
+	// or buffered bytes could be lost.
+	// _ = newFile.Close()
if err := os.Rename(filePath+".incomplete", filePath); err != nil {
return err
}
}
- // Tell client to send next file
- if _, err := conn.Write([]byte{0, dlFldrActionNextFile}); err != nil {
+ // Tell client to send next fileWrapper
+ if _, err := rwc.Write([]byte{0, dlFldrActionNextFile}); err != nil {
return err
}
}
return nil
}
-
-// sortedClients is a utility function that takes a map of *ClientConn and returns a sorted slice of the values.
-// The purpose of this is to ensure that the ordering of client connections is deterministic so that test assertions work.
-func sortedClients(unsortedClients map[uint16]*ClientConn) (clients []*ClientConn) {
- for _, c := range unsortedClients {
- clients = append(clients, c)
- }
- sort.Sort(byClientID(clients))
- return clients
-}
package hotline
import (
+ "bytes"
+ "encoding/hex"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
return l.Sugar()
}
+// assertTransferBytesEqual takes a string with a hexdump in the same format that `hexdump -C` produces and compares with
+// a hexdump for the bytes in got, after stripping the create/modify timestamps.
+// I don't love this, but as git does not preserve file create/modify timestamps, we either need to fully mock the
+// filesystem interactions or work around in this way.
+// TODO: figure out a better solution
+func assertTransferBytesEqual(t *testing.T, wantHexDump string, got []byte) bool {
+	if wantHexDump == "" {
+		return true
+	}
+
+	// Guard against short payloads: bytes 92-107 are assumed to hold the
+	// create/modify timestamps, so slicing below would panic on anything shorter.
+	// Fail the assertion with a clear message instead of crashing the test run.
+	if len(got) < 108 {
+		return assert.Fail(t, "transfer payload too short to contain timestamps", "len(got) = %d, want >= 108", len(got))
+	}
+
+	var clean []byte
+	clean = append(clean, got[:92]...)         // keep the first 92 bytes
+	clean = append(clean, make([]byte, 16)...) // replace the next 16 bytes for create/modify timestamps
+	clean = append(clean, got[108:]...)        // keep the rest
+
+	return assert.Equal(t, wantHexDump, hex.Dump(clean))
+}
+
// tranAssertEqual compares equality of transactions slices after stripping out the random ID
func tranAssertEqual(t *testing.T, tran1, tran2 []Transaction) bool {
var newT1 []Transaction
var newT2 []Transaction
+
for _, trans := range tran1 {
trans.ID = []byte{0, 0, 0, 0}
+ var fs []Field
+ for _, field := range trans.Fields {
+ if bytes.Equal(field.ID, []byte{0x00, 0x6b}) {
+ continue
+ }
+ fs = append(fs, field)
+ }
+ trans.Fields = fs
newT1 = append(newT1, trans)
}
for _, trans := range tran2 {
trans.ID = []byte{0, 0, 0, 0}
+ var fs []Field
+ for _, field := range trans.Fields {
+ if bytes.Equal(field.ID, []byte{0x00, 0x6b}) {
+ continue
+ }
+ fs = append(fs, field)
+ }
+ trans.Fields = fs
newT2 = append(newT2, trans)
-
}
return assert.Equal(t, newT1, newT2)
package hotline
-//
-// import (
-// "bytes"
-// "fmt"
-// "github.com/google/go-cmp/cmp"
-// "io/ioutil"
-// "math/big"
-// "net"
-// "strings"
-// "sync"
-// "testing"
-// )
-//
-// type transactionTest struct {
-// description string // Human understandable description
-// account Account // Account struct for a user that will test transaction will execute under
-// request Transaction // transaction that will be sent by the client to the server
-// want Transaction // transaction that the client expects to receive in response
-// setup func() // Optional setup required for the test scenario
-// teardown func() // Optional teardown for test scenario
-// }
-//
-// func (tt *transactionTest) Setup(srv *Server) error {
-// if err := srv.NewUser(tt.account.Login, tt.account.Name, NegatedUserString([]byte(tt.account.Password)), tt.account.Access); err != nil {
-// return err
-// }
-//
-// if tt.setup != nil {
-// tt.setup()
-// }
-//
-// return nil
-// }
-//
-// func (tt *transactionTest) Teardown(srv *Server) error {
-// if err := srv.DeleteUser(tt.account.Login); err != nil {
-// return err
-// }
-//
-// if tt.teardown != nil {
-// tt.teardown()
-// }
-//
-// return nil
-// }
-//
-// // StartTestServer
-// func StartTestServer() (srv *Server, lnPort int) {
-// hotlineServer, _ := NewServer("test/config/")
-// ln, err := net.Listen("tcp", ":0")
-//
-// if err != nil {
-// panic(err)
-// }
-// go func() {
-// for {
-// conn, _ := ln.Accept()
-// go hotlineServer.HandleConnection(conn)
-// }
-// }()
-// return hotlineServer, ln.Addr().(*net.TCPAddr).Port
-// }
-//
-// func StartTestClient(serverPort int, login, passwd string) (*Client, error) {
-// c := NewClient("")
-//
-// err := c.JoinServer(fmt.Sprintf(":%v", serverPort), login, passwd)
-// if err != nil {
-// return nil, err
-// }
-//
-// return c, nil
-// }
-//
-// func StartTestServerWithClients(clientCount int) ([]*Client, int) {
-// _, serverPort := StartTestServer()
-//
-// var clients []*Client
-// for i := 0; i < clientCount; i++ {
-// client, err := StartTestClient(serverPort, "admin", "")
-// if err != nil {
-// panic(err)
-// }
-// clients = append(clients, client)
-// }
-// clients[0].ReadN(2)
-//
-// return clients, serverPort
-// }
-//
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+ "io"
+ "os"
+ "sync"
+ "testing"
+)
-// //func TestHandleTranAgreed(t *testing.T) {
-// // clients, _ := StartTestServerWithClients(2)
-// //
-// // chatMsg := "Test Chat"
-// //
-// // // Assert that both clients should receive the user join notification
-// // var wg sync.WaitGroup
-// // for _, client := range clients {
-// // wg.Add(1)
-// // go func(wg *sync.WaitGroup, c *Client) {
-// // defer wg.Done()
-// //
-// // receivedMsg := c.ReadTransactions()[0].GetField(fieldData).Data
-// //
-// // want := []byte(fmt.Sprintf("test: %s\r", chatMsg))
-// // if bytes.Compare(receivedMsg, want) != 0 {
-// // t.Errorf("%q, want %q", receivedMsg, want)
-// // }
-// // }(&wg, client)
-// // }
-// //
-// // trans := clients[1].ReadTransactions()
-// // spew.Dump(trans)
-// //
-// // // Send the agreement
-// // clients[1].Connection.Write(
-// // NewTransaction(
-// // tranAgreed, 0,
-// // []Field{
-// // NewField(fieldUserName, []byte("testUser")),
-// // NewField(fieldUserIconID, []byte{0x00,0x07}),
-// // },
-// // ).Payload(),
-// // )
-// //
-// // wg.Wait()
-// //}
-//
-// func TestChatSend(t *testing.T) {
-// //srvPort := StartTestServer()
-// //
-// //senderClient := NewClient("senderClient")
-// //senderClient.JoinServer(fmt.Sprintf(":%v", srvPort), "", "")
-// //
-// //receiverClient := NewClient("receiverClient")
-// //receiverClient.JoinServer(fmt.Sprintf(":%v", srvPort), "", "")
-//
-// clients, _ := StartTestServerWithClients(2)
-//
-// chatMsg := "Test Chat"
-//
-// // Both clients should receive the chatMsg
-// var wg sync.WaitGroup
-// for _, client := range clients {
-// wg.Add(1)
-// go func(wg *sync.WaitGroup, c *Client) {
-// defer wg.Done()
-//
-// receivedMsg := c.ReadTransactions()[0].GetField(fieldData).Data
-//
-// want := []byte(fmt.Sprintf(" test: %s\r", chatMsg))
-// if bytes.Compare(receivedMsg, want) != 0 {
-// t.Errorf("%q, want %q", receivedMsg, want)
-// }
-// }(&wg, client)
-// }
-//
-// // Send the chatMsg
-// clients[1].Send(
-// NewTransaction(
-// tranChatSend, 0,
-// []Field{
-// NewField(fieldData, []byte(chatMsg)),
-// },
-// ),
-// )
-//
-// wg.Wait()
-// }
-//
-// func TestSetClientUserInfo(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-//
-// newIcon := []byte{0x00, 0x01}
-// newUserName := "newName"
-//
-// // Both clients should receive the chatMsg
-// var wg sync.WaitGroup
-// for _, client := range clients {
-// wg.Add(1)
-// go func(wg *sync.WaitGroup, c *Client) {
-// defer wg.Done()
-//
-// tran := c.ReadTransactions()[0]
-//
-// want := []byte(newUserName)
-// got := tran.GetField(fieldUserName).Data
-// if bytes.Compare(got, want) != 0 {
-// t.Errorf("%q, want %q", got, want)
-// }
-// }(&wg, client)
-// }
-//
-// _, err := clients[1].Connection.Write(
-// NewTransaction(
-// tranSetClientUserInfo, 0,
-// []Field{
-// NewField(fieldUserIconID, newIcon),
-// NewField(fieldUserName, []byte(newUserName)),
-// },
-// ).Payload(),
-// )
-// if err != nil {
-// t.Errorf("%v", err)
-// }
-//
-// wg.Wait()
-// }
-//
-// // TestSendInstantMsg tests that client A can send an instant message to client B
-// //
-// func TestSendInstantMsg(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-//
-// instantMsg := "Test IM"
-//
-// var wg sync.WaitGroup
-// wg.Add(1)
-// go func(wg *sync.WaitGroup, c *Client) {
-// defer wg.Done()
-//
-// tran := c.WaitForTransaction(tranServerMsg)
-//
-// receivedMsg := tran.GetField(fieldData).Data
-// want := []byte(fmt.Sprintf("%s", instantMsg))
-// if bytes.Compare(receivedMsg, want) != 0 {
-// t.Errorf("%q, want %q", receivedMsg, want)
-// }
-// }(&wg, clients[0])
-//
-// _ = clients[1].Send(
-// NewTransaction(tranGetUserNameList, 0, []Field{}),
-// )
-// //connectedUsersTran := clients[1].ReadTransactions()[0]
-// ////connectedUsers := connectedUsersTran.Fields[0].Data[0:2]
-// //spew.Dump(connectedUsersTran.Fields)
-// //firstUserID := connectedUsersTran.Fields[0].Data[0:2]
-// //
-// //spew.Dump(firstUserID)
-//
-// // Send the IM
-// err := clients[1].Send(
-// NewTransaction(
-// tranSendInstantMsg, 0,
-// []Field{
-// NewField(fieldData, []byte(instantMsg)),
-// NewField(fieldUserName, clients[1].UserName),
-// NewField(fieldUserID, []byte{0, 2}),
-// NewField(fieldOptions, []byte{0, 1}),
-// },
-// ),
-// )
-// if err != nil {
-// t.Error(err)
-// }
-//
-// wg.Wait()
-// }
-//
-// func TestOldPostNews(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-//
-// newsPost := "Test News Post"
-//
-// var wg sync.WaitGroup
-// wg.Add(1)
-// go func(wg *sync.WaitGroup, c *Client) {
-// defer wg.Done()
-//
-// receivedMsg := c.ReadTransactions()[0].GetField(fieldData).Data
-//
-// if strings.Contains(string(receivedMsg), newsPost) == false {
-// t.Errorf("news post missing")
-// }
-// }(&wg, clients[0])
-//
-// clients[1].Connection.Write(
-// NewTransaction(
-// tranOldPostNews, 0,
-// []Field{
-// NewField(fieldData, []byte(newsPost)),
-// },
-// ).Payload(),
-// )
-//
-// wg.Wait()
-// }
-//
-// // TODO: Fixme
-// //func TestGetFileNameList(t *testing.T) {
-// // clients, _ := StartTestServerWithClients(2)
-// //
-// // clients[0].Connection.Write(
-// // NewTransaction(
-// // tranGetFileNameList, 0,
-// // []Field{},
-// // ).Payload(),
-// // )
-// //
-// // ts := clients[0].ReadTransactions()
-// // testfileSit := ReadFileNameWithInfo(ts[0].Fields[1].Data)
-// //
-// // want := "testfile.sit"
-// // got := testfileSit.Name
-// // diff := cmp.Diff(want, got)
-// // if diff != "" {
-// // t.Fatalf(diff)
-// // }
-// // if testfileSit.Name != "testfile.sit" {
-// // t.Errorf("news post missing")
-// // t.Errorf("%q, want %q", testfileSit.Name, "testfile.sit")
-// // }
-// //}
-//
-// func TestNewsCategoryList(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-// client := clients[0]
-//
-// client.Send(
-// NewTransaction(
-// tranGetNewsCatNameList, 0,
-// []Field{},
-// ),
-// )
-//
-// ts := client.ReadTransactions()
-// cats := ts[0].GetFields(fieldNewsCatListData15)
-//
-// newsCat := ReadNewsCategoryListData(cats[0].Data)
-// want := "TestBundle"
-// got := newsCat.Name
-// diff := cmp.Diff(want, got)
-// if diff != "" {
-// t.Fatalf(diff)
-// }
-//
-// newsBundle := ReadNewsCategoryListData(cats[1].Data)
-// want = "TestCat"
-// got = newsBundle.Name
-// diff = cmp.Diff(want, got)
-// if diff != "" {
-// t.Fatalf(diff)
-// }
-// }
-//
-// func TestNestedNewsCategoryList(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-// client := clients[0]
-// newsPath := NewsPath{
-// []string{
-// "TestBundle",
-// "NestedBundle",
-// },
-// }
-//
-// _, err := client.Connection.Write(
-// NewTransaction(
-// tranGetNewsCatNameList, 0,
-// []Field{
-// NewField(
-// fieldNewsPath,
-// newsPath.Payload(),
-// ),
-// },
-// ).Payload(),
-// )
-// if err != nil {
-// t.Errorf("%v", err)
-// }
-//
-// ts := client.ReadTransactions()
-// cats := ts[0].GetFields(fieldNewsCatListData15)
-//
-// newsCat := ReadNewsCategoryListData(cats[0].Data)
-// want := "NestedCat"
-// got := newsCat.Name
-// diff := cmp.Diff(want, got)
-// if diff != "" {
-// t.Fatalf(diff)
-// }
-// }
-//
-// func TestFileDownload(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-// client := clients[0]
-//
-// type want struct {
-// fileSize []byte
-// transferSize []byte
-// waitingCount []byte
-// refNum []byte
-// }
-// var tests = []struct {
-// fileName string
-// want want
-// }{
-// {
-// fileName: "testfile.sit",
-// want: want{
-// fileSize: []byte{0x0, 0x0, 0x0, 0x13},
-// transferSize: []byte{0x0, 0x0, 0x0, 0xa1},
-// },
-// },
-// {
-// fileName: "testfile.txt",
-// want: want{
-// fileSize: []byte{0x0, 0x0, 0x0, 0x17},
-// transferSize: []byte{0x0, 0x0, 0x0, 0xa5},
-// },
-// },
-// }
-//
-// for _, test := range tests {
-// _, err := client.Connection.Write(
-// NewTransaction(
-// tranDownloadFile, 0,
-// []Field{
-// NewField(fieldFileName, []byte(test.fileName)),
-// NewField(fieldFilePath, []byte("")),
-// },
-// ).Payload(),
-// )
-// if err != nil {
-// t.Errorf("%v", err)
-// }
-// tran := client.ReadTransactions()[0]
-//
-// if got := tran.GetField(fieldFileSize).Data; bytes.Compare(got, test.want.fileSize) != 0 {
-// t.Errorf("TestFileDownload: fileSize got %#v, want %#v", got, test.want.fileSize)
-// }
-//
-// if got := tran.GetField(fieldTransferSize).Data; bytes.Compare(got, test.want.transferSize) != 0 {
-// t.Errorf("TestFileDownload: fieldTransferSize: %s: got %#v, want %#v", test.fileName, got, test.want.transferSize)
-// }
-// }
-// }
-//
-// func TestFileUpload(t *testing.T) {
-// clients, _ := StartTestServerWithClients(2)
-// client := clients[0]
-//
-// var tests = []struct {
-// fileName string
-// want Transaction
-// }{
-// {
-// fileName: "testfile.sit",
-// want: Transaction{
-// Fields: []Field{
-// NewField(fieldRefNum, []byte{0x16, 0x3f, 0x5f, 0xf}),
-// },
-// },
-// },
-// }
-//
-// for _, test := range tests {
-// err := client.Send(
-// NewTransaction(
-// tranUploadFile, 0,
-// []Field{
-// NewField(fieldFileName, []byte(test.fileName)),
-// NewField(fieldFilePath, []byte("")),
-// },
-// ),
-// )
-// if err != nil {
-// t.Errorf("%v", err)
-// }
-// tran := client.ReadTransactions()[0]
-//
-// for _, f := range test.want.Fields {
-// got := tran.GetField(f.Uint16ID()).Data
-// want := test.want.GetField(fieldRefNum).Data
-// if bytes.Compare(got, want) != 0 {
-// t.Errorf("xxx: yyy got %#v, want %#v", got, want)
-// }
-// }
-// }
-// }
-//
-// // TODO: Make canonical
-// func TestNewUser(t *testing.T) {
-// srv, port := StartTestServer()
-//
-// var tests = []struct {
-// description string
-// setup func()
-// teardown func()
-// account Account
-// request Transaction
-// want Transaction
-// }{
-// {
-// description: "a valid new account",
-// teardown: func() {
-// _ = srv.DeleteUser("testUser")
-// },
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{255, 255, 255, 255, 255, 255, 255, 255},
-// },
-// request: NewTransaction(
-// tranNewUser, 0,
-// []Field{
-// NewField(fieldUserLogin, []byte(NegatedUserString([]byte("testUser")))),
-// NewField(fieldUserName, []byte("testUserName")),
-// NewField(fieldUserPassword, []byte(NegatedUserString([]byte("testPw")))),
-// NewField(fieldUserAccess, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{},
-// },
-// },
-// {
-// description: "a newUser request from a user without the required access",
-// teardown: func() {
-// _ = srv.DeleteUser("testUser")
-// },
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{0, 0, 0, 0, 0, 0, 0, 0},
-// },
-// request: NewTransaction(
-// tranNewUser, 0,
-// []Field{
-// NewField(fieldUserLogin, []byte(NegatedUserString([]byte("testUser")))),
-// NewField(fieldUserName, []byte("testUserName")),
-// NewField(fieldUserPassword, []byte(NegatedUserString([]byte("testPw")))),
-// NewField(fieldUserAccess, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{
-// NewField(fieldError, []byte("You are not allowed to create new accounts.")),
-// },
-// },
-// },
-// {
-// description: "a request to create a user that already exists",
-// teardown: func() {
-// _ = srv.DeleteUser("testUser")
-// },
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{255, 255, 255, 255, 255, 255, 255, 255},
-// },
-// request: NewTransaction(
-// tranNewUser, 0,
-// []Field{
-// NewField(fieldUserLogin, []byte(NegatedUserString([]byte("guest")))),
-// NewField(fieldUserName, []byte("testUserName")),
-// NewField(fieldUserPassword, []byte(NegatedUserString([]byte("testPw")))),
-// NewField(fieldUserAccess, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{
-// NewField(fieldError, []byte("Cannot create account guest because there is already an account with that login.")),
-// },
-// },
-// },
-// }
-//
-// for _, test := range tests {
-// if test.setup != nil {
-// test.setup()
-// }
-//
-// if err := srv.NewUser(test.account.Login, test.account.Name, NegatedUserString([]byte(test.account.Password)), test.account.Access); err != nil {
-// t.Errorf("%v", err)
-// }
-//
-// c := NewClient("")
-// err := c.JoinServer(fmt.Sprintf(":%v", port), test.account.Login, test.account.Password)
-// if err != nil {
-// t.Errorf("login failed: %v", err)
-// }
-//
-// if err := c.Send(test.request); err != nil {
-// t.Errorf("%v", err)
-// }
-//
-// tran := c.ReadTransactions()[0]
-// for _, want := range test.want.Fields {
-// got := tran.GetField(want.Uint16ID())
-// if bytes.Compare(got.Data, want.Data) != 0 {
-// t.Errorf("%v: field mismatch: want: %#v got: %#v", test.description, want.Data, got.Data)
-// }
-// }
-//
-// srv.DeleteUser(test.account.Login)
-//
-// if test.teardown != nil {
-// test.teardown()
-// }
-// }
-// }
-//
-// func TestDeleteUser(t *testing.T) {
-// srv, port := StartTestServer()
-//
-// var tests = []transactionTest{
-// {
-// description: "a deleteUser request from a user without the required access",
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{0, 0, 0, 0, 0, 0, 0, 0},
-// },
-// request: NewTransaction(
-// tranDeleteUser, 0,
-// []Field{
-// NewField(fieldUserLogin, []byte(NegatedUserString([]byte("foo")))),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{
-// NewField(fieldError, []byte("You are not allowed to delete accounts.")),
-// },
-// },
-// },
-// {
-// description: "a valid deleteUser request",
-// setup: func() {
-// _ = srv.NewUser("foo", "foo", "foo", []byte{0, 0, 0, 0, 0, 0, 0, 0})
-// },
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{255, 255, 255, 255, 255, 255, 255, 255},
-// },
-// request: NewTransaction(
-// tranDeleteUser, 0,
-// []Field{
-// NewField(fieldUserLogin, []byte(NegatedUserString([]byte("foo")))),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{},
-// },
-// },
-// }
-//
-// for _, test := range tests {
-// test.Setup(srv)
-//
-// c := NewClient("")
-// err := c.JoinServer(fmt.Sprintf(":%v", port), test.account.Login, test.account.Password)
-// if err != nil {
-// t.Errorf("login failed: %v", err)
-// }
-//
-// if err := c.Send(test.request); err != nil {
-// t.Errorf("%v", err)
-// }
-//
-// tran := c.ReadTransactions()[0]
-// for _, want := range test.want.Fields {
-// got := tran.GetField(want.Uint16ID())
-// if bytes.Compare(got.Data, want.Data) != 0 {
-// t.Errorf("%v: field mismatch: want: %#v got: %#v", test.description, want.Data, got.Data)
-// }
-// }
-//
-// test.Teardown(srv)
-// }
-// }
-//
-// func TestDeleteFile(t *testing.T) {
-// srv, port := StartTestServer()
-//
-// var tests = []transactionTest{
-// {
-// description: "a request without the required access",
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{0, 0, 0, 0, 0, 0, 0, 0},
-// },
-// request: NewTransaction(
-// tranDeleteFile, 0,
-// []Field{
-// NewField(fieldFileName, []byte("testFile")),
-// NewField(fieldFilePath, []byte("")),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{},
-// },
-// },
-// {
-// description: "a valid deleteFile request",
-// setup: func() {
-// _ = ioutil.WriteFile(srv.Config.FileRoot+"testFile", []byte{0x00}, 0666)
-// },
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{255, 255, 255, 255, 255, 255, 255, 255},
-// },
-// request: NewTransaction(
-// tranDeleteFile, 0,
-// []Field{
-// NewField(fieldFileName, []byte("testFile")),
-// NewField(fieldFilePath, []byte("")),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{},
-// },
-// },
-// {
-// description: "an invalid request for a file that does not exist",
-// account: Account{
-// Login: "test",
-// Name: "unnamed",
-// Password: "test",
-// Access: []byte{255, 255, 255, 255, 255, 255, 255, 255},
-// },
-// request: NewTransaction(
-// tranDeleteFile, 0,
-// []Field{
-// NewField(fieldFileName, []byte("testFile")),
-// NewField(fieldFilePath, []byte("")),
-// },
-// ),
-// want: Transaction{
-// Fields: []Field{
-// NewField(fieldError, []byte("Cannot delete file testFile because it does not exist or cannot be found.")),
-// },
-// },
-// },
-// }
-//
-// for _, test := range tests {
-// test.Setup(srv)
-//
-// c := NewClient("")
-//
-// if err := c.JoinServer(fmt.Sprintf(":%v", port), test.account.Login, test.account.Password); err != nil {
-// t.Errorf("login failed: %v", err)
-// }
-//
-// if err := c.Send(test.request); err != nil {
-// t.Errorf("%v", err)
-// }
-//
-// tran := c.ReadTransactions()[0]
-// for _, want := range test.want.Fields {
-// got := tran.GetField(want.Uint16ID())
-// if bytes.Compare(got.Data, want.Data) != 0 {
-// t.Errorf("%v: field mismatch: want: %#v got: %#v", test.description, want.Data, got.Data)
-// }
-// }
-//
-// test.Teardown(srv)
-// }
-// }
-//
-// func Test_authorize(t *testing.T) {
-// accessBitmap := big.NewInt(int64(0))
-// accessBitmap.SetBit(accessBitmap, accessCreateFolder, 1)
-// fmt.Printf("%v %b %x\n", accessBitmap, accessBitmap, accessBitmap)
-// fmt.Printf("%b\n", 0b10000)
-//
-// type args struct {
-// access *[]byte
-// reqAccess int
-// }
-// tests := []struct {
-// name string
-// args args
-// want bool
-// }{
-// {
-// name: "fooz",
-// args: args{
-// access: &[]byte{4, 0, 0, 0, 0, 0, 0, 0x02},
-// reqAccess: accessDownloadFile,
-// },
-// want: true,
-// },
-// }
-// for _, tt := range tests {
-// t.Run(tt.name, func(t *testing.T) {
-// if got := authorize(tt.args.access, tt.args.reqAccess); got != tt.want {
-// t.Errorf("authorize() = %v, want %v", got, tt.want)
-// }
-// })
-// }
-// }
+type mockReadWriter struct {
+ RBuf bytes.Buffer
+ WBuf *bytes.Buffer
+}
+
+func (mrw mockReadWriter) Read(p []byte) (n int, err error) {
+ return mrw.RBuf.Read(p)
+}
+
+func (mrw mockReadWriter) Write(p []byte) (n int, err error) {
+ return mrw.WBuf.Write(p)
+}
+
+func TestServer_handleFileTransfer(t *testing.T) {
+ type fields struct {
+ Port int
+ Accounts map[string]*Account
+ Agreement []byte
+ Clients map[uint16]*ClientConn
+ ThreadedNews *ThreadedNews
+ FileTransfers map[uint32]*FileTransfer
+ Config *Config
+ ConfigDir string
+ Logger *zap.SugaredLogger
+ PrivateChats map[uint32]*PrivateChat
+ NextGuestID *uint16
+ TrackerPassID [4]byte
+ Stats *Stats
+ FS FileStore
+ outbox chan Transaction
+ mux sync.Mutex
+ flatNewsMux sync.Mutex
+ FlatNews []byte
+ }
+ type args struct {
+ ctx context.Context
+ rwc io.ReadWriter
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ wantErr assert.ErrorAssertionFunc
+ wantDump string
+ }{
+ {
+ name: "with invalid protocol",
+ args: args{
+ ctx: func() context.Context {
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, contextKeyReq, requestCtx{})
+ return ctx
+ }(),
+ rwc: func() io.ReadWriter {
+ mrw := mockReadWriter{}
+ mrw.WBuf = &bytes.Buffer{}
+ mrw.RBuf.Write(
+ []byte{
+ 0, 0, 0, 0,
+ 0, 0, 0, 5,
+ 0, 0, 0x01, 0,
+ 0, 0, 0, 0,
+ },
+ )
+ return mrw
+ }(),
+ },
+ wantErr: assert.Error,
+ },
+ {
+ name: "with invalid transfer ID",
+ args: args{
+ ctx: func() context.Context {
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, contextKeyReq, requestCtx{})
+ return ctx
+ }(),
+ rwc: func() io.ReadWriter {
+ mrw := mockReadWriter{}
+ mrw.WBuf = &bytes.Buffer{}
+ mrw.RBuf.Write(
+ []byte{
+ 0x48, 0x54, 0x58, 0x46,
+ 0, 0, 0, 5,
+ 0, 0, 0x01, 0,
+ 0, 0, 0, 0,
+ },
+ )
+ return mrw
+ }(),
+ },
+ wantErr: assert.Error,
+ },
+ {
+ name: "file download",
+ fields: fields{
+ FS: &OSFileStore{},
+ Config: &Config{
+ FileRoot: func() string {
+ path, _ := os.Getwd()
+ return path + "/test/config/Files"
+ }()},
+ Logger: NewTestLogger(),
+ Stats: &Stats{},
+ FileTransfers: map[uint32]*FileTransfer{
+ uint32(5): {
+ ReferenceNumber: []byte{0, 0, 0, 5},
+ Type: FileDownload,
+ FileName: []byte("testfile-8b"),
+ FilePath: []byte{},
+ },
+ },
+ },
+ args: args{
+ ctx: func() context.Context {
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, contextKeyReq, requestCtx{})
+ return ctx
+ }(),
+ rwc: func() io.ReadWriter {
+ mrw := mockReadWriter{}
+ mrw.WBuf = &bytes.Buffer{}
+ mrw.RBuf.Write(
+ []byte{
+ 0x48, 0x54, 0x58, 0x46,
+ 0, 0, 0, 5,
+ 0, 0, 0x01, 0,
+ 0, 0, 0, 0,
+ },
+ )
+ return mrw
+ }(),
+ },
+ wantErr: assert.NoError,
+ wantDump: `00000000 46 49 4c 50 00 01 00 00 00 00 00 00 00 00 00 00 |FILP............|
+00000010 00 00 00 00 00 00 00 02 49 4e 46 4f 00 00 00 00 |........INFO....|
+00000020 00 00 00 00 00 00 00 55 41 4d 41 43 54 45 58 54 |.......UAMACTEXT|
+00000030 54 54 58 54 00 00 00 00 00 00 01 00 00 00 00 00 |TTXT............|
+00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0b |................|
+00000070 74 65 73 74 66 69 6c 65 2d 38 62 00 00 44 41 54 |testfile-8b..DAT|
+00000080 41 00 00 00 00 00 00 00 00 00 00 00 08 7c 39 e0 |A............|9.|
+00000090 bc 64 e2 cd de 4d 41 43 52 00 00 00 00 00 00 00 |.d...MACR.......|
+000000a0 00 00 00 00 00 |.....|
+`,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := &Server{
+ Port: tt.fields.Port,
+ Accounts: tt.fields.Accounts,
+ Agreement: tt.fields.Agreement,
+ Clients: tt.fields.Clients,
+ ThreadedNews: tt.fields.ThreadedNews,
+ FileTransfers: tt.fields.FileTransfers,
+ Config: tt.fields.Config,
+ ConfigDir: tt.fields.ConfigDir,
+ Logger: tt.fields.Logger,
+ Stats: tt.fields.Stats,
+ FS: tt.fields.FS,
+ }
+ tt.wantErr(t, s.handleFileTransfer(tt.args.ctx, tt.args.rwc), fmt.Sprintf("handleFileTransfer(%v, %v)", tt.args.ctx, tt.args.rwc))
+
+ assertTransferBytesEqual(t, tt.wantDump, tt.args.rwc.(mockReadWriter).WBuf.Bytes())
+ })
+ }
+}
package hotline
import (
- "fmt"
"time"
)
type Stats struct {
- LoginCount int `yaml:"login count"`
- StartTime time.Time `yaml:"start time"`
- DownloadCounter int
- UploadCounter int
-}
-
-func (s *Stats) String() string {
- template := `
-Server Stats:
- Start Time: %v
- Uptime: %s
- Login Count: %v
-`
- d := time.Since(s.StartTime)
- d = d.Round(time.Minute)
- h := d / time.Hour
- d -= h * time.Hour
- m := d / time.Minute
+ CurrentlyConnected int
+ DownloadsInProgress int
+ UploadsInProgress int
+ ConnectionPeak int
+ DownloadCounter int
+ UploadCounter int
- return fmt.Sprintf(
- template,
- s.StartTime.Format(time.RFC1123Z),
- fmt.Sprintf("%02d:%02d", h, m),
- s.LoginCount,
- )
+ LoginCount int `yaml:"login count"`
+ StartTime time.Time `yaml:"start time"`
}
--- /dev/null
+|9à¼dâÍÞ
\ No newline at end of file
tranGetFileInfo = 206
tranSetFileInfo = 207
tranMoveFile = 208
- tranMakeFileAlias = 209 // TODO: implement file alias command
+ tranMakeFileAlias = 209
tranDownloadFldr = 210
// tranDownloadInfo = 211 TODO: implement file transfer queue
// tranDownloadBanner = 212 TODO: figure out what this is used for
formattedMsg = fmt.Sprintf("\r*** %s %s", cc.UserName, t.GetField(fieldData).Data)
}
- if bytes.Equal(t.GetField(fieldData).Data, []byte("/stats")) {
- formattedMsg = strings.Replace(cc.Server.Stats.String(), "\n", "\r", -1)
- }
-
chatID := t.GetField(fieldChatID).Data
// a non-nil chatID indicates the message belongs to a private chat
if chatID != nil {
fileName := t.GetField(fieldFileName).Data
filePath := t.GetField(fieldFilePath).Data
- ffo, err := NewFlattenedFileObject(cc.Server.Config.FileRoot, filePath, fileName, 0)
+ fullFilePath, err := readPath(cc.Server.Config.FileRoot, filePath, fileName)
+ if err != nil {
+ return res, err
+ }
+
+ fw, err := newFileWrapper(cc.Server.FS, fullFilePath, 0)
if err != nil {
return res, err
}
res = append(res, cc.NewReply(t,
- NewField(fieldFileName, fileName),
- NewField(fieldFileTypeString, ffo.FlatFileInformationFork.friendlyType()),
- NewField(fieldFileCreatorString, ffo.FlatFileInformationFork.CreatorSignature),
- NewField(fieldFileComment, ffo.FlatFileInformationFork.Comment),
- NewField(fieldFileType, ffo.FlatFileInformationFork.TypeSignature),
- NewField(fieldFileCreateDate, ffo.FlatFileInformationFork.CreateDate),
- NewField(fieldFileModifyDate, ffo.FlatFileInformationFork.ModifyDate),
- NewField(fieldFileSize, ffo.FlatFileDataForkHeader.DataSize[:]),
+ NewField(fieldFileName, []byte(fw.name)),
+ NewField(fieldFileTypeString, fw.ffo.FlatFileInformationFork.friendlyType()),
+ NewField(fieldFileCreatorString, fw.ffo.FlatFileInformationFork.friendlyCreator()),
+ NewField(fieldFileComment, fw.ffo.FlatFileInformationFork.Comment),
+ NewField(fieldFileType, fw.ffo.FlatFileInformationFork.TypeSignature),
+ NewField(fieldFileCreateDate, fw.ffo.FlatFileInformationFork.CreateDate),
+ NewField(fieldFileModifyDate, fw.ffo.FlatFileInformationFork.ModifyDate),
+ NewField(fieldFileSize, fw.totalSize()),
))
return res, err
}
// HandleSetFileInfo updates a file or folder name and/or comment from the Get Info window
-// TODO: Implement support for comments
// Fields used in the request:
// * 201 File name
// * 202 File path Optional
return res, err
}
+ fi, err := cc.Server.FS.Stat(fullFilePath)
+ if err != nil {
+ return res, err
+ }
+
+ hlFile, err := newFileWrapper(cc.Server.FS, fullFilePath, 0)
+ if err != nil {
+ return res, err
+ }
+ if t.GetField(fieldFileComment).Data != nil {
+ switch mode := fi.Mode(); {
+ case mode.IsDir():
+ if !authorize(cc.Account.Access, accessSetFolderComment) {
+ res = append(res, cc.NewErrReply(t, "You are not allowed to set comments for folders."))
+ return res, err
+ }
+ case mode.IsRegular():
+ if !authorize(cc.Account.Access, accessSetFileComment) {
+ res = append(res, cc.NewErrReply(t, "You are not allowed to set comments for files."))
+ return res, err
+ }
+ }
+
+ hlFile.ffo.FlatFileInformationFork.setComment(t.GetField(fieldFileComment).Data)
+ w, err := hlFile.infoForkWriter()
+ if err != nil {
+ return res, err
+ }
+ _, err = w.Write(hlFile.ffo.FlatFileInformationFork.MarshalBinary())
+ if err != nil {
+ return res, err
+ }
+ }
+
fullNewFilePath, err := readPath(cc.Server.Config.FileRoot, filePath, t.GetField(fieldFileNewName).Data)
if err != nil {
return nil, err
}
- // fileComment := t.GetField(fieldFileComment).Data
fileNewName := t.GetField(fieldFileNewName).Data
if fileNewName != nil {
- fi, err := cc.Server.FS.Stat(fullFilePath)
- if err != nil {
- return res, err
- }
switch mode := fi.Mode(); {
case mode.IsDir():
if !authorize(cc.Account.Access, accessRenameFolder) {
res = append(res, cc.NewErrReply(t, "You are not allowed to rename folders."))
return res, err
}
+ err = os.Rename(fullFilePath, fullNewFilePath)
+ if os.IsNotExist(err) {
+ res = append(res, cc.NewErrReply(t, "Cannot rename folder "+string(fileName)+" because it does not exist or cannot be found."))
+ return res, err
+ }
case mode.IsRegular():
if !authorize(cc.Account.Access, accessRenameFile) {
res = append(res, cc.NewErrReply(t, "You are not allowed to rename files."))
return res, err
}
- }
-
- err = os.Rename(fullFilePath, fullNewFilePath)
- if os.IsNotExist(err) {
- res = append(res, cc.NewErrReply(t, "Cannot rename file "+string(fileName)+" because it does not exist or cannot be found."))
- return res, err
+ fileDir, err := readPath(cc.Server.Config.FileRoot, filePath, []byte{})
+ if err != nil {
+ return nil, err
+ }
+ hlFile.name = string(fileNewName)
+ err = hlFile.move(fileDir)
+ if os.IsNotExist(err) {
+ res = append(res, cc.NewErrReply(t, "Cannot rename file "+string(fileName)+" because it does not exist or cannot be found."))
+ return res, err
+ }
+ if err != nil {
+ panic(err)
+ }
}
}
return res, err
}
- cc.Server.Logger.Debugw("Delete file", "src", fullFilePath)
+ hlFile, err := newFileWrapper(cc.Server.FS, fullFilePath, 0)
+ if err != nil {
+ return res, err
+ }
- fi, err := os.Stat(fullFilePath)
+ fi, err := hlFile.dataFile()
if err != nil {
res = append(res, cc.NewErrReply(t, "Cannot delete file "+string(fileName)+" because it does not exist or cannot be found."))
return res, nil
}
+
switch mode := fi.Mode(); {
case mode.IsDir():
if !authorize(cc.Account.Access, accessDeleteFolder) {
}
}
- if err := os.RemoveAll(fullFilePath); err != nil {
+ if err := hlFile.delete(); err != nil {
return res, err
}
// HandleMoveFile moves files or folders. Note: seemingly not documented
func HandleMoveFile(cc *ClientConn, t *Transaction) (res []Transaction, err error) {
fileName := string(t.GetField(fieldFileName).Data)
- filePath := cc.Server.Config.FileRoot + ReadFilePath(t.GetField(fieldFilePath).Data)
- fileNewPath := cc.Server.Config.FileRoot + ReadFilePath(t.GetField(fieldFileNewPath).Data)
+
+ filePath, err := readPath(cc.Server.Config.FileRoot, t.GetField(fieldFilePath).Data, t.GetField(fieldFileName).Data)
+ if err != nil {
+ return res, err
+ }
+
+ fileNewPath, err := readPath(cc.Server.Config.FileRoot, t.GetField(fieldFileNewPath).Data, nil)
+ if err != nil {
+ return res, err
+ }
cc.Server.Logger.Debugw("Move file", "src", filePath+"/"+fileName, "dst", fileNewPath+"/"+fileName)
- fp := filePath + "/" + fileName
- fi, err := os.Stat(fp)
+ hlFile, err := newFileWrapper(cc.Server.FS, filePath, 0)
+
+ fi, err := hlFile.dataFile()
+ if err != nil {
+ res = append(res, cc.NewErrReply(t, "Cannot delete file "+fileName+" because it does not exist or cannot be found."))
+ return res, err
+ }
if err != nil {
return res, err
}
return res, err
}
}
-
- err = os.Rename(filePath+"/"+fileName, fileNewPath+"/"+fileName)
- if os.IsNotExist(err) {
- res = append(res, cc.NewErrReply(t, "Cannot delete file "+fileName+" because it does not exist or cannot be found."))
+ if err := hlFile.move(fileNewPath); err != nil {
return res, err
}
- if err != nil {
- return []Transaction{}, err
- }
- // TODO: handle other possible errors; e.g. file delete fails due to file permission issue
+ // TODO: handle other possible errors; e.g. file delete fails due to file permission issue
res = append(res, cc.NewReply(t))
return res, err
login := DecodeUserString(getField(fieldUserLogin, &subFields).Data)
- // check if the login exists; if so, we know we are updating an existing user
+ // check if the login exists; if so, we know we are updating an existing user
if acc, ok := cc.Server.Accounts[login]; ok {
cc.Server.Logger.Infow("UpdateUser", "login", login)
- // account exists, so this is an update action
+ // account exists, so this is an update action
if !authorize(cc.Account.Access, accessModifyUser) {
res = append(res, cc.NewErrReply(t, "You are not allowed to modify accounts."))
return res, err
login := DecodeUserString(t.GetField(fieldUserLogin).Data)
- // If the account already exists, reply with an error
+ // If the account already exists, reply with an error
if _, ok := cc.Server.Accounts[login]; ok {
res = append(res, cc.NewErrReply(t, "Cannot create account "+login+" because there is already an account with that login."))
return res, err
fileName := t.GetField(fieldFileName).Data
filePath := t.GetField(fieldFilePath).Data
-
resumeData := t.GetField(fieldFileResumeData).Data
var dataOffset int64
if err := frd.UnmarshalBinary(t.GetField(fieldFileResumeData).Data); err != nil {
return res, err
}
+ // TODO: handle rsrc fork offset
dataOffset = int64(binary.BigEndian.Uint32(frd.ForkInfoList[0].DataSize[:]))
}
- var fp FilePath
- err = fp.UnmarshalBinary(filePath)
+ fullFilePath, err := readPath(cc.Server.Config.FileRoot, filePath, fileName)
if err != nil {
return res, err
}
- ffo, err := NewFlattenedFileObject(cc.Server.Config.FileRoot, filePath, fileName, dataOffset)
+ hlFile, err := newFileWrapper(cc.Server.FS, fullFilePath, dataOffset)
if err != nil {
return res, err
}
Type: FileDownload,
}
+ // TODO: refactor to remove this
if resumeData != nil {
var frd FileResumeData
if err := frd.UnmarshalBinary(t.GetField(fieldFileResumeData).Data); err != nil {
ft.fileResumeData = &frd
}
- xferSize := ffo.TransferSize()
+ xferSize := hlFile.ffo.TransferSize(0)
// Optional field for when a HL v1.5+ client requests file preview
// Used only for TEXT, JPEG, GIFF, BMP or PICT files
// The value will always be 2
if t.GetField(fieldFileTransferOptions).Data != nil {
ft.options = t.GetField(fieldFileTransferOptions).Data
- xferSize = ffo.FlatFileDataForkHeader.DataSize[:]
+ xferSize = hlFile.ffo.FlatFileDataForkHeader.DataSize[:]
}
cc.Server.mux.Lock()
NewField(fieldRefNum, transactionRef),
NewField(fieldWaitingCount, []byte{0x00, 0x00}), // TODO: Implement waiting count
NewField(fieldTransferSize, xferSize),
- NewField(fieldFileSize, ffo.FlatFileDataForkHeader.DataSize[:]),
+ NewField(fieldFileSize, hlFile.ffo.FlatFileDataForkHeader.DataSize[:]),
))
return res, err
replyT := cc.NewReply(t, NewField(fieldRefNum, transactionRef))
- // client has requested to resume a partially transfered file
+ // client has requested to resume a partially transferred file
if transferOptions != nil {
fullFilePath, err := readPath(cc.Server.Config.FileRoot, filePath, fileName)
if err != nil {
return res, err
}
-// HandleMakeAlias makes a file alias using the specified path.
+// HandleMakeAlias makes a file alias using the specified path.
// Fields used in the request:
// 201 File name
// 202 File path
"os"
"strings"
"testing"
+ "time"
)
func TestHandleSetChatSubject(t *testing.T) {
cc: &ClientConn{
ID: &[]byte{0x00, 0x01},
Server: &Server{
+ FS: &OSFileStore{},
Config: &Config{
FileRoot: func() string {
path, _ := os.Getwd()
return
}
- // Clear the file timestamp fields to work around problems running the tests in multiple timezones
+ // Clear the file timestamp fields to work around problems running the tests in multiple timezones
// TODO: revisit how to test this by mocking the stat calls
gotRes[0].Fields[5].Data = make([]byte, 8)
gotRes[0].Fields[6].Data = make([]byte, 8)
wantErr assert.ErrorAssertionFunc
}{
{
- name: "when user exists",
+ name: "when user dataFile",
args: args{
cc: &ClientConn{
Account: &Account{
}(),
},
Server: &Server{
+ FS: &OSFileStore{},
FileTransfers: make(map[uint32]*FileTransfer),
Config: &Config{
FileRoot: func() string { path, _ := os.Getwd(); return path + "/test/config/Files" }(),
},
wantErr: assert.NoError,
},
+ {
+ name: "when client requests to resume 1k test file at offset 256",
+ args: args{
+ cc: &ClientConn{
+ Transfers: make(map[int][]*FileTransfer),
+ Account: &Account{
+ Access: func() *[]byte {
+ var bits accessBitmap
+ bits.Set(accessDownloadFile)
+ access := bits[:]
+ return &access
+ }(),
+ },
+ Server: &Server{
+ FS: &OSFileStore{},
+ // FS: func() *MockFileStore {
+ // path, _ := os.Getwd()
+ // testFile, err := os.Open(path + "/test/config/Files/testfile-1k")
+ // if err != nil {
+ // panic(err)
+ // }
+ //
+ // mfi := &MockFileInfo{}
+ // mfi.On("Mode").Return(fs.FileMode(0))
+ // mfs := &MockFileStore{}
+ // mfs.On("Stat", "/fakeRoot/Files/testfile.txt").Return(mfi, nil)
+ // mfs.On("Open", "/fakeRoot/Files/testfile.txt").Return(testFile, nil)
+ // mfs.On("Stat", "/fakeRoot/Files/.info_testfile.txt").Return(nil, errors.New("no"))
+ // mfs.On("Stat", "/fakeRoot/Files/.rsrc_testfile.txt").Return(nil, errors.New("no"))
+ //
+ // return mfs
+ // }(),
+ FileTransfers: make(map[uint32]*FileTransfer),
+ Config: &Config{
+ FileRoot: func() string { path, _ := os.Getwd(); return path + "/test/config/Files" }(),
+ },
+ Accounts: map[string]*Account{},
+ },
+ },
+ t: NewTransaction(
+ accessDownloadFile,
+ &[]byte{0, 1},
+ NewField(fieldFileName, []byte("testfile-1k")),
+ NewField(fieldFilePath, []byte{0x00, 0x00}),
+ NewField(
+ fieldFileResumeData,
+ func() []byte {
+ frd := FileResumeData{
+ Format: [4]byte{},
+ Version: [2]byte{},
+ RSVD: [34]byte{},
+ ForkCount: [2]byte{0, 2},
+ ForkInfoList: []ForkInfoList{
+ {
+ Fork: [4]byte{0x44, 0x41, 0x54, 0x41}, // "DATA"
+ DataSize: [4]byte{0, 0, 0x01, 0x00}, // request offset 256
+ RSVDA: [4]byte{},
+ RSVDB: [4]byte{},
+ },
+ {
+ Fork: [4]byte{0x4d, 0x41, 0x43, 0x52}, // "MACR"
+ DataSize: [4]byte{0, 0, 0, 0},
+ RSVDA: [4]byte{},
+ RSVDB: [4]byte{},
+ },
+ },
+ }
+ b, _ := frd.BinaryMarshal()
+ return b
+ }(),
+ ),
+ ),
+ },
+ wantRes: []Transaction{
+ {
+ Flags: 0x00,
+ IsReply: 0x01,
+ Type: []byte{0, 0x2},
+ ID: []byte{0x9a, 0xcb, 0x04, 0x42},
+ ErrorCode: []byte{0, 0, 0, 0},
+ Fields: []Field{
+ NewField(fieldRefNum, []byte{0x52, 0xfd, 0xfc, 0x07}),
+ NewField(fieldWaitingCount, []byte{0x00, 0x00}),
+ NewField(fieldTransferSize, []byte{0x00, 0x00, 0x03, 0x8d}),
+ NewField(fieldFileSize, []byte{0x00, 0x00, 0x03, 0x00}),
+ },
+ },
+ },
+ wantErr: assert.NoError,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- // reset the rand seed so that the random fieldRefNum will be deterministic
- rand.Seed(1)
-
gotRes, err := HandleDownloadFile(tt.args.cc, tt.args.t)
if !tt.wantErr(t, err, fmt.Sprintf("HandleDownloadFile(%v, %v)", tt.args.cc, tt.args.t)) {
return
})
}
}
+
+func TestHandleDeleteFile(t *testing.T) {
+ type args struct {
+ cc *ClientConn
+ t *Transaction
+ }
+ tests := []struct {
+ name string
+ args args
+ wantRes []Transaction
+ wantErr assert.ErrorAssertionFunc
+ }{
+ {
+ name: "when user does not have required permission to delete a folder",
+ args: args{
+ cc: &ClientConn{
+ Account: &Account{
+ Access: func() *[]byte {
+ var bits accessBitmap
+ access := bits[:]
+ return &access
+ }(),
+ },
+ Server: &Server{
+ Config: &Config{
+ FileRoot: func() string {
+ return "/fakeRoot/Files"
+ }(),
+ },
+ FS: func() *MockFileStore {
+ mfi := &MockFileInfo{}
+ mfi.On("Mode").Return(fs.FileMode(0))
+ mfi.On("Size").Return(int64(100))
+ mfi.On("ModTime").Return(time.Parse(time.Layout, time.Layout))
+ mfi.On("IsDir").Return(false)
+ mfi.On("Name").Return("testfile")
+
+ mfs := &MockFileStore{}
+ mfs.On("Stat", "/fakeRoot/Files/aaa/testfile").Return(mfi, nil)
+ mfs.On("Stat", "/fakeRoot/Files/aaa/.info_testfile").Return(nil, errors.New("err"))
+ mfs.On("Stat", "/fakeRoot/Files/aaa/.rsrc_testfile").Return(nil, errors.New("err"))
+
+ return mfs
+ }(),
+ Accounts: map[string]*Account{},
+ },
+ },
+ t: NewTransaction(
+ tranDeleteFile, &[]byte{0, 1},
+ NewField(fieldFileName, []byte("testfile")),
+ NewField(fieldFilePath, []byte{
+ 0x00, 0x01,
+ 0x00, 0x00,
+ 0x03,
+ 0x61, 0x61, 0x61,
+ }),
+ ),
+ },
+ wantRes: []Transaction{
+ {
+ Flags: 0x00,
+ IsReply: 0x01,
+ Type: []byte{0, 0x00},
+ ID: []byte{0x9a, 0xcb, 0x04, 0x42},
+ ErrorCode: []byte{0, 0, 0, 1},
+ Fields: []Field{
+ NewField(fieldError, []byte("You are not allowed to delete files.")),
+ },
+ },
+ },
+ wantErr: assert.NoError,
+ },
+ {
+ name: "deletes all associated metadata files",
+ args: args{
+ cc: &ClientConn{
+ Account: &Account{
+ Access: func() *[]byte {
+ var bits accessBitmap
+ bits.Set(accessDeleteFile)
+ access := bits[:]
+ return &access
+ }(),
+ },
+ Server: &Server{
+ Config: &Config{
+ FileRoot: func() string {
+ return "/fakeRoot/Files"
+ }(),
+ },
+ FS: func() *MockFileStore {
+ mfi := &MockFileInfo{}
+ mfi.On("Mode").Return(fs.FileMode(0))
+ mfi.On("Size").Return(int64(100))
+ mfi.On("ModTime").Return(time.Parse(time.Layout, time.Layout))
+ mfi.On("IsDir").Return(false)
+ mfi.On("Name").Return("testfile")
+
+ mfs := &MockFileStore{}
+ mfs.On("Stat", "/fakeRoot/Files/aaa/testfile").Return(mfi, nil)
+ mfs.On("Stat", "/fakeRoot/Files/aaa/.info_testfile").Return(nil, errors.New("err"))
+ mfs.On("Stat", "/fakeRoot/Files/aaa/.rsrc_testfile").Return(nil, errors.New("err"))
+
+ mfs.On("RemoveAll", "/fakeRoot/Files/aaa/testfile").Return(nil)
+ mfs.On("Remove", "/fakeRoot/Files/aaa/testfile.incomplete").Return(nil)
+ mfs.On("Remove", "/fakeRoot/Files/aaa/.rsrc_testfile").Return(nil)
+ mfs.On("Remove", "/fakeRoot/Files/aaa/.info_testfile").Return(nil)
+
+ return mfs
+ }(),
+ Accounts: map[string]*Account{},
+ },
+ },
+ t: NewTransaction(
+ tranDeleteFile, &[]byte{0, 1},
+ NewField(fieldFileName, []byte("testfile")),
+ NewField(fieldFilePath, []byte{
+ 0x00, 0x01,
+ 0x00, 0x00,
+ 0x03,
+ 0x61, 0x61, 0x61,
+ }),
+ ),
+ },
+ wantRes: []Transaction{
+ {
+ Flags: 0x00,
+ IsReply: 0x01,
+ Type: []byte{0x0, 0xcc},
+ ID: []byte{0x0, 0x0, 0x0, 0x0},
+ ErrorCode: []byte{0, 0, 0, 0},
+ Fields: []Field(nil),
+ },
+ },
+ wantErr: assert.NoError,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotRes, err := HandleDeleteFile(tt.args.cc, tt.args.t)
+ if !tt.wantErr(t, err, fmt.Sprintf("HandleDeleteFile(%v, %v)", tt.args.cc, tt.args.t)) {
+ return
+ }
+
+ tranAssertEqual(t, tt.wantRes, gotRes)
+
+ tt.args.cc.Server.FS.(*MockFileStore).AssertExpectations(t)
+ })
+ }
+}
return len(b), nil
}
-const fileCopyBufSize = 524288 // 512k
-func receiveFile(conn io.Reader, targetFile io.Writer, resForkFile io.Writer) error {
- ffhBuf := make([]byte, 24)
- if _, err := io.ReadFull(conn, ffhBuf); err != nil {
- return err
- }
-
- var ffh FlatFileHeader
- err := binary.Read(bytes.NewReader(ffhBuf), binary.BigEndian, &ffh)
- if err != nil {
- return err
- }
-
- ffifhBuf := make([]byte, 16)
- if _, err := io.ReadFull(conn, ffifhBuf); err != nil {
- return err
- }
-
- var ffifh FlatFileInformationForkHeader
- err = binary.Read(bytes.NewReader(ffifhBuf), binary.BigEndian, &ffifh)
- if err != nil {
- return err
- }
+const fileCopyBufSize = 4096
- var ffif FlatFileInformationFork
-
- dataLen := binary.BigEndian.Uint32(ffifh.DataSize[:])
- ffifBuf := make([]byte, dataLen)
- if _, err := io.ReadFull(conn, ffifBuf); err != nil {
- return err
- }
- if err := ffif.UnmarshalBinary(ffifBuf); err != nil {
+func receiveFile(r io.Reader, targetFile, resForkFile, infoFork io.Writer) error {
+ var ffo flattenedFileObject
+ if _, err := ffo.ReadFrom(r); err != nil {
return err
}
- var ffdfh FlatFileDataForkHeader
- ffdfhBuf := make([]byte, 16)
- if _, err := io.ReadFull(conn, ffdfhBuf); err != nil {
- return err
- }
- err = binary.Read(bytes.NewReader(ffdfhBuf), binary.BigEndian, &ffdfh)
+ // Write the information fork
+ _, err := infoFork.Write(ffo.FlatFileInformationFork.MarshalBinary())
if err != nil {
return err
}
- // this will be zero if the file only has a resource fork
- fileSize := int(binary.BigEndian.Uint32(ffdfh.DataSize[:]))
-
+ // read and write the data fork
bw := bufio.NewWriterSize(targetFile, fileCopyBufSize)
- _, err = io.CopyN(bw, conn, int64(fileSize))
- if err != nil {
+ if _, err = io.CopyN(bw, r, ffo.dataSize()); err != nil {
return err
}
if err := bw.Flush(); err != nil {
return err
}
- if ffh.ForkCount == [2]byte{0, 3} {
- var resForkHeader FlatFileDataForkHeader
- if _, err := io.ReadFull(conn, resForkHeader.ForkType[:]); err != nil {
+ if ffo.FlatFileHeader.ForkCount == [2]byte{0, 3} {
+ if err := binary.Read(r, binary.BigEndian, &ffo.FlatFileResForkHeader); err != nil {
return err
}
- if _, err := io.ReadFull(conn, resForkHeader.CompressionType[:]); err != nil {
+ bw = bufio.NewWriterSize(resForkFile, fileCopyBufSize)
+ _, err = io.CopyN(bw, r, ffo.rsrcSize())
+ if err != nil {
return err
}
-
- if _, err := io.ReadFull(conn, resForkHeader.RSVD[:]); err != nil {
+ if err := bw.Flush(); err != nil {
return err
}
+ }
+ return nil
+}
- if _, err := io.ReadFull(conn, resForkHeader.DataSize[:]); err != nil {
- return err
- }
+func sendFile(w io.Writer, r io.Reader, offset int) (err error) {
+ br := bufio.NewReader(r)
+ if _, err := br.Discard(offset); err != nil {
+ return err
+ }
- bw = bufio.NewWriterSize(resForkFile, fileCopyBufSize)
- _, err = io.CopyN(resForkFile, conn, int64(binary.BigEndian.Uint32(resForkHeader.DataSize[:])))
+ rSendBuffer := make([]byte, 1024)
+ for {
+ var bytesRead int
+
+ if bytesRead, err = br.Read(rSendBuffer); err == io.EOF {
+ if _, err := w.Write(rSendBuffer[:bytesRead]); err != nil {
+ return err
+ }
+ return nil
+ }
if err != nil {
return err
}
- if err := bw.Flush(); err != nil {
+ // totalSent += int64(bytesRead)
+
+ // fileTransfer.BytesSent += bytesRead
+
+ if _, err := w.Write(rSendBuffer[:bytesRead]); err != nil {
return err
}
}
- return nil
+
}
wantErr assert.ErrorAssertionFunc
}{
{
- name: "transfers file",
+ name: "transfers file when there is no resource fork",
args: args{
conn: func() io.Reader {
testFile := flattenedFileObject{
- FlatFileHeader: NewFlatFileHeader(),
- FlatFileInformationForkHeader: FlatFileInformationForkHeader{},
+ FlatFileHeader: FlatFileHeader{
+ Format: [4]byte{0x46, 0x49, 0x4c, 0x50}, // "FILP"
+ Version: [2]byte{0, 1},
+ RSVD: [16]byte{},
+ ForkCount: [2]byte{0, 2},
+ },
+ FlatFileInformationForkHeader: FlatFileForkHeader{},
FlatFileInformationFork: NewFlatFileInformationFork("testfile.txt", make([]byte, 8), "TEXT", "TEXT"),
- FlatFileDataForkHeader: FlatFileDataForkHeader{
+ FlatFileDataForkHeader: FlatFileForkHeader{
ForkType: [4]byte{0x4d, 0x41, 0x43, 0x52}, // DATA
CompressionType: [4]byte{0, 0, 0, 0},
RSVD: [4]byte{0, 0, 0, 0},
DataSize: [4]byte{0x00, 0x00, 0x00, 0x03},
},
- FileData: nil,
}
fakeFileData := []byte{1, 2, 3}
b := testFile.BinaryMarshal()
wantErr: assert.NoError,
},
+ // {
+ // name: "transfers fileWrapper when there is a resource fork",
+ // args: args{
+ // conn: func() io.Reader {
+ // testFile := flattenedFileObject{
+ // FlatFileHeader: FlatFileHeader{
+ // Format: [4]byte{0x46, 0x49, 0x4c, 0x50}, // "FILP"
+ // Version: [2]byte{0, 1},
+ // RSVD: [16]byte{},
+ // ForkCount: [2]byte{0, 3},
+ // },
+ // FlatFileInformationForkHeader: FlatFileForkHeader{},
+ // FlatFileInformationFork: NewFlatFileInformationFork("testfile.txt", make([]byte, 8), "TEXT", "TEXT"),
+ // FlatFileDataForkHeader: FlatFileForkHeader{
+ // ForkType: [4]byte{0x44, 0x41, 0x54, 0x41}, // DATA
+ // CompressionType: [4]byte{0, 0, 0, 0},
+ // RSVD: [4]byte{0, 0, 0, 0},
+ // DataSize: [4]byte{0x00, 0x00, 0x00, 0x03},
+ // },
+ // FlatFileResForkHeader: FlatFileForkHeader{
+ // ForkType: [4]byte{0x4d, 0x41, 0x43, 0x52}, // MACR
+ // CompressionType: [4]byte{0, 0, 0, 0},
+ // RSVD: [4]byte{0, 0, 0, 0},
+ // DataSize: [4]byte{0x00, 0x00, 0x00, 0x03},
+ // },
+ // }
+ // fakeFileData := []byte{1, 2, 3}
+ // b := testFile.BinaryMarshal()
+ // b = append(b, fakeFileData...)
+ // return bytes.NewReader(b)
+ // }(),
+ // },
+ // wantTargetFile: []byte{1, 2, 3},
+ // wantResForkFile: []byte(nil),
+ //
+ // wantErr: assert.NoError,
+ // },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
targetFile := &bytes.Buffer{}
resForkFile := &bytes.Buffer{}
- err := receiveFile(tt.args.conn, targetFile, resForkFile)
+ infoForkFile := &bytes.Buffer{}
+ err := receiveFile(tt.args.conn, targetFile, resForkFile, infoForkFile)
if !tt.wantErr(t, err, fmt.Sprintf("receiveFile(%v, %v, %v)", tt.args.conn, targetFile, resForkFile)) {
return
}