swarm: network rewrite merge
@@ -25,6 +25,7 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
"github.com/ethereum/go-ethereum/swarm/log"
"golang.org/x/net/context"
)

@@ -49,6 +50,7 @@ type SwarmDir struct {
}

func NewSwarmDir(fullpath string, minfo *MountInfo) *SwarmDir {
log.Debug("swarmfs", "NewSwarmDir", fullpath)
newdir := &SwarmDir{
inode: NewInode(),
name: filepath.Base(fullpath),

@@ -62,6 +64,8 @@ func NewSwarmDir(fullpath string, minfo *MountInfo) *SwarmDir {
}

func (sd *SwarmDir) Attr(ctx context.Context, a *fuse.Attr) error {
sd.lock.RLock()
defer sd.lock.RUnlock()
a.Inode = sd.inode
a.Mode = os.ModeDir | 0700
a.Uid = uint32(os.Getuid())

@@ -70,7 +74,7 @@ func (sd *SwarmDir) Attr(ctx context.Context, a *fuse.Attr) error {
}

func (sd *SwarmDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
log.Debug("swarmfs", "Lookup", req.Name)
for _, n := range sd.files {
if n.name == req.Name {
return n, nil

@@ -85,6 +89,7 @@ func (sd *SwarmDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *f
}

func (sd *SwarmDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("swarmfs ReadDirAll")
var children []fuse.Dirent
for _, file := range sd.files {
children = append(children, fuse.Dirent{Inode: file.inode, Type: fuse.DT_File, Name: file.name})

@@ -96,6 +101,7 @@ func (sd *SwarmDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
}

func (sd *SwarmDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
log.Debug("swarmfs Create", "path", sd.path, "req.Name", req.Name)

newFile := NewSwarmFile(sd.path, req.Name, sd.mountInfo)
newFile.fileSize = 0 // 0 means, file is not in swarm yet and it is just created

@@ -108,6 +114,7 @@ func (sd *SwarmDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *f
}

func (sd *SwarmDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
log.Debug("swarmfs Remove", "path", sd.path, "req.Name", req.Name)

if req.Dir && sd.directories != nil {
newDirs := []*SwarmDir{}

@@ -144,13 +151,11 @@ func (sd *SwarmDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
}

func (sd *SwarmDir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
- newDir := NewSwarmDir(req.Name, sd.mountInfo)
+ log.Debug("swarmfs Mkdir", "path", sd.path, "req.Name", req.Name)
+ newDir := NewSwarmDir(filepath.Join(sd.path, req.Name), sd.mountInfo)
sd.lock.Lock()
defer sd.lock.Unlock()
sd.directories = append(sd.directories, newDir)

return newDir, nil

}
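The Mkdir hunk above now builds the new directory's full path from its parent, using filepath.Join(sd.path, req.Name) instead of the bare request name, so nested directories carry correct absolute paths. Below is a minimal sketch of that idea; dirNode and mkdir are simplified stand-ins, not the real swarmfs types.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// dirNode is a simplified stand-in for SwarmDir: it only tracks the
// directory's full path and its children, which is enough to show why
// the child must be created from the joined path.
type dirNode struct {
	path     string
	children []*dirNode
}

// mkdir mirrors the changed behaviour: the child path is the parent path
// joined with the requested name, not the bare name alone.
func (d *dirNode) mkdir(name string) *dirNode {
	child := &dirNode{path: filepath.Join(d.path, name)}
	d.children = append(d.children, child)
	return child
}

func main() {
	root := &dirNode{path: "/"}
	docs := root.mkdir("docs")
	img := docs.mkdir("img")
	fmt.Println(docs.path) // /docs
	fmt.Println(img.path)  // /docs/img; with the old code this would have been just "img"
}
```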
@@ -26,7 +26,7 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"golang.org/x/net/context"
)

@@ -50,7 +50,7 @@ type SwarmFile struct {
inode uint64
name string
path string
- key storage.Key
+ addr storage.Address
fileSize int64
reader storage.LazySectionReader

@@ -63,7 +63,7 @@ func NewSwarmFile(path, fname string, minfo *MountInfo) *SwarmFile {
inode: NewInode(),
name: fname,
path: path,
- key: nil,
+ addr: nil,
fileSize: -1, // -1 means , file already exists in swarm and you need to just get the size from swarm
reader: nil,
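The two inline comments spell out the fileSize convention used throughout these files: Create sets fileSize to 0 for a brand-new file that exists only locally, while NewSwarmFile defaults to -1 for a file that already lives in swarm and whose size is resolved lazily on the first Attr call. A hedged sketch of that convention, with simplified stand-in types rather than the real SwarmFile:

```go
package main

import "fmt"

const (
	sizeNewLocalFile   = 0  // file was just created through FUSE and has no swarm content yet
	sizeUnknownInSwarm = -1 // file exists in swarm; the real size is resolved lazily on Attr
)

type fileStub struct {
	name     string
	fileSize int64
}

// resolveSize imitates the lazy lookup: only the -1 sentinel triggers a
// (hypothetical) remote size query; 0 and positive sizes are taken as-is.
func resolveSize(f *fileStub, remoteSize func(name string) int64) int64 {
	if f.fileSize == sizeUnknownInSwarm {
		f.fileSize = remoteSize(f.name)
	}
	return f.fileSize
}

func main() {
	fresh := &fileStub{name: "new.txt", fileSize: sizeNewLocalFile}
	stored := &fileStub{name: "stored.txt", fileSize: sizeUnknownInSwarm}
	fakeRemote := func(string) int64 { return 4096 }
	fmt.Println(resolveSize(fresh, fakeRemote))  // 0: nothing to fetch
	fmt.Println(resolveSize(stored, fakeRemote)) // 4096: fetched once, then cached
}
```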
@@ -73,33 +73,38 @@ func NewSwarmFile(path, fname string, minfo *MountInfo) *SwarmFile {
return newFile
}

- func (file *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
- a.Inode = file.inode
+ func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
+ log.Debug("swarmfs Attr", "path", sf.path)
+ sf.lock.Lock()
+ defer sf.lock.Unlock()
+ a.Inode = sf.inode
//TODO: need to get permission as argument
a.Mode = 0700
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getegid())

- if file.fileSize == -1 {
- reader := file.mountInfo.swarmApi.Retrieve(file.key)
+ if sf.fileSize == -1 {
+ reader, _ := sf.mountInfo.swarmApi.Retrieve(sf.addr)
quitC := make(chan bool)
size, err := reader.Size(quitC)
if err != nil {
- log.Warn("Couldnt get size of file %s : %v", file.path, err)
+ log.Error("Couldnt get size of file %s : %v", sf.path, err)
return err
}
- file.fileSize = size
+ sf.fileSize = size
+ log.Trace("swarmfs Attr", "size", size)
+ close(quitC)
}
- a.Size = uint64(file.fileSize)
+ a.Size = uint64(sf.fileSize)
return nil
}

func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
log.Debug("swarmfs Read", "path", sf.path, "req.String", req.String())
sf.lock.RLock()
defer sf.lock.RUnlock()
if sf.reader == nil {
- sf.reader = sf.mountInfo.swarmApi.Retrieve(sf.key)
+ sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(sf.addr)
}
buf := make([]byte, req.Size)
n, err := sf.reader.ReadAt(buf, req.Offset)

@@ -108,26 +113,23 @@ func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse
}
resp.Data = buf[:n]
sf.reader = nil
return err

return err
}

func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
log.Debug("swarmfs Write", "path", sf.path, "req.String", req.String())
if sf.fileSize == 0 && req.Offset == 0 {

// A new file is created
err := addFileToSwarm(sf, req.Data, len(req.Data))
if err != nil {
return err
}
resp.Size = len(req.Data)

} else if req.Offset <= sf.fileSize {

totalSize := sf.fileSize + int64(len(req.Data))
if totalSize > MaxAppendFileSize {
- log.Warn("Append file size reached (%v) : (%v)", sf.fileSize, len(req.Data))
+ log.Warn("swarmfs Append file size reached (%v) : (%v)", sf.fileSize, len(req.Data))
return errFileSizeMaxLimixReached
}

@@ -137,9 +139,8 @@ func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fu
}
resp.Size = len(req.Data)
} else {
- log.Warn("Invalid write request size(%v) : off(%v)", sf.fileSize, req.Offset)
+ log.Warn("swarmfs Invalid write request size(%v) : off(%v)", sf.fileSize, req.Offset)
return errInvalidOffset
}

return nil
}
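In both Attr and Read above, the storage lookup changes from a single-value call taking a storage.Key to a two-value call taking a storage.Address, with the second return value discarded via the blank identifier. The hunks do not show what that second value means, so the sketch below only illustrates the call-site mechanics of adapting to an API that grows an extra return value; the names retrieveV1/retrieveV2 and the address type are hypothetical, not the real swarm API.

```go
package main

import (
	"fmt"
	"strings"
)

// address stands in for storage.Address in this sketch.
type address string

// retrieveV1 mimics the old single-value signature: one reader back.
func retrieveV1(addr address) *strings.Reader {
	return strings.NewReader("payload for " + string(addr))
}

// retrieveV2 mimics the new shape: the reader plus an extra flag
// (whatever it encodes), which callers may ignore with the blank identifier.
func retrieveV2(addr address) (*strings.Reader, bool) {
	return strings.NewReader("payload for " + string(addr)), false
}

func main() {
	addr := address("0xabc123")

	// Old call site: exactly one value to bind.
	r1 := retrieveV1(addr)

	// New call site: the diff binds only the reader and drops the flag.
	r2, _ := retrieveV2(addr)

	fmt.Println(r1.Len(), r2.Len())
}
```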
@@ -39,12 +39,12 @@ var (
)

type SwarmFS struct {
- swarmApi *api.Api
+ swarmApi *api.API
activeMounts map[string]*MountInfo
swarmFsLock *sync.RWMutex
}

- func NewSwarmFS(api *api.Api) *SwarmFS {
+ func NewSwarmFS(api *api.API) *SwarmFS {
swarmfsLock.Do(func() {
swarmfs = &SwarmFS{
swarmApi: api,
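The api.Api type is renamed to api.API here and again in the MountInfo and NewMountInfo signatures further down, matching Go's convention that initialisms keep a single case throughout an identifier. Callers only need a mechanical rename; the snippet below is a self-contained illustration of the naming convention, not the real swarm/api package.

```go
package main

import "fmt"

// API follows Go naming guidance: initialisms such as API, URL or ID keep
// one case across the whole identifier ("API", not "Api").
type API struct {
	endpoint string
}

// NewAPI is the matching constructor; code that previously referred to a
// hypothetical "Api" type just renames the identifier, nothing else changes.
func NewAPI(endpoint string) *API {
	return &API{endpoint: endpoint}
}

func main() {
	a := NewAPI("http://localhost:8500")
	fmt.Println(a.endpoint)
}
```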
[File diff suppressed because it is too large]
@@ -30,15 +30,16 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/log"
)

var (
- errEmptyMountPoint = errors.New("need non-empty mount point")
- errMaxMountCount = errors.New("max FUSE mount count reached")
- errMountTimeout = errors.New("mount timeout")
- errAlreadyMounted = errors.New("mount point is already serving")
+ errEmptyMountPoint = errors.New("need non-empty mount point")
+ errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
+ errMaxMountCount = errors.New("max FUSE mount count reached")
+ errMountTimeout = errors.New("mount timeout")
+ errAlreadyMounted = errors.New("mount point is already serving")
)

func isFUSEUnsupportedError(err error) bool {

@@ -48,18 +49,20 @@ func isFUSEUnsupportedError(err error) bool {
return err == fuse.ErrOSXFUSENotFound
}

- // information about every active mount
+ // MountInfo contains information about every active mount
type MountInfo struct {
MountPoint string
StartManifest string
LatestManifest string
rootDir *SwarmDir
fuseConnection *fuse.Conn
- swarmApi *api.Api
+ swarmApi *api.API
lock *sync.RWMutex
serveClose chan struct{}
}

- func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo {
+ func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
newMountInfo := &MountInfo{
MountPoint: mpoint,
StartManifest: mhash,
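MountInfo gains a serveClose channel in the hunk above; later in this diff the serve goroutine sends on it when fs.Serve returns, and Unmount receives from it before reporting success. A minimal, generic sketch of that handshake, using plain channels rather than the real FUSE wiring:

```go
package main

import (
	"fmt"
	"time"
)

// mount starts a worker that stands in for the fs.Serve goroutine and
// returns the channel it will signal on when it is fully done.
func mount() chan struct{} {
	serveClose := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // pretend to serve the filesystem
		serveClose <- struct{}{}           // signal "serve loop has returned"
	}()
	return serveClose
}

// unmount blocks on the handshake so teardown only completes after the
// serving goroutine has actually exited.
func unmount(serveClose chan struct{}) {
	<-serveClose
	fmt.Println("unmounted cleanly")
}

func main() {
	serveClose := mount()
	unmount(serveClose)
}
```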
@@ -68,50 +71,57 @@ func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo {
fuseConnection: nil,
swarmApi: sapi,
lock: &sync.RWMutex{},
serveClose: make(chan struct{}),
}
return newMountInfo
}

- func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
+ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
if mountpoint == "" {
return nil, errEmptyMountPoint
}
if !strings.HasPrefix(mountpoint, "/") {
return nil, errNoRelativeMountPoint
}
cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
if err != nil {
return nil, err
}
log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

- self.swarmFsLock.Lock()
- defer self.swarmFsLock.Unlock()
+ swarmfs.swarmFsLock.Lock()
+ defer swarmfs.swarmFsLock.Unlock()

- noOfActiveMounts := len(self.activeMounts)
+ noOfActiveMounts := len(swarmfs.activeMounts)
log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
if noOfActiveMounts >= maxFuseMounts {
return nil, errMaxMountCount
}

- if _, ok := self.activeMounts[cleanedMountPoint]; ok {
+ if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
return nil, errAlreadyMounted
}

- log.Info(fmt.Sprintf("Attempting to mount %s ", cleanedMountPoint))
- _, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true)
+ log.Trace("swarmfs mount: getting manifest tree")
+ _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true)
if err != nil {
return nil, err
}

- mi := NewMountInfo(mhash, cleanedMountPoint, self.swarmApi)
+ log.Trace("swarmfs mount: building mount info")
+ mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

dirTree := map[string]*SwarmDir{}
rootDir := NewSwarmDir("/", mi)
dirTree["/"] = rootDir
log.Trace("swarmfs mount", "rootDir", rootDir)
mi.rootDir = rootDir

log.Trace("swarmfs mount: traversing manifest map")
for suffix, entry := range manifestEntryMap {
- key := common.Hex2Bytes(entry.Hash)
+ addr := common.Hex2Bytes(entry.Hash)
fullpath := "/" + suffix
basepath := filepath.Dir(fullpath)

parentDir := rootDir
dirUntilNow := ""
paths := strings.Split(basepath, "/")
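Mount now rejects relative mount points up front (errNoRelativeMountPoint) and then normalises the path with filepath.Clean and filepath.Abs before using it as the key into activeMounts. A small standalone sketch of that validation order, with hypothetical error values rather than the swarmfs ones:

```go
package main

import (
	"errors"
	"fmt"
	"path/filepath"
	"strings"
)

var (
	errEmpty    = errors.New("need non-empty mount point")
	errRelative = errors.New("invalid path for mount point (need absolute path)")
)

// cleanMountPoint applies the same order of checks the diff introduces:
// empty, then non-absolute, then Clean+Abs, so "/tmp//mnt/." and "/tmp/mnt"
// end up as the same map key.
func cleanMountPoint(mountpoint string) (string, error) {
	if mountpoint == "" {
		return "", errEmpty
	}
	if !strings.HasPrefix(mountpoint, "/") {
		return "", errRelative
	}
	return filepath.Abs(filepath.Clean(mountpoint))
}

func main() {
	for _, mp := range []string{"", "relative/dir", "/tmp//mnt/."} {
		p, err := cleanMountPoint(mp)
		fmt.Printf("%q -> %q, %v\n", mp, p, err)
	}
}
```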
@@ -128,105 +138,143 @@ func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
} else {
parentDir = dirTree[dirUntilNow]
}

}
}
thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
- thisFile.key = key
+ thisFile.addr = addr

parentDir.files = append(parentDir.files, thisFile)
}

fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
if isFUSEUnsupportedError(err) {
- log.Warn("Fuse not installed", "mountpoint", cleanedMountPoint, "err", err)
+ log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
return nil, err
} else if err != nil {
fuse.Unmount(cleanedMountPoint)
- log.Warn("Error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
+ log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
return nil, err
}
mi.fuseConnection = fconn

serverr := make(chan error, 1)
go func() {
- log.Info(fmt.Sprintf("Serving %s at %s", mhash, cleanedMountPoint))
+ log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
filesys := &SwarmRoot{root: rootDir}
//start serving the actual file system; see note below
if err := fs.Serve(fconn, filesys); err != nil {
- log.Warn(fmt.Sprintf("Could not Serve SwarmFileSystem error: %v", err))
+ log.Warn("swarmfs could not serve the requested hash", "error", err)
serverr <- err
}

mi.serveClose <- struct{}{}
}()

/*
IMPORTANT NOTE: the fs.Serve function is blocking;
Serve builds up the actual fuse file system by calling the
Attr functions on each SwarmFile, creating the file inodes;
specifically calling the swarm's LazySectionReader.Size() to set the file size.

This can take some time, and it appears that if we access the fuse file system
too early, we can bring the tests to deadlock. The assumption so far is that
at this point, the fuse driver didn't finish to initialize the file system.

Accessing files too early not only deadlocks the tests, but locks the access
of the fuse file completely, resulting in blocked resources at OS system level.
Even a simple `ls /tmp/testDir/testMountDir` could deadlock in a shell.

Workaround so far is to wait some time to give the OS enough time to initialize
the fuse file system. During tests, this seemed to address the issue.

HOWEVER IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT,
AND THE DEADLOCK CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO SOME RACE CONDITION
(caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations)
*/
time.Sleep(2 * time.Second)

timer := time.NewTimer(mountTimeout)
defer timer.Stop()
// Check if the mount process has an error to report.
select {
- case <-time.After(mountTimeout):
- fuse.Unmount(cleanedMountPoint)
+ case <-timer.C:
+ log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint, "err", err)
+ err := fuse.Unmount(cleanedMountPoint)
+ if err != nil {
+ return nil, err
+ }
return nil, errMountTimeout

case err := <-serverr:
- fuse.Unmount(cleanedMountPoint)
- log.Warn("Error serving swarm FUSE FS", "mountpoint", cleanedMountPoint, "err", err)
+ log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
+ err = fuse.Unmount(cleanedMountPoint)
return nil, err

case <-fconn.Ready:
- log.Info("Now serving swarm FUSE FS", "manifest", mhash, "mountpoint", cleanedMountPoint)
+ //this signals that the actual mount point from the fuse.Mount call is ready;
+ //it does not signal though that the file system from fs.Serve is actually fully built up
+ if err := fconn.MountError; err != nil {
+ log.Error("Mounting error from fuse driver: ", "err", err)
+ return nil, err
+ }
+ log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
}

- self.activeMounts[cleanedMountPoint] = mi
+ timer.Stop()
+ swarmfs.activeMounts[cleanedMountPoint] = mi
return mi, nil
}

- func (self *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
- self.swarmFsLock.Lock()
- defer self.swarmFsLock.Unlock()
+ func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
+ swarmfs.swarmFsLock.Lock()
+ defer swarmfs.swarmFsLock.Unlock()

cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
if err != nil {
return nil, err
}

- mountInfo := self.activeMounts[cleanedMountPoint]
+ mountInfo := swarmfs.activeMounts[cleanedMountPoint]

if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
- return nil, fmt.Errorf("%s is not mounted", cleanedMountPoint)
+ return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
}
err = fuse.Unmount(cleanedMountPoint)
if err != nil {
err1 := externalUnmount(cleanedMountPoint)
if err1 != nil {
- errStr := fmt.Sprintf("UnMount error: %v", err)
+ errStr := fmt.Sprintf("swarmfs unmount error: %v", err)
log.Warn(errStr)
return nil, err1
}
}

- mountInfo.fuseConnection.Close()
- delete(self.activeMounts, cleanedMountPoint)
+ err = mountInfo.fuseConnection.Close()
+ if err != nil {
+ return nil, err
+ }
+ delete(swarmfs.activeMounts, cleanedMountPoint)

- succString := fmt.Sprintf("UnMounting %v succeeded", cleanedMountPoint)
+ <-mountInfo.serveClose
+
+ succString := fmt.Sprintf("swarmfs unmounting %v succeeded", cleanedMountPoint)
log.Info(succString)

return mountInfo, nil
}

- func (self *SwarmFS) Listmounts() []*MountInfo {
- self.swarmFsLock.RLock()
- defer self.swarmFsLock.RUnlock()
- rows := make([]*MountInfo, 0, len(self.activeMounts))
- for _, mi := range self.activeMounts {
+ func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
+ swarmfs.swarmFsLock.RLock()
+ defer swarmfs.swarmFsLock.RUnlock()
+ rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
+ for _, mi := range swarmfs.activeMounts {
rows = append(rows, mi)
}
return rows
}

- func (self *SwarmFS) Stop() bool {
- for mp := range self.activeMounts {
- mountInfo := self.activeMounts[mp]
- self.Unmount(mountInfo.MountPoint)
+ func (swarmfs *SwarmFS) Stop() bool {
+ for mp := range swarmfs.activeMounts {
+ mountInfo := swarmfs.activeMounts[mp]
+ swarmfs.Unmount(mountInfo.MountPoint)
}
return true
}
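The select above replaces time.After with an explicit time.NewTimer that is stopped once the mount outcome is known, so the timer does not linger after a successful mount, and the timeout branch now checks the error from fuse.Unmount. A condensed, generic version of that pattern follows; the ready and serverr channels are hypothetical stand-ins for fconn.Ready and the serve-error channel.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForMount reproduces the shape of the select in the diff: a timeout
// timer, a server-error channel and a ready channel race each other, and
// the timer is released as soon as any branch wins.
func waitForMount(ready <-chan struct{}, serverr <-chan error, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop() // unlike a bare time.After, the timer is stopped promptly

	select {
	case <-timer.C:
		return errors.New("mount timeout")
	case err := <-serverr:
		return err
	case <-ready:
		return nil
	}
}

func main() {
	ready := make(chan struct{})
	serverr := make(chan error, 1)
	go func() {
		time.Sleep(50 * time.Millisecond) // pretend the FUSE mount becomes ready
		close(ready)
	}()
	fmt.Println(waitForMount(ready, serverr, time.Second)) // <nil>
}
```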
@@ -24,7 +24,7 @@ import (
"os/exec"
"runtime"

- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/log"
)

func externalUnmount(mountPoint string) error {

@@ -38,11 +38,11 @@ func externalUnmount(mountPoint string) error {
// Try FUSE-specific commands if umount didn't work.
switch runtime.GOOS {
case "darwin":
- return exec.CommandContext(ctx, "diskutil", "umount", "force", mountPoint).Run()
+ return exec.CommandContext(ctx, "diskutil", "umount", mountPoint).Run()
case "linux":
return exec.CommandContext(ctx, "fusermount", "-u", mountPoint).Run()
default:
- return fmt.Errorf("unmount: unimplemented")
+ return fmt.Errorf("swarmfs unmount: unimplemented")
}
}
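externalUnmount keeps its structure: a generic attempt first, then an OS-specific fallback command, with diskutil on darwin now invoked without the "force" argument and fusermount -u on Linux. A sketch of that fallback shape, where tryGenericUnmount is a hypothetical helper standing in for whatever the real code runs before the FUSE-specific commands:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"runtime"
	"time"
)

// externalUnmountSketch mirrors the structure in the diff: a generic
// unmount attempt first, then an OS-specific fallback command.
func externalUnmountSketch(mountPoint string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Generic attempt; the diff's comment implies a plain umount is tried
	// before the FUSE-specific commands below.
	if err := tryGenericUnmount(ctx, mountPoint); err == nil {
		return nil
	}

	switch runtime.GOOS {
	case "darwin":
		return exec.CommandContext(ctx, "diskutil", "umount", mountPoint).Run()
	case "linux":
		return exec.CommandContext(ctx, "fusermount", "-u", mountPoint).Run()
	default:
		return fmt.Errorf("unmount: unimplemented on %s", runtime.GOOS)
	}
}

// tryGenericUnmount is the hypothetical first attempt used in this sketch.
func tryGenericUnmount(ctx context.Context, mountPoint string) error {
	return exec.CommandContext(ctx, "umount", mountPoint).Run()
}

func main() {
	fmt.Println(externalUnmountSketch("/tmp/does-not-exist"))
}
```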
@@ -54,14 +54,14 @@ func addFileToSwarm(sf *SwarmFile, content []byte, size int) error {

sf.lock.Lock()
defer sf.lock.Unlock()
- sf.key = fkey
+ sf.addr = fkey
sf.fileSize = int64(size)

sf.mountInfo.lock.Lock()
defer sf.mountInfo.lock.Unlock()
sf.mountInfo.LatestManifest = mhash

- log.Info("Added new file:", "fname", sf.name, "New Manifest hash", mhash)
+ log.Info("swarmfs added new file:", "fname", sf.name, "new Manifest hash", mhash)
return nil
}

@@ -75,7 +75,7 @@ func removeFileFromSwarm(sf *SwarmFile) error {
defer sf.mountInfo.lock.Unlock()
sf.mountInfo.LatestManifest = mkey

- log.Info("Removed file:", "fname", sf.name, "New Manifest hash", mkey)
+ log.Info("swarmfs removed file:", "fname", sf.name, "new Manifest hash", mkey)
return nil
}

@@ -102,20 +102,20 @@ func removeDirectoryFromSwarm(sd *SwarmDir) error {
}

func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error {
- fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.key, offset, length, true)
+ fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true)
if err != nil {
return err
}

sf.lock.Lock()
defer sf.lock.Unlock()
- sf.key = fkey
+ sf.addr = fkey
sf.fileSize = sf.fileSize + int64(len(content))

sf.mountInfo.lock.Lock()
defer sf.mountInfo.lock.Unlock()
sf.mountInfo.LatestManifest = mhash

- log.Info("Appended file:", "fname", sf.name, "New Manifest hash", mhash)
+ log.Info("swarmfs appended file:", "fname", sf.name, "new Manifest hash", mhash)
return nil
}
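A change that runs through the whole diff is the shift from fmt.Sprintf-style messages to the key-value logging form, with every message prefixed by "swarmfs". The snippet below illustrates the difference in style only, using the standard library and a made-up kvInfo helper rather than the go-ethereum log package.

```go
package main

import (
	"fmt"
	"log"
)

// kvInfo imitates the key-value logging style the diff switches to:
// a fixed message followed by alternating key/value pairs.
func kvInfo(msg string, ctx ...interface{}) {
	line := msg
	for i := 0; i+1 < len(ctx); i += 2 {
		line += fmt.Sprintf(" %v=%v", ctx[i], ctx[i+1])
	}
	log.Println(line)
}

func main() {
	fname, mhash := "hello.txt", "0xabc123"

	// Old style in the diff: values baked into one formatted string.
	log.Println(fmt.Sprintf("Added new file: %s, new manifest hash %s", fname, mhash))

	// New style: a stable message plus structured key/value context,
	// prefixed with the subsystem name ("swarmfs ...").
	kvInfo("swarmfs added new file:", "fname", fname, "new Manifest hash", mhash)
}
```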