build: enable unconvert linter (#15456)

* build: enable unconvert linter

 - fixes #15453
 - update the code base where the linter flagged unnecessary conversions

* cmd/puppeth: replace syscall.Stdin with os.Stdin.Fd() for the unconvert linter
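
For context, a minimal sketch of the pattern that bullet describes, assuming the call site reads a password via golang.org/x/crypto/ssh/terminal (the actual puppeth call site is not among the hunks shown below). On Unix, syscall.Stdin is already an int, so int(syscall.Stdin) is a redundant conversion that unconvert flags; os.Stdin.Fd() returns a uintptr on every platform, which keeps the int(...) conversion both necessary and portable:

package main

import (
    "fmt"
    "os"

    "golang.org/x/crypto/ssh/terminal"
)

// readPassword reads a password from stdin without echoing it.
// int(os.Stdin.Fd()) converts uintptr -> int, so unconvert does not
// flag it, and it behaves the same on Unix and Windows.
func readPassword() ([]byte, error) {
    fmt.Print("Password: ")
    return terminal.ReadPassword(int(os.Stdin.Fd()))
}

func main() {
    pw, err := readPassword()
    if err != nil {
        panic(err)
    }
    fmt.Printf("\nread %d bytes\n", len(pw))
}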
ferhat elmas authored on 2017-11-10 18:06:45 +01:00; committed by Péter Szilágyi
parent 3ee86a57f3
commit 86f6568f66
31 changed files with 57 additions and 54 deletions
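
Every hunk below removes a conversion that merely restates a value's existing type. A minimal sketch of the most common pattern, using a hypothetical firstByte helper that mirrors the manifestTrie hunks: indexing a string already yields a byte, so wrapping the expression in byte(...) is a no-op that the linter reports (running the standalone tool, something like "unconvert ./...", lists such sites):

package main

import "fmt"

// firstByte is a hypothetical helper mirroring the manifestTrie hunks:
// path[0] already has type byte, so writing byte(path[0]) would be the
// kind of redundant conversion the unconvert linter flags.
func firstByte(path string) byte {
    return path[0]
}

func main() {
    fmt.Println(firstByte("abc")) // 97
}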

View File

@@ -238,7 +238,7 @@ func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) {
         return
     }
-    b := byte(entry.Path[0])
+    b := entry.Path[0]
     oldentry := self.entries[b]
     if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) {
         self.entries[b] = entry
@@ -294,7 +294,7 @@ func (self *manifestTrie) deleteEntry(path string, quitC chan bool) {
         return
     }
-    b := byte(path[0])
+    b := path[0]
     entry := self.entries[b]
     if entry == nil {
         return
@@ -425,7 +425,7 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
     }
     //see if first char is in manifest entries
-    b := byte(path[0])
+    b := path[0]
     entry = self.entries[b]
     if entry == nil {
         return self.entries[256], 0

View File

@@ -19,15 +19,16 @@
 package fuse
 
 import (
-    "bazil.org/fuse"
-    "bazil.org/fuse/fs"
     "errors"
-    "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/swarm/storage"
-    "golang.org/x/net/context"
     "io"
     "os"
     "sync"
+
+    "bazil.org/fuse"
+    "bazil.org/fuse/fs"
+    "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/swarm/storage"
+    "golang.org/x/net/context"
 )
const (
@@ -87,7 +88,7 @@ func (file *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
         if err != nil {
             log.Warn("Couldnt get size of file %s : %v", file.path, err)
         }
-        file.fileSize = int64(size)
+        file.fileSize = size
     }
     a.Size = uint64(file.fileSize)
     return nil

View File

@@ -55,7 +55,7 @@ func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
     var err error
     for _, req := range unsynced {
         // skip keys that are found,
-        chunk, err = self.localStore.Get(storage.Key(req.Key[:]))
+        chunk, err = self.localStore.Get(req.Key[:])
         if err != nil || chunk.SData == nil {
             missing = append(missing, req)
         }

View File

@@ -67,7 +67,7 @@ func proximity(one, other Address) (ret int) {
     for i := 0; i < len(one); i++ {
         oxo := one[i] ^ other[i]
         for j := 0; j < 8; j++ {
-            if (uint8(oxo)>>uint8(7-j))&0x01 != 0 {
+            if (oxo>>uint8(7-j))&0x01 != 0 {
                 return i*8 + j
             }
         }
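
The change above drops a uint8 conversion on a value that is already a byte: one[i] ^ other[i] XORs two bytes, so oxo needs no conversion. A self-contained sketch of the same bit-scanning logic, hypothetical and simplified to plain byte slices rather than the package's Address type:

package main

import "fmt"

// proximitySketch mirrors the proximity function in the hunk above: it
// returns the index of the first bit (most significant first) at which
// the two byte sequences differ.
func proximitySketch(one, other []byte) int {
    for i := 0; i < len(one); i++ {
        oxo := one[i] ^ other[i] // already a byte; no uint8(...) needed
        for j := 0; j < 8; j++ {
            if (oxo>>uint8(7-j))&0x01 != 0 {
                return i*8 + j
            }
        }
    }
    return len(one) * 8
}

func main() {
    fmt.Println(proximitySketch([]byte{0x80}, []byte{0x00})) // 0: top bit differs
    fmt.Println(proximitySketch([]byte{0x01}, []byte{0x00})) // 7: lowest bit differs
}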

View File

@@ -211,12 +211,12 @@ func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRe
         }
         // if node is scheduled to connect
-        if time.Time(node.After).After(time.Now()) {
+        if node.After.After(time.Now()) {
             log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
             continue ROW
         }
-        delta = time.Since(time.Time(node.Seen))
+        delta = time.Since(node.Seen)
         if delta < self.initialRetryInterval {
             delta = self.initialRetryInterval
         }
@@ -230,7 +230,7 @@ func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRe
         log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
         // scheduling next check
-        interval = time.Duration(delta * time.Duration(self.connRetryExp))
+        interval = delta * time.Duration(self.connRetryExp)
         after = time.Now().Add(interval)
         log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval))
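
Both kaddb hunks rely on the operands already having the right type: node.After and node.Seen are evidently time.Time values, and delta is a time.Duration, so the product delta * time.Duration(self.connRetryExp) is already a Duration. A minimal sketch of that last type rule, with a hypothetical connRetryExp stand-in:

package main

import (
    "fmt"
    "time"
)

func main() {
    // delta is already a time.Duration, and Duration * Duration is a
    // Duration, so wrapping the product in time.Duration(...) again is
    // exactly the kind of no-op conversion unconvert reports.
    delta := 3 * time.Second
    connRetryExp := 2 // hypothetical retry-backoff multiplier, as in KadDb
    interval := delta * time.Duration(connRetryExp)
    fmt.Println(interval) // 6s
}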

View File

@@ -309,7 +309,7 @@ func (self *bzz) handleStatus() (err error) {
         Version:   uint64(Version),
         ID:        "honey",
         Addr:      self.selfAddr(),
-        NetworkId: uint64(self.NetworkId),
+        NetworkId: self.NetworkId,
         Swap: &bzzswap.SwapProfile{
             Profile:    self.swapParams.Profile,
             PayProfile: self.swapParams.PayProfile,

View File

@@ -378,7 +378,7 @@ func (self *syncer) syncHistory(state *syncState) chan interface{} {
             }
             select {
             // blocking until history channel is read from
-            case history <- storage.Key(key):
+            case history <- key:
                 n++
                 log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n))
                 state.Latest = key

View File

@@ -205,9 +205,9 @@ func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reade
     }
     // dept > 0
     // intermediate chunk containing child nodes hashes
-    branchCnt := int64((size + treeSize - 1) / treeSize)
+    branchCnt := (size + treeSize - 1) / treeSize
 
-    var chunk []byte = make([]byte, branchCnt*self.hashSize+8)
+    var chunk = make([]byte, branchCnt*self.hashSize+8)
     var pos, i int64
 
     binary.LittleEndian.PutUint64(chunk[0:8], uint64(size))

View File

@@ -78,7 +78,7 @@ type memTree struct {
 func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) {
     node = new(memTree)
     node.bits = b
-    node.width = 1 << uint(b)
+    node.width = 1 << b
     node.subtree = make([]*memTree, node.width)
     node.access = make([]uint64, node.width-1)
     node.parent = parent

View File

@@ -327,7 +327,7 @@ func (self *PyramidChunker) loadTree(chunkLevel [][]*TreeEntry, key Key, chunkC
     // Add the root chunk entry
     branchCount := int64(len(chunk.SData)-8) / self.hashSize
     newEntry := &TreeEntry{
-        level:       int(depth - 1),
+        level:       depth - 1,
         branchCount: branchCount,
         subtreeSize: uint64(chunk.Size),
         chunk:       chunk.SData,
@@ -352,7 +352,7 @@ func (self *PyramidChunker) loadTree(chunkLevel [][]*TreeEntry, key Key, chunkC
             }
             bewBranchCount := int64(len(newChunk.SData)-8) / self.hashSize
             newEntry := &TreeEntry{
-                level:       int(lvl - 1),
+                level:       lvl - 1,
                 branchCount: bewBranchCount,
                 subtreeSize: uint64(newChunk.Size),
                 chunk:       newChunk.SData,