Fix linter warnings

This commit is contained in:
cuigh 2018-05-26 14:57:32 +08:00
parent 14b166ea01
commit 8b10ce5964
6 changed files with 18 additions and 15 deletions

View File

@ -299,12 +299,12 @@ func createConfigs(ctx context.Context, cli *client.Client, configs []swarm.Conf
case err == nil:
// config already exists, then we update that
if err = cli.ConfigUpdate(ctx, config.ID, config.Meta.Version, configSpec); err != nil {
errors.Wrap(err, "failed to update config "+configSpec.Name)
return errors.Wrap(err, "failed to update config "+configSpec.Name)
}
case client.IsErrNotFound(err):
// config does not exist, then we create a new one.
if _, err = cli.ConfigCreate(ctx, configSpec); err != nil {
errors.Wrap(err, "failed to create config "+configSpec.Name)
return errors.Wrap(err, "failed to create config "+configSpec.Name)
}
default:
return err

View File

@ -26,7 +26,6 @@ func (b *eventBiz) Create(event *model.Event) {
log.Get("event").Errorf("Create event `%+v` failed: %v", event, err)
}
})
return
}
func (b *eventBiz) CreateRegistry(action model.EventAction, id, name string, user web.User) {

View File

@ -124,5 +124,4 @@ func (b *stackBiz) Migrate() {
do(func(d dao.Interface) {
d.StackMigrate()
})
return
}

View File

@ -1,7 +1,6 @@
package bolt
import (
"encoding/binary"
"encoding/json"
"path/filepath"
"strings"
@ -145,8 +144,8 @@ func matchAny(s string, list ...string) bool {
}
// itob returns an 8-byte big-endian representation of i.
// Useful as a sortable bbolt key for auto-incremented uint64 IDs.
func itob(i uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, i)
	return b
}
//func itob(i uint64) []byte {
// b := make([]byte, 8)
// binary.BigEndian.PutUint64(b, i)
// return b
//}

View File

@ -39,7 +39,6 @@ func (d *Dao) ChartBatch(names ...string) (charts []*model.Chart, err error) {
q := bson.M{"_id": bson.M{"$in": names}}
charts = make([]*model.Chart, 0)
err = db.C("chart").Find(q).All(&charts)
return
})
return
}

View File

@ -101,16 +101,23 @@ func tryScale(service *swarm.Service, opts data.Options) {
return
}
logger := log.Get("scaler")
replicas := *service.Spec.Mode.Replicated.Replicas
if result.Type == scaleUp {
if replicas < max {
docker.ServiceScale(service.Spec.Name, service.Version.Index, replicas+step)
log.Get("scaler").Infof("scaler > Service '%s' scaled up for: %v", service.Spec.Name, result.Reasons)
if err := docker.ServiceScale(service.Spec.Name, service.Version.Index, replicas+step); err != nil {
logger.Errorf("scaler > Failed to scale service '%s': %v", service.Spec.Name, err)
} else {
logger.Infof("scaler > Service '%s' scaled up for: %v", service.Spec.Name, result.Reasons)
}
}
} else if result.Type == scaleDown {
if replicas > min {
docker.ServiceScale(service.Spec.Name, service.Version.Index, replicas-step)
log.Get("scaler").Infof("scaler > Service '%s' scaled down for: %v", service.Spec.Name, result.Reasons)
if err := docker.ServiceScale(service.Spec.Name, service.Version.Index, replicas-step); err != nil {
logger.Errorf("scaler > Failed to scale service '%s': %v", service.Spec.Name, err)
} else {
logger.Infof("scaler > Service '%s' scaled down for: %v", service.Spec.Name, result.Reasons)
}
}
}
}