| author | Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com> | 2022-04-05 13:29:46 +0500 |
|---|---|---|
| committer | Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com> | 2022-04-05 13:29:46 +0500 |
| commit | 17c6e8e39fd3dd2d3220032bd2ef0d537514257d (patch) | |
| tree | 462ede8ea61484114f2bde09b6eeb0e9e37b2ab6 /weed/shell | |
| parent | b7cdde14ae44508c59dbf5cf3835a0a68ba1aabb (diff) | |
| parent | 3176bf126ae21395b26d6f6531c05fb571fac54f (diff) | |
| download | seaweedfs-17c6e8e39fd3dd2d3220032bd2ef0d537514257d.tar.xz seaweedfs-17c6e8e39fd3dd2d3220032bd2ef0d537514257d.zip | |
Merge branch 'new_master' into hashicorp_raft
# Conflicts:
# go.mod
# go.sum
Diffstat (limited to 'weed/shell')
| -rw-r--r-- | weed/shell/command_cluster_check.go | 233 |
| -rw-r--r-- | weed/shell/command_mount_configure.go | 64 |
2 files changed, 297 insertions, 0 deletions
```diff
diff --git a/weed/shell/command_cluster_check.go b/weed/shell/command_cluster_check.go
new file mode 100644
index 000000000..f841bd224
--- /dev/null
+++ b/weed/shell/command_cluster_check.go
@@ -0,0 +1,233 @@
+package shell
+
+import (
+    "context"
+    "flag"
+    "fmt"
+    "github.com/chrislusf/seaweedfs/weed/cluster"
+    "github.com/chrislusf/seaweedfs/weed/pb"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+    "io"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+func init() {
+    Commands = append(Commands, &commandClusterCheck{})
+}
+
+type commandClusterCheck struct {
+}
+
+func (c *commandClusterCheck) Name() string {
+    return "cluster.check"
+}
+
+func (c *commandClusterCheck) Help() string {
+    return `check current cluster network connectivity
+
+    cluster.check
+
+`
+}
+
+func (c *commandClusterCheck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+    clusterPsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    if err = clusterPsCommand.Parse(args); err != nil {
+        return nil
+    }
+
+    // collect topology information
+    topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)
+    if err != nil {
+        return err
+    }
+    fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(topologyInfo.DiskInfos))
+
+    emptyDiskTypeDiskInfo, emptyDiskTypeFound := topologyInfo.DiskInfos[""]
+    hddDiskTypeDiskInfo, hddDiskTypeFound := topologyInfo.DiskInfos["hdd"]
+    if !emptyDiskTypeFound && !hddDiskTypeFound || emptyDiskTypeDiskInfo.VolumeCount == 0 && hddDiskTypeDiskInfo.VolumeCount == 0 {
+        return fmt.Errorf("Need to a hdd disk type!")
+    }
+
+    // collect filers
+    var filers []pb.ServerAddress
+    err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
+        resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+            ClientType: cluster.FilerType,
+        })
+
+        for _, node := range resp.ClusterNodes {
+            filers = append(filers, pb.ServerAddress(node.Address))
+        }
+        return err
+    })
+    if err != nil {
+        return
+    }
+    fmt.Fprintf(writer, "the cluster has %d filers: %+v\n", len(filers), filers)
+
+    // collect volume servers
+    var volumeServers []pb.ServerAddress
+    t, _, err := collectTopologyInfo(commandEnv, 0)
+    if err != nil {
+        return err
+    }
+    for _, dc := range t.DataCenterInfos {
+        for _, r := range dc.RackInfos {
+            for _, dn := range r.DataNodeInfos {
+                volumeServers = append(volumeServers, pb.NewServerAddressFromDataNode(dn))
+            }
+        }
+    }
+    fmt.Fprintf(writer, "the cluster has %d volume servers: %+v\n", len(volumeServers), volumeServers)
+
+    // collect all masters
+    var masters []pb.ServerAddress
+    for _, master := range commandEnv.MasterClient.GetMasters() {
+        masters = append(masters, master)
+    }
+
+    // check from master to volume servers
+    for _, master := range masters {
+        for _, volumeServer := range volumeServers {
+            fmt.Fprintf(writer, "checking master %s to volume server %s ... ", string(master), string(volumeServer))
+            err := pb.WithMasterClient(false, master, commandEnv.option.GrpcDialOption, func(client master_pb.SeaweedClient) error {
+                _, err := client.Ping(context.Background(), &master_pb.PingRequest{
+                    Target:     string(volumeServer),
+                    TargetType: cluster.VolumeServerType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    // check between masters
+    for _, sourceMaster := range masters {
+        for _, targetMaster := range masters {
+            if sourceMaster == targetMaster {
+                continue
+            }
+            fmt.Fprintf(writer, "checking master %s to %s ... ", string(sourceMaster), string(targetMaster))
+            err := pb.WithMasterClient(false, sourceMaster, commandEnv.option.GrpcDialOption, func(client master_pb.SeaweedClient) error {
+                _, err := client.Ping(context.Background(), &master_pb.PingRequest{
+                    Target:     string(targetMaster),
+                    TargetType: cluster.MasterType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    // check from volume servers to masters
+    for _, volumeServer := range volumeServers {
+        for _, master := range masters {
+            fmt.Fprintf(writer, "checking volume server %s to master %s ... ", string(volumeServer), string(master))
+            err := pb.WithVolumeServerClient(false, volumeServer, commandEnv.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+                _, err := client.Ping(context.Background(), &volume_server_pb.PingRequest{
+                    Target:     string(master),
+                    TargetType: cluster.MasterType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    // check from filers to masters
+    for _, filer := range filers {
+        for _, master := range masters {
+            fmt.Fprintf(writer, "checking filer %s to master %s ... ", string(filer), string(master))
+            err := pb.WithFilerClient(false, filer, commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+                _, err := client.Ping(context.Background(), &filer_pb.PingRequest{
+                    Target:     string(master),
+                    TargetType: cluster.MasterType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    // check from filers to volume servers
+    for _, filer := range filers {
+        for _, volumeServer := range volumeServers {
+            fmt.Fprintf(writer, "checking filer %s to volume server %s ... ", string(filer), string(volumeServer))
+            err := pb.WithFilerClient(false, filer, commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+                _, err := client.Ping(context.Background(), &filer_pb.PingRequest{
+                    Target:     string(volumeServer),
+                    TargetType: cluster.VolumeServerType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    // check between volume servers
+    for _, sourceVolumeServer := range volumeServers {
+        for _, targetVolumeServer := range volumeServers {
+            if sourceVolumeServer == targetVolumeServer {
+                continue
+            }
+            fmt.Fprintf(writer, "checking volume server %s to %s ... ", string(sourceVolumeServer), string(targetVolumeServer))
+            err := pb.WithVolumeServerClient(false, sourceVolumeServer, commandEnv.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+                _, err := client.Ping(context.Background(), &volume_server_pb.PingRequest{
+                    Target:     string(targetVolumeServer),
+                    TargetType: cluster.VolumeServerType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    // check between filers, and need to connect to itself
+    for _, sourceFiler := range filers {
+        for _, targetFiler := range filers {
+            fmt.Fprintf(writer, "checking filer %s to %s ... ", string(sourceFiler), string(targetFiler))
+            err := pb.WithFilerClient(false, sourceFiler, commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+                _, err := client.Ping(context.Background(), &filer_pb.PingRequest{
+                    Target:     string(targetFiler),
+                    TargetType: cluster.FilerType,
+                })
+                return err
+            })
+            if err == nil {
+                fmt.Fprintf(writer, "ok\n")
+            } else {
+                fmt.Fprintf(writer, "%v\n", err)
+            }
+        }
+    }
+
+    return nil
+}
diff --git a/weed/shell/command_mount_configure.go b/weed/shell/command_mount_configure.go
new file mode 100644
index 000000000..8c268d35c
--- /dev/null
+++ b/weed/shell/command_mount_configure.go
@@ -0,0 +1,64 @@
+package shell
+
+import (
+    "context"
+    "flag"
+    "fmt"
+    "github.com/chrislusf/seaweedfs/weed/pb/mount_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/credentials/insecure"
+    _ "google.golang.org/grpc/resolver/passthrough"
+    "io"
+)
+
+func init() {
+    Commands = append(Commands, &commandMountConfigure{})
+}
+
+type commandMountConfigure struct {
+}
+
+func (c *commandMountConfigure) Name() string {
+    return "mount.configure"
+}
+
+func (c *commandMountConfigure) Help() string {
+    return `configure the mount on current server
+
+    mount.configure -dir=<mount_directory>
+
+    This command connects with local mount via unix socket, so it can only run locally.
+    The "mount_directory" value needs to be exactly the same as how mount was started in "weed mount -dir=<mount_directory>"
+
+`
+}
+
+func (c *commandMountConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+    mountConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    mountDir := mountConfigureCommand.String("dir", "", "the mount directory same as how \"weed mount -dir=<mount_directory>\" was started")
+    mountQuota := mountConfigureCommand.Int("quotaMB", 0, "the quota in MB")
+    if err = mountConfigureCommand.Parse(args); err != nil {
+        return nil
+    }
+
+    mountDirHash := util.HashToInt32([]byte(*mountDir))
+    if mountDirHash < 0 {
+        mountDirHash = -mountDirHash
+    }
+    localSocket := fmt.Sprintf("/tmp/seaweefs-mount-%d.sock", mountDirHash)
+
+    clientConn, err := grpc.Dial("passthrough:///unix://"+localSocket, grpc.WithTransportCredentials(insecure.NewCredentials()))
+    if err != nil {
+        return
+    }
+    defer clientConn.Close()
+
+    client := mount_pb.NewSeaweedMountClient(clientConn)
+    _, err = client.Configure(context.Background(), &mount_pb.ConfigureRequest{
+        CollectionCapacity: int64(*mountQuota) * 1024 * 1024,
+    })
+
+    return
+}
```
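The loops in `cluster.check` above repeat the same step for every source/target pair: print "checking X to Y ...", issue a `Ping` RPC, then print `ok` or the error. As a rough illustration only, here is a minimal sketch of how that step could be factored into a helper for the master-side checks; the function name `pingFromMaster` and its placement are assumptions, not part of this commit:

```go
package shell

import (
	"context"
	"fmt"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

// pingFromMaster is a hypothetical helper (not in this commit) that mirrors the
// repeated ping-and-report step of cluster.check: ask `master` to ping `target`
// of the given type and write either "ok" or the error to the shell writer.
func pingFromMaster(writer io.Writer, master, target pb.ServerAddress, targetType string, grpcDialOption grpc.DialOption) {
	fmt.Fprintf(writer, "checking master %s to %s %s ... ", string(master), targetType, string(target))
	err := pb.WithMasterClient(false, master, grpcDialOption, func(client master_pb.SeaweedClient) error {
		// The Ping RPC asks the connected master to reach out to the target itself,
		// so this verifies master-to-target connectivity rather than shell-to-target.
		_, err := client.Ping(context.Background(), &master_pb.PingRequest{
			Target:     string(target),
			TargetType: targetType,
		})
		return err
	})
	if err == nil {
		fmt.Fprintf(writer, "ok\n")
	} else {
		fmt.Fprintf(writer, "%v\n", err)
	}
}
```

Usage is as shown in the commands' help text: inside a `weed shell` session, `cluster.check` takes no arguments, while `mount.configure -dir=<mount_directory>` (optionally with `-quotaMB=<n>`) must be given exactly the directory that was passed to `weed mount -dir=<mount_directory>`, because the command derives the local unix socket path from a hash of that directory.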