|
@@ -1,9 +1,14 @@
|
|
|
package main
|
|
|
|
|
|
import (
|
|
|
+ "bytes"
|
|
|
+ "encoding/json"
|
|
|
"fmt"
|
|
|
+ "io"
|
|
|
"io/ioutil"
|
|
|
"math"
|
|
|
+ "mime/multipart"
|
|
|
+ "net/http"
|
|
|
"os"
|
|
|
"path"
|
|
|
"path/filepath"
|
|
@@ -123,17 +128,15 @@ func padZeros(thisInt string, maxval int) string {
|
|
|
|
|
|
}
|
|
|
|
|
|
+type clusterConfig struct {
|
|
|
+	Prefix string `json:"prefix"`
|
|
|
+	Port string `json:"port"`
|
|
|
+}
|
|
|
+
|
|
|
//End of utilities functions
|
|
|
|
|
|
func init() {
|
|
|
//Check if the required directory exists. If not, create it.
|
|
|
- if !file_exists("setting.config") {
|
|
|
- setting, err := os.Create("setting.config")
|
|
|
- check(err)
|
|
|
- defer setting.Close()
|
|
|
- setting.WriteString("")
|
|
|
- setting.Sync()
|
|
|
- }
|
|
|
if !file_exists("chunks/") {
|
|
|
mkdir("chunks/")
|
|
|
}
|
|
@@ -143,44 +146,22 @@ func init() {
|
|
|
if !file_exists("index/") {
|
|
|
mkdir("index/")
|
|
|
}
|
|
|
+ if !file_exists("tmp/") {
|
|
|
+ mkdir("tmp/")
|
|
|
+ }
|
|
|
+ if !file_exists("remoteDisks.config") {
|
|
|
+ file_put_contents("remoteDisks.config", "")
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
func main() {
|
|
|
//arozdfs implementation in Golang
|
|
|
- /*
|
|
|
- Supported commands:
|
|
|
- help --> show all the help information
|
|
|
-
|
|
|
- [Uploading to arozdfs commands]
|
|
|
- slice
|
|
|
- -infile <filename> --> declare the input file
|
|
|
- -storepath <pathname> --> Relative path from the arozdfs root
|
|
|
- -slice <filesize> --> declare the slicing filesize
|
|
|
-
|
|
|
- upload
|
|
|
- -push <clusterlist.config> --> push to a list of clusters and sync file index to other clusters
|
|
|
-
|
|
|
- [Download from arozdfs commands]
|
|
|
- download
|
|
|
- -outfile <file.index> --> rebuild a file from cluster storage to local drive
|
|
|
-
|
|
|
- open
|
|
|
- -storepath <local path> --> the file chunks tmp folder
|
|
|
- -uuid <file uuid> --> the uuid which the file is stored
|
|
|
- -outfile <filename> --> filepath for the exported and merged file
|
|
|
+ //Refer to the help section for the usable commands and parameters
|
|
|
|
|
|
- [File Operations]
|
|
|
- remove <file.index> --> remove all chunks related to thie file index
|
|
|
- rename <file.index> <newfile.index> --> rename all records related to this file
|
|
|
- move <filepath/file.index> <newpath/file.index> --> move the file to a new path in index directory
|
|
|
-
|
|
|
-
|
|
|
- [System checking commands]
|
|
|
- checkfile <file.index> --> check if a file contains all chunks which has at least two copies of each chunks
|
|
|
- rebuild --> Check all files on the system and fix all chunks which has corrupted
|
|
|
- migrate <host-uuid>
|
|
|
-
|
|
|
- */
|
|
|
+ if len(os.Args) == 1 {
|
|
|
+ fmt.Println("ERROR. Undefined function group or operations. Type 'arozdfs help' for usage instructions. ")
|
|
|
+ return
|
|
|
+ }
|
|
|
|
|
|
//For each argument, start the processing
|
|
|
switch functgroup := os.Args[1]; functgroup {
|
|
@@ -221,10 +202,10 @@ func startDownloadProc() {
|
|
|
}
|
|
|
|
|
|
func startSlicingProc() {
|
|
|
- storepath := ""
|
|
|
infile := ""
|
|
|
slice := 64 //Default 64MB per file chunk
|
|
|
fileUUID := genUUIDv4()
|
|
|
+ storepath := fileUUID + "/"
|
|
|
for i, arg := range os.Args {
|
|
|
if strpos(arg, "-") == 0 {
|
|
|
//This is a parameter defining keyword
|
|
@@ -232,6 +213,10 @@ func startSlicingProc() {
|
|
|
infile = os.Args[i+1]
|
|
|
} else if arg == "-storepath" {
|
|
|
storepath = os.Args[i+1]
|
|
|
+ //Check if the storepath is end with /. if not, append it into the pathname
|
|
|
+ if storepath[len(storepath)-1:] != "/" {
|
|
|
+ storepath = storepath + "/"
|
|
|
+ }
|
|
|
} else if arg == "-slice" {
|
|
|
sliceSize, err := strconv.Atoi(os.Args[i+1])
|
|
|
check(err)
|
|
@@ -239,9 +224,14 @@ func startSlicingProc() {
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
+ if slice <= 0 {
|
|
|
+ fmt.Println("ERROR. slice size cannot be smaller or equal to 0")
|
|
|
+ os.Exit(0)
|
|
|
+ }
|
|
|
if storepath != "" && infile != "" {
|
|
|
fmt.Println(storepath + " " + infile + " " + strconv.Itoa(slice) + " " + fileUUID)
|
|
|
- splitFileChunks(infile, "chunks/"+storepath+"/", fileUUID, slice)
|
|
|
+ splitFileChunks(infile, "chunks/"+storepath, fileUUID, slice)
|
|
|
+ fmt.Println(fileUUID)
|
|
|
} else {
|
|
|
fmt.Println("ERROR. Undefined storepath or infile.")
|
|
|
}
|
|
@@ -264,7 +254,7 @@ func splitFileChunks(rawfile string, outputdir string, outfilename string, chunk
|
|
|
fileInfo, _ := file.Stat()
|
|
|
|
|
|
var fileSize int64 = fileInfo.Size()
|
|
|
- var fileChunk = float64(chunksize * 1000 * 1000) // chunksize in MB
|
|
|
+ var fileChunk = float64(chunksize * 1024 * 1024) // chunksize in MB
|
|
|
|
|
|
// calculate total number of parts the file will be chunked into
|
|
|
|
|
@@ -288,9 +278,10 @@ func splitFileChunks(rawfile string, outputdir string, outfilename string, chunk
|
|
|
}
|
|
|
|
|
|
func openChunkedFile() {
|
|
|
- storepath := ""
|
|
|
+ storepath := "tmp/"
|
|
|
uuid := ""
|
|
|
outfile := ""
|
|
|
+ removeAfterMerge := 0
|
|
|
for i, arg := range os.Args {
|
|
|
if strpos(arg, "-") == 0 {
|
|
|
//This is a parameter defining keyword
|
|
@@ -298,14 +289,33 @@ func openChunkedFile() {
|
|
|
uuid = os.Args[i+1]
|
|
|
} else if arg == "-storepath" {
|
|
|
storepath = os.Args[i+1]
|
|
|
+ //Check if the storepath is end with /. if not, append it into the pathname
|
|
|
+ if storepath[len(storepath)-1:] != "/" {
|
|
|
+ storepath = storepath + "/"
|
|
|
+ }
|
|
|
} else if arg == "-outfile" {
|
|
|
outfile = os.Args[i+1]
|
|
|
+ } else if arg == "-c" {
|
|
|
+ //Remove the file chunks after the merging process
|
|
|
+ removeAfterMerge = 1
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
if storepath != "" && uuid != "" && outfile != "" {
|
|
|
fmt.Println(storepath + " " + uuid + " " + outfile)
|
|
|
- joinFileChunks(storepath+"/"+uuid, outfile)
|
|
|
+ if joinFileChunks(storepath+uuid, outfile) {
|
|
|
+ //Do checksum here
|
|
|
+
|
|
|
+ //Remove all files if -c is used
|
|
|
+ if removeAfterMerge == 1 {
|
|
|
+ matches, _ := filepath.Glob(storepath + uuid + "_*")
|
|
|
+ for j := 0; j < len(matches); j++ {
|
|
|
+ os.Remove(matches[j])
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ fmt.Println("ERROR. Unable to merge file chunks.")
|
|
|
+ }
|
|
|
} else {
|
|
|
fmt.Println("ERROR. Undefined storepath, outfile or uuid.")
|
|
|
}
|
|
@@ -330,14 +340,211 @@ func joinFileChunks(fileuuid string, outfilename string) bool {
|
|
|
}
|
|
|
|
|
|
func startUploadProc() {
|
|
|
+ push := "remoteDisks.config"
|
|
|
+ storepath := "tmp/"
|
|
|
+ uuid := ""
|
|
|
+ vdir := ""
|
|
|
for i, arg := range os.Args {
|
|
|
- // print index and value
|
|
|
- fmt.Println("item", i, "is", arg)
|
|
|
+ if strpos(arg, "-") == 0 {
|
|
|
+ //This is a parameter defining keyword
|
|
|
+ if arg == "-uuid" {
|
|
|
+ uuid = os.Args[i+1]
|
|
|
+ } else if arg == "-storepath" {
|
|
|
+ storepath = os.Args[i+1]
|
|
|
+ //Check if the storepath is end with /. if not, append it into the pathname
|
|
|
+ if storepath[len(storepath)-1:] != "/" {
|
|
|
+ storepath = storepath + "/"
|
|
|
+ }
|
|
|
+ } else if arg == "-vdir" {
|
|
|
+ vdir = os.Args[i+1]
|
|
|
+ } else if arg == "-push" {
|
|
|
+ //Remove the file chunks after the merging process
|
|
|
+ push = os.Args[i+1]
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ //Check if the input data are valid
|
|
|
+ if uuid == "" || vdir == "" {
|
|
|
+ fmt.Println("ERROR. Undefined uuid or vdir.")
|
|
|
+ os.Exit(0)
|
|
|
+ }
|
|
|
+ if !file_exists("clusterSetting.config") {
|
|
|
+ fmt.Println("ERROR. clusterSetting configuration not found")
|
|
|
+ os.Exit(0)
|
|
|
+ }
|
|
|
+ if file_exists("index/" + vdir + string(".index")) {
|
|
|
+ fmt.Println("ERROR. Given file already exists in vdir. Please use remove before uploading a new file on the same vdir location.")
|
|
|
+ os.Exit(0)
|
|
|
+ }
|
|
|
+
|
|
|
+ //Starting the uuid to ip conversion process
|
|
|
+
|
|
|
+ var ipList []string
|
|
|
+ //Read cluster setting from clusterSetting.config
|
|
|
+ jsonFile, _ := os.Open("clusterSetting.config")
|
|
|
+ byteValue, _ := ioutil.ReadAll(jsonFile)
|
|
|
+ var config clusterConfig
|
|
|
+ var uuiddata []string
|
|
|
+ json.Unmarshal(byteValue, &config)
|
|
|
+ //Read cluster uuid list from remoteDisks.config
|
|
|
+ if file_exists(push) {
|
|
|
+ clusteruuids := file_get_contents(push)
|
|
|
+ if trim(clusteruuids) == "" {
|
|
|
+ fmt.Println("ERROR. remoteDisks not found or it is empty! ")
|
|
|
+ os.Exit(0)
|
|
|
+ }
|
|
|
+ clusteruuids = trim(strings.Trim(clusteruuids, "\n"))
|
|
|
+ uuiddata = explode("\n", clusteruuids)
|
|
|
+ //Generate iplist and ready for posting file chunks
|
|
|
+ for i := 0; i < len(uuiddata); i++ {
|
|
|
+ thisip := resolveUUID(uuiddata[i])
|
|
|
+ clusterConfig := ":" + string(config.Port) + "/" + string(config.Prefix) + "/"
|
|
|
+ fullip := "http://" + thisip + clusterConfig
|
|
|
+ ipList = append(ipList, fullip)
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ fmt.Println("ERROR. remoteDisks not found or it is empty! ")
|
|
|
+ os.Exit(0)
|
|
|
+ }
|
|
|
+ fmt.Println(ipList)
|
|
|
+ //Ready to push. Create index file.
|
|
|
+ file_put_contents("index/"+vdir+string(".index"), "")
|
|
|
+ fileList, _ := filepath.Glob(storepath + uuid + "_*")
|
|
|
+ var pushResultTarget []string
|
|
|
+ var pushResultFilename []string
|
|
|
+ var failed []string
|
|
|
+ var failedTarget []string
|
|
|
+ for i := 0; i < len(fileList); i++ {
|
|
|
+ uploadIP := (ipList[i%len(ipList)])
|
|
|
+ r := pushFileChunk(uuid, uploadIP, fileList[i])
|
|
|
+ if trim(r) == "DONE" {
|
|
|
+ //This upload process is doing fine. Append to the result list
|
|
|
+ pushResultTarget = append(pushResultTarget, uuiddata[i%len(ipList)])
|
|
|
+ pushResultFilename = append(pushResultFilename, filepath.Base(fileList[i]))
|
|
|
+ fmt.Println("[OK] " + fileList[i] + " uploaded.")
|
|
|
+ } else {
|
|
|
+ failed = append(failed, fileList[i])
|
|
|
+ failedTarget = append(failedTarget, uuid)
|
|
|
+ }
|
|
|
}
|
|
|
+
|
|
|
+ for j := 0; j < len(pushResultTarget); j++ {
|
|
|
+ f, _ := os.OpenFile("index/"+vdir+string(".index"), os.O_APPEND|os.O_WRONLY, 0600)
|
|
|
+ defer f.Close()
|
|
|
+ f.WriteString(pushResultFilename[j])
|
|
|
+ f.WriteString(",")
|
|
|
+ f.WriteString(pushResultTarget[j])
|
|
|
+ f.WriteString("\n")
|
|
|
+ }
|
|
|
+ fmt.Println("[OK] All chunks uploaded.")
|
|
|
}
|
|
|
|
|
|
-func showHelp() {
|
|
|
+func pushFileChunk(uuid string, targetEndpoint string, filename string) string {
|
|
|
+ response := string(SendPostRequest(targetEndpoint+"SystemAOB/functions/arozdfs/upload.php", filename, "file"))
|
|
|
+ return response
|
|
|
+}
|
|
|
+
|
|
|
+func resolveUUID(uuid string) string {
|
|
|
+ tmp := []byte(uuid)
|
|
|
+ uuid = string(bytes.Trim(tmp, "\xef\xbb\xbf"))
|
|
|
+ uuid = strings.Trim(strings.Trim(uuid, "\n"), "\r")
|
|
|
+ if file_exists("../cluster/mappers/") {
|
|
|
+ if file_exists("../cluster/mappers/" + uuid + ".inf") {
|
|
|
+ return file_get_contents("../cluster/mappers/" + uuid + ".inf")
|
|
|
+ } else {
|
|
|
+ fmt.Println("ERROR. UUID not found. Please perform a scan first before using arozdfs functions")
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ fmt.Println("ERROR. Unable to resolve UUID to IP: cluster services not found. Continuing with UUID as IP address.")
|
|
|
+
|
|
|
+ }
|
|
|
+ return uuid
|
|
|
+}
|
|
|
+
|
|
|
+func SendPostRequest(url string, filename string, fieldname string) []byte {
|
|
|
+ file, err := os.Open(filename)
|
|
|
+
|
|
|
+ if err != nil {
|
|
|
+ panic(err)
|
|
|
+ }
|
|
|
+ defer file.Close()
|
|
|
+
|
|
|
+ body := &bytes.Buffer{}
|
|
|
+ writer := multipart.NewWriter(body)
|
|
|
+ part, err := writer.CreateFormFile(fieldname, filepath.Base(file.Name()))
|
|
|
+
|
|
|
+ if err != nil {
|
|
|
+ panic(err)
|
|
|
+ }
|
|
|
+
|
|
|
+ io.Copy(part, file)
|
|
|
+ writer.Close()
|
|
|
+ request, err := http.NewRequest("POST", url, body)
|
|
|
+
|
|
|
+ if err != nil {
|
|
|
+ panic(err)
|
|
|
+ }
|
|
|
+
|
|
|
+ request.Header.Add("Content-Type", writer.FormDataContentType())
|
|
|
+ client := &http.Client{}
|
|
|
+
|
|
|
+ response, err := client.Do(request)
|
|
|
+
|
|
|
+ if err != nil {
|
|
|
+ panic(err)
|
|
|
+ }
|
|
|
+ defer response.Body.Close()
|
|
|
+
|
|
|
+ content, err := ioutil.ReadAll(response.Body)
|
|
|
|
|
|
+ if err != nil {
|
|
|
+ panic(err)
|
|
|
+ }
|
|
|
+
|
|
|
+ return content
|
|
|
+}
|
|
|
+
|
|
|
+func showHelp() {
|
|
|
+ fmt.Println(`[arozdfs - Distributed File Storage Management Tool for ArOZ Online Cloud System]
|
|
|
+	This is a command line tool built for the ArOZ Online distributed cloud platform file chunking and redundant data storage.
|
|
|
+	Please refer to the ArOZ Online Documentation for more information.
|
|
|
+ `)
|
|
|
+ fmt.Println(`Supported commands:
|
|
|
+ help --> show all the help information
|
|
|
+
|
|
|
+ [Uploading to arozdfs commands]
|
|
|
+ slice
|
|
|
+ -infile <filename> --> declare the input file
|
|
|
+ -slice <filesize> --> declare the slicing filesize
|
|
|
+ -storepath <pathname> (Optional) --> Relative path for storing the sliced chunk files, default ./{file-uuid}
|
|
|
+
|
|
|
+ upload
|
|
|
+ -push <remoteDisks.config> --> push to a list of clusters and sync file index to other clusters
|
|
|
+ -storepath <pathname> --> The location where the file chunks are stored
|
|
|
+ -uuid <file uuid> --> uuid of the file to be uploaded
|
|
|
+ -vdir <file.index> --> where the file.index should be stored. (Use for file / folder navigation)
|
|
|
+
|
|
|
+ [Download from arozdfs commands]
|
|
|
+ download
|
|
|
+ -vdir <file.index> --> file.index location
|
|
|
+ -storepath <tmp directory> (Optional) --> define a special directory for caching the downloaded data chunks, default ./tmp
|
|
|
+
|
|
|
+ open
|
|
|
+ -storepath <tmp directory> --> the file chunks tmp folder, default ./tmp
|
|
|
+ -uuid <file uuid> --> the uuid which the file is stored
|
|
|
+ -outfile <filename> --> filepath for the exported and merged file
|
|
|
+ -c --> remove all stored file chunks after merging the file chunks.
|
|
|
+
|
|
|
+ [File Operations]
|
|
|
+	remove <file.index> --> remove all chunks related to this file index
|
|
|
+ rename <file.index> <newfile.index> --> rename all records related to this file
|
|
|
+ move <filepath/file.index> <newpath/file.index> --> move the file to a new path in index directory
|
|
|
+
|
|
|
+
|
|
|
+ [System checking commands]
|
|
|
+ checkfile <file.index> --> check if a file contains all chunks which has at least two copies of each chunks
|
|
|
+ rebuild --> Check all files on the system and fix all chunks which has corrupted
|
|
|
+ migrate <remoteDisks.config> --> Move all chunks from this host to other servers in the list.`)
|
|
|
}
|
|
|
|
|
|
func showNotFound() {
|