main.go 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731
  1. package main
  2. import (
  3. "bytes"
  4. "encoding/json"
  5. "fmt"
  6. "io"
  7. "io/ioutil"
  8. "math"
  9. "mime/multipart"
  10. "net/http"
  11. "os"
  12. "path"
  13. "path/filepath"
  14. "strconv"
  15. "strings"
  16. "time"
  17. "github.com/FossoresLP/go-uuid-v4"
  18. )
  19. func check(e error) {
  20. if e != nil {
  21. panic(e)
  22. }
  23. }
  24. //ArOZ PHP-Golang Bridge
  25. //The following sections remap the PHP functions to golang for the ease of development
  26. func file_exists(filepath string) bool {
  27. if _, err := os.Stat(filepath); !os.IsNotExist(err) {
  28. return true
  29. }
  30. return false
  31. }
  32. func mkdir(filepath string) {
  33. os.MkdirAll(filepath, os.ModePerm)
  34. }
  35. func file_put_contents(file string, data string) bool {
  36. f, err := os.Create(file)
  37. check(err)
  38. _, err = f.WriteString(data)
  39. defer f.Close()
  40. if err != nil {
  41. return false
  42. }
  43. return true
  44. }
  45. func file_get_contents(file string) string {
  46. b, err := ioutil.ReadFile(file)
  47. check(err)
  48. return string(b)
  49. }
  50. func strtolower(word string) string {
  51. return strings.ToLower(word)
  52. }
  53. func strtoupper(word string) string {
  54. return strings.ToUpper(word)
  55. }
  56. func trim(word string) string {
  57. return strings.Trim(word, " ")
  58. }
  59. func strlen(word string) int {
  60. return len(word)
  61. }
  62. func count(array []string) int {
  63. return len(array)
  64. }
  65. func explode(key string, word string) []string {
  66. return strings.Split(word, key)
  67. }
  68. func implode(key string, array []string) string {
  69. return strings.Join(array[:], key)
  70. }
  71. func str_replace(target string, result string, word string) string {
  72. return strings.Replace(word, target, result, -1)
  73. }
  74. func in_array(a string, list []string) bool {
  75. for _, b := range list {
  76. if b == a {
  77. return true
  78. }
  79. }
  80. return false
  81. }
  82. func strpos(word string, target string) int {
  83. return strings.Index(word, target)
  84. }
  85. func dirname(filepath string) string {
  86. return path.Dir(filepath)
  87. }
  88. func basename(fullpath string) string {
  89. return filepath.Base(fullpath)
  90. }
  91. //End of mapping functions
  92. //Utilities functions
// genUUIDv4 returns a freshly generated random (version 4) UUID string,
// using the third-party FossoresLP/go-uuid-v4 package. It panics via
// check() if the generator fails (e.g. the system entropy source errors).
func genUUIDv4() string {
	uuid, err := uuid.NewString()
	check(err)
	return uuid
}
  98. func padZeros(thisInt string, maxval int) string {
  99. targetLength := len(strconv.Itoa(maxval))
  100. result := thisInt
  101. if len(thisInt) < targetLength {
  102. padzeros := targetLength - len(thisInt)
  103. for i := 0; i < padzeros; i++ {
  104. result = "0" + result
  105. }
  106. }
  107. return result
  108. }
// clusterConfig models the JSON in clusterSetting.config. Its fields are
// combined into "http://<host>:<Port>/<Prefix>/" when building URLs for
// other cluster hosts (see startDownloadProc / startUploadProc).
type clusterConfig struct {
	Prefix string `json:"prefix"` // URL path prefix of the remote ArOZ service
	Port   string `json:"port"`   // remote service port; kept as a string for URL concatenation
}
//End of utilities functions
//System constants
const clusterServices = "../cluster/" // root of the cluster services tree (holds mappers/<uuid>.inf files)
const delChunkScript = "SystemAOB/functions/arozdfs/delChunk.php?chunkuuid=" // remote endpoint: delete one chunk by name
const requestScript = "SystemAOB/functions/arozdfs/request.php?chunkuuid="   // remote endpoint: download one chunk by name
const uploadScript = "SystemAOB/functions/arozdfs/upload.php"                // remote endpoint: multipart chunk upload
const clusterSetting = "clusterSetting.config"                               // local JSON file unmarshalled into clusterConfig

// config holds the cluster connection settings; populated (best-effort) in init().
var config clusterConfig
  121. func init() {
  122. //Check if the required directory exists. If not, create it.
  123. if !file_exists("chunks/") {
  124. mkdir("chunks/")
  125. }
  126. if !file_exists("uploads/") {
  127. mkdir("uploads/")
  128. }
  129. if !file_exists("index/") {
  130. mkdir("index/")
  131. }
  132. if !file_exists("tmp/") {
  133. mkdir("tmp/")
  134. }
  135. if !file_exists("remoteDisks.config") {
  136. file_put_contents("remoteDisks.config", "")
  137. }
  138. //Load config from clusterSetting.config
  139. jsonFile, _ := os.Open(clusterSetting)
  140. byteValue, _ := ioutil.ReadAll(jsonFile)
  141. json.Unmarshal(byteValue, &config)
  142. }
  143. func main() {
  144. //arozdfs implementation in Golang
  145. //Refer to the help section for the usable commands and parameters
  146. if len(os.Args) == 1 {
  147. fmt.Println("ERROR. Undefined function group or operations. Type 'arozdfs help' for usage instructions. ")
  148. return
  149. }
  150. //For each argument, start the processing
  151. switch functgroup := os.Args[1]; functgroup {
  152. case "help":
  153. showHelp()
  154. case "slice":
  155. startSlicingProc()
  156. case "upload":
  157. startUploadProc()
  158. case "download":
  159. startDownloadProc()
  160. case "open":
  161. openChunkedFile()
  162. case "remove":
  163. removeFile()
  164. case "debug":
  165. fmt.Println(config.Port + "/" + config.Prefix) //Debug function. Change this line for unit testing
  166. default:
  167. showNotFound()
  168. }
  169. /*
  170. //Examples for using the Go-PHP bridge functions
  171. file_put_contents("Hello World.txt", "This is the content of the file.")
  172. fmt.Println(file_get_contents("Hello World.txt"))
  173. array := explode(",", "Apple,Orange,Pizza")
  174. fmt.Println(array)
  175. newstring := implode(",", array)
  176. fmt.Println(newstring)
  177. fmt.Println(in_array("Pizza", array))
  178. fmt.Println(strpos(newstring, "Pizza"))
  179. fmt.Println(strtoupper("hello world"))
  180. fmt.Println(str_replace("Pizza", "Ramen", newstring))
  181. */
  182. }
  183. func startDownloadProc() {
  184. vdir := ""
  185. storepath := "tmp/"
  186. for i, arg := range os.Args {
  187. if strpos(arg, "-") == 0 {
  188. //This is a parameter defining keyword
  189. if arg == "-vdir" {
  190. vdir = os.Args[i+1]
  191. } else if arg == "-storepath" {
  192. storepath = os.Args[i+1]
  193. //Check if the storepath is end with /. if not, append it into the pathname
  194. if storepath[len(storepath)-1:] != "/" {
  195. storepath = storepath + "/"
  196. }
  197. }
  198. }
  199. }
  200. if vdir != "" {
  201. //Go ahead the download process and get the content of the file
  202. fc := strings.Trim(str_replace("\r\n", "\n", file_get_contents("index/"+vdir+".index")), "\n")
  203. datachunks := explode("\n", fc)
  204. var filelist []string
  205. var locations []string
  206. for i := 0; i < len(datachunks); i++ {
  207. tmp := explode(",", datachunks[i])
  208. filechunk := tmp[0]
  209. locationUUID := tmp[1]
  210. filelist = append(filelist, filechunk)
  211. thisip := resolveUUID(locationUUID)
  212. clusterConfig := ":" + string(config.Port) + "/" + string(config.Prefix) + "/"
  213. fullip := "http://" + thisip + clusterConfig
  214. locations = append(locations, fullip)
  215. }
  216. //fmt.Println(filelist)
  217. //fmt.Println(locations)
  218. //Start the request process
  219. for j := 0; j < len(filelist); j++ {
  220. //Multithreading download for each fileitem
  221. filename := filelist[j]
  222. targetURL := locations[j] + requestScript + string(filename)
  223. go downloadFileChunkWithOutput(storepath+filename, targetURL, filename)
  224. }
  225. fileUUID := explode("_", filelist[0])[0] //Getting the first part of a file with uuid, e.g. {uuid}_0 --> get only the {uuid} part
  226. //Wait for the go routine to finish
  227. downloadFinishIndicators, _ := filepath.Glob(storepath + fileUUID + "_*.done")
  228. for len(downloadFinishIndicators) < len(filelist) {
  229. time.Sleep(time.Duration(500) * time.Millisecond)
  230. downloadFinishIndicators, _ = filepath.Glob(storepath + fileUUID + "_*.done")
  231. }
  232. //Clear up all indicators
  233. for k := 0; k < len(downloadFinishIndicators); k++ {
  234. os.Remove(downloadFinishIndicators[k])
  235. }
  236. fmt.Println("[OK] All chunks downloaded")
  237. } else {
  238. fmt.Println("ERROR. vdir cannot be empty")
  239. os.Exit(0)
  240. }
  241. }
  242. func downloadFileChunkWithOutput(filepath string, url string, filename string) {
  243. if DownloadFile(filepath, url) {
  244. fmt.Println("[OK] " + filename)
  245. file_put_contents(filepath+".done", "")
  246. }
  247. }
  248. func DownloadFile(filepath string, url string) bool {
  249. // Get the data
  250. resp, err := http.Get(url)
  251. if err != nil {
  252. return false
  253. }
  254. defer resp.Body.Close()
  255. // Create the file
  256. out, err := os.Create(filepath)
  257. if err != nil {
  258. return false
  259. }
  260. defer out.Close()
  261. // Write the body to file
  262. _, err = io.Copy(out, resp.Body)
  263. return true
  264. }
  265. func startSlicingProc() {
  266. infile := ""
  267. slice := 64 //Default 64MB per file chunk
  268. fileUUID := genUUIDv4()
  269. storepath := fileUUID + "/"
  270. for i, arg := range os.Args {
  271. if strpos(arg, "-") == 0 {
  272. //This is a parameter defining keyword
  273. if arg == "-infile" {
  274. infile = os.Args[i+1]
  275. } else if arg == "-storepath" {
  276. storepath = os.Args[i+1]
  277. //Check if the storepath is end with /. if not, append it into the pathname
  278. if storepath[len(storepath)-1:] != "/" {
  279. storepath = storepath + "/"
  280. }
  281. } else if arg == "-slice" {
  282. sliceSize, err := strconv.Atoi(os.Args[i+1])
  283. check(err)
  284. slice = sliceSize
  285. }
  286. }
  287. }
  288. if slice <= 0 {
  289. fmt.Println("ERROR. slice size cannot be smaller or equal to 0")
  290. os.Exit(0)
  291. }
  292. if storepath != "" && infile != "" {
  293. //fmt.Println(storepath + " " + infile + " " + strconv.Itoa(slice) + " " + fileUUID)
  294. splitFileChunks(infile, "chunks/"+storepath, fileUUID, slice)
  295. //fmt.Println(fileUUID)
  296. } else {
  297. fmt.Println("ERROR. Undefined storepath or infile.")
  298. }
  299. }
  300. func splitFileChunks(rawfile string, outputdir string, outfilename string, chunksize int) bool {
  301. if !file_exists(outputdir) {
  302. mkdir(outputdir)
  303. }
  304. fileToBeChunked := rawfile
  305. file, err := os.Open(fileToBeChunked)
  306. if err != nil {
  307. return false
  308. }
  309. defer file.Close()
  310. fileInfo, _ := file.Stat()
  311. var fileSize int64 = fileInfo.Size()
  312. var fileChunk = float64(chunksize * 1024 * 1024) // chunksize in MB
  313. // calculate total number of parts the file will be chunked into
  314. totalPartsNum := uint64(math.Ceil(float64(fileSize) / float64(fileChunk)))
  315. fmt.Printf("[Info] Splitting to %d pieces.\n", totalPartsNum)
  316. for i := uint64(0); i < totalPartsNum; i++ {
  317. partSize := int(math.Min(fileChunk, float64(fileSize-int64(i*uint64(fileChunk)))))
  318. partBuffer := make([]byte, partSize)
  319. file.Read(partBuffer)
  320. // write to disk
  321. fileName := outputdir + outfilename + "_" + padZeros(strconv.FormatUint(i, 10), int(totalPartsNum))
  322. _, err := os.Create(fileName)
  323. if err != nil {
  324. return false
  325. }
  326. // write/save buffer to disk
  327. ioutil.WriteFile(fileName, partBuffer, os.ModeAppend)
  328. fmt.Println("[Export] ", fileName)
  329. }
  330. return true
  331. }
  332. func openChunkedFile() {
  333. storepath := "tmp/"
  334. uuid := ""
  335. outfile := ""
  336. removeAfterMerge := 0
  337. for i, arg := range os.Args {
  338. if strpos(arg, "-") == 0 {
  339. //This is a parameter defining keyword
  340. if arg == "-uuid" {
  341. uuid = os.Args[i+1]
  342. } else if arg == "-storepath" {
  343. storepath = os.Args[i+1]
  344. //Check if the storepath is end with /. if not, append it into the pathname
  345. if storepath[len(storepath)-1:] != "/" {
  346. storepath = storepath + "/"
  347. }
  348. } else if arg == "-outfile" {
  349. outfile = os.Args[i+1]
  350. } else if arg == "-c" {
  351. //Remove the file chunks after the merging process
  352. removeAfterMerge = 1
  353. }
  354. }
  355. }
  356. if storepath != "" && uuid != "" && outfile != "" {
  357. //fmt.Println(storepath + " " + uuid + " " + outfile)
  358. if joinFileChunks(storepath+uuid, outfile) {
  359. //Do checksum here
  360. //Remove all files if -c is used
  361. if removeAfterMerge == 1 {
  362. matches, _ := filepath.Glob(storepath + uuid + "_*")
  363. for j := 0; j < len(matches); j++ {
  364. os.Remove(matches[j])
  365. }
  366. }
  367. } else {
  368. fmt.Println("ERROR. Unable to merge file chunks.")
  369. }
  370. } else {
  371. fmt.Println("ERROR. Undefined storepath, outfile or uuid.")
  372. }
  373. }
  374. func joinFileChunks(fileuuid string, outfilename string) bool {
  375. matches, _ := filepath.Glob(fileuuid + "_*")
  376. if len(matches) == 0 {
  377. fmt.Println("ERROR. No filechunk file for this uuid.")
  378. return false
  379. }
  380. outfile, err := os.Create(outfilename)
  381. if err != nil {
  382. return false
  383. }
  384. //For each file chunk, merge them into the output file
  385. for j := 0; j < len(matches); j++ {
  386. b, _ := ioutil.ReadFile(matches[j])
  387. outfile.Write(b)
  388. }
  389. return true
  390. }
  391. func startUploadProc() {
  392. push := "remoteDisks.config"
  393. storepath := "chunks/"
  394. uuid := ""
  395. vdir := ""
  396. for i, arg := range os.Args {
  397. if strpos(arg, "-") == 0 {
  398. //This is a parameter defining keyword
  399. if arg == "-uuid" {
  400. uuid = os.Args[i+1]
  401. } else if arg == "-storepath" {
  402. storepath = os.Args[i+1]
  403. //Check if the storepath is end with /. if not, append it into the pathname
  404. if storepath[len(storepath)-1:] != "/" {
  405. storepath = "chunks/" + storepath + "/"
  406. }
  407. } else if arg == "-vdir" {
  408. vdir = os.Args[i+1]
  409. } else if arg == "-push" {
  410. //Remove the file chunks after the merging process
  411. push = os.Args[i+1]
  412. }
  413. }
  414. }
  415. //Check if the input data are valid
  416. if uuid == "" || vdir == "" {
  417. fmt.Println("ERROR. Undefined uuid or vdir.")
  418. os.Exit(0)
  419. }
  420. if !file_exists(clusterSetting) {
  421. fmt.Println("ERROR. clusterSetting configuration not found")
  422. os.Exit(0)
  423. }
  424. if file_exists("index/" + vdir + string(".index")) {
  425. fmt.Println("ERROR. Given file already exists in vdir. Please use remove before uploading a new file on the same vdir location.")
  426. os.Exit(0)
  427. }
  428. //Starting the uuid to ip conversion process
  429. var ipList []string
  430. var uuiddata []string
  431. var uploadUUIDList []string
  432. //Read cluster uuid list from remoteDisks.config
  433. if file_exists(push) {
  434. clusteruuids := file_get_contents(push)
  435. if trim(clusteruuids) == "" {
  436. fmt.Println("ERROR. remoteDisks not found or it is empty! ")
  437. os.Exit(0)
  438. }
  439. clusteruuids = trim(strings.Trim(clusteruuids, "\n"))
  440. uuiddata = explode("\n", clusteruuids)
  441. //Generate iplist and ready for posting file chunks
  442. for i := 0; i < len(uuiddata); i++ {
  443. thisuuid := uuiddata[i%len(uuiddata)]
  444. uploadUUIDList = append(uploadUUIDList, thisuuid)
  445. thisip := resolveUUID(thisuuid)
  446. clusterConfig := ":" + string(config.Port) + "/" + string(config.Prefix) + "/"
  447. fullip := "http://" + thisip + clusterConfig
  448. ipList = append(ipList, fullip)
  449. }
  450. } else {
  451. fmt.Println("ERROR. remoteDisks not found or it is empty! ")
  452. os.Exit(0)
  453. }
  454. //Handshake with clusters, create auth token if needed
  455. if !createToken(ipList) {
  456. fmt.Println("ERROR. Problem occured while trying to create token for one of the cluster's host. Upload process terminated.")
  457. os.Exit(0)
  458. }
  459. //Ready to push. Create index file.
  460. file_put_contents("index/"+vdir+string(".index"), "")
  461. fileList, _ := filepath.Glob(storepath + uuid + "_*")
  462. //Make a directory for storing the result of the upload
  463. if !file_exists(storepath + ".upload/") {
  464. mkdir(storepath + ".upload/")
  465. }
  466. for i := 0; i < len(fileList); i++ {
  467. uploadIP := (ipList[i%len(ipList)])
  468. uploadUUID := (uploadUUIDList[i%len(ipList)])
  469. go SendPostRequest(uploadIP+uploadScript, fileList[i], "file", storepath+".upload/"+basename(fileList[i])+".done", uploadUUID)
  470. }
  471. //Retry for error chunks. Not implemented yet
  472. //Wait for all upload process to end
  473. uploadFinishIndicators, _ := filepath.Glob(storepath + ".upload/" + uuid + "_*.done")
  474. for len(uploadFinishIndicators) < len(fileList) {
  475. time.Sleep(time.Duration(500) * time.Millisecond)
  476. uploadFinishIndicators, _ = filepath.Glob(storepath + ".upload/" + uuid + "_*.done")
  477. }
  478. //Write the upload results to index file
  479. for j := 0; j < len(uploadFinishIndicators); j++ {
  480. f, _ := os.OpenFile("index/"+vdir+string(".index"), os.O_APPEND|os.O_WRONLY, 0600)
  481. defer f.Close()
  482. f.WriteString(str_replace(".done", "", basename(uploadFinishIndicators[j])))
  483. f.WriteString(",")
  484. f.WriteString(file_get_contents(uploadFinishIndicators[j]))
  485. f.WriteString("\n")
  486. }
  487. //Clear up all indicators
  488. for k := 0; k < len(uploadFinishIndicators); k++ {
  489. os.Remove(uploadFinishIndicators[k])
  490. }
  491. os.Remove(storepath + ".upload/")
  492. fmt.Println("[OK] All chunks uploaded.")
  493. }
  494. func createToken(ipList []string) bool {
  495. //Not implemented
  496. return true
  497. }
  498. func resolveUUID(uuid string) string {
  499. tmp := []byte(uuid)
  500. uuid = string(bytes.Trim(tmp, "\xef\xbb\xbf"))
  501. uuid = strings.Trim(strings.Trim(uuid, "\n"), "\r")
  502. if file_exists(clusterServices + "mappers/") {
  503. if file_exists(clusterServices + "/mappers/" + uuid + ".inf") {
  504. return file_get_contents(clusterServices + "/mappers/" + uuid + ".inf")
  505. } else {
  506. fmt.Println("ERROR. UUID not found. Please perform a scan first before using arozdfs functions")
  507. }
  508. } else {
  509. fmt.Println("ERROR. Unable to resolve UUID to IP: cluster services not found. Continuing with UUID as IP address.")
  510. }
  511. return uuid
  512. }
  513. func SendPostRequest(url string, filename string, fieldname string, resultName string, targetUUID string) []byte {
  514. file, err := os.Open(filename)
  515. if err != nil {
  516. panic(err)
  517. }
  518. defer file.Close()
  519. body := &bytes.Buffer{}
  520. writer := multipart.NewWriter(body)
  521. part, err := writer.CreateFormFile(fieldname, filepath.Base(file.Name()))
  522. if err != nil {
  523. panic(err)
  524. }
  525. io.Copy(part, file)
  526. writer.Close()
  527. request, err := http.NewRequest("POST", url, body)
  528. if err != nil {
  529. panic(err)
  530. }
  531. request.Header.Add("Content-Type", writer.FormDataContentType())
  532. client := &http.Client{}
  533. response, err := client.Do(request)
  534. if err != nil {
  535. panic(err)
  536. }
  537. defer response.Body.Close()
  538. content, err := ioutil.ReadAll(response.Body)
  539. if err != nil {
  540. panic(err)
  541. }
  542. //Upload suceed. Create a .done file to indicate this file is done uploading
  543. file_put_contents(resultName, string(targetUUID))
  544. fmt.Println("[OK] " + str_replace(".done", "", basename(resultName)) + " uploaded.")
  545. return content
  546. }
  547. func removeFile() {
  548. fileindex := ""
  549. if len(os.Args) == 3 {
  550. fileindex = os.Args[2]
  551. }
  552. if fileindex == "" {
  553. fmt.Println("ERROR. undefined file index. Usage: arozdfs file.ext (Root as ./index)")
  554. os.Exit(0)
  555. }
  556. indexFileRealpath := "index/" + fileindex + ".index"
  557. if !file_exists(indexFileRealpath) {
  558. fmt.Println("ERROR. fileindex not found in " + indexFileRealpath)
  559. os.Exit(0)
  560. }
  561. //Everything checked and go ahead to load the list into variables
  562. var filelist []string
  563. var targetUUIDs []string
  564. fc := strings.Trim(str_replace("\r\n", "\n", file_get_contents(indexFileRealpath)), "\n")
  565. datachunks := explode("\n", fc)
  566. for i := 0; i < len(datachunks); i++ {
  567. thisChunk := datachunks[i]
  568. thisChunk = strings.Trim(strings.Trim(thisChunk, "\n"), "\r")
  569. chunkdata := explode(",", thisChunk)
  570. filelist = append(filelist, chunkdata[0])
  571. targetUUIDs = append(targetUUIDs, "http://"+resolveUUID(chunkdata[1])+":"+config.Port+"/"+config.Prefix+"/")
  572. }
  573. //fmt.Println(filelist)
  574. //fmt.Println(targetUUIDs)
  575. //Remove the chunks on each endpoints
  576. failed := len(filelist)
  577. var failedChunk []string
  578. for j := 0; j < len(filelist); j++ {
  579. targetEndpoint := targetUUIDs[j] + delChunkScript + filelist[j]
  580. resp, err := http.Get(targetEndpoint)
  581. if err != nil {
  582. // handle error
  583. fmt.Println("ERROR. Unable to connect to endpoint: " + targetEndpoint + ". Continue with the rest of the endpoints.")
  584. }
  585. body, _ := ioutil.ReadAll(resp.Body)
  586. fmt.Println("[REPLY] " + string(body) + " for " + filelist[j])
  587. if trim(string(body)) == "DONE" {
  588. failed--
  589. } else {
  590. failedChunk = append(failedChunk, filelist[j])
  591. }
  592. resp.Body.Close()
  593. }
  594. if failed == 0 {
  595. fmt.Println("[OK] All file chunks has been removed from the clusters")
  596. os.Remove(indexFileRealpath)
  597. } else {
  598. fmt.Println("[WARNING] Unable to remove at least one chunks from cluster. Index file is not removed.")
  599. fmt.Println(failedChunk)
  600. }
  601. }
// showHelp prints the usage banner and the full command reference for every
// supported (and planned) arozdfs subcommand. The text is emitted verbatim;
// note some commands listed here (rename, move, checkfile, rebuild, migrate)
// are documented but not yet wired up in main()'s dispatch switch.
func showHelp() {
	fmt.Println(`[arozdfs - Distributed File Storage Management Tool for ArOZ Online Cloud System]
This is a command line tool build for the ArOZ Online distributed cloud platform file chunking and redundant data storage.
Please refer to the ArOZ Online Documentaion for more information.
`)
	fmt.Println(`Supported commands:
help --> show all the help information
[Uploading to arozdfs commands]
slice
-infile <filename> --> declare the input file
-slice <filesize> --> declare the slicing filesize
-storepath <pathname> (Optional) --> Relative path for storing the sliced chunk files, default ./{file-uuid}
upload
-storepath <pathname> --> The location where the file chunks are stored, root start at ./chunks, not recommend for leaving this empty
-uuid <file uuid> --> uuid of the file to be uploaded
-vdir <file.index> --> where the file.index should be stored. (Use for file / folder navigation)
-push <remoteDisks.config> (Optional) --> push to a list of clusters and sync file index to other clusters, default ./remoteDisks.config
[Download from arozdfs commands]
download
-vdir <file.index> --> file.index location
-storepath <tmp directory> (Optional) --> define a special directory for caching the downloaded data chunks, default ./tmp
open
-uuid <file uuid> --> the uuid which the file is stored
-outfile <filename> --> filepath for the exported and merged file
-storepath <tmp directory> (Optional)--> the file chunks tmp folder, default ./tmp
-c (Optional) --> remove all stored file chunks after merging the file chunks.
[File Operations]
remove <file.index> --> remove all chunks related to this file index
rename <file.index> <newfile.index> --> rename all records related to this file
move <filepath/file.index> <newpath/file.index> --> move the file to a new path in index directory
[System checking commands]
checkfile <file.index> --> check if a file contains all chunks which has at least two copies of each chunks
rebuild --> Check all files on the system and fix all chunks which has corrupted
migrate <remoteDisks.config> --> Move all chunks from this host to other servers in the list.`)
}
  637. func showNotFound() {
  638. fmt.Println("ERROR. Command not found: " + os.Args[1])
  639. }