@@ -26,9 +26,9 @@ import (
 )
 
 var (
-	PoolMaxItems = 2500
+	PoolMaxItems = 500
 	PoolPathFinal = "/mnt/SC9000/storagePools"
-	PoolPathTemp = "/mnt/SC9000/storageTemp"
+	PoolPathTemp = "/mnt/ramfs/"
 )
 
 type Pool struct {
@@ -58,9 +58,9 @@ type PoolMaster struct {
 	CurrentPool *Pool
 	lock sync.Mutex
 
-	LocalPools []Pool
-	FullPools []Pool
-	WORMPools map[string]Pool
+	LocalPools []*Pool
+	FullPools []*Pool
+	WORMPools map[string]*Pool
 }
 type PoolPackResult struct {
 	PoolID string
@@ -207,7 +207,7 @@ func (p *Pool) Fetch(id string, writer io.Writer) (err error) {
 func NewPoolMaster(finalPath string, cachePath string) (poolMaster PoolMaster, err error) {
 	poolMaster.finalPath = finalPath
 	poolMaster.cachePath = cachePath
-	poolMaster.WORMPools = make(map[string]Pool)
+	poolMaster.WORMPools = make(map[string]*Pool)
 	//poolMaster.lock = sync.Mutex{}
 
 	destPath := filepath.Join(poolMaster.cachePath, "pool")
@@ -228,8 +228,9 @@ func NewPoolMaster(finalPath string, cachePath string) (poolMaster PoolMaster, e
 	}
 	return poolMaster, nil
 }
-func (p *PoolMaster) NewPool() (pool *Pool, err error) {
-	pool = &Pool{}
+func (p *PoolMaster) NewPool() (*Pool, error) {
+	var err error
+	pool := Pool{}
 	pool.PoolID = uuid.NewV4().String()
 	pool.Finalized = false
 	pool.ReadOnly = false
@@ -238,35 +239,39 @@ func (p *PoolMaster) NewPool() (pool *Pool, err error) {
 	//dir := filepath.Dir(destPath)
 	err = os.MkdirAll(destPath, os.ModePerm)
 	if err != nil {
-		return pool, err
+		return &pool, err
 	}
 
-	return pool, nil
+	return &pool, nil
 }
 
 func (p *PoolMaster) GetCurrentWriteablePool() (pool *Pool, err error) {
 	//fmt.Printf("Current Pool %s, ItemCount = %d\n", pool.PoolID, pool.itemCount)
 	if p.CurrentPool != nil && p.CurrentPool.itemCount >= PoolMaxItems {
+		fmt.Printf("Aquiring Lock for GetCurrentWriteablepool\n")
 		p.lock.Lock()
 		defer p.lock.Unlock()
+		defer fmt.Printf("unlock GetCurrentWriteablepool\n")
+		fmt.Printf("Aquired Lock success for GetCurrentWriteablepool\n")
 		p.CurrentPool.ReadOnly = true
-		p.FullPools = append(p.FullPools, *p.CurrentPool)
+		p.FullPools = append(p.FullPools, p.CurrentPool)
 		// queue for compression
-		fmt.Printf("GetCurrentWriteablePool(): current Pool (%s) is full (%d), creating new one", p.CurrentPool.PoolID, p.CurrentPool.itemCount)
+		fmt.Printf("GetCurrentWriteablePool(): current Pool (%s) is full (%d), creating new one\n", p.CurrentPool.PoolID, p.CurrentPool.itemCount)
 		p.CurrentPool = nil
 	}
 	if p.CurrentPool == nil {
-		pool, err = p.AcquireNewOrRecoverPool()
+		fmt.Printf("Creating new Pool")
+		p.CurrentPool, err = p.AcquireNewOrRecoverPool()
+		fmt.Printf("... got [%s]\n", p.CurrentPool.PoolID)
 		if err != nil {
 			return pool, err
 		}
-		p.CurrentPool = pool
-		return pool, nil
 	}
 
 	return p.CurrentPool, nil
 }
-func RestorePoolFromFolder(folderPath string) (pool Pool, err error) {
+func RestorePoolFromFolder(folderPath string) (pool *Pool, err error) {
+	pool = &Pool{}
 	pool.PoolID = path.Base(folderPath)
 
 	entries, err := os.ReadDir(folderPath)
@@ -285,8 +290,11 @@ func RestorePoolFromFolder(folderPath string) (pool Pool, err error) {
 	return pool, err
 }
 func (p *PoolMaster) ScanForLocalPools() (err error) {
+	fmt.Printf("Aquiring Lock for ScanForLocalPools\n")
 	p.lock.Lock()
 	defer p.lock.Unlock()
+	defer fmt.Printf("unlock ScanForLocalPools\n")
+	fmt.Printf("Aquired Lock success for ScanForLocalPools\n")
 	entries, err := os.ReadDir(filepath.Join(p.cachePath, "pool"))
 	if err != nil {
 		return err
@@ -401,7 +409,10 @@ func (p *PoolMaster) MovePoolPackToWORM(packResult PoolPackResult) (err error) {
 	startTime := time.Now()
 
 	targetFileName := filepath.Join(p.finalPath, fmt.Sprintf("%s.tar", packResult.PoolID))
-	os.Rename(packResult.outputFileName, targetFileName)
+	err = common.MoveFile(packResult.outputFileName, targetFileName)
+	if err != nil {
+		return err
+	}
 
 	tarFileCheck, err := os.Open(targetFileName)
 	if err != nil {
@@ -427,8 +438,11 @@ func (p *PoolMaster) PackPool(poolID string) (packResult PoolPackResult, err err
 	startTime := time.Now()
 	packResult.PoolID = poolID
 
+	fmt.Printf("Aquiring Lock for PackPool(%s)\n", poolID)
 	p.lock.Lock()
 	defer p.lock.Unlock()
+	defer fmt.Printf("unlock PackPool\n")
+	fmt.Printf("Aquired Lock success for PackPool(%s)\n", poolID)
 
 	packResult.outputFileName = filepath.Join(p.cachePath, "pool", fmt.Sprintf("%s.tar", poolID))
 	tarFile, err := os.Create(packResult.outputFileName)
@@ -521,8 +535,8 @@ func (p *PoolMaster) PackPool(poolID string) (packResult PoolPackResult, err err
 func (p *PoolMaster) AcquireNewOrRecoverPool() (pool *Pool, err error) {
 	// p.NewPool()
 	for _, localPool := range p.LocalPools {
-		if !localPool.ReadOnly {
-			return &localPool, nil
+		if !localPool.ReadOnly && localPool.itemCount < 500 {
+			return localPool, nil
 		}
 	}
 	return p.NewPool()
@@ -585,8 +599,11 @@ func (p *PoolMaster) FetchLoadWORM(chunkID string, fileID string, writer io.Writ
 	// else load wormPool into disk-cache extract to "worm"
 	// wormMode
 
+	fmt.Printf("Aquiring Lock for FetchLoadWORM\n")
 	p.lock.Lock()
 	defer p.lock.Unlock()
+	defer fmt.Printf("unlock FetchLoadWORM\n")
+	fmt.Printf("Aquired Lock success for FetchLoadWORM\n")
 
 	var dboChunk common.DB_Chunk
 	_, err = colChunk.ReadDocument(arangoCTX, chunkID, &dboChunk)
@@ -607,7 +624,7 @@ func (p *PoolMaster) FetchLoadWORM(chunkID string, fileID string, writer io.Writ
 		return err
 	}
 	fmt.Println("extracted")
-	p.WORMPools[loadedWormPool.PoolID] = loadedWormPool
+	p.WORMPools[loadedWormPool.PoolID] = &loadedWormPool
 	return loadedWormPool.Fetch(fileID, writer)
 	//return nil
 }
@@ -707,9 +724,11 @@ func (p *PoolMaster) Store(id string, src io.Reader, targetSize int64) (err erro
 	}
 
 	fmt.Printf("Store(%s)\n", id)
-
+	fmt.Printf("Aquiring Lock for Store\n")
 	p.lock.Lock()
 	defer p.lock.Unlock()
+	defer fmt.Printf("unlock Store\n")
+	fmt.Printf("Aquired Lock success for Store\n")
 	// figuring out paths
 	poolFolder := filepath.Join(p.cachePath, "pool", pool.PoolID)
 	destPath := filepath.Join(poolFolder, id)
@@ -764,7 +783,9 @@ func (p *PoolMaster) Store(id string, src io.Reader, targetSize int64) (err erro
 
 	return nil
 }
-
+func removeFromSlice(slice []*Pool, s int) []*Pool {
+	return append(slice[:s], slice[s+1:]...)
+}
 func main() {
 	err := InitDatabase()
 	if err != nil {
@@ -781,8 +802,39 @@ func main() {
 		panic(err)
 	}
 
+	go func() {
+		for {
+			for index, fullPool := range poolMaster.FullPools {
+				poolMaster.lock.Lock()
+
+				packResult, err := poolMaster.PackPool(fullPool.PoolID)
+				if err != nil {
+					panic(err)
+				}
+				err = poolMaster.ImportPoolPackResult(packResult)
+				if err != nil {
+					panic(err)
+				}
+				err = poolMaster.MovePoolPackToWORM(packResult)
+				if err != nil {
+					panic(err)
+				}
+				os.RemoveAll(filepath.Join(poolMaster.cachePath, "pool", fullPool.PoolID))
+				poolMaster.FullPools = removeFromSlice(poolMaster.FullPools, index)
+
+				poolMaster.lock.Unlock()
+			}
+			time.Sleep(time.Second * 10)
+		}
+	}()
 	for _, localPool := range poolMaster.LocalPools {
 		if localPool.ReadOnly {
+
+			dboChunkExists, err := colChunk.DocumentExists(arangoCTX, localPool.PoolID)
+			if err != nil {
+				panic(err)
+			}
+			if !dboChunkExists {
 			fmt.Printf("Packing Pool %s\n", localPool.PoolID)
 			packResult, err := poolMaster.PackPool(localPool.PoolID)
 			if err != nil {
@@ -796,6 +848,9 @@ func main() {
 			if err != nil {
 				panic(err)
 			}
+				os.RemoveAll(filepath.Join(poolMaster.cachePath, "pool", localPool.PoolID))
+			}
+			//os.RemoveAll(filepath.Join(poolMaster.cachePath, "pool", localPool.PoolID))
 		}
 		// packResult.FileCount
 	}