GCS fs: move all gcsfs related implementations to its own package - afero - [fork] go afero port for 9front
(HTM) git clone git@git.drkhsh.at/afero.git
(DIR) Log
(DIR) Files
(DIR) Refs
(DIR) README
(DIR) LICENSE
---
(DIR) commit 165e3dc3a506e9c3f5871a897aa33ac2c5ced99b
(DIR) parent d70f944720bbf76b4f152dbe1b175ec4c03a3e83
(HTM) Author: Nicola Murino <nicola.murino@gmail.com>
Date: Mon, 27 Dec 2021 18:55:57 +0100
GCS fs: move all gcsfs related implementations to its own package
this way we don't force any application that imports afero to include
gcsfs deps in its binary
Diffstat:
D gcs.go | 115 -------------------------------
D gcs_mocks.go | 270 -------------------------------
D gcs_test.go | 807 -------------------------------
M gcsfs/file.go | 2 +-
M gcsfs/file_info.go | 2 +-
M gcsfs/file_resource.go | 2 +-
M gcsfs/fs.go | 48 ++++++++++++++++----------------
R gcs-fake-service-account.json -> g… | 0
A gcsfs/gcs.go | 114 +++++++++++++++++++++++++++++++
A gcsfs/gcs_mocks.go | 269 +++++++++++++++++++++++++++++++
A gcsfs/gcs_test.go | 806 +++++++++++++++++++++++++++++++
11 files changed, 1216 insertions(+), 1219 deletions(-)
---
(DIR) diff --git a/gcs.go b/gcs.go
@@ -1,115 +0,0 @@
-// Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
-//
-// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
-// licensed under Apache License 2.0.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package afero
-
-import (
- "context"
- "os"
- "time"
-
- "github.com/spf13/afero/gcsfs"
-
- "cloud.google.com/go/storage"
- "github.com/googleapis/google-cloud-go-testing/storage/stiface"
-
- "google.golang.org/api/option"
-)
-
-type GcsFs struct {
- source *gcsfs.GcsFs
-}
-
-// NewGcsFS creates a GCS file system, automatically instantiating and decorating the storage client.
-// You can provide additional options to be passed to the client creation, as per
-// cloud.google.com/go/storage documentation
-func NewGcsFS(ctx context.Context, opts ...option.ClientOption) (Fs, error) {
- if json := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON"); json != "" {
- opts = append(opts, option.WithCredentialsJSON([]byte(json)))
- }
- client, err := storage.NewClient(ctx, opts...)
- if err != nil {
- return nil, err
- }
-
- return NewGcsFSFromClient(ctx, client)
-}
-
-// NewGcsFSWithSeparator is the same as NewGcsFS, but the files system will use the provided folder separator.
-func NewGcsFSWithSeparator(ctx context.Context, folderSeparator string, opts ...option.ClientOption) (Fs, error) {
- client, err := storage.NewClient(ctx, opts...)
- if err != nil {
- return nil, err
- }
-
- return NewGcsFSFromClientWithSeparator(ctx, client, folderSeparator)
-}
-
-// NewGcsFSFromClient creates a GCS file system from a given storage client
-func NewGcsFSFromClient(ctx context.Context, client *storage.Client) (Fs, error) {
- c := stiface.AdaptClient(client)
-
- return &GcsFs{gcsfs.NewGcsFs(ctx, c)}, nil
-}
-
-// NewGcsFSFromClientWithSeparator is the same as NewGcsFSFromClient, but the file system will use the provided folder separator.
-func NewGcsFSFromClientWithSeparator(ctx context.Context, client *storage.Client, folderSeparator string) (Fs, error) {
- c := stiface.AdaptClient(client)
-
- return &GcsFs{gcsfs.NewGcsFsWithSeparator(ctx, c, folderSeparator)}, nil
-}
-
-// Wraps gcs.GcsFs and convert some return types to afero interfaces.
-
-func (fs *GcsFs) Name() string {
- return fs.source.Name()
-}
-func (fs *GcsFs) Create(name string) (File, error) {
- return fs.source.Create(name)
-}
-func (fs *GcsFs) Mkdir(name string, perm os.FileMode) error {
- return fs.source.Mkdir(name, perm)
-}
-func (fs *GcsFs) MkdirAll(path string, perm os.FileMode) error {
- return fs.source.MkdirAll(path, perm)
-}
-func (fs *GcsFs) Open(name string) (File, error) {
- return fs.source.Open(name)
-}
-func (fs *GcsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
- return fs.source.OpenFile(name, flag, perm)
-}
-func (fs *GcsFs) Remove(name string) error {
- return fs.source.Remove(name)
-}
-func (fs *GcsFs) RemoveAll(path string) error {
- return fs.source.RemoveAll(path)
-}
-func (fs *GcsFs) Rename(oldname, newname string) error {
- return fs.source.Rename(oldname, newname)
-}
-func (fs *GcsFs) Stat(name string) (os.FileInfo, error) {
- return fs.source.Stat(name)
-}
-func (fs *GcsFs) Chmod(name string, mode os.FileMode) error {
- return fs.source.Chmod(name, mode)
-}
-func (fs *GcsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
- return fs.source.Chtimes(name, atime, mtime)
-}
-func (fs *GcsFs) Chown(name string, uid, gid int) error {
- return fs.source.Chown(name, uid, gid)
-}
(DIR) diff --git a/gcs_mocks.go b/gcs_mocks.go
@@ -1,270 +0,0 @@
-// Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
-//
-// A set of stiface-based mocks, replicating the GCS behavior, to make the tests not require any
-// internet connection or real buckets.
-// It is **not** a comprehensive set of mocks to test anything and everything GCS-related, rather
-// a very tailored one for the current implementation - thus the tests, written with the use of
-// these mocks are more of regression ones.
-// If any GCS behavior changes and breaks the implementation, then it should first be adjusted by
-// switching over to a real bucket - and then the mocks have to be adjusted to match the
-// implementation.
-
-package afero
-
-import (
- "context"
- "io"
- "os"
- "strings"
-
- "github.com/spf13/afero/gcsfs"
-
- "cloud.google.com/go/storage"
- "github.com/googleapis/google-cloud-go-testing/storage/stiface"
- "google.golang.org/api/iterator"
-)
-
-// sets filesystem separators to the one, expected (and hard-coded) in the tests
-func normSeparators(s string) string {
- return strings.Replace(s, "\\", "/", -1)
-}
-
-type clientMock struct {
- stiface.Client
- fs Fs
-}
-
-func newClientMock() *clientMock {
- return &clientMock{fs: NewMemMapFs()}
-}
-
-func (m *clientMock) Bucket(name string) stiface.BucketHandle {
- return &bucketMock{bucketName: name, fs: m.fs}
-}
-
-type bucketMock struct {
- stiface.BucketHandle
-
- bucketName string
-
- fs Fs
-}
-
-func (m *bucketMock) Attrs(context.Context) (*storage.BucketAttrs, error) {
- return &storage.BucketAttrs{}, nil
-}
-
-func (m *bucketMock) Object(name string) stiface.ObjectHandle {
- return &objectMock{name: name, fs: m.fs}
-}
-
-func (m *bucketMock) Objects(_ context.Context, q *storage.Query) (it stiface.ObjectIterator) {
- return &objectItMock{name: q.Prefix, fs: m.fs}
-}
-
-type objectMock struct {
- stiface.ObjectHandle
-
- name string
- fs Fs
-}
-
-func (o *objectMock) NewWriter(_ context.Context) stiface.Writer {
- return &writerMock{name: o.name, fs: o.fs}
-}
-
-func (o *objectMock) NewRangeReader(_ context.Context, offset, length int64) (stiface.Reader, error) {
- if o.name == "" {
- return nil, gcsfs.ErrEmptyObjectName
- }
-
- file, err := o.fs.Open(o.name)
- if err != nil {
- return nil, err
- }
-
- if offset > 0 {
- _, err = file.Seek(offset, io.SeekStart)
- if err != nil {
- return nil, err
- }
- }
-
- res := &readerMock{file: file}
- if length > -1 {
- res.buf = make([]byte, length)
- _, err = file.Read(res.buf)
- if err != nil {
- return nil, err
- }
- }
-
- return res, nil
-}
-
-func (o *objectMock) Delete(_ context.Context) error {
- if o.name == "" {
- return gcsfs.ErrEmptyObjectName
- }
- return o.fs.Remove(o.name)
-}
-
-func (o *objectMock) Attrs(_ context.Context) (*storage.ObjectAttrs, error) {
- if o.name == "" {
- return nil, gcsfs.ErrEmptyObjectName
- }
-
- info, err := o.fs.Stat(o.name)
- if err != nil {
- pathError, ok := err.(*os.PathError)
- if ok {
- if pathError.Err == os.ErrNotExist {
- return nil, storage.ErrObjectNotExist
- }
- }
-
- return nil, err
- }
-
- res := &storage.ObjectAttrs{Name: normSeparators(o.name), Size: info.Size(), Updated: info.ModTime()}
-
- if info.IsDir() {
- // we have to mock it here, because of FileInfo logic
- return nil, gcsfs.ErrObjectDoesNotExist
- }
-
- return res, nil
-}
-
-type writerMock struct {
- stiface.Writer
-
- name string
- fs Fs
-
- file File
-}
-
-func (w *writerMock) Write(p []byte) (n int, err error) {
- if w.name == "" {
- return 0, gcsfs.ErrEmptyObjectName
- }
-
- if w.file == nil {
- w.file, err = w.fs.Create(w.name)
- if err != nil {
- return 0, err
- }
- }
-
- return w.file.Write(p)
-}
-
-func (w *writerMock) Close() error {
- if w.name == "" {
- return gcsfs.ErrEmptyObjectName
- }
- if w.file == nil {
- var err error
- if strings.HasSuffix(w.name, "/") {
- err = w.fs.Mkdir(w.name, 0755)
- if err != nil {
- return err
- }
- } else {
- _, err = w.Write([]byte{})
- if err != nil {
- return err
- }
- }
- }
- if w.file != nil {
- return w.file.Close()
- }
- return nil
-}
-
-type readerMock struct {
- stiface.Reader
-
- file File
-
- buf []byte
-}
-
-func (r *readerMock) Remain() int64 {
- return 0
-}
-
-func (r *readerMock) Read(p []byte) (int, error) {
- if r.buf != nil {
- copy(p, r.buf)
- return len(r.buf), nil
- }
- return r.file.Read(p)
-}
-
-func (r *readerMock) Close() error {
- return r.file.Close()
-}
-
-type objectItMock struct {
- stiface.ObjectIterator
-
- name string
- fs Fs
-
- dir File
- infos []*storage.ObjectAttrs
-}
-
-func (it *objectItMock) Next() (*storage.ObjectAttrs, error) {
- var err error
- if it.dir == nil {
- it.dir, err = it.fs.Open(it.name)
- if err != nil {
- return nil, err
- }
-
- var isDir bool
- isDir, err = IsDir(it.fs, it.name)
- if err != nil {
- return nil, err
- }
-
- it.infos = []*storage.ObjectAttrs{}
-
- if !isDir {
- var info os.FileInfo
- info, err = it.dir.Stat()
- if err != nil {
- return nil, err
- }
- it.infos = append(it.infos, &storage.ObjectAttrs{Name: normSeparators(info.Name()), Size: info.Size(), Updated: info.ModTime()})
- } else {
- var fInfos []os.FileInfo
- fInfos, err = it.dir.Readdir(0)
- if err != nil {
- return nil, err
- }
- if it.name != "" {
- it.infos = append(it.infos, &storage.ObjectAttrs{
- Prefix: normSeparators(it.name) + "/",
- })
- }
-
- for _, info := range fInfos {
- it.infos = append(it.infos, &storage.ObjectAttrs{Name: normSeparators(info.Name()), Size: info.Size(), Updated: info.ModTime()})
- }
- }
- }
-
- if len(it.infos) == 0 {
- return nil, iterator.Done
- }
-
- res := it.infos[0]
- it.infos = it.infos[1:]
-
- return res, err
-}
(DIR) diff --git a/gcs_test.go b/gcs_test.go
@@ -1,807 +0,0 @@
-// Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
-//
-// Most of the tests are "derived" from the Afero's own tarfs implementation.
-// Write-oriented tests and/or checks have been added on top of that
-
-package afero
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "syscall"
- "testing"
-
- "golang.org/x/oauth2/google"
-
- "github.com/spf13/afero/gcsfs"
-
- "cloud.google.com/go/storage"
- "github.com/googleapis/google-cloud-go-testing/storage/stiface"
-)
-
-const (
- testBytes = 8
- dirSize = 42
-)
-
-var bucketName = "a-test-bucket"
-
-var files = []struct {
- name string
- exists bool
- isdir bool
- size int64
- content string
- offset int64
- contentAtOffset string
-}{
- {"sub", true, true, dirSize, "", 0, ""},
- {"sub/testDir2", true, true, dirSize, "", 0, ""},
- {"sub/testDir2/testFile", true, false, 8 * 1024, "c", 4 * 1024, "d"},
- {"testFile", true, false, 12 * 1024, "a", 7 * 1024, "b"},
- {"testDir1/testFile", true, false, 3 * 512, "b", 512, "c"},
-
- {"", false, true, dirSize, "", 0, ""}, // special case
-
- {"nonExisting", false, false, dirSize, "", 0, ""},
-}
-
-var dirs = []struct {
- name string
- children []string
-}{
- {"", []string{"sub", "testDir1", "testFile"}}, // in this case it will be prepended with bucket name
- {"sub", []string{"testDir2"}},
- {"sub/testDir2", []string{"testFile"}},
- {"testDir1", []string{"testFile"}},
-}
-
-var gcsAfs *Afero
-
-func TestMain(m *testing.M) {
- ctx := context.Background()
- var err error
-
- // in order to respect deferring
- var exitCode int
- defer os.Exit(exitCode)
-
- defer func() {
- err := recover()
- if err != nil {
- fmt.Print(err)
- exitCode = 2
- }
- }()
-
- // Check if any credentials are present. If not, a fake service account, taken from the link
- // would be used: https://github.com/google/oauth2l/blob/master/integration/fixtures/fake-service-account.json
- cred, err := google.FindDefaultCredentials(ctx)
- if err != nil && !strings.HasPrefix(err.Error(), "google: could not find default credentials") {
- panic(err)
- }
-
- if cred == nil {
- var fakeCredentialsAbsPath string
- fakeCredentialsAbsPath, err = filepath.Abs("gcs-fake-service-account.json")
- if err != nil {
- panic(err)
- }
-
- err = os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", fakeCredentialsAbsPath)
- if err != nil {
- panic(err)
- }
-
- // reset it after the run
- defer func() {
- err = os.Remove("GOOGLE_APPLICATION_CREDENTIALS")
- if err != nil {
- // it's worth printing it out explicitly, since it might have implications further down the road
- fmt.Print("failed to clear fake GOOGLE_APPLICATION_CREDENTIALS", err)
- }
- }()
- }
-
- var c *storage.Client
- c, err = storage.NewClient(ctx)
- if err != nil {
- panic(err)
- }
- client := stiface.AdaptClient(c)
-
- // This block is mocking the client for the sake of isolated testing
- mockClient := newClientMock()
- mockClient.Client = client
-
- gcsAfs = &Afero{Fs: &GcsFs{gcsfs.NewGcsFs(ctx, mockClient)}}
-
- // Uncomment to use the real, not mocked, client
- //gcsAfs = &Afero{Fs: &GcsFs{gcsfs.NewGcsFs(ctx, client)}}
-
- exitCode = m.Run()
-}
-
-func createFiles(t *testing.T) {
- t.Helper()
- var err error
-
- // the files have to be created first
- for _, f := range files {
- if !f.isdir && f.exists {
- name := filepath.Join(bucketName, f.name)
-
- var freshFile File
- freshFile, err = gcsAfs.Create(name)
- if err != nil {
- t.Fatalf("failed to create a file \"%s\": %s", f.name, err)
- }
-
- var written int
- var totalWritten int64
- for totalWritten < f.size {
- if totalWritten < f.offset {
- writeBuf := []byte(strings.Repeat(f.content, int(f.offset)))
- written, err = freshFile.WriteAt(writeBuf, totalWritten)
- } else {
- writeBuf := []byte(strings.Repeat(f.contentAtOffset, int(f.size-f.offset)))
- written, err = freshFile.WriteAt(writeBuf, totalWritten)
- }
- if err != nil {
- t.Fatalf("failed to write a file \"%s\": %s", f.name, err)
- }
-
- totalWritten += int64(written)
- }
-
- err = freshFile.Close()
- if err != nil {
- t.Fatalf("failed to close a file \"%s\": %s", f.name, err)
- }
- }
- }
-}
-
-func removeFiles(t *testing.T) {
- t.Helper()
- var err error
-
- // the files have to be created first
- for _, f := range files {
- if !f.isdir && f.exists {
- name := filepath.Join(bucketName, f.name)
-
- err = gcsAfs.Remove(name)
- if err != nil && err == syscall.ENOENT {
- t.Errorf("failed to remove file \"%s\": %s", f.name, err)
- }
- }
- }
-}
-
-func TestGcsFsOpen(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.Open(name)
- if (err == nil) != f.exists {
- t.Errorf("%v exists = %v, but got err = %v", name, f.exists, err)
- }
-
- if !f.exists {
- continue
- }
- if err != nil {
- t.Fatalf("%v: %v", name, err)
- }
-
- if file.Name() != filepath.FromSlash(nameBase) {
- t.Errorf("Name(), got %v, expected %v", file.Name(), filepath.FromSlash(nameBase))
- }
-
- s, err := file.Stat()
- if err != nil {
- t.Fatalf("stat %v: got error '%v'", file.Name(), err)
- }
-
- if isdir := s.IsDir(); isdir != f.isdir {
- t.Errorf("%v directory, got: %v, expected: %v", file.Name(), isdir, f.isdir)
- }
-
- if size := s.Size(); size != f.size {
- t.Errorf("%v size, got: %v, expected: %v", file.Name(), size, f.size)
- }
- }
- }
-}
-
-func TestGcsRead(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- if !f.exists {
- continue
- }
-
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatalf("opening %v: %v", name, err)
- }
-
- buf := make([]byte, 8)
- n, err := file.Read(buf)
- if err != nil {
- if f.isdir && (err != syscall.EISDIR) {
- t.Errorf("%v got error %v, expected EISDIR", name, err)
- } else if !f.isdir {
- t.Errorf("%v: %v", name, err)
- }
- } else if n != 8 {
- t.Errorf("%v: got %d read bytes, expected 8", name, n)
- } else if string(buf) != strings.Repeat(f.content, testBytes) {
- t.Errorf("%v: got <%s>, expected <%s>", f.name, f.content, string(buf))
- }
- }
- }
-}
-
-func TestGcsReadAt(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- if !f.exists {
- continue
- }
-
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatalf("opening %v: %v", name, err)
- }
-
- buf := make([]byte, testBytes)
- n, err := file.ReadAt(buf, f.offset-testBytes/2)
- if err != nil {
- if f.isdir && (err != syscall.EISDIR) {
- t.Errorf("%v got error %v, expected EISDIR", name, err)
- } else if !f.isdir {
- t.Errorf("%v: %v", name, err)
- }
- } else if n != 8 {
- t.Errorf("%v: got %d read bytes, expected 8", f.name, n)
- } else if string(buf) != strings.Repeat(f.content, testBytes/2)+strings.Repeat(f.contentAtOffset, testBytes/2) {
- t.Errorf("%v: got <%s>, expected <%s>", f.name, f.contentAtOffset, string(buf))
- }
- }
- }
-}
-
-func TestGcsSeek(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- if !f.exists {
- continue
- }
-
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatalf("opening %v: %v", name, err)
- }
-
- var tests = []struct {
- offIn int64
- whence int
- offOut int64
- }{
- {0, io.SeekStart, 0},
- {10, io.SeekStart, 10},
- {1, io.SeekCurrent, 11},
- {10, io.SeekCurrent, 21},
- {0, io.SeekEnd, f.size},
- {-1, io.SeekEnd, f.size - 1},
- }
-
- for _, s := range tests {
- n, err := file.Seek(s.offIn, s.whence)
- if err != nil {
- if f.isdir && err == syscall.EISDIR {
- continue
- }
-
- t.Errorf("%v: %v", name, err)
- }
-
- if n != s.offOut {
- t.Errorf("%v: (off: %v, whence: %v): got %v, expected %v", f.name, s.offIn, s.whence, n, s.offOut)
- }
- }
- }
-
- }
-}
-
-func TestGcsName(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- if !f.exists {
- continue
- }
-
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatalf("opening %v: %v", name, err)
- }
-
- n := file.Name()
- if n != filepath.FromSlash(nameBase) {
- t.Errorf("got: %v, expected: %v", n, filepath.FromSlash(nameBase))
- }
- }
-
- }
-}
-
-func TestGcsClose(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- if !f.exists {
- continue
- }
-
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatalf("opening %v: %v", name, err)
- }
-
- err = file.Close()
- if err != nil {
- t.Errorf("%v: %v", name, err)
- }
-
- err = file.Close()
- if err == nil {
- t.Errorf("%v: closing twice should return an error", name)
- }
-
- buf := make([]byte, 8)
- n, err := file.Read(buf)
- if n != 0 || err == nil {
- t.Errorf("%v: could read from a closed file", name)
- }
-
- n, err = file.ReadAt(buf, 256)
- if n != 0 || err == nil {
- t.Errorf("%v: could readAt from a closed file", name)
- }
-
- off, err := file.Seek(0, io.SeekStart)
- if off != 0 || err == nil {
- t.Errorf("%v: could seek from a closed file", name)
- }
- }
- }
-}
-
-func TestGcsOpenFile(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- file, err := gcsAfs.OpenFile(name, os.O_RDONLY, 0400)
- if !f.exists {
- if (f.name != "" && !errors.Is(err, syscall.ENOENT)) ||
- (f.name == "" && !errors.Is(err, gcsfs.ErrNoBucketInName)) {
- t.Errorf("%v: got %v, expected%v", name, err, syscall.ENOENT)
- }
-
- continue
- }
-
- if err != nil {
- t.Fatalf("%v: %v", name, err)
- }
-
- err = file.Close()
- if err != nil {
- t.Fatalf("failed to close a file \"%s\": %s", name, err)
- }
-
- file, err = gcsAfs.OpenFile(name, os.O_CREATE, 0600)
- if !errors.Is(err, syscall.EPERM) {
- t.Errorf("%v: open for write: got %v, expected %v", name, err, syscall.EPERM)
- }
- }
- }
-}
-
-func TestGcsFsStat(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, f := range files {
- nameBase := filepath.Join(bucketName, f.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
- if f.name == "" {
- names = []string{f.name}
- }
-
- for _, name := range names {
- fi, err := gcsAfs.Stat(name)
- if !f.exists {
- if (f.name != "" && !errors.Is(err, syscall.ENOENT)) ||
- (f.name == "" && !errors.Is(err, gcsfs.ErrNoBucketInName)) {
- t.Errorf("%v: got %v, expected%v", name, err, syscall.ENOENT)
- }
-
- continue
- }
-
- if err != nil {
- t.Fatalf("stat %v: got error '%v'", name, err)
- }
-
- if isdir := fi.IsDir(); isdir != f.isdir {
- t.Errorf("%v directory, got: %v, expected: %v", name, isdir, f.isdir)
- }
-
- if size := fi.Size(); size != f.size {
- t.Errorf("%v size, got: %v, expected: %v", name, size, f.size)
- }
- }
- }
-}
-
-func TestGcsReaddir(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, d := range dirs {
- nameBase := filepath.Join(bucketName, d.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
-
- for _, name := range names {
- dir, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatal(err)
- }
-
- fi, err := dir.Readdir(0)
- if err != nil {
- t.Fatal(err)
- }
- var fileNames []string
- for _, f := range fi {
- fileNames = append(fileNames, f.Name())
- }
-
- if !reflect.DeepEqual(fileNames, d.children) {
- t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children)
- }
-
- fi, err = dir.Readdir(1)
- if err != nil {
- t.Fatal(err)
- }
-
- fileNames = []string{}
- for _, f := range fi {
- fileNames = append(fileNames, f.Name())
- }
-
- if !reflect.DeepEqual(fileNames, d.children[0:1]) {
- t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children[0:1])
- }
- }
- }
-
- nameBase := filepath.Join(bucketName, "testFile")
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
-
- for _, name := range names {
- dir, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = dir.Readdir(-1)
- if err != syscall.ENOTDIR {
- t.Fatal("Expected error")
- }
- }
-}
-
-func TestGcsReaddirnames(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, d := range dirs {
- nameBase := filepath.Join(bucketName, d.name)
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
-
- for _, name := range names {
- dir, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatal(err)
- }
-
- fileNames, err := dir.Readdirnames(0)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(fileNames, d.children) {
- t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children)
- }
-
- fileNames, err = dir.Readdirnames(1)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(fileNames, d.children[0:1]) {
- t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children[0:1])
- }
- }
- }
-
- nameBase := filepath.Join(bucketName, "testFile")
-
- names := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
-
- for _, name := range names {
- dir, err := gcsAfs.Open(name)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = dir.Readdirnames(-1)
- if err != syscall.ENOTDIR {
- t.Fatal("Expected error")
- }
- }
-}
-
-func TestGcsGlob(t *testing.T) {
- createFiles(t)
- defer removeFiles(t)
-
- for _, s := range []struct {
- glob string
- entries []string
- }{
- {filepath.FromSlash("*"), []string{filepath.FromSlash("sub"), filepath.FromSlash("testDir1"), filepath.FromSlash("testFile")}},
- {filepath.FromSlash("sub/*"), []string{filepath.FromSlash("sub/testDir2")}},
- {filepath.FromSlash("sub/testDir2/*"), []string{filepath.FromSlash("sub/testDir2/testFile")}},
- {filepath.FromSlash("testDir1/*"), []string{filepath.FromSlash("testDir1/testFile")}},
- } {
- nameBase := filepath.Join(bucketName, s.glob)
-
- prefixedGlobs := []string{
- nameBase,
- string(os.PathSeparator) + nameBase,
- }
-
- prefixedEntries := [][]string{{}, {}}
- for _, entry := range s.entries {
- prefixedEntries[0] = append(prefixedEntries[0], filepath.Join(bucketName, entry))
- prefixedEntries[1] = append(prefixedEntries[1], string(os.PathSeparator)+filepath.Join(bucketName, entry))
- }
-
- for i, prefixedGlob := range prefixedGlobs {
- entries, err := Glob(gcsAfs.Fs, prefixedGlob)
- if err != nil {
- t.Error(err)
- }
- if reflect.DeepEqual(entries, prefixedEntries[i]) {
- t.Logf("glob: %s: glob ok", prefixedGlob)
- } else {
- t.Errorf("glob: %s: got %#v, expected %#v", prefixedGlob, entries, prefixedEntries)
- }
- }
- }
-}
-
-func TestGcsMkdir(t *testing.T) {
- t.Run("empty", func(t *testing.T) {
- emptyDirName := bucketName
-
- err := gcsAfs.Mkdir(emptyDirName, 0755)
- if err == nil {
- t.Fatal("did not fail upon creation of an empty folder")
- }
- })
- t.Run("success", func(t *testing.T) {
- dirName := filepath.Join(bucketName, "a-test-dir")
- var err error
-
- err = gcsAfs.Mkdir(dirName, 0755)
- if err != nil {
- t.Fatal("failed to create a folder with error", err)
- }
-
- info, err := gcsAfs.Stat(dirName)
- if err != nil {
- t.Fatal("failed to get info", err)
- }
- if !info.IsDir() {
- t.Fatalf("%s: not a dir", dirName)
- }
- if !info.Mode().IsDir() {
- t.Errorf("%s: mode is not directory", dirName)
- }
-
- if info.Mode() != os.ModeDir|0755 {
- t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", dirName, info.Mode())
- }
-
- err = gcsAfs.Remove(dirName)
- if err != nil {
- t.Fatalf("could not delete the folder %s after the test with error: %s", dirName, err)
- }
- })
-}
-
-func TestGcsMkdirAll(t *testing.T) {
- t.Run("empty", func(t *testing.T) {
- emptyDirName := bucketName
-
- err := gcsAfs.MkdirAll(emptyDirName, 0755)
- if err == nil {
- t.Fatal("did not fail upon creation of an empty folder")
- }
- })
- t.Run("success", func(t *testing.T) {
- dirName := filepath.Join(bucketName, "a/b/c")
-
- err := gcsAfs.MkdirAll(dirName, 0755)
- if err != nil {
- t.Fatal(err)
- }
-
- info, err := gcsAfs.Stat(filepath.Join(bucketName, "a"))
- if err != nil {
- t.Fatal(err)
- }
- if !info.Mode().IsDir() {
- t.Errorf("%s: mode is not directory", filepath.Join(bucketName, "a"))
- }
- if info.Mode() != os.ModeDir|0755 {
- t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", filepath.Join(bucketName, "a"), info.Mode())
- }
- info, err = gcsAfs.Stat(filepath.Join(bucketName, "a/b"))
- if err != nil {
- t.Fatal(err)
- }
- if !info.Mode().IsDir() {
- t.Errorf("%s: mode is not directory", filepath.Join(bucketName, "a/b"))
- }
- if info.Mode() != os.ModeDir|0755 {
- t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", filepath.Join(bucketName, "a/b"), info.Mode())
- }
- info, err = gcsAfs.Stat(dirName)
- if err != nil {
- t.Fatal(err)
- }
- if !info.Mode().IsDir() {
- t.Errorf("%s: mode is not directory", dirName)
- }
- if info.Mode() != os.ModeDir|0755 {
- t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", dirName, info.Mode())
- }
-
- err = gcsAfs.RemoveAll(filepath.Join(bucketName, "a"))
- if err != nil {
- t.Fatalf("failed to remove the folder %s with error: %s", filepath.Join(bucketName, "a"), err)
- }
- })
-}
(DIR) diff --git a/gcsfs/file.go b/gcsfs/file.go
@@ -44,7 +44,7 @@ type GcsFile struct {
func NewGcsFile(
ctx context.Context,
- fs *GcsFs,
+ fs *Fs,
obj stiface.ObjectHandle,
openFlags int,
// Unused: there is no use to the file mode in GCloud just yet - but we keep it here, just in case we need it
(DIR) diff --git a/gcsfs/file_info.go b/gcsfs/file_info.go
@@ -37,7 +37,7 @@ type FileInfo struct {
fileMode os.FileMode
}
-func newFileInfo(name string, fs *GcsFs, fileMode os.FileMode) (*FileInfo, error) {
+func newFileInfo(name string, fs *Fs, fileMode os.FileMode) (*FileInfo, error) {
res := &FileInfo{
name: name,
size: folderSize,
(DIR) diff --git a/gcsfs/file_resource.go b/gcsfs/file_resource.go
@@ -40,7 +40,7 @@ const (
type gcsFileResource struct {
ctx context.Context
- fs *GcsFs
+ fs *Fs
obj stiface.ObjectHandle
name string
(DIR) diff --git a/gcsfs/fs.go b/gcsfs/fs.go
@@ -33,8 +33,8 @@ const (
gsPrefix = "gs://"
)
-// GcsFs is a Fs implementation that uses functions provided by google cloud storage
-type GcsFs struct {
+// Fs is a Fs implementation that uses functions provided by google cloud storage
+type Fs struct {
ctx context.Context
client stiface.Client
separator string
@@ -45,12 +45,12 @@ type GcsFs struct {
autoRemoveEmptyFolders bool //trigger for creating "virtual folders" (not required by GCSs)
}
-func NewGcsFs(ctx context.Context, client stiface.Client) *GcsFs {
+func NewGcsFs(ctx context.Context, client stiface.Client) *Fs {
return NewGcsFsWithSeparator(ctx, client, "/")
}
-func NewGcsFsWithSeparator(ctx context.Context, client stiface.Client, folderSep string) *GcsFs {
- return &GcsFs{
+func NewGcsFsWithSeparator(ctx context.Context, client stiface.Client, folderSep string) *Fs {
+ return &Fs{
ctx: ctx,
client: client,
separator: folderSep,
@@ -61,17 +61,17 @@ func NewGcsFsWithSeparator(ctx context.Context, client stiface.Client, folderSep
}
// normSeparators will normalize all "\\" and "/" to the provided separator
-func (fs *GcsFs) normSeparators(s string) string {
+func (fs *Fs) normSeparators(s string) string {
return strings.Replace(strings.Replace(s, "\\", fs.separator, -1), "/", fs.separator, -1)
}
-func (fs *GcsFs) ensureTrailingSeparator(s string) string {
+func (fs *Fs) ensureTrailingSeparator(s string) string {
if len(s) > 0 && !strings.HasSuffix(s, fs.separator) {
return s + fs.separator
}
return s
}
-func (fs *GcsFs) ensureNoLeadingSeparator(s string) string {
+func (fs *Fs) ensureNoLeadingSeparator(s string) string {
if len(s) > 0 && strings.HasPrefix(s, fs.separator) {
s = s[len(fs.separator):]
}
@@ -94,13 +94,13 @@ func validateName(s string) error {
}
// Splits provided name into bucket name and path
-func (fs *GcsFs) splitName(name string) (bucketName string, path string) {
+func (fs *Fs) splitName(name string) (bucketName string, path string) {
splitName := strings.Split(name, fs.separator)
return splitName[0], strings.Join(splitName[1:], fs.separator)
}
-func (fs *GcsFs) getBucket(name string) (stiface.BucketHandle, error) {
+func (fs *Fs) getBucket(name string) (stiface.BucketHandle, error) {
bucket := fs.buckets[name]
if bucket == nil {
bucket = fs.client.Bucket(name)
@@ -112,7 +112,7 @@ func (fs *GcsFs) getBucket(name string) (stiface.BucketHandle, error) {
return bucket, nil
}
-func (fs *GcsFs) getObj(name string) (stiface.ObjectHandle, error) {
+func (fs *Fs) getObj(name string) (stiface.ObjectHandle, error) {
bucketName, path := fs.splitName(name)
bucket, err := fs.getBucket(bucketName)
@@ -123,9 +123,9 @@ func (fs *GcsFs) getObj(name string) (stiface.ObjectHandle, error) {
return bucket.Object(path), nil
}
-func (fs *GcsFs) Name() string { return "GcsFs" }
+func (fs *Fs) Name() string { return "GcsFs" }
-func (fs *GcsFs) Create(name string) (*GcsFile, error) {
+func (fs *Fs) Create(name string) (*GcsFile, error) {
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err := validateName(name); err != nil {
return nil, err
@@ -156,7 +156,7 @@ func (fs *GcsFs) Create(name string) (*GcsFile, error) {
return file, nil
}
-func (fs *GcsFs) Mkdir(name string, _ os.FileMode) error {
+func (fs *Fs) Mkdir(name string, _ os.FileMode) error {
name = fs.ensureNoLeadingSeparator(fs.ensureTrailingSeparator(fs.normSeparators(ensureNoPrefix(name))))
if err := validateName(name); err != nil {
return err
@@ -179,7 +179,7 @@ func (fs *GcsFs) Mkdir(name string, _ os.FileMode) error {
return w.Close()
}
-func (fs *GcsFs) MkdirAll(path string, perm os.FileMode) error {
+func (fs *Fs) MkdirAll(path string, perm os.FileMode) error {
path = fs.ensureNoLeadingSeparator(fs.ensureTrailingSeparator(fs.normSeparators(ensureNoPrefix(path))))
if err := validateName(path); err != nil {
return err
@@ -216,11 +216,11 @@ func (fs *GcsFs) MkdirAll(path string, perm os.FileMode) error {
return nil
}
-func (fs *GcsFs) Open(name string) (*GcsFile, error) {
+func (fs *Fs) Open(name string) (*GcsFile, error) {
return fs.OpenFile(name, os.O_RDONLY, 0)
}
-func (fs *GcsFs) OpenFile(name string, flag int, fileMode os.FileMode) (*GcsFile, error) {
+func (fs *Fs) OpenFile(name string, flag int, fileMode os.FileMode) (*GcsFile, error) {
var file *GcsFile
var err error
@@ -277,7 +277,7 @@ func (fs *GcsFs) OpenFile(name string, flag int, fileMode os.FileMode) (*GcsFile
return file, nil
}
-func (fs *GcsFs) Remove(name string) error {
+func (fs *Fs) Remove(name string) error {
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err := validateName(name); err != nil {
return err
@@ -318,7 +318,7 @@ func (fs *GcsFs) Remove(name string) error {
return obj.Delete(fs.ctx)
}
-func (fs *GcsFs) RemoveAll(path string) error {
+func (fs *Fs) RemoveAll(path string) error {
path = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(path)))
if err := validateName(path); err != nil {
return err
@@ -351,7 +351,7 @@ func (fs *GcsFs) RemoveAll(path string) error {
return fs.Remove(path)
}
-func (fs *GcsFs) Rename(oldName, newName string) error {
+func (fs *Fs) Rename(oldName, newName string) error {
oldName = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(oldName)))
if err := validateName(oldName); err != nil {
return err
@@ -378,7 +378,7 @@ func (fs *GcsFs) Rename(oldName, newName string) error {
return src.Delete(fs.ctx)
}
-func (fs *GcsFs) Stat(name string) (os.FileInfo, error) {
+func (fs *Fs) Stat(name string) (os.FileInfo, error) {
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err := validateName(name); err != nil {
return nil, err
@@ -387,14 +387,14 @@ func (fs *GcsFs) Stat(name string) (os.FileInfo, error) {
return newFileInfo(name, fs, defaultFileMode)
}
-func (fs *GcsFs) Chmod(_ string, _ os.FileMode) error {
+func (fs *Fs) Chmod(_ string, _ os.FileMode) error {
return errors.New("method Chmod is not implemented in GCS")
}
-func (fs *GcsFs) Chtimes(_ string, _, _ time.Time) error {
+func (fs *Fs) Chtimes(_ string, _, _ time.Time) error {
return errors.New("method Chtimes is not implemented. Create, Delete, Updated times are read only fields in GCS and set implicitly")
}
-func (fs *GcsFs) Chown(_ string, _, _ int) error {
+func (fs *Fs) Chown(_ string, _, _ int) error {
return errors.New("method Chown is not implemented for GCS")
}
(DIR) diff --git a/gcs-fake-service-account.json b/gcsfs/gcs-fake-service-account.json
(DIR) diff --git a/gcsfs/gcs.go b/gcsfs/gcs.go
@@ -0,0 +1,114 @@
+// Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
+//
+// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
+// licensed under Apache License 2.0.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcsfs
+
+import (
+ "context"
+ "os"
+ "time"
+
+ "cloud.google.com/go/storage"
+ "github.com/googleapis/google-cloud-go-testing/storage/stiface"
+ "github.com/spf13/afero"
+
+ "google.golang.org/api/option"
+)
+
+type GcsFs struct {
+ source *Fs
+}
+
+// NewGcsFS creates a GCS file system, automatically instantiating and decorating the storage client.
+// You can provide additional options to be passed to the client creation, as per
+// cloud.google.com/go/storage documentation
+func NewGcsFS(ctx context.Context, opts ...option.ClientOption) (afero.Fs, error) {
+ if json := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON"); json != "" {
+ opts = append(opts, option.WithCredentialsJSON([]byte(json)))
+ }
+ client, err := storage.NewClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewGcsFSFromClient(ctx, client)
+}
+
+// NewGcsFSWithSeparator is the same as NewGcsFS, but the files system will use the provided folder separator.
+func NewGcsFSWithSeparator(ctx context.Context, folderSeparator string, opts ...option.ClientOption) (afero.Fs, error) {
+ client, err := storage.NewClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewGcsFSFromClientWithSeparator(ctx, client, folderSeparator)
+}
+
+// NewGcsFSFromClient creates a GCS file system from a given storage client
+func NewGcsFSFromClient(ctx context.Context, client *storage.Client) (afero.Fs, error) {
+ c := stiface.AdaptClient(client)
+
+ return &GcsFs{NewGcsFs(ctx, c)}, nil
+}
+
+// NewGcsFSFromClientWithSeparator is the same as NewGcsFSFromClient, but the file system will use the provided folder separator.
+func NewGcsFSFromClientWithSeparator(ctx context.Context, client *storage.Client, folderSeparator string) (afero.Fs, error) {
+ c := stiface.AdaptClient(client)
+
+ return &GcsFs{NewGcsFsWithSeparator(ctx, c, folderSeparator)}, nil
+}
+
+// Wraps gcs.GcsFs and convert some return types to afero interfaces.
+
+func (fs *GcsFs) Name() string {
+ return fs.source.Name()
+}
+func (fs *GcsFs) Create(name string) (afero.File, error) {
+ return fs.source.Create(name)
+}
+func (fs *GcsFs) Mkdir(name string, perm os.FileMode) error {
+ return fs.source.Mkdir(name, perm)
+}
+func (fs *GcsFs) MkdirAll(path string, perm os.FileMode) error {
+ return fs.source.MkdirAll(path, perm)
+}
+func (fs *GcsFs) Open(name string) (afero.File, error) {
+ return fs.source.Open(name)
+}
+func (fs *GcsFs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
+ return fs.source.OpenFile(name, flag, perm)
+}
+func (fs *GcsFs) Remove(name string) error {
+ return fs.source.Remove(name)
+}
+func (fs *GcsFs) RemoveAll(path string) error {
+ return fs.source.RemoveAll(path)
+}
+func (fs *GcsFs) Rename(oldname, newname string) error {
+ return fs.source.Rename(oldname, newname)
+}
+func (fs *GcsFs) Stat(name string) (os.FileInfo, error) {
+ return fs.source.Stat(name)
+}
+func (fs *GcsFs) Chmod(name string, mode os.FileMode) error {
+ return fs.source.Chmod(name, mode)
+}
+func (fs *GcsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return fs.source.Chtimes(name, atime, mtime)
+}
+func (fs *GcsFs) Chown(name string, uid, gid int) error {
+ return fs.source.Chown(name, uid, gid)
+}
(DIR) diff --git a/gcsfs/gcs_mocks.go b/gcsfs/gcs_mocks.go
@@ -0,0 +1,269 @@
+// Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
+//
+// A set of stiface-based mocks, replicating the GCS behavior, to make the tests not require any
+// internet connection or real buckets.
+// It is **not** a comprehensive set of mocks to test anything and everything GCS-related, rather
+// a very tailored one for the current implementation - thus the tests, written with the use of
+// these mocks are more of regression ones.
+// If any GCS behavior changes and breaks the implementation, then it should first be adjusted by
+// switching over to a real bucket - and then the mocks have to be adjusted to match the
+// implementation.
+
+package gcsfs
+
+import (
+ "context"
+ "io"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/storage"
+ "github.com/googleapis/google-cloud-go-testing/storage/stiface"
+ "github.com/spf13/afero"
+ "google.golang.org/api/iterator"
+)
+
+// sets filesystem separators to the one, expected (and hard-coded) in the tests
+func normSeparators(s string) string {
+ return strings.Replace(s, "\\", "/", -1)
+}
+
+type clientMock struct {
+ stiface.Client
+ fs afero.Fs
+}
+
+func newClientMock() *clientMock {
+ return &clientMock{fs: afero.NewMemMapFs()}
+}
+
+func (m *clientMock) Bucket(name string) stiface.BucketHandle {
+ return &bucketMock{bucketName: name, fs: m.fs}
+}
+
+type bucketMock struct {
+ stiface.BucketHandle
+
+ bucketName string
+
+ fs afero.Fs
+}
+
+func (m *bucketMock) Attrs(context.Context) (*storage.BucketAttrs, error) {
+ return &storage.BucketAttrs{}, nil
+}
+
+func (m *bucketMock) Object(name string) stiface.ObjectHandle {
+ return &objectMock{name: name, fs: m.fs}
+}
+
+func (m *bucketMock) Objects(_ context.Context, q *storage.Query) (it stiface.ObjectIterator) {
+ return &objectItMock{name: q.Prefix, fs: m.fs}
+}
+
+type objectMock struct {
+ stiface.ObjectHandle
+
+ name string
+ fs afero.Fs
+}
+
+func (o *objectMock) NewWriter(_ context.Context) stiface.Writer {
+ return &writerMock{name: o.name, fs: o.fs}
+}
+
+func (o *objectMock) NewRangeReader(_ context.Context, offset, length int64) (stiface.Reader, error) {
+ if o.name == "" {
+ return nil, ErrEmptyObjectName
+ }
+
+ file, err := o.fs.Open(o.name)
+ if err != nil {
+ return nil, err
+ }
+
+ if offset > 0 {
+ _, err = file.Seek(offset, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ res := &readerMock{file: file}
+ if length > -1 {
+ res.buf = make([]byte, length)
+ _, err = file.Read(res.buf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return res, nil
+}
+
+func (o *objectMock) Delete(_ context.Context) error {
+ if o.name == "" {
+ return ErrEmptyObjectName
+ }
+ return o.fs.Remove(o.name)
+}
+
+func (o *objectMock) Attrs(_ context.Context) (*storage.ObjectAttrs, error) {
+ if o.name == "" {
+ return nil, ErrEmptyObjectName
+ }
+
+ info, err := o.fs.Stat(o.name)
+ if err != nil {
+ pathError, ok := err.(*os.PathError)
+ if ok {
+ if pathError.Err == os.ErrNotExist {
+ return nil, storage.ErrObjectNotExist
+ }
+ }
+
+ return nil, err
+ }
+
+ res := &storage.ObjectAttrs{Name: normSeparators(o.name), Size: info.Size(), Updated: info.ModTime()}
+
+ if info.IsDir() {
+ // we have to mock it here, because of FileInfo logic
+ return nil, ErrObjectDoesNotExist
+ }
+
+ return res, nil
+}
+
+type writerMock struct {
+ stiface.Writer
+
+ name string
+ fs afero.Fs
+
+ file afero.File
+}
+
+func (w *writerMock) Write(p []byte) (n int, err error) {
+ if w.name == "" {
+ return 0, ErrEmptyObjectName
+ }
+
+ if w.file == nil {
+ w.file, err = w.fs.Create(w.name)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ return w.file.Write(p)
+}
+
+func (w *writerMock) Close() error {
+ if w.name == "" {
+ return ErrEmptyObjectName
+ }
+ if w.file == nil {
+ var err error
+ if strings.HasSuffix(w.name, "/") {
+ err = w.fs.Mkdir(w.name, 0755)
+ if err != nil {
+ return err
+ }
+ } else {
+ _, err = w.Write([]byte{})
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if w.file != nil {
+ return w.file.Close()
+ }
+ return nil
+}
+
+type readerMock struct {
+ stiface.Reader
+
+ file afero.File
+
+ buf []byte
+}
+
+func (r *readerMock) Remain() int64 {
+ return 0
+}
+
+func (r *readerMock) Read(p []byte) (int, error) {
+ if r.buf != nil {
+ copy(p, r.buf)
+ return len(r.buf), nil
+ }
+ return r.file.Read(p)
+}
+
+func (r *readerMock) Close() error {
+ return r.file.Close()
+}
+
+type objectItMock struct {
+ stiface.ObjectIterator
+
+ name string
+ fs afero.Fs
+
+ dir afero.File
+ infos []*storage.ObjectAttrs
+}
+
+func (it *objectItMock) Next() (*storage.ObjectAttrs, error) {
+ var err error
+ if it.dir == nil {
+ it.dir, err = it.fs.Open(it.name)
+ if err != nil {
+ return nil, err
+ }
+
+ var isDir bool
+ isDir, err = afero.IsDir(it.fs, it.name)
+ if err != nil {
+ return nil, err
+ }
+
+ it.infos = []*storage.ObjectAttrs{}
+
+ if !isDir {
+ var info os.FileInfo
+ info, err = it.dir.Stat()
+ if err != nil {
+ return nil, err
+ }
+ it.infos = append(it.infos, &storage.ObjectAttrs{Name: normSeparators(info.Name()), Size: info.Size(), Updated: info.ModTime()})
+ } else {
+ var fInfos []os.FileInfo
+ fInfos, err = it.dir.Readdir(0)
+ if err != nil {
+ return nil, err
+ }
+ if it.name != "" {
+ it.infos = append(it.infos, &storage.ObjectAttrs{
+ Prefix: normSeparators(it.name) + "/",
+ })
+ }
+
+ for _, info := range fInfos {
+ it.infos = append(it.infos, &storage.ObjectAttrs{Name: normSeparators(info.Name()), Size: info.Size(), Updated: info.ModTime()})
+ }
+ }
+ }
+
+ if len(it.infos) == 0 {
+ return nil, iterator.Done
+ }
+
+ res := it.infos[0]
+ it.infos = it.infos[1:]
+
+ return res, err
+}
(DIR) diff --git a/gcsfs/gcs_test.go b/gcsfs/gcs_test.go
@@ -0,0 +1,806 @@
+// Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
+//
+// Most of the tests are "derived" from the Afero's own tarfs implementation.
+// Write-oriented tests and/or checks have been added on top of that
+
+package gcsfs
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "syscall"
+ "testing"
+
+ "golang.org/x/oauth2/google"
+
+ "cloud.google.com/go/storage"
+ "github.com/googleapis/google-cloud-go-testing/storage/stiface"
+ "github.com/spf13/afero"
+)
+
+const (
+ testBytes = 8
+ dirSize = 42
+)
+
+var bucketName = "a-test-bucket"
+
+var files = []struct {
+ name string
+ exists bool
+ isdir bool
+ size int64
+ content string
+ offset int64
+ contentAtOffset string
+}{
+ {"sub", true, true, dirSize, "", 0, ""},
+ {"sub/testDir2", true, true, dirSize, "", 0, ""},
+ {"sub/testDir2/testFile", true, false, 8 * 1024, "c", 4 * 1024, "d"},
+ {"testFile", true, false, 12 * 1024, "a", 7 * 1024, "b"},
+ {"testDir1/testFile", true, false, 3 * 512, "b", 512, "c"},
+
+ {"", false, true, dirSize, "", 0, ""}, // special case
+
+ {"nonExisting", false, false, dirSize, "", 0, ""},
+}
+
+var dirs = []struct {
+ name string
+ children []string
+}{
+ {"", []string{"sub", "testDir1", "testFile"}}, // in this case it will be prepended with bucket name
+ {"sub", []string{"testDir2"}},
+ {"sub/testDir2", []string{"testFile"}},
+ {"testDir1", []string{"testFile"}},
+}
+
+var gcsAfs *afero.Afero
+
+func TestMain(m *testing.M) {
+ ctx := context.Background()
+ var err error
+
+ // in order to respect deferring
+ var exitCode int
+ defer os.Exit(exitCode)
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Print(err)
+ exitCode = 2
+ }
+ }()
+
+ // Check if any credentials are present. If not, a fake service account, taken from the link
+ // would be used: https://github.com/google/oauth2l/blob/master/integration/fixtures/fake-service-account.json
+ cred, err := google.FindDefaultCredentials(ctx)
+ if err != nil && !strings.HasPrefix(err.Error(), "google: could not find default credentials") {
+ panic(err)
+ }
+
+ if cred == nil {
+ var fakeCredentialsAbsPath string
+ fakeCredentialsAbsPath, err = filepath.Abs("gcs-fake-service-account.json")
+ if err != nil {
+ panic(err)
+ }
+
+ err = os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", fakeCredentialsAbsPath)
+ if err != nil {
+ panic(err)
+ }
+
+ // reset it after the run
+ defer func() {
+ err = os.Remove("GOOGLE_APPLICATION_CREDENTIALS")
+ if err != nil {
+ // it's worth printing it out explicitly, since it might have implications further down the road
+ fmt.Print("failed to clear fake GOOGLE_APPLICATION_CREDENTIALS", err)
+ }
+ }()
+ }
+
+ var c *storage.Client
+ c, err = storage.NewClient(ctx)
+ if err != nil {
+ panic(err)
+ }
+ client := stiface.AdaptClient(c)
+
+ // This block is mocking the client for the sake of isolated testing
+ mockClient := newClientMock()
+ mockClient.Client = client
+
+ gcsAfs = &afero.Afero{Fs: &GcsFs{NewGcsFs(ctx, mockClient)}}
+
+ // Uncomment to use the real, not mocked, client
+ //gcsAfs = &Afero{Fs: &GcsFs{gcsfs.NewGcsFs(ctx, client)}}
+
+ exitCode = m.Run()
+}
+
+func createFiles(t *testing.T) {
+ t.Helper()
+ var err error
+
+ // the files have to be created first
+ for _, f := range files {
+ if !f.isdir && f.exists {
+ name := filepath.Join(bucketName, f.name)
+
+ var freshFile afero.File
+ freshFile, err = gcsAfs.Create(name)
+ if err != nil {
+ t.Fatalf("failed to create a file \"%s\": %s", f.name, err)
+ }
+
+ var written int
+ var totalWritten int64
+ for totalWritten < f.size {
+ if totalWritten < f.offset {
+ writeBuf := []byte(strings.Repeat(f.content, int(f.offset)))
+ written, err = freshFile.WriteAt(writeBuf, totalWritten)
+ } else {
+ writeBuf := []byte(strings.Repeat(f.contentAtOffset, int(f.size-f.offset)))
+ written, err = freshFile.WriteAt(writeBuf, totalWritten)
+ }
+ if err != nil {
+ t.Fatalf("failed to write a file \"%s\": %s", f.name, err)
+ }
+
+ totalWritten += int64(written)
+ }
+
+ err = freshFile.Close()
+ if err != nil {
+ t.Fatalf("failed to close a file \"%s\": %s", f.name, err)
+ }
+ }
+ }
+}
+
+func removeFiles(t *testing.T) {
+ t.Helper()
+ var err error
+
+ // the files have to be created first
+ for _, f := range files {
+ if !f.isdir && f.exists {
+ name := filepath.Join(bucketName, f.name)
+
+ err = gcsAfs.Remove(name)
+ if err != nil && err == syscall.ENOENT {
+ t.Errorf("failed to remove file \"%s\": %s", f.name, err)
+ }
+ }
+ }
+}
+
+func TestGcsFsOpen(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.Open(name)
+ if (err == nil) != f.exists {
+ t.Errorf("%v exists = %v, but got err = %v", name, f.exists, err)
+ }
+
+ if !f.exists {
+ continue
+ }
+ if err != nil {
+ t.Fatalf("%v: %v", name, err)
+ }
+
+ if file.Name() != filepath.FromSlash(nameBase) {
+ t.Errorf("Name(), got %v, expected %v", file.Name(), filepath.FromSlash(nameBase))
+ }
+
+ s, err := file.Stat()
+ if err != nil {
+ t.Fatalf("stat %v: got error '%v'", file.Name(), err)
+ }
+
+ if isdir := s.IsDir(); isdir != f.isdir {
+ t.Errorf("%v directory, got: %v, expected: %v", file.Name(), isdir, f.isdir)
+ }
+
+ if size := s.Size(); size != f.size {
+ t.Errorf("%v size, got: %v, expected: %v", file.Name(), size, f.size)
+ }
+ }
+ }
+}
+
+func TestGcsRead(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ if !f.exists {
+ continue
+ }
+
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatalf("opening %v: %v", name, err)
+ }
+
+ buf := make([]byte, 8)
+ n, err := file.Read(buf)
+ if err != nil {
+ if f.isdir && (err != syscall.EISDIR) {
+ t.Errorf("%v got error %v, expected EISDIR", name, err)
+ } else if !f.isdir {
+ t.Errorf("%v: %v", name, err)
+ }
+ } else if n != 8 {
+ t.Errorf("%v: got %d read bytes, expected 8", name, n)
+ } else if string(buf) != strings.Repeat(f.content, testBytes) {
+ t.Errorf("%v: got <%s>, expected <%s>", f.name, f.content, string(buf))
+ }
+ }
+ }
+}
+
+func TestGcsReadAt(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ if !f.exists {
+ continue
+ }
+
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatalf("opening %v: %v", name, err)
+ }
+
+ buf := make([]byte, testBytes)
+ n, err := file.ReadAt(buf, f.offset-testBytes/2)
+ if err != nil {
+ if f.isdir && (err != syscall.EISDIR) {
+ t.Errorf("%v got error %v, expected EISDIR", name, err)
+ } else if !f.isdir {
+ t.Errorf("%v: %v", name, err)
+ }
+ } else if n != 8 {
+ t.Errorf("%v: got %d read bytes, expected 8", f.name, n)
+ } else if string(buf) != strings.Repeat(f.content, testBytes/2)+strings.Repeat(f.contentAtOffset, testBytes/2) {
+ t.Errorf("%v: got <%s>, expected <%s>", f.name, f.contentAtOffset, string(buf))
+ }
+ }
+ }
+}
+
+func TestGcsSeek(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ if !f.exists {
+ continue
+ }
+
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatalf("opening %v: %v", name, err)
+ }
+
+ var tests = []struct {
+ offIn int64
+ whence int
+ offOut int64
+ }{
+ {0, io.SeekStart, 0},
+ {10, io.SeekStart, 10},
+ {1, io.SeekCurrent, 11},
+ {10, io.SeekCurrent, 21},
+ {0, io.SeekEnd, f.size},
+ {-1, io.SeekEnd, f.size - 1},
+ }
+
+ for _, s := range tests {
+ n, err := file.Seek(s.offIn, s.whence)
+ if err != nil {
+ if f.isdir && err == syscall.EISDIR {
+ continue
+ }
+
+ t.Errorf("%v: %v", name, err)
+ }
+
+ if n != s.offOut {
+ t.Errorf("%v: (off: %v, whence: %v): got %v, expected %v", f.name, s.offIn, s.whence, n, s.offOut)
+ }
+ }
+ }
+
+ }
+}
+
+func TestGcsName(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ if !f.exists {
+ continue
+ }
+
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatalf("opening %v: %v", name, err)
+ }
+
+ n := file.Name()
+ if n != filepath.FromSlash(nameBase) {
+ t.Errorf("got: %v, expected: %v", n, filepath.FromSlash(nameBase))
+ }
+ }
+
+ }
+}
+
+func TestGcsClose(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ if !f.exists {
+ continue
+ }
+
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatalf("opening %v: %v", name, err)
+ }
+
+ err = file.Close()
+ if err != nil {
+ t.Errorf("%v: %v", name, err)
+ }
+
+ err = file.Close()
+ if err == nil {
+ t.Errorf("%v: closing twice should return an error", name)
+ }
+
+ buf := make([]byte, 8)
+ n, err := file.Read(buf)
+ if n != 0 || err == nil {
+ t.Errorf("%v: could read from a closed file", name)
+ }
+
+ n, err = file.ReadAt(buf, 256)
+ if n != 0 || err == nil {
+ t.Errorf("%v: could readAt from a closed file", name)
+ }
+
+ off, err := file.Seek(0, io.SeekStart)
+ if off != 0 || err == nil {
+ t.Errorf("%v: could seek from a closed file", name)
+ }
+ }
+ }
+}
+
+func TestGcsOpenFile(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ file, err := gcsAfs.OpenFile(name, os.O_RDONLY, 0400)
+ if !f.exists {
+ if (f.name != "" && !errors.Is(err, syscall.ENOENT)) ||
+ (f.name == "" && !errors.Is(err, ErrNoBucketInName)) {
+ t.Errorf("%v: got %v, expected%v", name, err, syscall.ENOENT)
+ }
+
+ continue
+ }
+
+ if err != nil {
+ t.Fatalf("%v: %v", name, err)
+ }
+
+ err = file.Close()
+ if err != nil {
+ t.Fatalf("failed to close a file \"%s\": %s", name, err)
+ }
+
+ file, err = gcsAfs.OpenFile(name, os.O_CREATE, 0600)
+ if !errors.Is(err, syscall.EPERM) {
+ t.Errorf("%v: open for write: got %v, expected %v", name, err, syscall.EPERM)
+ }
+ }
+ }
+}
+
+func TestGcsFsStat(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, f := range files {
+ nameBase := filepath.Join(bucketName, f.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+ if f.name == "" {
+ names = []string{f.name}
+ }
+
+ for _, name := range names {
+ fi, err := gcsAfs.Stat(name)
+ if !f.exists {
+ if (f.name != "" && !errors.Is(err, syscall.ENOENT)) ||
+ (f.name == "" && !errors.Is(err, ErrNoBucketInName)) {
+ t.Errorf("%v: got %v, expected%v", name, err, syscall.ENOENT)
+ }
+
+ continue
+ }
+
+ if err != nil {
+ t.Fatalf("stat %v: got error '%v'", name, err)
+ }
+
+ if isdir := fi.IsDir(); isdir != f.isdir {
+ t.Errorf("%v directory, got: %v, expected: %v", name, isdir, f.isdir)
+ }
+
+ if size := fi.Size(); size != f.size {
+ t.Errorf("%v size, got: %v, expected: %v", name, size, f.size)
+ }
+ }
+ }
+}
+
+func TestGcsReaddir(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, d := range dirs {
+ nameBase := filepath.Join(bucketName, d.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+
+ for _, name := range names {
+ dir, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fi, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var fileNames []string
+ for _, f := range fi {
+ fileNames = append(fileNames, f.Name())
+ }
+
+ if !reflect.DeepEqual(fileNames, d.children) {
+ t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children)
+ }
+
+ fi, err = dir.Readdir(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fileNames = []string{}
+ for _, f := range fi {
+ fileNames = append(fileNames, f.Name())
+ }
+
+ if !reflect.DeepEqual(fileNames, d.children[0:1]) {
+ t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children[0:1])
+ }
+ }
+ }
+
+ nameBase := filepath.Join(bucketName, "testFile")
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+
+ for _, name := range names {
+ dir, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = dir.Readdir(-1)
+ if err != syscall.ENOTDIR {
+ t.Fatal("Expected error")
+ }
+ }
+}
+
+func TestGcsReaddirnames(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, d := range dirs {
+ nameBase := filepath.Join(bucketName, d.name)
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+
+ for _, name := range names {
+ dir, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fileNames, err := dir.Readdirnames(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(fileNames, d.children) {
+ t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children)
+ }
+
+ fileNames, err = dir.Readdirnames(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(fileNames, d.children[0:1]) {
+ t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children[0:1])
+ }
+ }
+ }
+
+ nameBase := filepath.Join(bucketName, "testFile")
+
+ names := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+
+ for _, name := range names {
+ dir, err := gcsAfs.Open(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = dir.Readdirnames(-1)
+ if err != syscall.ENOTDIR {
+ t.Fatal("Expected error")
+ }
+ }
+}
+
+func TestGcsGlob(t *testing.T) {
+ createFiles(t)
+ defer removeFiles(t)
+
+ for _, s := range []struct {
+ glob string
+ entries []string
+ }{
+ {filepath.FromSlash("*"), []string{filepath.FromSlash("sub"), filepath.FromSlash("testDir1"), filepath.FromSlash("testFile")}},
+ {filepath.FromSlash("sub/*"), []string{filepath.FromSlash("sub/testDir2")}},
+ {filepath.FromSlash("sub/testDir2/*"), []string{filepath.FromSlash("sub/testDir2/testFile")}},
+ {filepath.FromSlash("testDir1/*"), []string{filepath.FromSlash("testDir1/testFile")}},
+ } {
+ nameBase := filepath.Join(bucketName, s.glob)
+
+ prefixedGlobs := []string{
+ nameBase,
+ string(os.PathSeparator) + nameBase,
+ }
+
+ prefixedEntries := [][]string{{}, {}}
+ for _, entry := range s.entries {
+ prefixedEntries[0] = append(prefixedEntries[0], filepath.Join(bucketName, entry))
+ prefixedEntries[1] = append(prefixedEntries[1], string(os.PathSeparator)+filepath.Join(bucketName, entry))
+ }
+
+ for i, prefixedGlob := range prefixedGlobs {
+ entries, err := afero.Glob(gcsAfs.Fs, prefixedGlob)
+ if err != nil {
+ t.Error(err)
+ }
+ if reflect.DeepEqual(entries, prefixedEntries[i]) {
+ t.Logf("glob: %s: glob ok", prefixedGlob)
+ } else {
+ t.Errorf("glob: %s: got %#v, expected %#v", prefixedGlob, entries, prefixedEntries)
+ }
+ }
+ }
+}
+
+func TestGcsMkdir(t *testing.T) {
+ t.Run("empty", func(t *testing.T) {
+ emptyDirName := bucketName
+
+ err := gcsAfs.Mkdir(emptyDirName, 0755)
+ if err == nil {
+ t.Fatal("did not fail upon creation of an empty folder")
+ }
+ })
+ t.Run("success", func(t *testing.T) {
+ dirName := filepath.Join(bucketName, "a-test-dir")
+ var err error
+
+ err = gcsAfs.Mkdir(dirName, 0755)
+ if err != nil {
+ t.Fatal("failed to create a folder with error", err)
+ }
+
+ info, err := gcsAfs.Stat(dirName)
+ if err != nil {
+ t.Fatal("failed to get info", err)
+ }
+ if !info.IsDir() {
+ t.Fatalf("%s: not a dir", dirName)
+ }
+ if !info.Mode().IsDir() {
+ t.Errorf("%s: mode is not directory", dirName)
+ }
+
+ if info.Mode() != os.ModeDir|0755 {
+ t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", dirName, info.Mode())
+ }
+
+ err = gcsAfs.Remove(dirName)
+ if err != nil {
+ t.Fatalf("could not delete the folder %s after the test with error: %s", dirName, err)
+ }
+ })
+}
+
+func TestGcsMkdirAll(t *testing.T) {
+ t.Run("empty", func(t *testing.T) {
+ emptyDirName := bucketName
+
+ err := gcsAfs.MkdirAll(emptyDirName, 0755)
+ if err == nil {
+ t.Fatal("did not fail upon creation of an empty folder")
+ }
+ })
+ t.Run("success", func(t *testing.T) {
+ dirName := filepath.Join(bucketName, "a/b/c")
+
+ err := gcsAfs.MkdirAll(dirName, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := gcsAfs.Stat(filepath.Join(bucketName, "a"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !info.Mode().IsDir() {
+ t.Errorf("%s: mode is not directory", filepath.Join(bucketName, "a"))
+ }
+ if info.Mode() != os.ModeDir|0755 {
+ t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", filepath.Join(bucketName, "a"), info.Mode())
+ }
+ info, err = gcsAfs.Stat(filepath.Join(bucketName, "a/b"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !info.Mode().IsDir() {
+ t.Errorf("%s: mode is not directory", filepath.Join(bucketName, "a/b"))
+ }
+ if info.Mode() != os.ModeDir|0755 {
+ t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", filepath.Join(bucketName, "a/b"), info.Mode())
+ }
+ info, err = gcsAfs.Stat(dirName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !info.Mode().IsDir() {
+ t.Errorf("%s: mode is not directory", dirName)
+ }
+ if info.Mode() != os.ModeDir|0755 {
+ t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", dirName, info.Mode())
+ }
+
+ err = gcsAfs.RemoveAll(filepath.Join(bucketName, "a"))
+ if err != nil {
+ t.Fatalf("failed to remove the folder %s with error: %s", filepath.Join(bucketName, "a"), err)
+ }
+ })
+}