file.go - afero - [fork] go afero port for 9front
(HTM) git clone https://git.drkhsh.at/afero.git
---
file.go (6685B)
---
1 // Copyright © 2021 Vasily Ovchinnikov <vasily@remerge.io>.
2 //
3 // The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
4 // licensed under Apache License 2.0.
5 //
6 // Licensed under the Apache License, Version 2.0 (the "License");
7 // you may not use this file except in compliance with the License.
8 // You may obtain a copy of the License at
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16
17 package gcsfs
18
19 import (
20 "context"
21 "fmt"
22 "io"
23 "log"
24 "os"
25 "path/filepath"
26 "sort"
27 "syscall"
28
29 "cloud.google.com/go/storage"
30 "google.golang.org/api/iterator"
31
32 "github.com/spf13/afero/gcsfs/internal/stiface"
33 )
34
35 // GcsFile is the Afero file handle adapted for GCS
36 type GcsFile struct {
37 openFlags int
38 fhOffset int64 // File handle specific offset
39 closed bool
40 ReadDirIt stiface.ObjectIterator
41 resource *gcsFileResource
42 }
43
44 func NewGcsFile(
45 ctx context.Context,
46 fs *Fs,
47 obj stiface.ObjectHandle,
48 openFlags int,
49 // Unused: GCS has no use for the file mode just yet, but we keep it here in case we need it later
50 fileMode os.FileMode,
51 name string,
52 ) *GcsFile {
53 return &GcsFile{
54 openFlags: openFlags,
55 fhOffset: 0,
56 closed: false,
57 ReadDirIt: nil,
58 resource: &gcsFileResource{
59 ctx: ctx,
60 fs: fs,
61
62 obj: obj,
63 name: name,
64 fileMode: fileMode,
65
66 currentGcsSize: 0,
67
68 offset: 0,
69 reader: nil,
70 writer: nil,
71 },
72 }
73 }
74
75 func NewGcsFileFromOldFH(
76 openFlags int,
77 fileMode os.FileMode,
78 oldFile *gcsFileResource,
79 ) *GcsFile {
80 res := &GcsFile{
81 openFlags: openFlags,
82 fhOffset: 0,
83 closed: false,
84 ReadDirIt: nil,
85
86 resource: oldFile,
87 }
88 res.resource.fileMode = fileMode
89
90 return res
91 }
92
93 func (o *GcsFile) Close() error {
94 if o.closed {
95 // the afero spec expects the call to Close on a closed file to return an error
96 return ErrFileClosed
97 }
98 o.closed = true
99 return o.resource.Close()
100 }
101
102 func (o *GcsFile) Seek(newOffset int64, whence int) (int64, error) {
103 if o.closed {
104 return 0, ErrFileClosed
105 }
106
107 // Since this is an expensive operation, let's make sure we need it
108 if (whence == 0 && newOffset == o.fhOffset) || (whence == 1 && newOffset == 0) {
109 return o.fhOffset, nil
110 }
111 log.Printf("WARNING: Seek behavior triggered, highly inefficient. Offset before seek is at %d\n", o.fhOffset)
112
113 // Force the readers/writers to be reopened (at the correct offset)
114 err := o.Sync()
115 if err != nil {
116 return 0, err
117 }
118 stat, err := o.Stat()
119 if err != nil {
120 return 0, err
121 }
122
123 switch whence {
124 case 0:
125 o.fhOffset = newOffset
126 case 1:
127 o.fhOffset += newOffset
128 case 2:
129 o.fhOffset = stat.Size() + newOffset
130 }
131 return o.fhOffset, nil
132 }
133
134 func (o *GcsFile) Read(p []byte) (n int, err error) {
135 return o.ReadAt(p, o.fhOffset)
136 }
137
138 func (o *GcsFile) ReadAt(p []byte, off int64) (n int, err error) {
139 if o.closed {
140 return 0, ErrFileClosed
141 }
142
143 read, err := o.resource.ReadAt(p, off)
144 o.fhOffset += int64(read)
145 return read, err
146 }
147
148 func (o *GcsFile) Write(p []byte) (n int, err error) {
149 return o.WriteAt(p, o.fhOffset)
150 }
151
152 func (o *GcsFile) WriteAt(b []byte, off int64) (n int, err error) {
153 if o.closed {
154 return 0, ErrFileClosed
155 }
156
157 if o.openFlags == os.O_RDONLY {
158 return 0, fmt.Errorf("file is opened as read only")
159 }
160
161 _, err = o.resource.obj.Attrs(o.resource.ctx)
162 if err != nil {
163 if err == storage.ErrObjectNotExist {
164 if o.openFlags&os.O_CREATE == 0 {
165 return 0, ErrFileNotFound
166 }
167 } else {
168 return 0, fmt.Errorf("error getting file attributes: %v", err)
169 }
170 }
171
172 written, err := o.resource.WriteAt(b, off)
173 o.fhOffset += int64(written)
174 return written, err
175 }
176
177 func (o *GcsFile) Name() string {
178 return filepath.FromSlash(o.resource.name)
179 }
180
181 func (o *GcsFile) readdirImpl(count int) ([]*FileInfo, error) {
182 err := o.Sync()
183 if err != nil {
184 return nil, err
185 }
186
187 var ownInfo os.FileInfo
188 ownInfo, err = o.Stat()
189 if err != nil {
190 return nil, err
191 }
192
193 if !ownInfo.IsDir() {
194 return nil, syscall.ENOTDIR
195 }
196
197 path := o.resource.fs.ensureTrailingSeparator(o.resource.name)
198 if o.ReadDirIt == nil {
199 // log.Printf("Querying path : %s\n", path)
200 bucketName, bucketPath := o.resource.fs.splitName(path)
201
202 o.ReadDirIt = o.resource.fs.client.Bucket(bucketName).Objects(
203 o.resource.ctx, &storage.Query{Delimiter: o.resource.fs.separator, Prefix: bucketPath, Versions: false})
204 }
205 var res []*FileInfo
206 for {
207 object, err := o.ReadDirIt.Next()
208 if err == iterator.Done {
209 // reset the iterator
210 o.ReadDirIt = nil
211
212 if len(res) > 0 || count <= 0 {
213 return res, nil
214 }
215
216 return res, io.EOF
217 }
218 if err != nil {
219 return res, err
220 }
221
222 tmp := newFileInfoFromAttrs(object, o.resource.fileMode)
223
224 if tmp.Name() == "" {
225 // neither object.Name nor object.Prefix was present - so let's skip this unknown thing
226 continue
227 }
228
229 if object.Name == "" && object.Prefix == "" {
230 continue
231 }
232
233 if tmp.Name() == ownInfo.Name() {
234 // skip the entry for the directory itself
235 continue
236 }
237
238 res = append(res, tmp)
239
240 // Breaking out of the iteration here, once we reach the count, would
241 // return files before folders and make the results unpredictable.
242 // Hence we iterate over all the objects and do the cut-off in a
243 // higher-level method instead.
244 //if count > 0 && len(res) >= count {
245 // break
246 //}
247 }
248 // return res, nil
249 }
250
251 func (o *GcsFile) Readdir(count int) ([]os.FileInfo, error) {
252 fi, err := o.readdirImpl(count)
253 if len(fi) > 0 {
254 sort.Sort(ByName(fi))
255 }
256
257 if count > 0 && len(fi) > count {
258 fi = fi[:count]
259 }
260
261 var res []os.FileInfo
262 for _, f := range fi {
263 res = append(res, f)
264 }
265 return res, err
266 }
267
268 func (o *GcsFile) Readdirnames(n int) ([]string, error) {
269 fi, err := o.Readdir(n)
270 if err != nil && err != io.EOF {
271 return nil, err
272 }
273 names := make([]string, len(fi))
274
275 for i, f := range fi {
276 names[i] = f.Name()
277 }
278 return names, err
279 }
280
281 func (o *GcsFile) Stat() (os.FileInfo, error) {
282 err := o.Sync()
283 if err != nil {
284 return nil, err
285 }
286
287 return newFileInfo(o.resource.name, o.resource.fs, o.resource.fileMode)
288 }
289
290 func (o *GcsFile) Sync() error {
291 return o.resource.maybeCloseIo()
292 }
293
294 func (o *GcsFile) Truncate(wantedSize int64) error {
295 if o.closed {
296 return ErrFileClosed
297 }
298 if o.openFlags == os.O_RDONLY {
299 return fmt.Errorf("file was opened as read only")
300 }
301 return o.resource.Truncate(wantedSize)
302 }
303
304 func (o *GcsFile) WriteString(s string) (ret int, err error) {
305 return o.Write([]byte(s))
306 }