libfuse
fuse_lowlevel.c
1 /*
2  FUSE: Filesystem in Userspace
3  Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4 
5  Implementation of (most of) the low-level FUSE API. The session loop
6  functions are implemented in separate files.
7 
8  This program can be distributed under the terms of the GNU LGPLv2.
9  See the file COPYING.LIB
10 */
11 
12 #define _GNU_SOURCE
13 
14 #include "fuse_config.h"
15 #include "fuse_i.h"
16 #include "fuse_kernel.h"
17 #include "fuse_opt.h"
18 #include "fuse_misc.h"
19 #include "mount_util.h"
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <sys/file.h>
30 
31 #ifndef F_LINUX_SPECIFIC_BASE
32 #define F_LINUX_SPECIFIC_BASE 1024
33 #endif
34 #ifndef F_SETPIPE_SZ
35 #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
36 #endif
37 
38 
39 #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
40 #define OFFSET_MAX 0x7fffffffffffffffLL
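/*
 * Added note (not in the upstream file, hedged): PARAM() relies on the
 * request layout used throughout this file -- a fixed-size argument
 * struct followed directly by variable-length data such as a
 * NUL-terminated name.  A minimal sketch of how the handlers below use
 * it (taken from the do_mkdir() pattern further down):
 *
 *	struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
 *	const char *name = PARAM(arg);	// bytes right after the struct
 */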
41 
42 #define container_of(ptr, type, member) ({ \
43  const typeof( ((type *)0)->member ) *__mptr = (ptr); \
44  (type *)( (char *)__mptr - offsetof(type,member) );})
45 
46 struct fuse_pollhandle {
47  uint64_t kh;
48  struct fuse_session *se;
49 };
50 
51 static size_t pagesize;
52 
53 static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
54 {
55  pagesize = getpagesize();
56 }
57 
58 static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
59 {
60  attr->ino = stbuf->st_ino;
61  attr->mode = stbuf->st_mode;
62  attr->nlink = stbuf->st_nlink;
63  attr->uid = stbuf->st_uid;
64  attr->gid = stbuf->st_gid;
65  attr->rdev = stbuf->st_rdev;
66  attr->size = stbuf->st_size;
67  attr->blksize = stbuf->st_blksize;
68  attr->blocks = stbuf->st_blocks;
69  attr->atime = stbuf->st_atime;
70  attr->mtime = stbuf->st_mtime;
71  attr->ctime = stbuf->st_ctime;
72  attr->atimensec = ST_ATIM_NSEC(stbuf);
73  attr->mtimensec = ST_MTIM_NSEC(stbuf);
74  attr->ctimensec = ST_CTIM_NSEC(stbuf);
75 }
76 
77 static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
78 {
79  stbuf->st_mode = attr->mode;
80  stbuf->st_uid = attr->uid;
81  stbuf->st_gid = attr->gid;
82  stbuf->st_size = attr->size;
83  stbuf->st_atime = attr->atime;
84  stbuf->st_mtime = attr->mtime;
85  stbuf->st_ctime = attr->ctime;
86  ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
87  ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
88  ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
89 }
90 
91 static size_t iov_length(const struct iovec *iov, size_t count)
92 {
93  size_t seg;
94  size_t ret = 0;
95 
96  for (seg = 0; seg < count; seg++)
97  ret += iov[seg].iov_len;
98  return ret;
99 }
100 
101 static void list_init_req(struct fuse_req *req)
102 {
103  req->next = req;
104  req->prev = req;
105 }
106 
107 static void list_del_req(struct fuse_req *req)
108 {
109  struct fuse_req *prev = req->prev;
110  struct fuse_req *next = req->next;
111  prev->next = next;
112  next->prev = prev;
113 }
114 
115 static void list_add_req(struct fuse_req *req, struct fuse_req *next)
116 {
117  struct fuse_req *prev = next->prev;
118  req->next = next;
119  req->prev = prev;
120  prev->next = req;
121  next->prev = req;
122 }
123 
124 static void destroy_req(fuse_req_t req)
125 {
126  assert(req->ch == NULL);
127  pthread_mutex_destroy(&req->lock);
128  free(req);
129 }
130 
131 void fuse_free_req(fuse_req_t req)
132 {
133  int ctr;
134  struct fuse_session *se = req->se;
135 
136  pthread_mutex_lock(&se->lock);
137  req->u.ni.func = NULL;
138  req->u.ni.data = NULL;
139  list_del_req(req);
140  ctr = --req->ctr;
141  fuse_chan_put(req->ch);
142  req->ch = NULL;
143  pthread_mutex_unlock(&se->lock);
144  if (!ctr)
145  destroy_req(req);
146 }
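/*
 * Added note (hedged): requests are reference counted through req->ctr,
 * which fuse_ll_alloc_req() (below) initializes to 1.  fuse_free_req()
 * drops that reference and destroy_req() only runs once the count hits
 * zero; the interrupt path (find_interrupted()) temporarily bumps the
 * count while it calls back into the filesystem from another thread.
 */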
147 
148 static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
149 {
150  struct fuse_req *req;
151 
152  req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
153  if (req == NULL) {
154  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
155  } else {
156  req->se = se;
157  req->ctr = 1;
158  list_init_req(req);
159  pthread_mutex_init(&req->lock, NULL);
160  }
161 
162  return req;
163 }
164 
165 /* Send data. If *ch* is NULL, send via session master fd */
166 static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
167  struct iovec *iov, int count)
168 {
169  struct fuse_out_header *out = iov[0].iov_base;
170 
171  assert(se != NULL);
172  out->len = iov_length(iov, count);
173  if (se->debug) {
174  if (out->unique == 0) {
175  fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
176  out->error, out->len);
177  } else if (out->error) {
178  fuse_log(FUSE_LOG_DEBUG,
179  " unique: %llu, error: %i (%s), outsize: %i\n",
180  (unsigned long long) out->unique, out->error,
181  strerror(-out->error), out->len);
182  } else {
183  fuse_log(FUSE_LOG_DEBUG,
184  " unique: %llu, success, outsize: %i\n",
185  (unsigned long long) out->unique, out->len);
186  }
187  }
188 
189  ssize_t res;
190  if (se->io != NULL)
191  /* se->io->writev is never NULL if se->io is not NULL as
192  specified by fuse_session_custom_io()*/
193  res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
194  se->userdata);
195  else
196  res = writev(ch ? ch->fd : se->fd, iov, count);
197 
198  int err = errno;
199 
200  if (res == -1) {
201  /* ENOENT means the operation was interrupted */
202  if (!fuse_session_exited(se) && err != ENOENT)
203  perror("fuse: writing device");
204  return -err;
205  }
206 
207  return 0;
208 }
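/*
 * Added note (hedged): every reply sent through fuse_send_msg() is framed
 * as iov[0] = struct fuse_out_header (unique id, error code, total length)
 * followed by zero or more payload iovecs; out->len is recomputed above as
 * the sum of all iov_len values before the write to the device fd.
 */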
209 
210 
211 int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
212  int count)
213 {
214  struct fuse_out_header out;
215 
216  if (error <= -1000 || error > 0) {
217  fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
218  error = -ERANGE;
219  }
220 
221  out.unique = req->unique;
222  out.error = error;
223 
224  iov[0].iov_base = &out;
225  iov[0].iov_len = sizeof(struct fuse_out_header);
226 
227  return fuse_send_msg(req->se, req->ch, iov, count);
228 }
229 
230 static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
231  int count)
232 {
233  int res;
234 
235  res = fuse_send_reply_iov_nofree(req, error, iov, count);
236  fuse_free_req(req);
237  return res;
238 }
239 
240 static int send_reply(fuse_req_t req, int error, const void *arg,
241  size_t argsize)
242 {
243  struct iovec iov[2];
244  int count = 1;
245  if (argsize) {
246  iov[1].iov_base = (void *) arg;
247  iov[1].iov_len = argsize;
248  count++;
249  }
250  return send_reply_iov(req, error, iov, count);
251 }
252 
253 int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
254 {
255  int res;
256  struct iovec *padded_iov;
257 
258  padded_iov = malloc((count + 1) * sizeof(struct iovec));
259  if (padded_iov == NULL)
260  return fuse_reply_err(req, ENOMEM);
261 
262  memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
263  count++;
264 
265  res = send_reply_iov(req, 0, padded_iov, count);
266  free(padded_iov);
267 
268  return res;
269 }
270 
271 
272 /* `buf` is allowed to be empty so that the proper size may be
273  allocated by the caller */
274 size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
275  const char *name, const struct stat *stbuf, off_t off)
276 {
277  (void)req;
278  size_t namelen;
279  size_t entlen;
280  size_t entlen_padded;
281  struct fuse_dirent *dirent;
282 
283  namelen = strlen(name);
284  entlen = FUSE_NAME_OFFSET + namelen;
285  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
286 
287  if ((buf == NULL) || (entlen_padded > bufsize))
288  return entlen_padded;
289 
290  dirent = (struct fuse_dirent*) buf;
291  dirent->ino = stbuf->st_ino;
292  dirent->off = off;
293  dirent->namelen = namelen;
294  dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
295  memcpy(dirent->name, name, namelen);
296  memset(dirent->name + namelen, 0, entlen_padded - entlen);
297 
298  return entlen_padded;
299 }
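/*
 * Usage sketch (added, hedged): readdir handlers typically call
 * fuse_add_direntry() twice per entry -- once with a NULL buffer to learn
 * the padded size, then again to emit the entry.  `ino`, `name`, `p`,
 * `rem` and `next_off` below are hypothetical variables of the handler.
 *
 *	struct stat st = { .st_ino = ino, .st_mode = S_IFREG };
 *	size_t need = fuse_add_direntry(req, NULL, 0, name, &st, next_off);
 *	if (need <= rem) {
 *		fuse_add_direntry(req, p, rem, name, &st, next_off);
 *		p += need;
 *		rem -= need;
 *	}
 */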
300 
301 static void convert_statfs(const struct statvfs *stbuf,
302  struct fuse_kstatfs *kstatfs)
303 {
304  kstatfs->bsize = stbuf->f_bsize;
305  kstatfs->frsize = stbuf->f_frsize;
306  kstatfs->blocks = stbuf->f_blocks;
307  kstatfs->bfree = stbuf->f_bfree;
308  kstatfs->bavail = stbuf->f_bavail;
309  kstatfs->files = stbuf->f_files;
310  kstatfs->ffree = stbuf->f_ffree;
311  kstatfs->namelen = stbuf->f_namemax;
312 }
313 
314 static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
315 {
316  return send_reply(req, 0, arg, argsize);
317 }
318 
319 int fuse_reply_err(fuse_req_t req, int err)
320 {
321  return send_reply(req, -err, NULL, 0);
322 }
323 
324 void fuse_reply_none(fuse_req_t req)
325 {
326  fuse_free_req(req);
327 }
328 
329 static unsigned long calc_timeout_sec(double t)
330 {
331  if (t > (double) ULONG_MAX)
332  return ULONG_MAX;
333  else if (t < 0.0)
334  return 0;
335  else
336  return (unsigned long) t;
337 }
338 
339 static unsigned int calc_timeout_nsec(double t)
340 {
341  double f = t - (double) calc_timeout_sec(t);
342  if (f < 0.0)
343  return 0;
344  else if (f >= 0.999999999)
345  return 999999999;
346  else
347  return (unsigned int) (f * 1.0e9);
348 }
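/*
 * Worked example (added): the two helpers above split a floating point
 * timeout into the kernel's sec/nsec pair.  For a timeout of 1.5 seconds,
 * calc_timeout_sec(1.5) == 1 and calc_timeout_nsec(1.5) == 500000000,
 * i.e. 0.5 * 1e9 nanoseconds.
 */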
349 
350 static void fill_entry(struct fuse_entry_out *arg,
351  const struct fuse_entry_param *e)
352 {
353  arg->nodeid = e->ino;
354  arg->generation = e->generation;
355  arg->entry_valid = calc_timeout_sec(e->entry_timeout);
356  arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
357  arg->attr_valid = calc_timeout_sec(e->attr_timeout);
358  arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
359  convert_stat(&e->attr, &arg->attr);
360 }
361 
362 /* `buf` is allowed to be empty so that the proper size may be
363  allocated by the caller */
364 size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
365  const char *name,
366  const struct fuse_entry_param *e, off_t off)
367 {
368  (void)req;
369  size_t namelen;
370  size_t entlen;
371  size_t entlen_padded;
372 
373  namelen = strlen(name);
374  entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
375  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
376  if ((buf == NULL) || (entlen_padded > bufsize))
377  return entlen_padded;
378 
379  struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
380  memset(&dp->entry_out, 0, sizeof(dp->entry_out));
381  fill_entry(&dp->entry_out, e);
382 
383  struct fuse_dirent *dirent = &dp->dirent;
384  dirent->ino = e->attr.st_ino;
385  dirent->off = off;
386  dirent->namelen = namelen;
387  dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
388  memcpy(dirent->name, name, namelen);
389  memset(dirent->name + namelen, 0, entlen_padded - entlen);
390 
391  return entlen_padded;
392 }
393 
394 static void fill_open(struct fuse_open_out *arg,
395  const struct fuse_file_info *f)
396 {
397  arg->fh = f->fh;
398  if (f->direct_io)
399  arg->open_flags |= FOPEN_DIRECT_IO;
400  if (f->keep_cache)
401  arg->open_flags |= FOPEN_KEEP_CACHE;
402  if (f->cache_readdir)
403  arg->open_flags |= FOPEN_CACHE_DIR;
404  if (f->nonseekable)
405  arg->open_flags |= FOPEN_NONSEEKABLE;
406  if (f->noflush)
407  arg->open_flags |= FOPEN_NOFLUSH;
408 }
409 
410 int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
411 {
412  struct fuse_entry_out arg;
413  size_t size = req->se->conn.proto_minor < 9 ?
414  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
415 
416  /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
417  negative entry */
418  if (!e->ino && req->se->conn.proto_minor < 4)
419  return fuse_reply_err(req, ENOENT);
420 
421  memset(&arg, 0, sizeof(arg));
422  fill_entry(&arg, e);
423  return send_reply_ok(req, &arg, size);
424 }
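/*
 * Usage sketch (added, hedged): a lookup handler fills a
 * struct fuse_entry_param and hands it to fuse_reply_entry().  `my_ino`
 * and `my_fill_stat` are hypothetical helpers, not part of libfuse.
 *
 *	struct fuse_entry_param e;
 *	memset(&e, 0, sizeof(e));
 *	e.ino = my_ino;
 *	e.generation = 1;
 *	e.attr_timeout = 1.0;
 *	e.entry_timeout = 1.0;
 *	my_fill_stat(my_ino, &e.attr);
 *	fuse_reply_entry(req, &e);
 */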
425 
426 int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
427  const struct fuse_file_info *f)
428 {
429  char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
430  size_t entrysize = req->se->conn.proto_minor < 9 ?
431  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
432  struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
433  struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
434 
435  memset(buf, 0, sizeof(buf));
436  fill_entry(earg, e);
437  fill_open(oarg, f);
438  return send_reply_ok(req, buf,
439  entrysize + sizeof(struct fuse_open_out));
440 }
441 
442 int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
443  double attr_timeout)
444 {
445  struct fuse_attr_out arg;
446  size_t size = req->se->conn.proto_minor < 9 ?
447  FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
448 
449  memset(&arg, 0, sizeof(arg));
450  arg.attr_valid = calc_timeout_sec(attr_timeout);
451  arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
452  convert_stat(attr, &arg.attr);
453 
454  return send_reply_ok(req, &arg, size);
455 }
456 
457 int fuse_reply_readlink(fuse_req_t req, const char *linkname)
458 {
459  return send_reply_ok(req, linkname, strlen(linkname));
460 }
461 
462 int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
463 {
464  struct fuse_open_out arg;
465 
466  memset(&arg, 0, sizeof(arg));
467  fill_open(&arg, f);
468  return send_reply_ok(req, &arg, sizeof(arg));
469 }
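/*
 * Usage sketch (added, hedged): an open handler stores its private state
 * in fi->fh and may set the hint bits that fill_open() above translates
 * into FOPEN_* flags.  `my_handle_alloc` is a hypothetical helper.
 *
 *	static void my_open(fuse_req_t req, fuse_ino_t ino,
 *			    struct fuse_file_info *fi)
 *	{
 *		fi->fh = (uint64_t) my_handle_alloc(ino);
 *		fi->keep_cache = 1;	// keep page cache across opens
 *		fuse_reply_open(req, fi);
 *	}
 */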
470 
471 int fuse_reply_write(fuse_req_t req, size_t count)
472 {
473  struct fuse_write_out arg;
474 
475  memset(&arg, 0, sizeof(arg));
476  arg.size = count;
477 
478  return send_reply_ok(req, &arg, sizeof(arg));
479 }
480 
481 int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
482 {
483  return send_reply_ok(req, buf, size);
484 }
485 
486 static int fuse_send_data_iov_fallback(struct fuse_session *se,
487  struct fuse_chan *ch,
488  struct iovec *iov, int iov_count,
489  struct fuse_bufvec *buf,
490  size_t len)
491 {
492  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
493  void *mbuf;
494  int res;
495 
496  /* Optimize common case */
497  if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
498  !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
499  /* FIXME: also avoid memory copy if there are multiple buffers
500  but none of them contain an fd */
501 
502  iov[iov_count].iov_base = buf->buf[0].mem;
503  iov[iov_count].iov_len = len;
504  iov_count++;
505  return fuse_send_msg(se, ch, iov, iov_count);
506  }
507 
508  res = posix_memalign(&mbuf, pagesize, len);
509  if (res != 0)
510  return res;
511 
512  mem_buf.buf[0].mem = mbuf;
513  res = fuse_buf_copy(&mem_buf, buf, 0);
514  if (res < 0) {
515  free(mbuf);
516  return -res;
517  }
518  len = res;
519 
520  iov[iov_count].iov_base = mbuf;
521  iov[iov_count].iov_len = len;
522  iov_count++;
523  res = fuse_send_msg(se, ch, iov, iov_count);
524  free(mbuf);
525 
526  return res;
527 }
528 
529 struct fuse_ll_pipe {
530  size_t size;
531  int can_grow;
532  int pipe[2];
533 };
534 
535 static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
536 {
537  close(llp->pipe[0]);
538  close(llp->pipe[1]);
539  free(llp);
540 }
541 
542 #ifdef HAVE_SPLICE
543 #if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
544 static int fuse_pipe(int fds[2])
545 {
546  int rv = pipe(fds);
547 
548  if (rv == -1)
549  return rv;
550 
551  if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
552  fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
553  fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
554  fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
555  close(fds[0]);
556  close(fds[1]);
557  rv = -1;
558  }
559  return rv;
560 }
561 #else
562 static int fuse_pipe(int fds[2])
563 {
564  return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
565 }
566 #endif
567 
568 static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
569 {
570  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
571  if (llp == NULL) {
572  int res;
573 
574  llp = malloc(sizeof(struct fuse_ll_pipe));
575  if (llp == NULL)
576  return NULL;
577 
578  res = fuse_pipe(llp->pipe);
579  if (res == -1) {
580  free(llp);
581  return NULL;
582  }
583 
584  /*
585  * the default size is 16 pages on linux
586  */
587  llp->size = pagesize * 16;
588  llp->can_grow = 1;
589 
590  pthread_setspecific(se->pipe_key, llp);
591  }
592 
593  return llp;
594 }
595 #endif
596 
597 static void fuse_ll_clear_pipe(struct fuse_session *se)
598 {
599  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
600  if (llp) {
601  pthread_setspecific(se->pipe_key, NULL);
602  fuse_ll_pipe_free(llp);
603  }
604 }
605 
606 #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
607 static int read_back(int fd, char *buf, size_t len)
608 {
609  int res;
610 
611  res = read(fd, buf, len);
612  if (res == -1) {
613  fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
614  return -EIO;
615  }
616  if (res != len) {
617  fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
618  return -EIO;
619  }
620  return 0;
621 }
622 
623 static int grow_pipe_to_max(int pipefd)
624 {
625  int max;
626  int res;
627  int maxfd;
628  char buf[32];
629 
630  maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
631  if (maxfd < 0)
632  return -errno;
633 
634  res = read(maxfd, buf, sizeof(buf) - 1);
635  if (res < 0) {
636  int saved_errno;
637 
638  saved_errno = errno;
639  close(maxfd);
640  return -saved_errno;
641  }
642  close(maxfd);
643  buf[res] = '\0';
644 
645  max = atoi(buf);
646  res = fcntl(pipefd, F_SETPIPE_SZ, max);
647  if (res < 0)
648  return -errno;
649  return max;
650 }
651 
652 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
653  struct iovec *iov, int iov_count,
654  struct fuse_bufvec *buf, unsigned int flags)
655 {
656  int res;
657  size_t len = fuse_buf_size(buf);
658  struct fuse_out_header *out = iov[0].iov_base;
659  struct fuse_ll_pipe *llp;
660  int splice_flags;
661  size_t pipesize;
662  size_t total_buf_size;
663  size_t idx;
664  size_t headerlen;
665  struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
666 
667  if (se->broken_splice_nonblock)
668  goto fallback;
669 
670  if (flags & FUSE_BUF_NO_SPLICE)
671  goto fallback;
672 
673  total_buf_size = 0;
674  for (idx = buf->idx; idx < buf->count; idx++) {
675  total_buf_size += buf->buf[idx].size;
676  if (idx == buf->idx)
677  total_buf_size -= buf->off;
678  }
679  if (total_buf_size < 2 * pagesize)
680  goto fallback;
681 
682  if (se->conn.proto_minor < 14 ||
683  !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
684  goto fallback;
685 
686  llp = fuse_ll_get_pipe(se);
687  if (llp == NULL)
688  goto fallback;
689 
690 
691  headerlen = iov_length(iov, iov_count);
692 
693  out->len = headerlen + len;
694 
695  /*
696  * Heuristic for the required pipe size, does not work if the
697  * source contains less than page size fragments
698  */
699  pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
700 
701  if (llp->size < pipesize) {
702  if (llp->can_grow) {
703  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
704  if (res == -1) {
705  res = grow_pipe_to_max(llp->pipe[0]);
706  if (res > 0)
707  llp->size = res;
708  llp->can_grow = 0;
709  goto fallback;
710  }
711  llp->size = res;
712  }
713  if (llp->size < pipesize)
714  goto fallback;
715  }
716 
717 
718  res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
719  if (res == -1)
720  goto fallback;
721 
722  if (res != headerlen) {
723  res = -EIO;
724  fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
725  headerlen);
726  goto clear_pipe;
727  }
728 
729  pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
730  pipe_buf.buf[0].fd = llp->pipe[1];
731 
732  res = fuse_buf_copy(&pipe_buf, buf,
733  FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
734  if (res < 0) {
735  if (res == -EAGAIN || res == -EINVAL) {
736  /*
737  * Should only get EAGAIN on kernels with
738  * broken SPLICE_F_NONBLOCK support (<=
739  * 2.6.35) where this error or a short read is
740  * returned even if the pipe itself is not
741  * full
742  *
743  * EINVAL might mean that splice can't handle
744  * this combination of input and output.
745  */
746  if (res == -EAGAIN)
747  se->broken_splice_nonblock = 1;
748 
749  pthread_setspecific(se->pipe_key, NULL);
750  fuse_ll_pipe_free(llp);
751  goto fallback;
752  }
753  res = -res;
754  goto clear_pipe;
755  }
756 
757  if (res != 0 && res < len) {
758  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
759  void *mbuf;
760  size_t now_len = res;
761  /*
762  * For regular files a short count is either
763  * 1) due to EOF, or
764  * 2) because of broken SPLICE_F_NONBLOCK (see above)
765  *
766  * For other inputs it's possible that we overflowed
767  * the pipe because of small buffer fragments.
768  */
769 
770  res = posix_memalign(&mbuf, pagesize, len);
771  if (res != 0)
772  goto clear_pipe;
773 
774  mem_buf.buf[0].mem = mbuf;
775  mem_buf.off = now_len;
776  res = fuse_buf_copy(&mem_buf, buf, 0);
777  if (res > 0) {
778  char *tmpbuf;
779  size_t extra_len = res;
780  /*
781  * Trickiest case: got more data. Need to get
782  * back the data from the pipe and then fall
783  * back to regular write.
784  */
785  tmpbuf = malloc(headerlen);
786  if (tmpbuf == NULL) {
787  free(mbuf);
788  res = ENOMEM;
789  goto clear_pipe;
790  }
791  res = read_back(llp->pipe[0], tmpbuf, headerlen);
792  free(tmpbuf);
793  if (res != 0) {
794  free(mbuf);
795  goto clear_pipe;
796  }
797  res = read_back(llp->pipe[0], mbuf, now_len);
798  if (res != 0) {
799  free(mbuf);
800  goto clear_pipe;
801  }
802  len = now_len + extra_len;
803  iov[iov_count].iov_base = mbuf;
804  iov[iov_count].iov_len = len;
805  iov_count++;
806  res = fuse_send_msg(se, ch, iov, iov_count);
807  free(mbuf);
808  return res;
809  }
810  free(mbuf);
811  res = now_len;
812  }
813  len = res;
814  out->len = headerlen + len;
815 
816  if (se->debug) {
817  fuse_log(FUSE_LOG_DEBUG,
818  " unique: %llu, success, outsize: %i (splice)\n",
819  (unsigned long long) out->unique, out->len);
820  }
821 
822  splice_flags = 0;
823  if ((flags & FUSE_BUF_SPLICE_MOVE) &&
824  (se->conn.want & FUSE_CAP_SPLICE_MOVE))
825  splice_flags |= SPLICE_F_MOVE;
826 
827  if (se->io != NULL && se->io->splice_send != NULL) {
828  res = se->io->splice_send(llp->pipe[0], NULL,
829  ch ? ch->fd : se->fd, NULL, out->len,
830  splice_flags, se->userdata);
831  } else {
832  res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
833  out->len, splice_flags);
834  }
835  if (res == -1) {
836  res = -errno;
837  perror("fuse: splice from pipe");
838  goto clear_pipe;
839  }
840  if (res != out->len) {
841  res = -EIO;
842  fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
843  res, out->len);
844  goto clear_pipe;
845  }
846  return 0;
847 
848 clear_pipe:
849  fuse_ll_clear_pipe(se);
850  return res;
851 
852 fallback:
853  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
854 }
855 #else
856 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
857  struct iovec *iov, int iov_count,
858  struct fuse_bufvec *buf, unsigned int flags)
859 {
860  size_t len = fuse_buf_size(buf);
861  (void) flags;
862 
863  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
864 }
865 #endif
866 
867 int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
868  enum fuse_buf_copy_flags flags)
869 {
870  struct iovec iov[2];
871  struct fuse_out_header out;
872  int res;
873 
874  iov[0].iov_base = &out;
875  iov[0].iov_len = sizeof(struct fuse_out_header);
876 
877  out.unique = req->unique;
878  out.error = 0;
879 
880  res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
881  if (res <= 0) {
882  fuse_free_req(req);
883  return res;
884  } else {
885  return fuse_reply_err(req, res);
886  }
887 }
888 
889 int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
890 {
891  struct fuse_statfs_out arg;
892  size_t size = req->se->conn.proto_minor < 4 ?
893  FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
894 
895  memset(&arg, 0, sizeof(arg));
896  convert_statfs(stbuf, &arg.st);
897 
898  return send_reply_ok(req, &arg, size);
899 }
900 
901 int fuse_reply_xattr(fuse_req_t req, size_t count)
902 {
903  struct fuse_getxattr_out arg;
904 
905  memset(&arg, 0, sizeof(arg));
906  arg.size = count;
907 
908  return send_reply_ok(req, &arg, sizeof(arg));
909 }
910 
911 int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
912 {
913  struct fuse_lk_out arg;
914 
915  memset(&arg, 0, sizeof(arg));
916  arg.lk.type = lock->l_type;
917  if (lock->l_type != F_UNLCK) {
918  arg.lk.start = lock->l_start;
919  if (lock->l_len == 0)
920  arg.lk.end = OFFSET_MAX;
921  else
922  arg.lk.end = lock->l_start + lock->l_len - 1;
923  }
924  arg.lk.pid = lock->l_pid;
925  return send_reply_ok(req, &arg, sizeof(arg));
926 }
927 
928 int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
929 {
930  struct fuse_bmap_out arg;
931 
932  memset(&arg, 0, sizeof(arg));
933  arg.block = idx;
934 
935  return send_reply_ok(req, &arg, sizeof(arg));
936 }
937 
938 static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
939  size_t count)
940 {
941  struct fuse_ioctl_iovec *fiov;
942  size_t i;
943 
944  fiov = malloc(sizeof(fiov[0]) * count);
945  if (!fiov)
946  return NULL;
947 
948  for (i = 0; i < count; i++) {
949  fiov[i].base = (uintptr_t) iov[i].iov_base;
950  fiov[i].len = iov[i].iov_len;
951  }
952 
953  return fiov;
954 }
955 
956 int fuse_reply_ioctl_retry(fuse_req_t req,
957  const struct iovec *in_iov, size_t in_count,
958  const struct iovec *out_iov, size_t out_count)
959 {
960  struct fuse_ioctl_out arg;
961  struct fuse_ioctl_iovec *in_fiov = NULL;
962  struct fuse_ioctl_iovec *out_fiov = NULL;
963  struct iovec iov[4];
964  size_t count = 1;
965  int res;
966 
967  memset(&arg, 0, sizeof(arg));
968  arg.flags |= FUSE_IOCTL_RETRY;
969  arg.in_iovs = in_count;
970  arg.out_iovs = out_count;
971  iov[count].iov_base = &arg;
972  iov[count].iov_len = sizeof(arg);
973  count++;
974 
975  if (req->se->conn.proto_minor < 16) {
976  if (in_count) {
977  iov[count].iov_base = (void *)in_iov;
978  iov[count].iov_len = sizeof(in_iov[0]) * in_count;
979  count++;
980  }
981 
982  if (out_count) {
983  iov[count].iov_base = (void *)out_iov;
984  iov[count].iov_len = sizeof(out_iov[0]) * out_count;
985  count++;
986  }
987  } else {
988  /* Can't handle non-compat 64bit ioctls on 32bit */
989  if (sizeof(void *) == 4 && req->ioctl_64bit) {
990  res = fuse_reply_err(req, EINVAL);
991  goto out;
992  }
993 
994  if (in_count) {
995  in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
996  if (!in_fiov)
997  goto enomem;
998 
999  iov[count].iov_base = (void *)in_fiov;
1000  iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1001  count++;
1002  }
1003  if (out_count) {
1004  out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1005  if (!out_fiov)
1006  goto enomem;
1007 
1008  iov[count].iov_base = (void *)out_fiov;
1009  iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1010  count++;
1011  }
1012  }
1013 
1014  res = send_reply_iov(req, 0, iov, count);
1015 out:
1016  free(in_fiov);
1017  free(out_fiov);
1018 
1019  return res;
1020 
1021 enomem:
1022  res = fuse_reply_err(req, ENOMEM);
1023  goto out;
1024 }
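/*
 * Added note (hedged): fuse_reply_ioctl_retry() implements the
 * "unrestricted ioctl" handshake -- the filesystem describes which
 * userspace memory ranges it needs copied in and out via the iovec
 * arrays, and the kernel re-issues the ioctl with those buffers fetched
 * and prepared.
 */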
1025 
1026 int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1027 {
1028  struct fuse_ioctl_out arg;
1029  struct iovec iov[3];
1030  size_t count = 1;
1031 
1032  memset(&arg, 0, sizeof(arg));
1033  arg.result = result;
1034  iov[count].iov_base = &arg;
1035  iov[count].iov_len = sizeof(arg);
1036  count++;
1037 
1038  if (size) {
1039  iov[count].iov_base = (char *) buf;
1040  iov[count].iov_len = size;
1041  count++;
1042  }
1043 
1044  return send_reply_iov(req, 0, iov, count);
1045 }
1046 
1047 int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1048  int count)
1049 {
1050  struct iovec *padded_iov;
1051  struct fuse_ioctl_out arg;
1052  int res;
1053 
1054  padded_iov = malloc((count + 2) * sizeof(struct iovec));
1055  if (padded_iov == NULL)
1056  return fuse_reply_err(req, ENOMEM);
1057 
1058  memset(&arg, 0, sizeof(arg));
1059  arg.result = result;
1060  padded_iov[1].iov_base = &arg;
1061  padded_iov[1].iov_len = sizeof(arg);
1062 
1063  memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1064 
1065  res = send_reply_iov(req, 0, padded_iov, count + 2);
1066  free(padded_iov);
1067 
1068  return res;
1069 }
1070 
1071 int fuse_reply_poll(fuse_req_t req, unsigned revents)
1072 {
1073  struct fuse_poll_out arg;
1074 
1075  memset(&arg, 0, sizeof(arg));
1076  arg.revents = revents;
1077 
1078  return send_reply_ok(req, &arg, sizeof(arg));
1079 }
1080 
1081 int fuse_reply_lseek(fuse_req_t req, off_t off)
1082 {
1083  struct fuse_lseek_out arg;
1084 
1085  memset(&arg, 0, sizeof(arg));
1086  arg.offset = off;
1087 
1088  return send_reply_ok(req, &arg, sizeof(arg));
1089 }
1090 
1091 static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1092 {
1093  char *name = (char *) inarg;
1094 
1095  if (req->se->op.lookup)
1096  req->se->op.lookup(req, nodeid, name);
1097  else
1098  fuse_reply_err(req, ENOSYS);
1099 }
1100 
1101 static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1102 {
1103  struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1104 
1105  if (req->se->op.forget)
1106  req->se->op.forget(req, nodeid, arg->nlookup);
1107  else
1108  fuse_reply_none(req);
1109 }
1110 
1111 static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1112  const void *inarg)
1113 {
1114  struct fuse_batch_forget_in *arg = (void *) inarg;
1115  struct fuse_forget_one *param = (void *) PARAM(arg);
1116  unsigned int i;
1117 
1118  (void) nodeid;
1119 
1120  if (req->se->op.forget_multi) {
1121  req->se->op.forget_multi(req, arg->count,
1122  (struct fuse_forget_data *) param);
1123  } else if (req->se->op.forget) {
1124  for (i = 0; i < arg->count; i++) {
1125  struct fuse_forget_one *forget = &param[i];
1126  struct fuse_req *dummy_req;
1127 
1128  dummy_req = fuse_ll_alloc_req(req->se);
1129  if (dummy_req == NULL)
1130  break;
1131 
1132  dummy_req->unique = req->unique;
1133  dummy_req->ctx = req->ctx;
1134  dummy_req->ch = NULL;
1135 
1136  req->se->op.forget(dummy_req, forget->nodeid,
1137  forget->nlookup);
1138  }
1139  fuse_reply_none(req);
1140  } else {
1141  fuse_reply_none(req);
1142  }
1143 }
1144 
1145 static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1146 {
1147  struct fuse_file_info *fip = NULL;
1148  struct fuse_file_info fi;
1149 
1150  if (req->se->conn.proto_minor >= 9) {
1151  struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1152 
1153  if (arg->getattr_flags & FUSE_GETATTR_FH) {
1154  memset(&fi, 0, sizeof(fi));
1155  fi.fh = arg->fh;
1156  fip = &fi;
1157  }
1158  }
1159 
1160  if (req->se->op.getattr)
1161  req->se->op.getattr(req, nodeid, fip);
1162  else
1163  fuse_reply_err(req, ENOSYS);
1164 }
1165 
1166 static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1167 {
1168  struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1169 
1170  if (req->se->op.setattr) {
1171  struct fuse_file_info *fi = NULL;
1172  struct fuse_file_info fi_store;
1173  struct stat stbuf;
1174  memset(&stbuf, 0, sizeof(stbuf));
1175  convert_attr(arg, &stbuf);
1176  if (arg->valid & FATTR_FH) {
1177  arg->valid &= ~FATTR_FH;
1178  memset(&fi_store, 0, sizeof(fi_store));
1179  fi = &fi_store;
1180  fi->fh = arg->fh;
1181  }
1182  arg->valid &=
1183  FUSE_SET_ATTR_MODE |
1184  FUSE_SET_ATTR_UID |
1185  FUSE_SET_ATTR_GID |
1186  FUSE_SET_ATTR_SIZE |
1187  FUSE_SET_ATTR_ATIME |
1188  FUSE_SET_ATTR_MTIME |
1189  FUSE_SET_ATTR_KILL_SUID |
1190  FUSE_SET_ATTR_KILL_SGID |
1191  FUSE_SET_ATTR_ATIME_NOW |
1192  FUSE_SET_ATTR_MTIME_NOW |
1193  FUSE_SET_ATTR_CTIME;
1194 
1195  req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1196  } else
1197  fuse_reply_err(req, ENOSYS);
1198 }
1199 
1200 static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1201 {
1202  struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1203 
1204  if (req->se->op.access)
1205  req->se->op.access(req, nodeid, arg->mask);
1206  else
1207  fuse_reply_err(req, ENOSYS);
1208 }
1209 
1210 static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1211 {
1212  (void) inarg;
1213 
1214  if (req->se->op.readlink)
1215  req->se->op.readlink(req, nodeid);
1216  else
1217  fuse_reply_err(req, ENOSYS);
1218 }
1219 
1220 static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1221 {
1222  struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1223  char *name = PARAM(arg);
1224 
1225  if (req->se->conn.proto_minor >= 12)
1226  req->ctx.umask = arg->umask;
1227  else
1228  name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1229 
1230  if (req->se->op.mknod)
1231  req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1232  else
1233  fuse_reply_err(req, ENOSYS);
1234 }
1235 
1236 static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1237 {
1238  struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1239 
1240  if (req->se->conn.proto_minor >= 12)
1241  req->ctx.umask = arg->umask;
1242 
1243  if (req->se->op.mkdir)
1244  req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1245  else
1246  fuse_reply_err(req, ENOSYS);
1247 }
1248 
1249 static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1250 {
1251  char *name = (char *) inarg;
1252 
1253  if (req->se->op.unlink)
1254  req->se->op.unlink(req, nodeid, name);
1255  else
1256  fuse_reply_err(req, ENOSYS);
1257 }
1258 
1259 static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1260 {
1261  char *name = (char *) inarg;
1262 
1263  if (req->se->op.rmdir)
1264  req->se->op.rmdir(req, nodeid, name);
1265  else
1266  fuse_reply_err(req, ENOSYS);
1267 }
1268 
1269 static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1270 {
1271  char *name = (char *) inarg;
1272  char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1273 
1274  if (req->se->op.symlink)
1275  req->se->op.symlink(req, linkname, nodeid, name);
1276  else
1277  fuse_reply_err(req, ENOSYS);
1278 }
1279 
1280 static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1281 {
1282  struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1283  char *oldname = PARAM(arg);
1284  char *newname = oldname + strlen(oldname) + 1;
1285 
1286  if (req->se->op.rename)
1287  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1288  0);
1289  else
1290  fuse_reply_err(req, ENOSYS);
1291 }
1292 
1293 static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1294 {
1295  struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1296  char *oldname = PARAM(arg);
1297  char *newname = oldname + strlen(oldname) + 1;
1298 
1299  if (req->se->op.rename)
1300  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1301  arg->flags);
1302  else
1303  fuse_reply_err(req, ENOSYS);
1304 }
1305 
1306 static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1307 {
1308  struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1309 
1310  if (req->se->op.link)
1311  req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1312  else
1313  fuse_reply_err(req, ENOSYS);
1314 }
1315 
1316 static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1317 {
1318  struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1319 
1320  if (req->se->op.create) {
1321  struct fuse_file_info fi;
1322  char *name = PARAM(arg);
1323 
1324  memset(&fi, 0, sizeof(fi));
1325  fi.flags = arg->flags;
1326 
1327  if (req->se->conn.proto_minor >= 12)
1328  req->ctx.umask = arg->umask;
1329  else
1330  name = (char *) inarg + sizeof(struct fuse_open_in);
1331 
1332  req->se->op.create(req, nodeid, name, arg->mode, &fi);
1333  } else
1334  fuse_reply_err(req, ENOSYS);
1335 }
1336 
1337 static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1338 {
1339  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1340  struct fuse_file_info fi;
1341 
1342  memset(&fi, 0, sizeof(fi));
1343  fi.flags = arg->flags;
1344 
1345  if (req->se->op.open)
1346  req->se->op.open(req, nodeid, &fi);
1347  else
1348  fuse_reply_open(req, &fi);
1349 }
1350 
1351 static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1352 {
1353  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1354 
1355  if (req->se->op.read) {
1356  struct fuse_file_info fi;
1357 
1358  memset(&fi, 0, sizeof(fi));
1359  fi.fh = arg->fh;
1360  if (req->se->conn.proto_minor >= 9) {
1361  fi.lock_owner = arg->lock_owner;
1362  fi.flags = arg->flags;
1363  }
1364  req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1365  } else
1366  fuse_reply_err(req, ENOSYS);
1367 }
1368 
1369 static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1370 {
1371  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1372  struct fuse_file_info fi;
1373  char *param;
1374 
1375  memset(&fi, 0, sizeof(fi));
1376  fi.fh = arg->fh;
1377  fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1378 
1379  if (req->se->conn.proto_minor < 9) {
1380  param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1381  } else {
1382  fi.lock_owner = arg->lock_owner;
1383  fi.flags = arg->flags;
1384  param = PARAM(arg);
1385  }
1386 
1387  if (req->se->op.write)
1388  req->se->op.write(req, nodeid, param, arg->size,
1389  arg->offset, &fi);
1390  else
1391  fuse_reply_err(req, ENOSYS);
1392 }
1393 
1394 static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1395  const struct fuse_buf *ibuf)
1396 {
1397  struct fuse_session *se = req->se;
1398  struct fuse_bufvec bufv = {
1399  .buf[0] = *ibuf,
1400  .count = 1,
1401  };
1402  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1403  struct fuse_file_info fi;
1404 
1405  memset(&fi, 0, sizeof(fi));
1406  fi.fh = arg->fh;
1407  fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1408 
1409  if (se->conn.proto_minor < 9) {
1410  bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1411  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1412  FUSE_COMPAT_WRITE_IN_SIZE;
1413  assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1414  } else {
1415  fi.lock_owner = arg->lock_owner;
1416  fi.flags = arg->flags;
1417  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1418  bufv.buf[0].mem = PARAM(arg);
1419 
1420  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1421  sizeof(struct fuse_write_in);
1422  }
1423  if (bufv.buf[0].size < arg->size) {
1424  fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1425  fuse_reply_err(req, EIO);
1426  goto out;
1427  }
1428  bufv.buf[0].size = arg->size;
1429 
1430  se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1431 
1432 out:
1433  /* Need to reset the pipe if ->write_buf() didn't consume all data */
1434  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1435  fuse_ll_clear_pipe(se);
1436 }
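/*
 * Usage sketch (added, hedged): with FUSE_CAP_SPLICE_READ active the
 * bufvec passed to ->write_buf() may wrap the pipe fd rather than plain
 * memory, so handlers usually drain it with fuse_buf_copy().
 * `backing_fd` and `off` below are hypothetical.
 *
 *	struct fuse_bufvec dst = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
 *	dst.buf[0].flags = FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK;
 *	dst.buf[0].fd = backing_fd;
 *	dst.buf[0].pos = off;
 *	ssize_t res = fuse_buf_copy(&dst, bufv, 0);
 *	if (res < 0)
 *		fuse_reply_err(req, (int) -res);
 *	else
 *		fuse_reply_write(req, (size_t) res);
 */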
1437 
1438 static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1439 {
1440  struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1441  struct fuse_file_info fi;
1442 
1443  memset(&fi, 0, sizeof(fi));
1444  fi.fh = arg->fh;
1445  fi.flush = 1;
1446  if (req->se->conn.proto_minor >= 7)
1447  fi.lock_owner = arg->lock_owner;
1448 
1449  if (req->se->op.flush)
1450  req->se->op.flush(req, nodeid, &fi);
1451  else
1452  fuse_reply_err(req, ENOSYS);
1453 }
1454 
1455 static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1456 {
1457  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1458  struct fuse_file_info fi;
1459 
1460  memset(&fi, 0, sizeof(fi));
1461  fi.flags = arg->flags;
1462  fi.fh = arg->fh;
1463  if (req->se->conn.proto_minor >= 8) {
1464  fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1465  fi.lock_owner = arg->lock_owner;
1466  }
1467  if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1468  fi.flock_release = 1;
1469  fi.lock_owner = arg->lock_owner;
1470  }
1471 
1472  if (req->se->op.release)
1473  req->se->op.release(req, nodeid, &fi);
1474  else
1475  fuse_reply_err(req, 0);
1476 }
1477 
1478 static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1479 {
1480  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1481  struct fuse_file_info fi;
1482  int datasync = arg->fsync_flags & 1;
1483 
1484  memset(&fi, 0, sizeof(fi));
1485  fi.fh = arg->fh;
1486 
1487  if (req->se->op.fsync)
1488  req->se->op.fsync(req, nodeid, datasync, &fi);
1489  else
1490  fuse_reply_err(req, ENOSYS);
1491 }
1492 
1493 static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1494 {
1495  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1496  struct fuse_file_info fi;
1497 
1498  memset(&fi, 0, sizeof(fi));
1499  fi.flags = arg->flags;
1500 
1501  if (req->se->op.opendir)
1502  req->se->op.opendir(req, nodeid, &fi);
1503  else
1504  fuse_reply_open(req, &fi);
1505 }
1506 
1507 static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1508 {
1509  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1510  struct fuse_file_info fi;
1511 
1512  memset(&fi, 0, sizeof(fi));
1513  fi.fh = arg->fh;
1514 
1515  if (req->se->op.readdir)
1516  req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1517  else
1518  fuse_reply_err(req, ENOSYS);
1519 }
1520 
1521 static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1522 {
1523  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1524  struct fuse_file_info fi;
1525 
1526  memset(&fi, 0, sizeof(fi));
1527  fi.fh = arg->fh;
1528 
1529  if (req->se->op.readdirplus)
1530  req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1531  else
1532  fuse_reply_err(req, ENOSYS);
1533 }
1534 
1535 static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1536 {
1537  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1538  struct fuse_file_info fi;
1539 
1540  memset(&fi, 0, sizeof(fi));
1541  fi.flags = arg->flags;
1542  fi.fh = arg->fh;
1543 
1544  if (req->se->op.releasedir)
1545  req->se->op.releasedir(req, nodeid, &fi);
1546  else
1547  fuse_reply_err(req, 0);
1548 }
1549 
1550 static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1551 {
1552  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1553  struct fuse_file_info fi;
1554  int datasync = arg->fsync_flags & 1;
1555 
1556  memset(&fi, 0, sizeof(fi));
1557  fi.fh = arg->fh;
1558 
1559  if (req->se->op.fsyncdir)
1560  req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1561  else
1562  fuse_reply_err(req, ENOSYS);
1563 }
1564 
1565 static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1566 {
1567  (void) nodeid;
1568  (void) inarg;
1569 
1570  if (req->se->op.statfs)
1571  req->se->op.statfs(req, nodeid);
1572  else {
1573  struct statvfs buf = {
1574  .f_namemax = 255,
1575  .f_bsize = 512,
1576  };
1577  fuse_reply_statfs(req, &buf);
1578  }
1579 }
1580 
1581 static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1582 {
1583  struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1584  char *name = PARAM(arg);
1585  char *value = name + strlen(name) + 1;
1586 
1587  if (req->se->op.setxattr)
1588  req->se->op.setxattr(req, nodeid, name, value, arg->size,
1589  arg->flags);
1590  else
1591  fuse_reply_err(req, ENOSYS);
1592 }
1593 
1594 static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1595 {
1596  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1597 
1598  if (req->se->op.getxattr)
1599  req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1600  else
1601  fuse_reply_err(req, ENOSYS);
1602 }
1603 
1604 static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1605 {
1606  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1607 
1608  if (req->se->op.listxattr)
1609  req->se->op.listxattr(req, nodeid, arg->size);
1610  else
1611  fuse_reply_err(req, ENOSYS);
1612 }
1613 
1614 static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1615 {
1616  char *name = (char *) inarg;
1617 
1618  if (req->se->op.removexattr)
1619  req->se->op.removexattr(req, nodeid, name);
1620  else
1621  fuse_reply_err(req, ENOSYS);
1622 }
1623 
1624 static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1625  struct flock *flock)
1626 {
1627  memset(flock, 0, sizeof(struct flock));
1628  flock->l_type = fl->type;
1629  flock->l_whence = SEEK_SET;
1630  flock->l_start = fl->start;
1631  if (fl->end == OFFSET_MAX)
1632  flock->l_len = 0;
1633  else
1634  flock->l_len = fl->end - fl->start + 1;
1635  flock->l_pid = fl->pid;
1636 }
1637 
1638 static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1639 {
1640  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1641  struct fuse_file_info fi;
1642  struct flock flock;
1643 
1644  memset(&fi, 0, sizeof(fi));
1645  fi.fh = arg->fh;
1646  fi.lock_owner = arg->owner;
1647 
1648  convert_fuse_file_lock(&arg->lk, &flock);
1649  if (req->se->op.getlk)
1650  req->se->op.getlk(req, nodeid, &fi, &flock);
1651  else
1652  fuse_reply_err(req, ENOSYS);
1653 }
1654 
1655 static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1656  const void *inarg, int sleep)
1657 {
1658  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1659  struct fuse_file_info fi;
1660  struct flock flock;
1661 
1662  memset(&fi, 0, sizeof(fi));
1663  fi.fh = arg->fh;
1664  fi.lock_owner = arg->owner;
1665 
1666  if (arg->lk_flags & FUSE_LK_FLOCK) {
1667  int op = 0;
1668 
1669  switch (arg->lk.type) {
1670  case F_RDLCK:
1671  op = LOCK_SH;
1672  break;
1673  case F_WRLCK:
1674  op = LOCK_EX;
1675  break;
1676  case F_UNLCK:
1677  op = LOCK_UN;
1678  break;
1679  }
1680  if (!sleep)
1681  op |= LOCK_NB;
1682 
1683  if (req->se->op.flock)
1684  req->se->op.flock(req, nodeid, &fi, op);
1685  else
1686  fuse_reply_err(req, ENOSYS);
1687  } else {
1688  convert_fuse_file_lock(&arg->lk, &flock);
1689  if (req->se->op.setlk)
1690  req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1691  else
1692  fuse_reply_err(req, ENOSYS);
1693  }
1694 }
1695 
1696 static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1697 {
1698  do_setlk_common(req, nodeid, inarg, 0);
1699 }
1700 
1701 static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1702 {
1703  do_setlk_common(req, nodeid, inarg, 1);
1704 }
1705 
1706 static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1707 {
1708  struct fuse_req *curr;
1709 
1710  for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1711  if (curr->unique == req->u.i.unique) {
1712  fuse_interrupt_func_t func;
1713  void *data;
1714 
1715  curr->ctr++;
1716  pthread_mutex_unlock(&se->lock);
1717 
1718  /* Ugh, ugly locking */
1719  pthread_mutex_lock(&curr->lock);
1720  pthread_mutex_lock(&se->lock);
1721  curr->interrupted = 1;
1722  func = curr->u.ni.func;
1723  data = curr->u.ni.data;
1724  pthread_mutex_unlock(&se->lock);
1725  if (func)
1726  func(curr, data);
1727  pthread_mutex_unlock(&curr->lock);
1728 
1729  pthread_mutex_lock(&se->lock);
1730  curr->ctr--;
1731  if (!curr->ctr) {
1732  fuse_chan_put(req->ch);
1733  req->ch = NULL;
1734  destroy_req(curr);
1735  }
1736 
1737  return 1;
1738  }
1739  }
1740  for (curr = se->interrupts.next; curr != &se->interrupts;
1741  curr = curr->next) {
1742  if (curr->u.i.unique == req->u.i.unique)
1743  return 1;
1744  }
1745  return 0;
1746 }
1747 
1748 static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1749 {
1750  struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1751  struct fuse_session *se = req->se;
1752 
1753  (void) nodeid;
1754  if (se->debug)
1755  fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1756  (unsigned long long) arg->unique);
1757 
1758  req->u.i.unique = arg->unique;
1759 
1760  pthread_mutex_lock(&se->lock);
1761  if (find_interrupted(se, req)) {
1762  fuse_chan_put(req->ch);
1763  req->ch = NULL;
1764  destroy_req(req);
1765  } else
1766  list_add_req(req, &se->interrupts);
1767  pthread_mutex_unlock(&se->lock);
1768 }
1769 
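/*
 * Added note (hedged): check_interrupt() is the counterpart of
 * do_interrupt() above.  If an INTERRUPT matching `req` is already
 * queued, the request is flagged as interrupted and NULL is returned;
 * otherwise the oldest queued INTERRUPT is detached and returned so the
 * caller can reprocess it.
 */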
1770 static struct fuse_req *check_interrupt(struct fuse_session *se,
1771  struct fuse_req *req)
1772 {
1773  struct fuse_req *curr;
1774 
1775  for (curr = se->interrupts.next; curr != &se->interrupts;
1776  curr = curr->next) {
1777  if (curr->u.i.unique == req->unique) {
1778  req->interrupted = 1;
1779  list_del_req(curr);
1780  fuse_chan_put(curr->ch);
1781  curr->ch = NULL;
1782  destroy_req(curr);
1783  return NULL;
1784  }
1785  }
1786  curr = se->interrupts.next;
1787  if (curr != &se->interrupts) {
1788  list_del_req(curr);
1789  list_init_req(curr);
1790  return curr;
1791  } else
1792  return NULL;
1793 }
1794 
1795 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1796 {
1797  struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1798 
1799  if (req->se->op.bmap)
1800  req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1801  else
1802  fuse_reply_err(req, ENOSYS);
1803 }
1804 
1805 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1806 {
1807  struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1808  unsigned int flags = arg->flags;
1809  void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1810  struct fuse_file_info fi;
1811 
1812  if (flags & FUSE_IOCTL_DIR &&
1813  !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1814  fuse_reply_err(req, ENOTTY);
1815  return;
1816  }
1817 
1818  memset(&fi, 0, sizeof(fi));
1819  fi.fh = arg->fh;
1820 
1821  if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1822  !(flags & FUSE_IOCTL_32BIT)) {
1823  req->ioctl_64bit = 1;
1824  }
1825 
1826  if (req->se->op.ioctl)
1827  req->se->op.ioctl(req, nodeid, arg->cmd,
1828  (void *)(uintptr_t)arg->arg, &fi, flags,
1829  in_buf, arg->in_size, arg->out_size);
1830  else
1831  fuse_reply_err(req, ENOSYS);
1832 }
1833 
1834 void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1835 {
1836  free(ph);
1837 }
1838 
1839 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1840 {
1841  struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1842  struct fuse_file_info fi;
1843 
1844  memset(&fi, 0, sizeof(fi));
1845  fi.fh = arg->fh;
1846  fi.poll_events = arg->events;
1847 
1848  if (req->se->op.poll) {
1849  struct fuse_pollhandle *ph = NULL;
1850 
1851  if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1852  ph = malloc(sizeof(struct fuse_pollhandle));
1853  if (ph == NULL) {
1854  fuse_reply_err(req, ENOMEM);
1855  return;
1856  }
1857  ph->kh = arg->kh;
1858  ph->se = req->se;
1859  }
1860 
1861  req->se->op.poll(req, nodeid, &fi, ph);
1862  } else {
1863  fuse_reply_err(req, ENOSYS);
1864  }
1865 }
1866 
1867 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1868 {
1869  struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1870  struct fuse_file_info fi;
1871 
1872  memset(&fi, 0, sizeof(fi));
1873  fi.fh = arg->fh;
1874 
1875  if (req->se->op.fallocate)
1876  req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1877  else
1878  fuse_reply_err(req, ENOSYS);
1879 }
1880 
1881 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1882 {
1883  struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1884  struct fuse_file_info fi_in, fi_out;
1885 
1886  memset(&fi_in, 0, sizeof(fi_in));
1887  fi_in.fh = arg->fh_in;
1888 
1889  memset(&fi_out, 0, sizeof(fi_out));
1890  fi_out.fh = arg->fh_out;
1891 
1892 
1893  if (req->se->op.copy_file_range)
1894  req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1895  &fi_in, arg->nodeid_out,
1896  arg->off_out, &fi_out, arg->len,
1897  arg->flags);
1898  else
1899  fuse_reply_err(req, ENOSYS);
1900 }
1901 
1902 static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1903 {
1904  struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1905  struct fuse_file_info fi;
1906 
1907  memset(&fi, 0, sizeof(fi));
1908  fi.fh = arg->fh;
1909 
1910  if (req->se->op.lseek)
1911  req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1912  else
1913  fuse_reply_err(req, ENOSYS);
1914 }
1915 
1916 /* Prevent bogus data races (bogus since "init" is called before
1917  * multi-threading becomes relevant) */
1918 static __attribute__((no_sanitize("thread")))
1919 void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1920 {
1921  struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
1922  struct fuse_init_out outarg;
1923  struct fuse_session *se = req->se;
1924  size_t bufsize = se->bufsize;
1925  size_t outargsize = sizeof(outarg);
1926  uint64_t inargflags = 0;
1927  uint64_t outargflags = 0;
1928  (void) nodeid;
1929  if (se->debug) {
1930  fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
1931  if (arg->major == 7 && arg->minor >= 6) {
1932  fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
1933  fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
1934  arg->max_readahead);
1935  }
1936  }
1937  se->conn.proto_major = arg->major;
1938  se->conn.proto_minor = arg->minor;
1939  se->conn.capable = 0;
1940  se->conn.want = 0;
1941 
1942  memset(&outarg, 0, sizeof(outarg));
1943  outarg.major = FUSE_KERNEL_VERSION;
1944  outarg.minor = FUSE_KERNEL_MINOR_VERSION;
1945 
1946  if (arg->major < 7) {
1947  fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
1948  arg->major, arg->minor);
1949  fuse_reply_err(req, EPROTO);
1950  return;
1951  }
1952 
1953  if (arg->major > 7) {
1954  /* Wait for a second INIT request with a 7.X version */
1955  send_reply_ok(req, &outarg, sizeof(outarg));
1956  return;
1957  }
1958 
1959  if (arg->minor >= 6) {
1960  if (arg->max_readahead < se->conn.max_readahead)
1961  se->conn.max_readahead = arg->max_readahead;
1962  inargflags = arg->flags;
1963  if (inargflags & FUSE_INIT_EXT)
1964  inargflags = inargflags | (uint64_t) arg->flags2 << 32;
1965  if (inargflags & FUSE_ASYNC_READ)
1966  se->conn.capable |= FUSE_CAP_ASYNC_READ;
1967  if (inargflags & FUSE_POSIX_LOCKS)
1968  se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
1969  if (inargflags & FUSE_ATOMIC_O_TRUNC)
1970  se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
1971  if (inargflags & FUSE_EXPORT_SUPPORT)
1972  se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
1973  if (inargflags & FUSE_DONT_MASK)
1974  se->conn.capable |= FUSE_CAP_DONT_MASK;
1975  if (inargflags & FUSE_FLOCK_LOCKS)
1976  se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
1977  if (inargflags & FUSE_AUTO_INVAL_DATA)
1978  se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
1979  if (inargflags & FUSE_DO_READDIRPLUS)
1980  se->conn.capable |= FUSE_CAP_READDIRPLUS;
1981  if (inargflags & FUSE_READDIRPLUS_AUTO)
1982  se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
1983  if (inargflags & FUSE_ASYNC_DIO)
1984  se->conn.capable |= FUSE_CAP_ASYNC_DIO;
1985  if (inargflags & FUSE_WRITEBACK_CACHE)
1986  se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
1987  if (inargflags & FUSE_NO_OPEN_SUPPORT)
1988  se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
1989  if (inargflags & FUSE_PARALLEL_DIROPS)
1990  se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
1991  if (inargflags & FUSE_POSIX_ACL)
1992  se->conn.capable |= FUSE_CAP_POSIX_ACL;
1993  if (inargflags & FUSE_HANDLE_KILLPRIV)
1994  se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
1995  if (inargflags & FUSE_CACHE_SYMLINKS)
1996  se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
1997  if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
1998  se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
1999  if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2000  se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2001  if (!(inargflags & FUSE_MAX_PAGES)) {
2002  size_t max_bufsize =
2003  FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2004  + FUSE_BUFFER_HEADER_SIZE;
2005  if (bufsize > max_bufsize) {
2006  bufsize = max_bufsize;
2007  }
2008  }
2009  if (arg->minor >= 38)
2010  se->conn.capable |= FUSE_CAP_EXPIRE_ONLY;
2011  } else {
2012  se->conn.max_readahead = 0;
2013  }
2014 
2015  if (se->conn.proto_minor >= 14) {
2016 #ifdef HAVE_SPLICE
2017 #ifdef HAVE_VMSPLICE
2018  if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2019  se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
2020  }
2021 #endif
2022  if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2023  se->conn.capable |= FUSE_CAP_SPLICE_READ;
2024  }
2025 #endif
2026  }
2027  if (se->conn.proto_minor >= 18)
2028  se->conn.capable |= FUSE_CAP_IOCTL_DIR;
2029 
2030  /* Default settings for modern filesystems.
2031  *
2032  * Most of these capabilities were disabled by default in
2033  * libfuse2 for backwards compatibility reasons. In libfuse3,
2034  * we can finally enable them by default (as long as they're
2035  * supported by the kernel).
2036  */
2037 #define LL_SET_DEFAULT(cond, cap) \
2038  if ((cond) && (se->conn.capable & (cap))) \
2039  se->conn.want |= (cap)
2040  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2041  LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
2042  LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2043  LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
2044  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2045  LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2046  LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2047  LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2048  LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2049         FUSE_CAP_POSIX_LOCKS);
2050  LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2051  LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2052  LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2053         FUSE_CAP_READDIRPLUS_AUTO);
2054  se->conn.time_gran = 1;
2055 
2056  if (bufsize < FUSE_MIN_READ_BUFFER) {
2057  fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2058  bufsize);
2059  bufsize = FUSE_MIN_READ_BUFFER;
2060  }
2061  se->bufsize = bufsize;
2062 
2063  if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2064  se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2065 
2066  se->got_init = 1;
2067  if (se->op.init)
2068  se->op.init(se->userdata, &se->conn);
2069 
2070  if (se->conn.want & (~se->conn.capable)) {
2071  fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2072  "0x%x that are not supported by kernel, aborting.\n",
2073  se->conn.want & (~se->conn.capable));
2074  fuse_reply_err(req, EPROTO);
2075  se->error = -EPROTO;
2076  fuse_session_exit(se);
2077  return;
2078  }
2079 
2080  unsigned max_read_mo = get_max_read(se->mo);
2081  if (se->conn.max_read != max_read_mo) {
2082  fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2083  "requested different maximum read size (%u vs %u)\n",
2084  se->conn.max_read, max_read_mo);
2085  fuse_reply_err(req, EPROTO);
2086  se->error = -EPROTO;
2087  fuse_session_exit(se);
2088  return;
2089  }
2090 
2091  if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2092  se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2093  }
2094  if (arg->flags & FUSE_MAX_PAGES) {
2095  outarg.flags |= FUSE_MAX_PAGES;
2096  outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2097  }
2098  outargflags = outarg.flags;
2099  /* Always enable big writes, this is superseded
2100  by the max_write option */
2101  outargflags |= FUSE_BIG_WRITES;
2102 
2103  if (se->conn.want & FUSE_CAP_ASYNC_READ)
2104  outargflags |= FUSE_ASYNC_READ;
2105  if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2106  outargflags |= FUSE_POSIX_LOCKS;
2107  if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2108  outargflags |= FUSE_ATOMIC_O_TRUNC;
2109  if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2110  outargflags |= FUSE_EXPORT_SUPPORT;
2111  if (se->conn.want & FUSE_CAP_DONT_MASK)
2112  outargflags |= FUSE_DONT_MASK;
2113  if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2114  outargflags |= FUSE_FLOCK_LOCKS;
2115  if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2116  outargflags |= FUSE_AUTO_INVAL_DATA;
2117  if (se->conn.want & FUSE_CAP_READDIRPLUS)
2118  outargflags |= FUSE_DO_READDIRPLUS;
2119  if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2120  outargflags |= FUSE_READDIRPLUS_AUTO;
2121  if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2122  outargflags |= FUSE_ASYNC_DIO;
2123  if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2124  outargflags |= FUSE_WRITEBACK_CACHE;
2125  if (se->conn.want & FUSE_CAP_POSIX_ACL)
2126  outargflags |= FUSE_POSIX_ACL;
2127  if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
2128  outargflags |= FUSE_CACHE_SYMLINKS;
2129  if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
2130  outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2131 
2132  if (inargflags & FUSE_INIT_EXT) {
2133  outargflags |= FUSE_INIT_EXT;
2134  outarg.flags2 = outargflags >> 32;
2135  }
2136 
2137  outarg.flags = outargflags;
2138 
2139  outarg.max_readahead = se->conn.max_readahead;
2140  outarg.max_write = se->conn.max_write;
2141  if (se->conn.proto_minor >= 13) {
2142  if (se->conn.max_background >= (1 << 16))
2143  se->conn.max_background = (1 << 16) - 1;
2144  if (se->conn.congestion_threshold > se->conn.max_background)
2145  se->conn.congestion_threshold = se->conn.max_background;
2146  if (!se->conn.congestion_threshold) {
2147  se->conn.congestion_threshold =
2148  se->conn.max_background * 3 / 4;
2149  }
2150 
2151  outarg.max_background = se->conn.max_background;
2152  outarg.congestion_threshold = se->conn.congestion_threshold;
2153  }
2154  if (se->conn.proto_minor >= 23)
2155  outarg.time_gran = se->conn.time_gran;
2156 
2157  if (se->debug) {
2158  fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2159  fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2160  fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2161  outarg.max_readahead);
2162  fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2163  fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2164  outarg.max_background);
2165  fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2166  outarg.congestion_threshold);
2167  fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2168  outarg.time_gran);
2169  }
2170  if (arg->minor < 5)
2171  outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2172  else if (arg->minor < 23)
2173  outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2174 
2175  send_reply_ok(req, &outarg, outargsize);
2176 }
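
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): how a
 * filesystem's init() callback usually negotiates optional features
 * with the checks that do_init() performs above.  Capabilities may
 * only be added to conn->want if do_init() reported them in
 * conn->capable, otherwise the session is aborted with EPROTO.
 * "example_init" is a hypothetical name; assumes <fuse_lowlevel.h>.
 */
static void example_init(void *userdata, struct fuse_conn_info *conn)
{
        (void) userdata;

        /* Opt into writeback caching only if the kernel offers it */
        if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
                conn->want |= FUSE_CAP_WRITEBACK_CACHE;

        /* Drop a default that do_init() may have enabled */
        conn->want &= ~FUSE_CAP_ASYNC_READ;
}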
2177 
2178 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2179 {
2180  struct fuse_session *se = req->se;
2181 
2182  (void) nodeid;
2183  (void) inarg;
2184 
2185  se->got_destroy = 1;
2186  if (se->op.destroy)
2187  se->op.destroy(se->userdata);
2188 
2189  send_reply_ok(req, NULL, 0);
2190 }
2191 
2192 static void list_del_nreq(struct fuse_notify_req *nreq)
2193 {
2194  struct fuse_notify_req *prev = nreq->prev;
2195  struct fuse_notify_req *next = nreq->next;
2196  prev->next = next;
2197  next->prev = prev;
2198 }
2199 
2200 static void list_add_nreq(struct fuse_notify_req *nreq,
2201  struct fuse_notify_req *next)
2202 {
2203  struct fuse_notify_req *prev = next->prev;
2204  nreq->next = next;
2205  nreq->prev = prev;
2206  prev->next = nreq;
2207  next->prev = nreq;
2208 }
2209 
2210 static void list_init_nreq(struct fuse_notify_req *nreq)
2211 {
2212  nreq->next = nreq;
2213  nreq->prev = nreq;
2214 }
2215 
2216 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2217  const void *inarg, const struct fuse_buf *buf)
2218 {
2219  struct fuse_session *se = req->se;
2220  struct fuse_notify_req *nreq;
2221  struct fuse_notify_req *head;
2222 
2223  pthread_mutex_lock(&se->lock);
2224  head = &se->notify_list;
2225  for (nreq = head->next; nreq != head; nreq = nreq->next) {
2226  if (nreq->unique == req->unique) {
2227  list_del_nreq(nreq);
2228  break;
2229  }
2230  }
2231  pthread_mutex_unlock(&se->lock);
2232 
2233  if (nreq != head)
2234  nreq->reply(nreq, req, nodeid, inarg, buf);
2235 }
2236 
2237 static int send_notify_iov(struct fuse_session *se, int notify_code,
2238  struct iovec *iov, int count)
2239 {
2240  struct fuse_out_header out;
2241 
2242  if (!se->got_init)
2243  return -ENOTCONN;
2244 
2245  out.unique = 0;
2246  out.error = notify_code;
2247  iov[0].iov_base = &out;
2248  iov[0].iov_len = sizeof(struct fuse_out_header);
2249 
2250  return fuse_send_msg(se, NULL, iov, count);
2251 }
2252 
2253 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2254 {
2255  if (ph != NULL) {
2256  struct fuse_notify_poll_wakeup_out outarg;
2257  struct iovec iov[2];
2258 
2259  outarg.kh = ph->kh;
2260 
2261  iov[1].iov_base = &outarg;
2262  iov[1].iov_len = sizeof(outarg);
2263 
2264  return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2265  } else {
2266  return 0;
2267  }
2268 }
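
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): the
 * intended use of fuse_lowlevel_notify_poll().  The poll() request
 * handler stores the pollhandle and replies with the currently ready
 * events; once the file later becomes ready, the filesystem wakes the
 * kernel through the stored handle.  "saved_ph", "example_poll" and
 * "example_data_ready" are hypothetical; a real filesystem keeps one
 * handle per polled file, protects it with a lock, and destroys it
 * with fuse_pollhandle_destroy() when it is no longer needed.
 */
static struct fuse_pollhandle *saved_ph;

static void example_poll(fuse_req_t req, fuse_ino_t ino,
                         struct fuse_file_info *fi, struct fuse_pollhandle *ph)
{
        (void) ino; (void) fi;

        if (ph != NULL) {
                if (saved_ph != NULL)
                        fuse_pollhandle_destroy(saved_ph);
                saved_ph = ph;          /* keep it for a later wakeup */
        }
        fuse_reply_poll(req, 0);        /* nothing ready yet */
}

static void example_data_ready(void)
{
        if (saved_ph != NULL)
                fuse_lowlevel_notify_poll(saved_ph);
}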
2269 
2270 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2271  off_t off, off_t len)
2272 {
2273  struct fuse_notify_inval_inode_out outarg;
2274  struct iovec iov[2];
2275 
2276  if (!se)
2277  return -EINVAL;
2278 
2279  if (se->conn.proto_minor < 12)
2280  return -ENOSYS;
2281 
2282  outarg.ino = ino;
2283  outarg.off = off;
2284  outarg.len = len;
2285 
2286  iov[1].iov_base = &outarg;
2287  iov[1].iov_len = sizeof(outarg);
2288 
2289  return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2290 }
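
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): telling
 * the kernel to drop cached data after an inode changed behind its
 * back, e.g. because a backend server rewrote the file.  Passing
 * off=0 and len=0 invalidates the whole cached range; -ENOENT from
 * the kernel only means it had nothing cached for this inode.
 */
static void example_file_changed(struct fuse_session *se, fuse_ino_t ino)
{
        int err = fuse_lowlevel_notify_inval_inode(se, ino, 0, 0);

        if (err && err != -ENOENT)
                fuse_log(FUSE_LOG_ERR, "inval_inode: %s\n", strerror(-err));
}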
2291 
2292 int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2293  const char *name, size_t namelen,
2294  enum fuse_expire_flags flags)
2295 {
2296  struct fuse_notify_inval_entry_out outarg;
2297  struct iovec iov[3];
2298 
2299  if (!se)
2300  return -EINVAL;
2301 
2302  if (se->conn.proto_minor < 12)
2303  return -ENOSYS;
2304 
2305  outarg.parent = parent;
2306  outarg.namelen = namelen;
2307  outarg.flags = 0;
2308  if (flags & FUSE_LL_EXPIRE_ONLY)
2309  outarg.flags |= FUSE_EXPIRE_ONLY;
2310 
2311  iov[1].iov_base = &outarg;
2312  iov[1].iov_len = sizeof(outarg);
2313  iov[2].iov_base = (void *)name;
2314  iov[2].iov_len = namelen + 1;
2315 
2316  return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2317 }
2318 
2319 int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2320  const char *name, size_t namelen)
2321 {
2322  return fuse_lowlevel_notify_expire_entry(se, parent, name, namelen, 0);
2323 }
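
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): the
 * difference between the two entry notifications above.  A real
 * filesystem would pick one of them: notify_inval_entry() drops the
 * cached dentry outright, while notify_expire_entry() with
 * FUSE_LL_EXPIRE_ONLY (offered by the kernel from protocol 7.38 on,
 * see FUSE_CAP_EXPIRE_ONLY in do_init()) only marks it expired so the
 * next lookup revalidates it.
 */
static void example_name_changed(struct fuse_session *se, fuse_ino_t parent,
                                 const char *name)
{
        /* Hard invalidation: the dentry is removed from the cache ... */
        fuse_lowlevel_notify_inval_entry(se, parent, name, strlen(name));

        /* ... or soft expiry: the dentry stays but gets revalidated. */
        fuse_lowlevel_notify_expire_entry(se, parent, name, strlen(name),
                                          FUSE_LL_EXPIRE_ONLY);
}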
2324 
2325 
2326 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2327  fuse_ino_t parent, fuse_ino_t child,
2328  const char *name, size_t namelen)
2329 {
2330  struct fuse_notify_delete_out outarg;
2331  struct iovec iov[3];
2332 
2333  if (!se)
2334  return -EINVAL;
2335 
2336  if (se->conn.proto_minor < 18)
2337  return -ENOSYS;
2338 
2339  outarg.parent = parent;
2340  outarg.child = child;
2341  outarg.namelen = namelen;
2342  outarg.padding = 0;
2343 
2344  iov[1].iov_base = &outarg;
2345  iov[1].iov_len = sizeof(outarg);
2346  iov[2].iov_base = (void *)name;
2347  iov[2].iov_len = namelen + 1;
2348 
2349  return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2350 }
2351 
2352 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2353  off_t offset, struct fuse_bufvec *bufv,
2354  enum fuse_buf_copy_flags flags)
2355 {
2356  struct fuse_out_header out;
2357  struct fuse_notify_store_out outarg;
2358  struct iovec iov[3];
2359  size_t size = fuse_buf_size(bufv);
2360  int res;
2361 
2362  if (!se)
2363  return -EINVAL;
2364 
2365  if (se->conn.proto_minor < 15)
2366  return -ENOSYS;
2367 
2368  out.unique = 0;
2369  out.error = FUSE_NOTIFY_STORE;
2370 
2371  outarg.nodeid = ino;
2372  outarg.offset = offset;
2373  outarg.size = size;
2374  outarg.padding = 0;
2375 
2376  iov[0].iov_base = &out;
2377  iov[0].iov_len = sizeof(out);
2378  iov[1].iov_base = &outarg;
2379  iov[1].iov_len = sizeof(outarg);
2380 
2381  res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2382  if (res > 0)
2383  res = -res;
2384 
2385  return res;
2386 }
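
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): pushing
 * freshly produced file contents into the kernel page cache with
 * fuse_lowlevel_notify_store().  FUSE_BUFVEC_INIT() comes from
 * fuse_common.h; "data" and "len" are assumptions standing in for the
 * filesystem's own buffer.
 */
static int example_push_to_cache(struct fuse_session *se, fuse_ino_t ino,
                                 off_t offset, char *data, size_t len)
{
        struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(len);

        bufv.buf[0].mem = data;

        return fuse_lowlevel_notify_store(se, ino, offset, &bufv, 0);
}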
2387 
2388 struct fuse_retrieve_req {
2389  struct fuse_notify_req nreq;
2390  void *cookie;
2391 };
2392 
2393 static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2394  fuse_req_t req, fuse_ino_t ino,
2395  const void *inarg,
2396  const struct fuse_buf *ibuf)
2397 {
2398  struct fuse_session *se = req->se;
2399  struct fuse_retrieve_req *rreq =
2400  container_of(nreq, struct fuse_retrieve_req, nreq);
2401  const struct fuse_notify_retrieve_in *arg = inarg;
2402  struct fuse_bufvec bufv = {
2403  .buf[0] = *ibuf,
2404  .count = 1,
2405  };
2406 
2407  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2408  bufv.buf[0].mem = PARAM(arg);
2409 
2410  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2411  sizeof(struct fuse_notify_retrieve_in);
2412 
2413  if (bufv.buf[0].size < arg->size) {
2414  fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2415  fuse_reply_none(req);
2416  goto out;
2417  }
2418  bufv.buf[0].size = arg->size;
2419 
2420  if (se->op.retrieve_reply) {
2421  se->op.retrieve_reply(req, rreq->cookie, ino,
2422  arg->offset, &bufv);
2423  } else {
2424  fuse_reply_none(req);
2425  }
2426 out:
2427  free(rreq);
2428  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2429  fuse_ll_clear_pipe(se);
2430 }
2431 
2432 int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2433  size_t size, off_t offset, void *cookie)
2434 {
2435  struct fuse_notify_retrieve_out outarg;
2436  struct iovec iov[2];
2437  struct fuse_retrieve_req *rreq;
2438  int err;
2439 
2440  if (!se)
2441  return -EINVAL;
2442 
2443  if (se->conn.proto_minor < 15)
2444  return -ENOSYS;
2445 
2446  rreq = malloc(sizeof(*rreq));
2447  if (rreq == NULL)
2448  return -ENOMEM;
2449 
2450  pthread_mutex_lock(&se->lock);
2451  rreq->cookie = cookie;
2452  rreq->nreq.unique = se->notify_ctr++;
2453  rreq->nreq.reply = fuse_ll_retrieve_reply;
2454  list_add_nreq(&rreq->nreq, &se->notify_list);
2455  pthread_mutex_unlock(&se->lock);
2456 
2457  outarg.notify_unique = rreq->nreq.unique;
2458  outarg.nodeid = ino;
2459  outarg.offset = offset;
2460  outarg.size = size;
2461  outarg.padding = 0;
2462 
2463  iov[1].iov_base = &outarg;
2464  iov[1].iov_len = sizeof(outarg);
2465 
2466  err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2467  if (err) {
2468  pthread_mutex_lock(&se->lock);
2469  list_del_nreq(&rreq->nreq);
2470  pthread_mutex_unlock(&se->lock);
2471  free(rreq);
2472  }
2473 
2474  return err;
2475 }
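
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): the two
 * halves of the retrieve mechanism.  The filesystem asks the kernel
 * for cached pages with fuse_lowlevel_notify_retrieve(); the data
 * arrives asynchronously through the retrieve_reply() operation,
 * together with the cookie passed here.  The only valid reply in
 * retrieve_reply() is fuse_reply_none().  Names prefixed "example_"
 * are hypothetical.
 */
static void example_retrieve_reply(fuse_req_t req, void *cookie,
                                   fuse_ino_t ino, off_t offset,
                                   struct fuse_bufvec *bufv)
{
        (void) cookie; (void) ino; (void) offset;

        fuse_log(FUSE_LOG_DEBUG, "kernel returned %zu cached bytes\n",
                 fuse_buf_size(bufv));
        fuse_reply_none(req);
}

static void example_ask_for_cache(struct fuse_session *se, fuse_ino_t ino)
{
        /* Ask for up to 4096 bytes of cached data starting at offset 0;
           NULL is this sketch's (unused) cookie. */
        fuse_lowlevel_notify_retrieve(se, ino, 4096, 0, NULL);
}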
2476 
2477 void *fuse_req_userdata(fuse_req_t req)
2478 {
2479  return req->se->userdata;
2480 }
2481 
2482 const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2483 {
2484  return &req->ctx;
2485 }
2486 
2487 void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2488  void *data)
2489 {
2490  pthread_mutex_lock(&req->lock);
2491  pthread_mutex_lock(&req->se->lock);
2492  req->u.ni.func = func;
2493  req->u.ni.data = data;
2494  pthread_mutex_unlock(&req->se->lock);
2495  if (req->interrupted && func)
2496  func(req, data);
2497  pthread_mutex_unlock(&req->lock);
2498 }
2499 
2500 int fuse_req_interrupted(fuse_req_t req)
2501 {
2502  int interrupted;
2503 
2504  pthread_mutex_lock(&req->se->lock);
2505  interrupted = req->interrupted;
2506  pthread_mutex_unlock(&req->se->lock);
2507 
2508  return interrupted;
2509 }
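
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): a slow
 * request handler cooperating with interrupts by polling
 * fuse_req_interrupted() (the callback variant would instead register
 * a function with fuse_req_interrupt_func()).  Replying EINTR is the
 * conventional reaction to an interrupted request.  The loop below is
 * a stand-in for real work; a data-carrying operation would finish
 * with fuse_reply_buf() or similar instead of fuse_reply_err(req, 0).
 */
static void example_slow_handler(fuse_req_t req)
{
        int i;

        for (i = 0; i < 100; i++) {     /* pretend this takes a while */
                if (fuse_req_interrupted(req)) {
                        fuse_reply_err(req, EINTR);
                        return;
                }
                usleep(10 * 1000);
        }
        fuse_reply_err(req, 0);         /* success, no data */
}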
2510 
2511 static struct {
2512  void (*func)(fuse_req_t, fuse_ino_t, const void *);
2513  const char *name;
2514 } fuse_ll_ops[] = {
2515  [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2516  [FUSE_FORGET] = { do_forget, "FORGET" },
2517  [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2518  [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2519  [FUSE_READLINK] = { do_readlink, "READLINK" },
2520  [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2521  [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2522  [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2523  [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2524  [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2525  [FUSE_RENAME] = { do_rename, "RENAME" },
2526  [FUSE_LINK] = { do_link, "LINK" },
2527  [FUSE_OPEN] = { do_open, "OPEN" },
2528  [FUSE_READ] = { do_read, "READ" },
2529  [FUSE_WRITE] = { do_write, "WRITE" },
2530  [FUSE_STATFS] = { do_statfs, "STATFS" },
2531  [FUSE_RELEASE] = { do_release, "RELEASE" },
2532  [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2533  [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2534  [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2535  [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2536  [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2537  [FUSE_FLUSH] = { do_flush, "FLUSH" },
2538  [FUSE_INIT] = { do_init, "INIT" },
2539  [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2540  [FUSE_READDIR] = { do_readdir, "READDIR" },
2541  [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2542  [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2543  [FUSE_GETLK] = { do_getlk, "GETLK" },
2544  [FUSE_SETLK] = { do_setlk, "SETLK" },
2545  [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2546  [FUSE_ACCESS] = { do_access, "ACCESS" },
2547  [FUSE_CREATE] = { do_create, "CREATE" },
2548  [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2549  [FUSE_BMAP] = { do_bmap, "BMAP" },
2550  [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2551  [FUSE_POLL] = { do_poll, "POLL" },
2552  [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2553  [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2554  [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2555  [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2556  [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2557  [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2558  [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2559  [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2560  [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2561 };
2562 
2563 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2564 
2565 static const char *opname(enum fuse_opcode opcode)
2566 {
2567  if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2568  return "???";
2569  else
2570  return fuse_ll_ops[opcode].name;
2571 }
2572 
2573 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2574  struct fuse_bufvec *src)
2575 {
2576  ssize_t res = fuse_buf_copy(dst, src, 0);
2577  if (res < 0) {
2578  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2579  return res;
2580  }
2581  if ((size_t)res < fuse_buf_size(dst)) {
2582  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2583  return -1;
2584  }
2585  return 0;
2586 }
2587 
2588 void fuse_session_process_buf(struct fuse_session *se,
2589  const struct fuse_buf *buf)
2590 {
2591  fuse_session_process_buf_int(se, buf, NULL);
2592 }
2593 
2594 void fuse_session_process_buf_int(struct fuse_session *se,
2595  const struct fuse_buf *buf, struct fuse_chan *ch)
2596 {
2597  const size_t write_header_size = sizeof(struct fuse_in_header) +
2598  sizeof(struct fuse_write_in);
2599  struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2600  struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2601  struct fuse_in_header *in;
2602  const void *inarg;
2603  struct fuse_req *req;
2604  void *mbuf = NULL;
2605  int err;
2606  int res;
2607 
2608  if (buf->flags & FUSE_BUF_IS_FD) {
2609  if (buf->size < tmpbuf.buf[0].size)
2610  tmpbuf.buf[0].size = buf->size;
2611 
2612  mbuf = malloc(tmpbuf.buf[0].size);
2613  if (mbuf == NULL) {
2614  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2615  goto clear_pipe;
2616  }
2617  tmpbuf.buf[0].mem = mbuf;
2618 
2619  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2620  if (res < 0)
2621  goto clear_pipe;
2622 
2623  in = mbuf;
2624  } else {
2625  in = buf->mem;
2626  }
2627 
2628  if (se->debug) {
2629  fuse_log(FUSE_LOG_DEBUG,
2630  "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2631  (unsigned long long) in->unique,
2632  opname((enum fuse_opcode) in->opcode), in->opcode,
2633  (unsigned long long) in->nodeid, buf->size, in->pid);
2634  }
2635 
2636  req = fuse_ll_alloc_req(se);
2637  if (req == NULL) {
2638  struct fuse_out_header out = {
2639  .unique = in->unique,
2640  .error = -ENOMEM,
2641  };
2642  struct iovec iov = {
2643  .iov_base = &out,
2644  .iov_len = sizeof(struct fuse_out_header),
2645  };
2646 
2647  fuse_send_msg(se, ch, &iov, 1);
2648  goto clear_pipe;
2649  }
2650 
2651  req->unique = in->unique;
2652  req->ctx.uid = in->uid;
2653  req->ctx.gid = in->gid;
2654  req->ctx.pid = in->pid;
2655  req->ch = ch ? fuse_chan_get(ch) : NULL;
2656 
2657  err = EIO;
2658  if (!se->got_init) {
2659  enum fuse_opcode expected;
2660 
2661  expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2662  if (in->opcode != expected)
2663  goto reply_err;
2664  } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2665  goto reply_err;
2666 
2667  err = EACCES;
2668  /* Implement -o allow_root */
2669  if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2670  in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2671  in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2672  in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2673  in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2674  in->opcode != FUSE_NOTIFY_REPLY &&
2675  in->opcode != FUSE_READDIRPLUS)
2676  goto reply_err;
2677 
2678  err = ENOSYS;
2679  if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2680  goto reply_err;
2681  if (in->opcode != FUSE_INTERRUPT) {
2682  struct fuse_req *intr;
2683  pthread_mutex_lock(&se->lock);
2684  intr = check_interrupt(se, req);
2685  list_add_req(req, &se->list);
2686  pthread_mutex_unlock(&se->lock);
2687  if (intr)
2688  fuse_reply_err(intr, EAGAIN);
2689  }
2690 
2691  if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2692  (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2693  in->opcode != FUSE_NOTIFY_REPLY) {
2694  void *newmbuf;
2695 
2696  err = ENOMEM;
2697  newmbuf = realloc(mbuf, buf->size);
2698  if (newmbuf == NULL)
2699  goto reply_err;
2700  mbuf = newmbuf;
2701 
2702  tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2703  tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2704 
2705  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2706  err = -res;
2707  if (res < 0)
2708  goto reply_err;
2709 
2710  in = mbuf;
2711  }
2712 
2713  inarg = (void *) &in[1];
2714  if (in->opcode == FUSE_WRITE && se->op.write_buf)
2715  do_write_buf(req, in->nodeid, inarg, buf);
2716  else if (in->opcode == FUSE_NOTIFY_REPLY)
2717  do_notify_reply(req, in->nodeid, inarg, buf);
2718  else
2719  fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2720 
2721 out_free:
2722  free(mbuf);
2723  return;
2724 
2725 reply_err:
2726  fuse_reply_err(req, err);
2727 clear_pipe:
2728  if (buf->flags & FUSE_BUF_IS_FD)
2729  fuse_ll_clear_pipe(se);
2730  goto out_free;
2731 }
2732 
2733 #define LL_OPTION(n,o,v) \
2734  { n, offsetof(struct fuse_session, o), v }
2735 
2736 static const struct fuse_opt fuse_ll_opts[] = {
2737  LL_OPTION("debug", debug, 1),
2738  LL_OPTION("-d", debug, 1),
2739  LL_OPTION("--debug", debug, 1),
2740  LL_OPTION("allow_root", deny_others, 1),
2741  FUSE_OPT_END
2742 };
2743 
2744 void fuse_lowlevel_version(void)
2745 {
2746  printf("using FUSE kernel interface version %i.%i\n",
2747  FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2748  fuse_mount_version();
2749 }
2750 
2751 void fuse_lowlevel_help(void)
2752 {
2753  /* These are not all options, but the ones that are
2754  potentially of interest to an end-user */
2755  printf(
2756 " -o allow_other allow access by all users\n"
2757 " -o allow_root allow access by root\n"
2758 " -o auto_unmount auto unmount on process termination\n");
2759 }
2760 
2761 void fuse_session_destroy(struct fuse_session *se)
2762 {
2763  struct fuse_ll_pipe *llp;
2764 
2765  if (se->got_init && !se->got_destroy) {
2766  if (se->op.destroy)
2767  se->op.destroy(se->userdata);
2768  }
2769  llp = pthread_getspecific(se->pipe_key);
2770  if (llp != NULL)
2771  fuse_ll_pipe_free(llp);
2772  pthread_key_delete(se->pipe_key);
2773  pthread_mutex_destroy(&se->lock);
2774  free(se->cuse_data);
2775  if (se->fd != -1)
2776  close(se->fd);
2777  if (se->io != NULL)
2778  free(se->io);
2779  destroy_mount_opts(se->mo);
2780  free(se);
2781 }
2782 
2783 
2784 static void fuse_ll_pipe_destructor(void *data)
2785 {
2786  struct fuse_ll_pipe *llp = data;
2787  fuse_ll_pipe_free(llp);
2788 }
2789 
2790 int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
2791 {
2792  return fuse_session_receive_buf_int(se, buf, NULL);
2793 }
2794 
2795 int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
2796  struct fuse_chan *ch)
2797 {
2798  int err;
2799  ssize_t res;
2800 #ifdef HAVE_SPLICE
2801  size_t bufsize = se->bufsize;
2802  struct fuse_ll_pipe *llp;
2803  struct fuse_buf tmpbuf;
2804 
2805  if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
2806  goto fallback;
2807 
2808  llp = fuse_ll_get_pipe(se);
2809  if (llp == NULL)
2810  goto fallback;
2811 
2812  if (llp->size < bufsize) {
2813  if (llp->can_grow) {
2814  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
2815  if (res == -1) {
2816  llp->can_grow = 0;
2817  res = grow_pipe_to_max(llp->pipe[0]);
2818  if (res > 0)
2819  llp->size = res;
2820  goto fallback;
2821  }
2822  llp->size = res;
2823  }
2824  if (llp->size < bufsize)
2825  goto fallback;
2826  }
2827 
2828  if (se->io != NULL && se->io->splice_receive != NULL) {
2829  res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
2830  llp->pipe[1], NULL, bufsize, 0,
2831  se->userdata);
2832  } else {
2833  res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
2834  bufsize, 0);
2835  }
2836  err = errno;
2837 
2838  if (fuse_session_exited(se))
2839  return 0;
2840 
2841  if (res == -1) {
2842  if (err == ENODEV) {
2843  /* Filesystem was unmounted, or connection was aborted
2844  via /sys/fs/fuse/connections */
2845  fuse_session_exit(se);
2846  return 0;
2847  }
2848  if (err != EINTR && err != EAGAIN)
2849  perror("fuse: splice from device");
2850  return -err;
2851  }
2852 
2853  if (res < sizeof(struct fuse_in_header)) {
2854  fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
2855  return -EIO;
2856  }
2857 
2858  tmpbuf = (struct fuse_buf) {
2859  .size = res,
2860  .flags = FUSE_BUF_IS_FD,
2861  .fd = llp->pipe[0],
2862  };
2863 
2864  /*
2865  * Don't bother with zero copy for small requests.
2866  * fuse_loop_mt() needs to check for FORGET, so this is more than
2867  * just an optimization.
2868  */
2869  if (res < sizeof(struct fuse_in_header) +
2870  sizeof(struct fuse_write_in) + pagesize) {
2871  struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
2872  struct fuse_bufvec dst = { .count = 1 };
2873 
2874  if (!buf->mem) {
2875  buf->mem = malloc(se->bufsize);
2876  if (!buf->mem) {
2877  fuse_log(FUSE_LOG_ERR,
2878  "fuse: failed to allocate read buffer\n");
2879  return -ENOMEM;
2880  }
2881  }
2882  buf->size = se->bufsize;
2883  buf->flags = 0;
2884  dst.buf[0] = *buf;
2885 
2886  res = fuse_buf_copy(&dst, &src, 0);
2887  if (res < 0) {
2888  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
2889  strerror(-res));
2890  fuse_ll_clear_pipe(se);
2891  return res;
2892  }
2893  if (res < tmpbuf.size) {
2894  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2895  fuse_ll_clear_pipe(se);
2896  return -EIO;
2897  }
2898  assert(res == tmpbuf.size);
2899 
2900  } else {
2901  /* Don't overwrite buf->mem, as that would cause a leak */
2902  buf->fd = tmpbuf.fd;
2903  buf->flags = tmpbuf.flags;
2904  }
2905  buf->size = tmpbuf.size;
2906 
2907  return res;
2908 
2909 fallback:
2910 #endif
2911  if (!buf->mem) {
2912  buf->mem = malloc(se->bufsize);
2913  if (!buf->mem) {
2914  fuse_log(FUSE_LOG_ERR,
2915  "fuse: failed to allocate read buffer\n");
2916  return -ENOMEM;
2917  }
2918  }
2919 
2920 restart:
2921  if (se->io != NULL) {
2922  /* se->io->read is never NULL if se->io is not NULL as
2923  specified by fuse_session_custom_io() */
2924  res = se->io->read(ch ? ch->fd : se->fd, buf->mem, se->bufsize,
2925  se->userdata);
2926  } else {
2927  res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
2928  }
2929  err = errno;
2930 
2931  if (fuse_session_exited(se))
2932  return 0;
2933  if (res == -1) {
2934  /* ENOENT means the operation was interrupted, it's safe
2935  to restart */
2936  if (err == ENOENT)
2937  goto restart;
2938 
2939  if (err == ENODEV) {
2940  /* Filesystem was unmounted, or connection was aborted
2941  via /sys/fs/fuse/connections */
2942  fuse_session_exit(se);
2943  return 0;
2944  }
2945  /* Errors occurring during normal operation: EINTR (read
2946  interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
2947  umounted) */
2948  if (err != EINTR && err != EAGAIN)
2949  perror("fuse: reading device");
2950  return -err;
2951  }
2952  if ((size_t) res < sizeof(struct fuse_in_header)) {
2953  fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
2954  return -EIO;
2955  }
2956 
2957  buf->size = res;
2958 
2959  return res;
2960 }
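
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): how the
 * receive/process split above is driven.  This is roughly what
 * fuse_session_loop() in fuse_loop.c does; a custom single-threaded
 * event loop needs little more than these two calls per request.
 */
static int example_session_loop(struct fuse_session *se)
{
        struct fuse_buf buf = { .mem = NULL };
        int res = 0;

        while (!fuse_session_exited(se)) {
                res = fuse_session_receive_buf(se, &buf);
                if (res == -EINTR)
                        continue;
                if (res <= 0)
                        break;
                fuse_session_process_buf(se, &buf);
        }

        free(buf.mem);
        return res < 0 ? res : 0;
}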
2961 
2962 struct fuse_session *fuse_session_new(struct fuse_args *args,
2963  const struct fuse_lowlevel_ops *op,
2964  size_t op_size, void *userdata)
2965 {
2966  int err;
2967  struct fuse_session *se;
2968  struct mount_opts *mo;
2969 
2970  if (sizeof(struct fuse_lowlevel_ops) < op_size) {
2971  fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
2972  op_size = sizeof(struct fuse_lowlevel_ops);
2973  }
2974 
2975  if (args->argc == 0) {
2976  fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
2977  return NULL;
2978  }
2979 
2980  se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
2981  if (se == NULL) {
2982  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
2983  goto out1;
2984  }
2985  se->fd = -1;
2986  se->conn.max_write = UINT_MAX;
2987  se->conn.max_readahead = UINT_MAX;
2988 
2989  /* Parse options */
2990  if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
2991  goto out2;
2992  if(se->deny_others) {
2993  /* Allowing access only by root is done by instructing
2994  * kernel to allow access by everyone, and then restricting
2995  * access to root and mountpoint owner in libfuse.
2996  */
2997  // We may be adding the option a second time, but
2998  // that doesn't hurt.
2999  if(fuse_opt_add_arg(args, "-oallow_other") == -1)
3000  goto out2;
3001  }
3002  mo = parse_mount_opts(args);
3003  if (mo == NULL)
3004  goto out3;
3005 
3006  if(args->argc == 1 &&
3007  args->argv[0][0] == '-') {
3008  fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
3009  "will be ignored\n");
3010  } else if (args->argc != 1) {
3011  int i;
3012  fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
3013  for(i = 1; i < args->argc-1; i++)
3014  fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
3015  fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
3016  goto out4;
3017  }
3018 
3019  if (se->debug)
3020  fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
3021 
3022  se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
3023  FUSE_BUFFER_HEADER_SIZE;
3024 
3025  list_init_req(&se->list);
3026  list_init_req(&se->interrupts);
3027  list_init_nreq(&se->notify_list);
3028  se->notify_ctr = 1;
3029  pthread_mutex_init(&se->lock, NULL);
3030 
3031  err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
3032  if (err) {
3033  fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
3034  strerror(err));
3035  goto out5;
3036  }
3037 
3038  memcpy(&se->op, op, op_size);
3039  se->owner = getuid();
3040  se->userdata = userdata;
3041 
3042  se->mo = mo;
3043  return se;
3044 
3045 out5:
3046  pthread_mutex_destroy(&se->lock);
3047 out4:
3048  fuse_opt_free_args(args);
3049 out3:
3050  if (mo != NULL)
3051  destroy_mount_opts(mo);
3052 out2:
3053  free(se);
3054 out1:
3055  return NULL;
3056 }
3057 
3058 int fuse_session_custom_io(struct fuse_session *se, const struct fuse_custom_io *io,
3059  int fd)
3060 {
3061  if (fd < 0) {
3062  fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3063  "fuse_session_custom_io()\n", fd);
3064  return -EBADF;
3065  }
3066  if (io == NULL) {
3067  fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3068  "fuse_session_custom_io()\n");
3069  return -EINVAL;
3070  } else if (io->read == NULL || io->writev == NULL) {
3071  /* If the user provides their own file descriptor, we can't
3072  guarantee that the default behavior of the io operations made
3073  in libfuse will function properly. Therefore, we require the
3074  user to implement these io operations when using custom io. */
3075  fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3076  "implement both io->read() and io->writev()\n");
3077  return -EINVAL;
3078  }
3079 
3080  se->io = malloc(sizeof(struct fuse_custom_io));
3081  if (se->io == NULL) {
3082  fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3083  "Error: %s\n", strerror(errno));
3084  return -errno;
3085  }
3086 
3087  se->fd = fd;
3088  *se->io = *io;
3089  return 0;
3090 }
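
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): routing
 * the FUSE request stream over a caller-supplied descriptor (for
 * example one end of a socketpair) via fuse_session_custom_io().
 * Only read() and writev() are mandatory, as enforced above.  The
 * callback prototypes below are inferred from how se->io is invoked
 * in this file; consult the struct fuse_custom_io definition in
 * fuse_lowlevel.h for the authoritative signatures.  Needs
 * <sys/uio.h> and <unistd.h>.
 */
static ssize_t example_io_read(int fd, void *buf, size_t buf_len,
                               void *userdata)
{
        (void) userdata;
        return read(fd, buf, buf_len);
}

static ssize_t example_io_writev(int fd, struct iovec *iov, int count,
                                 void *userdata)
{
        (void) userdata;
        return writev(fd, iov, count);
}

static int example_use_custom_io(struct fuse_session *se, int sock_fd)
{
        static const struct fuse_custom_io io = {
                .read = example_io_read,
                .writev = example_io_writev,
        };

        return fuse_session_custom_io(se, &io, sock_fd);
}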
3091 
3092 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3093 {
3094  int fd;
3095 
3096  /*
3097  * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3098  * would ensue.
3099  */
3100  do {
3101  fd = open("/dev/null", O_RDWR);
3102  if (fd > 2)
3103  close(fd);
3104  } while (fd >= 0 && fd <= 2);
3105 
3106  /*
3107  * To allow FUSE daemons to run without privileges, the caller may open
3108  * /dev/fuse before launching the file system and pass on the file
3109  * descriptor by specifying /dev/fd/N as the mount point. Note that the
3110  * parent process takes care of performing the mount in this case.
3111  */
3112  fd = fuse_mnt_parse_fuse_fd(mountpoint);
3113  if (fd != -1) {
3114  if (fcntl(fd, F_GETFD) == -1) {
3115  fuse_log(FUSE_LOG_ERR,
3116  "fuse: Invalid file descriptor /dev/fd/%u\n",
3117  fd);
3118  return -1;
3119  }
3120  se->fd = fd;
3121  return 0;
3122  }
3123 
3124  /* Open channel */
3125  fd = fuse_kern_mount(mountpoint, se->mo);
3126  if (fd == -1)
3127  return -1;
3128  se->fd = fd;
3129 
3130  /* Save mountpoint */
3131  se->mountpoint = strdup(mountpoint);
3132  if (se->mountpoint == NULL)
3133  goto error_out;
3134 
3135  return 0;
3136 
3137 error_out:
3138  fuse_kern_unmount(mountpoint, fd);
3139  return -1;
3140 }
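
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): the
 * canonical low-level session life cycle around fuse_session_mount(),
 * modelled on the example filesystems shipped with libfuse.
 * "example_ops" stands for a filesystem's fuse_lowlevel_ops (left
 * empty here); error handling is reduced to the bare minimum.
 */
static const struct fuse_lowlevel_ops example_ops;

static int example_run(int argc, char *argv[], const char *mountpoint)
{
        struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
        struct fuse_session *se;
        int ret = 1;

        se = fuse_session_new(&args, &example_ops, sizeof(example_ops), NULL);
        if (se == NULL)
                goto out_args;
        if (fuse_set_signal_handlers(se) != 0)
                goto out_destroy;
        if (fuse_session_mount(se, mountpoint) != 0)
                goto out_signals;

        ret = fuse_session_loop(se);    /* single-threaded dispatch */

        fuse_session_unmount(se);
out_signals:
        fuse_remove_signal_handlers(se);
out_destroy:
        fuse_session_destroy(se);
out_args:
        fuse_opt_free_args(&args);
        return ret;
}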
3141 
3142 int fuse_session_fd(struct fuse_session *se)
3143 {
3144  return se->fd;
3145 }
3146 
3147 void fuse_session_unmount(struct fuse_session *se)
3148 {
3149  if (se->mountpoint != NULL) {
3150  fuse_kern_unmount(se->mountpoint, se->fd);
3151  se->fd = -1;
3152  free(se->mountpoint);
3153  se->mountpoint = NULL;
3154  }
3155 }
3156 
3157 #ifdef linux
3158 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3159 {
3160  char *buf;
3161  size_t bufsize = 1024;
3162  char path[128];
3163  int ret;
3164  int fd;
3165  unsigned long pid = req->ctx.pid;
3166  char *s;
3167 
3168  sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3169 
3170 retry:
3171  buf = malloc(bufsize);
3172  if (buf == NULL)
3173  return -ENOMEM;
3174 
3175  ret = -EIO;
3176  fd = open(path, O_RDONLY);
3177  if (fd == -1)
3178  goto out_free;
3179 
3180  ret = read(fd, buf, bufsize);
3181  close(fd);
3182  if (ret < 0) {
3183  ret = -EIO;
3184  goto out_free;
3185  }
3186 
3187  if ((size_t)ret == bufsize) {
3188  free(buf);
3189  bufsize *= 4;
3190  goto retry;
3191  }
3192 
3193  ret = -EIO;
3194  s = strstr(buf, "\nGroups:");
3195  if (s == NULL)
3196  goto out_free;
3197 
3198  s += 8;
3199  ret = 0;
3200  while (1) {
3201  char *end;
3202  unsigned long val = strtoul(s, &end, 0);
3203  if (end == s)
3204  break;
3205 
3206  s = end;
3207  if (ret < size)
3208  list[ret] = val;
3209  ret++;
3210  }
3211 
3212 out_free:
3213  free(buf);
3214  return ret;
3215 }
3216 #else /* linux */
3217 /*
3218  * This is currently not implemented on other than Linux...
3219  */
3220 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3221 {
3222  (void) req; (void) size; (void) list;
3223  return -ENOSYS;
3224 }
3225 #endif
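
/*
 * Editor's illustrative sketch (not part of fuse_lowlevel.c): using
 * fuse_req_getgroups() from a request handler for custom permission
 * checks.  As with getgroups(2), a return value larger than the given
 * size means the list was truncated; on non-Linux systems the call
 * fails with -ENOSYS (see above).
 */
static int example_caller_in_group(fuse_req_t req, gid_t wanted)
{
        gid_t groups[32];
        int i, n;

        n = fuse_req_getgroups(req, 32, groups);
        if (n < 0)
                return 0;       /* -ENOSYS or an error reading /proc */
        if (n > 32)
                n = 32;         /* list was truncated */

        for (i = 0; i < n; i++)
                if (groups[i] == wanted)
                        return 1;
        return 0;
}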
3226 
3227 /* Prevent spurious data race warning - we don't care
3228  * about races for this flag */
3229 __attribute__((no_sanitize_thread))
3230 void fuse_session_exit(struct fuse_session *se)
3231 {
3232  se->exited = 1;
3233 }
3234 
3235 __attribute__((no_sanitize_thread))
3236 void fuse_session_reset(struct fuse_session *se)
3237 {
3238  se->exited = 0;
3239  se->error = 0;
3240 }
3241 
3242 __attribute__((no_sanitize_thread))
3243 int fuse_session_exited(struct fuse_session *se)
3244 {
3245  return se->exited;
3246 }