#include <aio.h>
#include <pthread.h>
#include <semaphore.h>
#include <limits.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include "syscall.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "aio_impl.h"

#define malloc __libc_malloc
#define calloc __libc_calloc
#define realloc __libc_realloc
#define free __libc_free

/* The following is a threads-based implementation of AIO with minimal
 * dependence on implementation details. Most synchronization is
 * performed with pthread primitives, but atomics and futex operations
 * are used for notification in a couple places where the pthread
 * primitives would be inefficient or impractical.
 *
 * For each fd with outstanding aio operations, an aio_queue structure
 * is maintained. These are reference-counted and destroyed by the last
 * aio worker thread to exit. Accessing any member of the aio_queue
 * structure requires a lock on the aio_queue. Adding and removing aio
 * queues themselves requires a write lock on the global map object,
 * a 4-level table mapping file descriptor numbers to aio queues. A
 * read lock on the map is used to obtain locks on existing queues by
 * excluding destruction of the queue by a different thread while it is
 * being locked.
 *
 * Each aio queue has a list of active threads/operations. Presently there
 * is a one to one relationship between threads and operations. The only
 * members of the aio_thread structure which are accessed by other threads
 * are the linked list pointers, op (which is immutable), running (which
 * is updated atomically), and err (which is synchronized via running),
 * so no locking is necessary. Most of the other members are used for
 * sharing data between the main flow of execution and the cancellation
 * cleanup handler.
 *
 * Taking any aio locks requires having all signals blocked. This is
 * necessary because aio_cancel is needed by close, and close is required
 * to be async-signal safe. All aio worker threads run with all signals
 * blocked permanently.
 */
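
/* State for one in-flight operation. Each instance lives on the stack
 * of the aio worker thread performing the operation and is linked into
 * the owning queue's list of active operations. */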
struct aio_thread {
	pthread_t td;
	struct aiocb *cb;
	struct aio_thread *next, *prev;
	struct aio_queue *q;
	volatile int running;
	int err, op;
	ssize_t ret;
};
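
/* Per-fd queue of outstanding operations. ref holds one reference per
 * submitted operation, taken in submit() and dropped by the worker's
 * cleanup handler; seekable/append are determined lazily on first use;
 * head is the list of active operations, newest first. */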
struct aio_queue {
	int fd, seekable, append, ref, init;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct aio_thread *head;
};

struct aio_args {
	struct aiocb *cb;
	struct aio_queue *q;
	int op;
	sem_t sem;
};

static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
static struct aio_queue *****map;
static volatile int aio_fd_cnt;
volatile int __aio_fut;

static size_t io_thread_stack_size;

#define MAX(a,b) ((a)>(b) ? (a) : (b))
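
/* Look up (and, when need is nonzero, create) the queue for fd. The fd
 * is split into four index bytes (a,b,c,d) into the sparse 4-level map
 * described above. On success the queue is returned with its lock held;
 * otherwise 0 is returned. */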
static struct aio_queue *__aio_get_queue(int fd, int need)
{
	if (fd < 0) {
		errno = EBADF;
		return 0;
	}
	int a=fd>>24;
	unsigned char b=fd>>16, c=fd>>8, d=fd;
	struct aio_queue *q = 0;
	pthread_rwlock_rdlock(&maplock);
	if ((!map || !map[a] || !map[a][b] || !map[a][b][c] || !(q=map[a][b][c][d])) && need) {
		pthread_rwlock_unlock(&maplock);
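		/* Verify the fd is actually open before taking the write
		 * lock and allocating map nodes for it. */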
		if (fcntl(fd, F_GETFD) < 0) return 0;
		pthread_rwlock_wrlock(&maplock);
		if (!io_thread_stack_size) {
			unsigned long val = __getauxval(AT_MINSIGSTKSZ);
			io_thread_stack_size = MAX(MINSIGSTKSZ+2048, val+512);
		}
		if (!map) map = calloc(sizeof *map, (-1U/2+1)>>24);
		if (!map) goto out;
		if (!map[a]) map[a] = calloc(sizeof **map, 256);
		if (!map[a]) goto out;
		if (!map[a][b]) map[a][b] = calloc(sizeof ***map, 256);
		if (!map[a][b]) goto out;
		if (!map[a][b][c]) map[a][b][c] = calloc(sizeof ****map, 256);
		if (!map[a][b][c]) goto out;
		if (!(q = map[a][b][c][d])) {
			map[a][b][c][d] = q = calloc(sizeof *****map, 1);
			if (q) {
				q->fd = fd;
				pthread_mutex_init(&q->lock, 0);
				pthread_cond_init(&q->cond, 0);
				a_inc(&aio_fd_cnt);
			}
		}
	}
	if (q) pthread_mutex_lock(&q->lock);
out:
	pthread_rwlock_unlock(&maplock);
	return q;
}
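
/* Drop one reference to q, which the caller holds locked. If it was the
 * last reference, the queue is removed from the map and freed; see the
 * comment below for why the maplock must be (re)taken first. */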
static void __aio_unref_queue(struct aio_queue *q)
{
	if (q->ref > 1) {
		q->ref--;
		pthread_mutex_unlock(&q->lock);
		return;
	}

	/* This is potentially the last reference, but a new reference
	 * may arrive since we cannot free the queue object without first
	 * taking the maplock, which requires releasing the queue lock. */
	pthread_mutex_unlock(&q->lock);
	pthread_rwlock_wrlock(&maplock);
	pthread_mutex_lock(&q->lock);
	if (q->ref == 1) {
		int fd=q->fd;
		int a=fd>>24;
		unsigned char b=fd>>16, c=fd>>8, d=fd;
		map[a][b][c][d] = 0;
		a_dec(&aio_fd_cnt);
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
		free(q);
	} else {
		q->ref--;
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
	}
}

static void cleanup(void *ctx)
{
	struct aio_thread *at = ctx;
	struct aio_queue *q = at->q;
	struct aiocb *cb = at->cb;
	struct sigevent sev = cb->aio_sigevent;

	/* There are four potential types of waiters we could need to wake:
	 * 1. Callers of aio_cancel/close.
	 * 2. Callers of aio_suspend with a single aiocb.
	 * 3. Callers of aio_suspend with a list.
	 * 4. AIO worker threads waiting for sequenced operations.
	 * Types 1-3 are notified via atomics/futexes, mainly for AS-safety
	 * considerations. Type 4 is notified later via a cond var. */

	cb->__ret = at->ret;
	if (a_swap(&at->running, 0) < 0)
		__wake(&at->running, -1, 1);
	if (a_swap(&cb->__err, at->err) != EINPROGRESS)
		__wake(&cb->__err, -1, 1);
	if (a_swap(&__aio_fut, 0))
		__wake(&__aio_fut, -1, 1);

	pthread_mutex_lock(&q->lock);

	if (at->next) at->next->prev = at->prev;
	if (at->prev) at->prev->next = at->next;
	else q->head = at->next;

	/* Signal aio worker threads waiting for sequenced operations. */
	pthread_cond_broadcast(&q->cond);

	__aio_unref_queue(q);

	if (sev.sigev_notify == SIGEV_SIGNAL) {
		siginfo_t si = {
			.si_signo = sev.sigev_signo,
			.si_value = sev.sigev_value,
			.si_code = SI_ASYNCIO,
			.si_pid = getpid(),
			.si_uid = getuid()
		};
		__syscall(SYS_rt_sigqueueinfo, si.si_pid, si.si_signo, &si);
	}
	if (sev.sigev_notify == SIGEV_THREAD) {
		a_store(&__pthread_self()->cancel, 0);
		sev.sigev_notify_function(sev.sigev_value);
	}
}
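
/* Body of each aio worker thread: link the operation into its queue,
 * wait for any ordering constraints, perform the I/O, then report
 * completion via the cleanup handler, which also runs if the thread is
 * cancelled by aio_cancel. */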
static void *io_thread_func(void *ctx)
{
	struct aio_thread at, *p;
	struct aio_args *args = ctx;
	struct aiocb *cb = args->cb;
	int fd = cb->aio_fildes;
	int op = args->op;
	void *buf = (void *)cb->aio_buf;
	size_t len = cb->aio_nbytes;
	off_t off = cb->aio_offset;

	struct aio_queue *q = args->q;
	ssize_t ret;

	pthread_mutex_lock(&q->lock);
	sem_post(&args->sem);

	at.op = op;
	at.running = 1;
	at.ret = -1;
	at.err = ECANCELED;
	at.q = q;
	at.td = __pthread_self();
	at.cb = cb;
	at.prev = 0;
	if ((at.next = q->head)) at.next->prev = &at;
	q->head = &at;

	if (!q->init) {
		int seekable = lseek(fd, 0, SEEK_CUR) >= 0;
		q->seekable = seekable;
		q->append = !seekable || (fcntl(fd, F_GETFL) & O_APPEND);
		q->init = 1;
	}

	pthread_cleanup_push(cleanup, &at);

	/* Wait for sequenced operations. */
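	/* Reads never wait. Writes wait only when the queue is in append
	 * mode (or the fd is not seekable); fsync/fdatasync always wait.
	 * The loop below blocks until no previously submitted write
	 * (reachable via at.next) is still in flight. */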
	if (op!=LIO_READ && (op!=LIO_WRITE || q->append)) {
		for (;;) {
			for (p=at.next; p && p->op!=LIO_WRITE; p=p->next);
			if (!p) break;
			pthread_cond_wait(&q->cond, &q->lock);
		}
	}

	pthread_mutex_unlock(&q->lock);

	switch (op) {
	case LIO_WRITE:
		ret = q->append ? write(fd, buf, len) : pwrite(fd, buf, len, off);
		break;
	case LIO_READ:
		ret = !q->seekable ? read(fd, buf, len) : pread(fd, buf, len, off);
		break;
	case O_SYNC:
		ret = fsync(fd);
		break;
	case O_DSYNC:
		ret = fdatasync(fd);
		break;
	}
	at.ret = ret;
	at.err = ret<0 ? errno : 0;

	pthread_cleanup_pop(1);

	return 0;
}
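
/* Common submission path for aio_read, aio_write and aio_fsync: take a
 * reference on the fd's queue, then start a detached worker thread with
 * all signals blocked. submit waits on args.sem until the worker has
 * copied its arguments (which live on submit's stack) and taken the
 * queue lock, so the args structure cannot go out of scope too early. */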
static int submit(struct aiocb *cb, int op)
{
	int ret = 0;
	pthread_attr_t a;
	sigset_t allmask, origmask;
	pthread_t td;
	struct aio_queue *q = __aio_get_queue(cb->aio_fildes, 1);
	struct aio_args args = { .cb = cb, .op = op, .q = q };
	sem_init(&args.sem, 0, 0);

	if (!q) {
		if (errno != EBADF) errno = EAGAIN;
		cb->__ret = -1;
		cb->__err = errno;
		return -1;
	}
	q->ref++;
	pthread_mutex_unlock(&q->lock);

	if (cb->aio_sigevent.sigev_notify == SIGEV_THREAD) {
		if (cb->aio_sigevent.sigev_notify_attributes)
			a = *cb->aio_sigevent.sigev_notify_attributes;
		else
			pthread_attr_init(&a);
	} else {
		pthread_attr_init(&a);
		pthread_attr_setstacksize(&a, io_thread_stack_size);
		pthread_attr_setguardsize(&a, 0);
	}
	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
	cb->__err = EINPROGRESS;
	if (pthread_create(&td, &a, io_thread_func, &args)) {
		pthread_mutex_lock(&q->lock);
		__aio_unref_queue(q);
		cb->__err = errno = EAGAIN;
		cb->__ret = ret = -1;
	}
	pthread_sigmask(SIG_SETMASK, &origmask, 0);
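
	/* Wait until the worker has finished with args (retrying if
	 * sem_wait is interrupted by a signal). */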
	if (!ret) {
		while (sem_wait(&args.sem));
	}

	return ret;
}

int aio_read(struct aiocb *cb)
{
	return submit(cb, LIO_READ);
}

int aio_write(struct aiocb *cb)
{
	return submit(cb, LIO_WRITE);
}

int aio_fsync(int op, struct aiocb *cb)
{
	if (op != O_SYNC && op != O_DSYNC) {
		errno = EINVAL;
		return -1;
	}
	return submit(cb, op);
}

ssize_t aio_return(struct aiocb *cb)
{
	return cb->__ret;
}

int aio_error(const struct aiocb *cb)
{
	a_barrier();
	return cb->__err & 0x7fffffff;
}
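
/* Cancel outstanding operations on fd, or just the one matching cb if
 * cb is non-null. Runs with all signals blocked because close() relies
 * on it and must remain async-signal-safe; completion of each cancelled
 * worker is awaited via its running flag rather than the queue cond var. */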
int aio_cancel(int fd, struct aiocb *cb)
{
	sigset_t allmask, origmask;
	int ret = AIO_ALLDONE;
	struct aio_thread *p;
	struct aio_queue *q;

	/* Unspecified behavior case. Report an error. */
	if (cb && fd != cb->aio_fildes) {
		errno = EINVAL;
		return -1;
	}

	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);

	errno = ENOENT;
	if (!(q = __aio_get_queue(fd, 0))) {
		if (errno == EBADF) ret = -1;
		goto done;
	}

	for (p = q->head; p; p = p->next) {
		if (cb && cb != p->cb) continue;
		/* Transition target from running to running-with-waiters */
		if (a_cas(&p->running, 1, -1)) {
			pthread_cancel(p->td);
			__wait(&p->running, 0, -1, 1);
			if (p->err == ECANCELED) ret = AIO_CANCELED;
		}
	}

	pthread_mutex_unlock(&q->lock);
done:
	pthread_sigmask(SIG_SETMASK, &origmask, 0);

	return ret;
}
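
/* Hook called by close() to cancel any outstanding operations on fd
 * before the file descriptor is closed; always returns fd. */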
int __aio_close(int fd)
{
	a_barrier();
	if (aio_fd_cnt) aio_cancel(fd, 0);
	return fd;
}
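
/* Fork hook: who<0 is the parent before fork (take the map read lock so
 * the child inherits a consistent map), who==0 is the parent afterwards
 * (just unlock), and who>0 is the child, where no aio worker threads
 * exist anymore, so stale queue pointers are cleared before unlocking. */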
void __aio_atfork(int who)
{
	if (who<0) {
		pthread_rwlock_rdlock(&maplock);
		return;
	}
	if (who>0 && map) for (int a=0; a<(-1U/2+1)>>24; a++)
		if (map[a]) for (int b=0; b<256; b++)
			if (map[a][b]) for (int c=0; c<256; c++)
				if (map[a][b][c]) for (int d=0; d<256; d++)
					map[a][b][c][d] = 0;
	pthread_rwlock_unlock(&maplock);
}

weak_alias(aio_cancel, aio_cancel64);
weak_alias(aio_error, aio_error64);
weak_alias(aio_fsync, aio_fsync64);
weak_alias(aio_read, aio_read64);
weak_alias(aio_write, aio_write64);
weak_alias(aio_return, aio_return64);