syscall_arch.h

#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)

#define SYSCALL_RLIM_INFINITY (-1UL/2)

#include <sys/stat.h>

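/* Mirror of the kernel's stat structure for this ABI. Its layout (split
 * 32-bit second/nanosecond time fields, extra padding words) does not match
 * the userspace struct stat, so the stat-family syscalls below are pointed
 * at a local struct kernel_stat and the result is translated by __stat_fix(). */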
struct kernel_stat {
	unsigned int st_dev;
	unsigned int __pad1[3];
	unsigned long long st_ino;
	unsigned int st_mode;
	unsigned int st_nlink;
	int st_uid;
	int st_gid;
	unsigned int st_rdev;
	unsigned int __pad2[3];
	long long st_size;
	unsigned int st_atime_sec;
	unsigned int st_atime_nsec;
	unsigned int st_mtime_sec;
	unsigned int st_mtime_nsec;
	unsigned int st_ctime_sec;
	unsigned int st_ctime_nsec;
	unsigned int st_blksize;
	unsigned int __pad3;
	unsigned long long st_blocks;
};

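/* Copy a struct kernel_stat into the caller's struct stat, widening the
 * split second/nanosecond fields into the st_*tim timespec members. */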
static void __stat_fix(struct kernel_stat *kst, struct stat *st)
{
	st->st_dev = kst->st_dev;
	st->st_ino = kst->st_ino;
	st->st_mode = kst->st_mode;
	st->st_nlink = kst->st_nlink;
	st->st_uid = kst->st_uid;
	st->st_gid = kst->st_gid;
	st->st_rdev = kst->st_rdev;
	st->st_size = kst->st_size;
	st->st_atim.tv_sec = kst->st_atime_sec;
	st->st_atim.tv_nsec = kst->st_atime_nsec;
	st->st_mtim.tv_sec = kst->st_mtime_sec;
	st->st_mtim.tv_nsec = kst->st_mtime_nsec;
	st->st_ctim.tv_sec = kst->st_ctime_sec;
	st->st_ctim.tv_nsec = kst->st_ctime_nsec;
	st->st_blksize = kst->st_blksize;
	st->st_blocks = kst->st_blocks;
}

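/* Registers not preserved across the syscall besides the declared outputs:
 * the assembler temporary $1, $3, the temporaries $10-$15 and $24-$25, and
 * the hi/lo multiply-divide registers. "memory" keeps the compiler from
 * caching memory contents across the syscall. */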
#define SYSCALL_CLOBBERLIST \
	"$1", "$3", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"

static inline long __syscall0(long n)
{
	register long r7 __asm__("$7");
	register long r2 __asm__("$2") = n;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "=r"(r7)
		:
		: SYSCALL_CLOBBERLIST);
	return r7 ? -r2 : r2;
}

static inline long __syscall1(long n, long a)
{
	register long r4 __asm__("$4") = a;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2") = n;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "=r"(r7)
		: "r"(r4)
		: SYSCALL_CLOBBERLIST);
	return r7 ? -r2 : r2;
}

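/* From two arguments up, the stat-family calls are intercepted: the caller's
 * buffer (argument b, register $5) is swapped for a local struct kernel_stat,
 * and on success the data is copied back out through __stat_fix(). */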
static inline long __syscall2(long n, long a, long b)
{
	struct kernel_stat kst;
	long ret;
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2") = n;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		r5 = (long) &kst;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "=r"(r7)
		: "r"(r4), "r"(r5)
		: SYSCALL_CLOBBERLIST);
	if (r7) return -r2;
	ret = r2;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		__stat_fix(&kst, (struct stat *)b);
	return ret;
}

static inline long __syscall3(long n, long a, long b, long c)
{
	struct kernel_stat kst;
	long ret;
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2") = n;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		r5 = (long) &kst;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "=r"(r7)
		: "r"(r4), "r"(r5), "r"(r6)
		: SYSCALL_CLOBBERLIST);
	if (r7) return -r2;
	ret = r2;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		__stat_fix(&kst, (struct stat *)b);
	return ret;
}

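/* SYS_newfstatat carries its stat buffer as the third argument (c, in $6),
 * so the four-or-more-argument forms redirect and fix up that case as well. */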
static inline long __syscall4(long n, long a, long b, long c, long d)
{
	struct kernel_stat kst;
	long ret;
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r2 __asm__("$2") = n;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		r5 = (long) &kst;
	if (n == SYS_newfstatat)
		r6 = (long) &kst;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "+r"(r7)
		: "r"(r4), "r"(r5), "r"(r6)
		: SYSCALL_CLOBBERLIST);
	if (r7) return -r2;
	ret = r2;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		__stat_fix(&kst, (struct stat *)b);
	if (n == SYS_newfstatat)
		__stat_fix(&kst, (struct stat *)c);
	return ret;
}

static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
	struct kernel_stat kst;
	long ret;
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r8 __asm__("$8") = e;
	register long r2 __asm__("$2") = n;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		r5 = (long) &kst;
	if (n == SYS_newfstatat)
		r6 = (long) &kst;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "+r"(r7)
		: "r"(r4), "r"(r5), "r"(r6), "r"(r8)
		: SYSCALL_CLOBBERLIST);
	if (r7) return -r2;
	ret = r2;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		__stat_fix(&kst, (struct stat *)b);
	if (n == SYS_newfstatat)
		__stat_fix(&kst, (struct stat *)c);
	return ret;
}

static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
	struct kernel_stat kst;
	long ret;
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r8 __asm__("$8") = e;
	register long r9 __asm__("$9") = f;
	register long r2 __asm__("$2") = n;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		r5 = (long) &kst;
	if (n == SYS_newfstatat)
		r6 = (long) &kst;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "+r"(r7)
		: "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
		: SYSCALL_CLOBBERLIST);
	if (r7) return -r2;
	ret = r2;
	if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
		__stat_fix(&kst, (struct stat *)b);
	if (n == SYS_newfstatat)
		__stat_fix(&kst, (struct stat *)c);
	return ret;
}

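/* vDSO support: with these macros defined, clock_gettime can look up this
 * symbol and version in the vDSO and use the fast path instead of issuing
 * a full syscall when it is available. */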
#define VDSO_USEFUL
#define VDSO_CGT_SYM "__vdso_clock_gettime"
#define VDSO_CGT_VER "LINUX_2.6"
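
/* Illustrative sketch, not part of the original header: how a caller would
 * use one of the inline wrappers above. The variables fd, buf and len are
 * hypothetical placeholders; real callers normally go through the generic
 * syscall() macros rather than calling __syscall3 directly.
 *
 *     long ret = __syscall3(SYS_write, fd, (long)buf, (long)len);
 *     if (ret < 0) {
 *             // -ret is the errno value, per the r7/r2 handling above
 *     }
 */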