root/kernel/time/time.c


DEFINITIONS

This source file includes the following definitions.
  1. SYSCALL_DEFINE1
  2. SYSCALL_DEFINE1
  3. SYSCALL_DEFINE1
  4. SYSCALL_DEFINE1
  5. SYSCALL_DEFINE2
  6. do_sys_settimeofday64
  7. SYSCALL_DEFINE2
  8. COMPAT_SYSCALL_DEFINE2
  9. COMPAT_SYSCALL_DEFINE2
  10. SYSCALL_DEFINE1
  11. get_old_timex32
  12. put_old_timex32
  13. SYSCALL_DEFINE1
  14. jiffies_to_msecs
  15. jiffies_to_usecs
  16. mktime64
  17. ns_to_timespec
  18. ns_to_timeval
  19. ns_to_kernel_old_timeval
  20. set_normalized_timespec64
  21. ns_to_timespec64
  22. __msecs_to_jiffies
  23. __usecs_to_jiffies
  24. __timespec64_to_jiffies
  25. __timespec_to_jiffies
  26. timespec64_to_jiffies
  27. jiffies_to_timespec64
  28. timeval_to_jiffies
  29. jiffies_to_timeval
  30. jiffies_to_clock_t
  31. clock_t_to_jiffies
  32. jiffies_64_to_clock_t
  33. nsec_to_clock_t
  34. jiffies64_to_nsecs
  35. jiffies64_to_msecs
  36. nsecs_to_jiffies64
  37. nsecs_to_jiffies
  38. timespec64_add_safe
  39. get_timespec64
  40. put_timespec64
  41. __get_old_timespec32
  42. __put_old_timespec32
  43. get_old_timespec32
  44. put_old_timespec32
  45. get_itimerspec64
  46. put_itimerspec64
  47. get_old_itimerspec32
  48. put_old_itimerspec32

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  Copyright (C) 1991, 1992  Linus Torvalds
   4  *
   5  *  This file contains the interface functions for the various time related
   6  *  system calls: time, stime, gettimeofday, settimeofday, adjtime
   7  *
   8  * Modification history:
   9  *
  10  * 1993-09-02    Philip Gladstone
  11  *      Created file with time related functions from sched/core.c and adjtimex()
  12  * 1993-10-08    Torsten Duwe
  13  *      adjtime interface update and CMOS clock write code
  14  * 1995-08-13    Torsten Duwe
  15  *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
  16  * 1999-01-16    Ulrich Windl
  17  *      Introduced error checking for many cases in adjtimex().
  18  *      Updated NTP code according to technical memorandum Jan '96
  19  *      "A Kernel Model for Precision Timekeeping" by Dave Mills
  20  *      Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
  21  *      (Even though the technical memorandum forbids it)
  22  * 2004-07-14    Christoph Lameter
  23  *      Added getnstimeofday to allow the posix timer functions to return
  24  *      with nanosecond accuracy
  25  */
  26 
  27 #include <linux/export.h>
  28 #include <linux/kernel.h>
  29 #include <linux/timex.h>
  30 #include <linux/capability.h>
  31 #include <linux/timekeeper_internal.h>
  32 #include <linux/errno.h>
  33 #include <linux/syscalls.h>
  34 #include <linux/security.h>
  35 #include <linux/fs.h>
  36 #include <linux/math64.h>
  37 #include <linux/ptrace.h>
  38 
  39 #include <linux/uaccess.h>
  40 #include <linux/compat.h>
  41 #include <asm/unistd.h>
  42 
  43 #include <generated/timeconst.h>
  44 #include "timekeeping.h"
  45 
  46 /*
  47  * The timezone where the local system is located.  Used as a default by some
   48  * programs that obtain this value by using gettimeofday.
  49  */
  50 struct timezone sys_tz;
  51 
  52 EXPORT_SYMBOL(sys_tz);
  53 
  54 #ifdef __ARCH_WANT_SYS_TIME
  55 
  56 /*
   57  * sys_time() can be implemented at user level using
  58  * sys_gettimeofday().  Is this for backwards compatibility?  If so,
  59  * why not move it into the appropriate arch directory (for those
  60  * architectures that need it).
  61  */
  62 SYSCALL_DEFINE1(time, time_t __user *, tloc)
  63 {
  64         time_t i = (time_t)ktime_get_real_seconds();
  65 
  66         if (tloc) {
  67                 if (put_user(i,tloc))
  68                         return -EFAULT;
  69         }
  70         force_successful_syscall_return();
  71         return i;
  72 }
  73 
  74 /*
   75  * sys_stime() can be implemented at user level using
  76  * sys_settimeofday().  Is this for backwards compatibility?  If so,
  77  * why not move it into the appropriate arch directory (for those
  78  * architectures that need it).
  79  */
  80 
  81 SYSCALL_DEFINE1(stime, time_t __user *, tptr)
  82 {
  83         struct timespec64 tv;
  84         int err;
  85 
  86         if (get_user(tv.tv_sec, tptr))
  87                 return -EFAULT;
  88 
  89         tv.tv_nsec = 0;
  90 
  91         err = security_settime64(&tv, NULL);
  92         if (err)
  93                 return err;
  94 
  95         do_settimeofday64(&tv);
  96         return 0;
  97 }
  98 
  99 #endif /* __ARCH_WANT_SYS_TIME */
 100 
 101 #ifdef CONFIG_COMPAT_32BIT_TIME
 102 #ifdef __ARCH_WANT_SYS_TIME32
 103 
 104 /* old_time32_t is a 32 bit "long" and needs to get converted. */
 105 SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc)
 106 {
 107         old_time32_t i;
 108 
 109         i = (old_time32_t)ktime_get_real_seconds();
 110 
 111         if (tloc) {
 112                 if (put_user(i,tloc))
 113                         return -EFAULT;
 114         }
 115         force_successful_syscall_return();
 116         return i;
 117 }
 118 
 119 SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
 120 {
 121         struct timespec64 tv;
 122         int err;
 123 
 124         if (get_user(tv.tv_sec, tptr))
 125                 return -EFAULT;
 126 
 127         tv.tv_nsec = 0;
 128 
 129         err = security_settime64(&tv, NULL);
 130         if (err)
 131                 return err;
 132 
 133         do_settimeofday64(&tv);
 134         return 0;
 135 }
 136 
 137 #endif /* __ARCH_WANT_SYS_TIME32 */
 138 #endif
 139 
 140 SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
 141                 struct timezone __user *, tz)
 142 {
 143         if (likely(tv != NULL)) {
 144                 struct timespec64 ts;
 145 
 146                 ktime_get_real_ts64(&ts);
 147                 if (put_user(ts.tv_sec, &tv->tv_sec) ||
 148                     put_user(ts.tv_nsec / 1000, &tv->tv_usec))
 149                         return -EFAULT;
 150         }
 151         if (unlikely(tz != NULL)) {
 152                 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
 153                         return -EFAULT;
 154         }
 155         return 0;
 156 }
 157 
 158 /*
 159  * In case for some reason the CMOS clock has not already been running
 160  * in UTC, but in some local time: The first time we set the timezone,
 161  * we will warp the clock so that it is ticking UTC time instead of
 162  * local time. Presumably, if someone is setting the timezone then we
 163  * are running in an environment where the programs understand about
 164  * timezones. This should be done at boot time in the /etc/rc script,
 165  * as soon as possible, so that the clock can be set right. Otherwise,
 166  * various programs will get confused when the clock gets warped.
 167  */
 168 
 169 int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
 170 {
 171         static int firsttime = 1;
 172         int error = 0;
 173 
 174         if (tv && !timespec64_valid_settod(tv))
 175                 return -EINVAL;
 176 
 177         error = security_settime64(tv, tz);
 178         if (error)
 179                 return error;
 180 
 181         if (tz) {
  182                 /* Verify we're within the +-15 hrs range */
 183                 if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
 184                         return -EINVAL;
 185 
 186                 sys_tz = *tz;
 187                 update_vsyscall_tz();
 188                 if (firsttime) {
 189                         firsttime = 0;
 190                         if (!tv)
 191                                 timekeeping_warp_clock();
 192                 }
 193         }
 194         if (tv)
 195                 return do_settimeofday64(tv);
 196         return 0;
 197 }
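/*
 * Illustrative user-space sketch: the one-time warp described above can be
 * triggered by passing only a timezone on the very first settimeofday()
 * call.  For a CMOS clock kept in CET (UTC+1, i.e. tz_minuteswest == -60)
 * that might look like:
 *
 *	struct timezone tz = { .tz_minuteswest = -60, .tz_dsttime = 0 };
 *
 *	settimeofday(NULL, &tz);	// warps system time from local to UTC, once
 */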
 198 
 199 SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
 200                 struct timezone __user *, tz)
 201 {
 202         struct timespec64 new_ts;
 203         struct timeval user_tv;
 204         struct timezone new_tz;
 205 
 206         if (tv) {
 207                 if (copy_from_user(&user_tv, tv, sizeof(*tv)))
 208                         return -EFAULT;
 209 
 210                 if (!timeval_valid(&user_tv))
 211                         return -EINVAL;
 212 
 213                 new_ts.tv_sec = user_tv.tv_sec;
 214                 new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
 215         }
 216         if (tz) {
 217                 if (copy_from_user(&new_tz, tz, sizeof(*tz)))
 218                         return -EFAULT;
 219         }
 220 
 221         return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
 222 }
 223 
 224 #ifdef CONFIG_COMPAT
 225 COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
 226                        struct timezone __user *, tz)
 227 {
 228         if (tv) {
 229                 struct timespec64 ts;
 230 
 231                 ktime_get_real_ts64(&ts);
 232                 if (put_user(ts.tv_sec, &tv->tv_sec) ||
 233                     put_user(ts.tv_nsec / 1000, &tv->tv_usec))
 234                         return -EFAULT;
 235         }
 236         if (tz) {
 237                 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
 238                         return -EFAULT;
 239         }
 240 
 241         return 0;
 242 }
 243 
 244 COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
 245                        struct timezone __user *, tz)
 246 {
 247         struct timespec64 new_ts;
 248         struct timeval user_tv;
 249         struct timezone new_tz;
 250 
 251         if (tv) {
 252                 if (compat_get_timeval(&user_tv, tv))
 253                         return -EFAULT;
 254 
 255                 if (!timeval_valid(&user_tv))
 256                         return -EINVAL;
 257 
 258                 new_ts.tv_sec = user_tv.tv_sec;
 259                 new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
 260         }
 261         if (tz) {
 262                 if (copy_from_user(&new_tz, tz, sizeof(*tz)))
 263                         return -EFAULT;
 264         }
 265 
 266         return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
 267 }
 268 #endif
 269 
 270 #if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
 271 SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
 272 {
 273         struct __kernel_timex txc;              /* Local copy of parameter */
 274         int ret;
 275 
  276         /* Copy the data from user space into the local kernel
  277          * copy of the structure. But bear in mind that the
  278          * structures may change.
 279          */
 280         if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex)))
 281                 return -EFAULT;
 282         ret = do_adjtimex(&txc);
 283         return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret;
 284 }
 285 #endif
 286 
 287 #ifdef CONFIG_COMPAT_32BIT_TIME
 288 int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp)
 289 {
 290         struct old_timex32 tx32;
 291 
 292         memset(txc, 0, sizeof(struct __kernel_timex));
 293         if (copy_from_user(&tx32, utp, sizeof(struct old_timex32)))
 294                 return -EFAULT;
 295 
 296         txc->modes = tx32.modes;
 297         txc->offset = tx32.offset;
 298         txc->freq = tx32.freq;
 299         txc->maxerror = tx32.maxerror;
 300         txc->esterror = tx32.esterror;
 301         txc->status = tx32.status;
 302         txc->constant = tx32.constant;
 303         txc->precision = tx32.precision;
 304         txc->tolerance = tx32.tolerance;
 305         txc->time.tv_sec = tx32.time.tv_sec;
 306         txc->time.tv_usec = tx32.time.tv_usec;
 307         txc->tick = tx32.tick;
 308         txc->ppsfreq = tx32.ppsfreq;
 309         txc->jitter = tx32.jitter;
 310         txc->shift = tx32.shift;
 311         txc->stabil = tx32.stabil;
 312         txc->jitcnt = tx32.jitcnt;
 313         txc->calcnt = tx32.calcnt;
 314         txc->errcnt = tx32.errcnt;
 315         txc->stbcnt = tx32.stbcnt;
 316 
 317         return 0;
 318 }
 319 
 320 int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc)
 321 {
 322         struct old_timex32 tx32;
 323 
 324         memset(&tx32, 0, sizeof(struct old_timex32));
 325         tx32.modes = txc->modes;
 326         tx32.offset = txc->offset;
 327         tx32.freq = txc->freq;
 328         tx32.maxerror = txc->maxerror;
 329         tx32.esterror = txc->esterror;
 330         tx32.status = txc->status;
 331         tx32.constant = txc->constant;
 332         tx32.precision = txc->precision;
 333         tx32.tolerance = txc->tolerance;
 334         tx32.time.tv_sec = txc->time.tv_sec;
 335         tx32.time.tv_usec = txc->time.tv_usec;
 336         tx32.tick = txc->tick;
 337         tx32.ppsfreq = txc->ppsfreq;
 338         tx32.jitter = txc->jitter;
 339         tx32.shift = txc->shift;
 340         tx32.stabil = txc->stabil;
 341         tx32.jitcnt = txc->jitcnt;
 342         tx32.calcnt = txc->calcnt;
 343         tx32.errcnt = txc->errcnt;
 344         tx32.stbcnt = txc->stbcnt;
 345         tx32.tai = txc->tai;
 346         if (copy_to_user(utp, &tx32, sizeof(struct old_timex32)))
 347                 return -EFAULT;
 348         return 0;
 349 }
 350 
 351 SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp)
 352 {
 353         struct __kernel_timex txc;
 354         int err, ret;
 355 
 356         err = get_old_timex32(&txc, utp);
 357         if (err)
 358                 return err;
 359 
 360         ret = do_adjtimex(&txc);
 361 
 362         err = put_old_timex32(utp, &txc);
 363         if (err)
 364                 return err;
 365 
 366         return ret;
 367 }
 368 #endif
 369 
 370 /*
 371  * Convert jiffies to milliseconds and back.
 372  *
 373  * Avoid unnecessary multiplications/divisions in the
 374  * two most common HZ cases:
 375  */
 376 unsigned int jiffies_to_msecs(const unsigned long j)
 377 {
 378 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 379         return (MSEC_PER_SEC / HZ) * j;
 380 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
 381         return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 382 #else
 383 # if BITS_PER_LONG == 32
 384         return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
 385                HZ_TO_MSEC_SHR32;
 386 # else
 387         return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 388 # endif
 389 #endif
 390 }
 391 EXPORT_SYMBOL(jiffies_to_msecs);
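/*
 * Worked example (illustrative only): with HZ == 1000 the first branch above
 * applies and jiffies_to_msecs(5) == 5; with HZ == 250 the same call returns
 * (MSEC_PER_SEC / HZ) * 5 == 20, i.e. four milliseconds per jiffy.
 */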
 392 
 393 unsigned int jiffies_to_usecs(const unsigned long j)
 394 {
 395         /*
  396          * HZ usually doesn't go much beyond MSEC_PER_SEC.
 397          * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
 398          */
 399         BUILD_BUG_ON(HZ > USEC_PER_SEC);
 400 
 401 #if !(USEC_PER_SEC % HZ)
 402         return (USEC_PER_SEC / HZ) * j;
 403 #else
 404 # if BITS_PER_LONG == 32
 405         return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
 406 # else
 407         return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
 408 # endif
 409 #endif
 410 }
 411 EXPORT_SYMBOL(jiffies_to_usecs);
 412 
 413 /*
 414  * mktime64 - Converts date to seconds.
 415  * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 416  * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 417  * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 418  *
 419  * [For the Julian calendar (which was used in Russia before 1917,
 420  * Britain & colonies before 1752, anywhere else before 1582,
 421  * and is still in use by some communities) leave out the
 422  * -year/100+year/400 terms, and add 10.]
 423  *
 424  * This algorithm was first published by Gauss (I think).
 425  *
 426  * A leap second can be indicated by calling this function with sec as
 427  * 60 (allowable under ISO 8601).  The leap second is treated the same
 428  * as the following second since they don't exist in UNIX time.
 429  *
  430  * An encoding of midnight at the end of the day as 24:00:00 - i.e. midnight
 431  * tomorrow - (allowable under ISO 8601) is supported.
 432  */
 433 time64_t mktime64(const unsigned int year0, const unsigned int mon0,
 434                 const unsigned int day, const unsigned int hour,
 435                 const unsigned int min, const unsigned int sec)
 436 {
 437         unsigned int mon = mon0, year = year0;
 438 
 439         /* 1..12 -> 11,12,1..10 */
 440         if (0 >= (int) (mon -= 2)) {
 441                 mon += 12;      /* Puts Feb last since it has leap day */
 442                 year -= 1;
 443         }
 444 
 445         return ((((time64_t)
 446                   (year/4 - year/100 + year/400 + 367*mon/12 + day) +
 447                   year*365 - 719499
 448             )*24 + hour /* now have hours - midnight tomorrow handled here */
 449           )*60 + min /* now have minutes */
 450         )*60 + sec; /* finally seconds */
 451 }
 452 EXPORT_SYMBOL(mktime64);
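/*
 * Worked examples (illustrative only):
 *	mktime64(1970, 1, 1, 0, 0, 0) == 0		(the Unix epoch itself)
 *	mktime64(2000, 1, 1, 0, 0, 0) == 946684800	(30 * 365 + 7 leap days,
 *							 times 86400 seconds)
 */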
 453 
 454 /**
 455  * ns_to_timespec - Convert nanoseconds to timespec
 456  * @nsec:       the nanoseconds value to be converted
 457  *
 458  * Returns the timespec representation of the nsec parameter.
 459  */
 460 struct timespec ns_to_timespec(const s64 nsec)
 461 {
 462         struct timespec ts;
 463         s32 rem;
 464 
 465         if (!nsec)
 466                 return (struct timespec) {0, 0};
 467 
 468         ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
 469         if (unlikely(rem < 0)) {
 470                 ts.tv_sec--;
 471                 rem += NSEC_PER_SEC;
 472         }
 473         ts.tv_nsec = rem;
 474 
 475         return ts;
 476 }
 477 EXPORT_SYMBOL(ns_to_timespec);
 478 
 479 /**
 480  * ns_to_timeval - Convert nanoseconds to timeval
 481  * @nsec:       the nanoseconds value to be converted
 482  *
 483  * Returns the timeval representation of the nsec parameter.
 484  */
 485 struct timeval ns_to_timeval(const s64 nsec)
 486 {
 487         struct timespec ts = ns_to_timespec(nsec);
 488         struct timeval tv;
 489 
 490         tv.tv_sec = ts.tv_sec;
 491         tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
 492 
 493         return tv;
 494 }
 495 EXPORT_SYMBOL(ns_to_timeval);
 496 
 497 struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
 498 {
 499         struct timespec64 ts = ns_to_timespec64(nsec);
 500         struct __kernel_old_timeval tv;
 501 
 502         tv.tv_sec = ts.tv_sec;
 503         tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;
 504 
 505         return tv;
 506 }
 507 EXPORT_SYMBOL(ns_to_kernel_old_timeval);
 508 
 509 /**
  510  * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
  511  *
  512  * @ts:         pointer to timespec64 variable to be set
  513  * @sec:        seconds to set
  514  * @nsec:       nanoseconds to set
  515  *
  516  * Set the seconds and nanoseconds fields of a timespec64 variable and
  517  * normalize to the timespec64 storage format
  518  *
  519  * Note: The tv_nsec part is always in the range of
  520  *      0 <= tv_nsec < NSEC_PER_SEC
  521  * For negative values only the tv_sec field is negative!
 522  */
 523 void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
 524 {
 525         while (nsec >= NSEC_PER_SEC) {
 526                 /*
 527                  * The following asm() prevents the compiler from
 528                  * optimising this loop into a modulo operation. See
 529                  * also __iter_div_u64_rem() in include/linux/time.h
 530                  */
 531                 asm("" : "+rm"(nsec));
 532                 nsec -= NSEC_PER_SEC;
 533                 ++sec;
 534         }
 535         while (nsec < 0) {
 536                 asm("" : "+rm"(nsec));
 537                 nsec += NSEC_PER_SEC;
 538                 --sec;
 539         }
 540         ts->tv_sec = sec;
 541         ts->tv_nsec = nsec;
 542 }
 543 EXPORT_SYMBOL(set_normalized_timespec64);
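/*
 * Worked examples (illustrative only):
 *	set_normalized_timespec64(&ts, 1, 1500000000)	yields { 2, 500000000 }
 *	set_normalized_timespec64(&ts, 5, -1)		yields { 4, 999999999 }
 */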
 544 
 545 /**
 546  * ns_to_timespec64 - Convert nanoseconds to timespec64
 547  * @nsec:       the nanoseconds value to be converted
 548  *
 549  * Returns the timespec64 representation of the nsec parameter.
 550  */
 551 struct timespec64 ns_to_timespec64(const s64 nsec)
 552 {
 553         struct timespec64 ts;
 554         s32 rem;
 555 
 556         if (!nsec)
 557                 return (struct timespec64) {0, 0};
 558 
 559         ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
 560         if (unlikely(rem < 0)) {
 561                 ts.tv_sec--;
 562                 rem += NSEC_PER_SEC;
 563         }
 564         ts.tv_nsec = rem;
 565 
 566         return ts;
 567 }
 568 EXPORT_SYMBOL(ns_to_timespec64);
 569 
 570 /**
 571  * msecs_to_jiffies: - convert milliseconds to jiffies
 572  * @m:  time in milliseconds
 573  *
 574  * conversion is done as follows:
 575  *
 576  * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 577  *
 578  * - 'too large' values [that would result in larger than
 579  *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 580  *
 581  * - all other values are converted to jiffies by either multiplying
 582  *   the input value by a factor or dividing it with a factor and
 583  *   handling any 32-bit overflows.
 584  *   for the details see __msecs_to_jiffies()
 585  *
 586  * msecs_to_jiffies() checks for the passed in value being a constant
 587  * via __builtin_constant_p() allowing gcc to eliminate most of the
 588  * code, __msecs_to_jiffies() is called if the value passed does not
 589  * allow constant folding and the actual conversion must be done at
 590  * runtime.
 591  * the _msecs_to_jiffies helpers are the HZ dependent conversion
 592  * routines found in include/linux/jiffies.h
 593  */
 594 unsigned long __msecs_to_jiffies(const unsigned int m)
 595 {
 596         /*
  597          * A negative value means an infinite timeout:
 598          */
 599         if ((int)m < 0)
 600                 return MAX_JIFFY_OFFSET;
 601         return _msecs_to_jiffies(m);
 602 }
 603 EXPORT_SYMBOL(__msecs_to_jiffies);
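/*
 * Illustrative usage, assuming HZ == 250: msecs_to_jiffies(100) == 25 and,
 * because the conversion rounds up, msecs_to_jiffies(101) == 26.  A typical
 * caller expresses a relative timeout as e.g.
 *
 *	schedule_timeout_interruptible(msecs_to_jiffies(100));
 */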
 604 
 605 unsigned long __usecs_to_jiffies(const unsigned int u)
 606 {
 607         if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
 608                 return MAX_JIFFY_OFFSET;
 609         return _usecs_to_jiffies(u);
 610 }
 611 EXPORT_SYMBOL(__usecs_to_jiffies);
 612 
 613 /*
 614  * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 615  * that a remainder subtract here would not do the right thing as the
  616  * resolution values don't fall on second boundaries.  I.e. the line:
 617  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 618  * Note that due to the small error in the multiplier here, this
 619  * rounding is incorrect for sufficiently large values of tv_nsec, but
 620  * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 621  * OK.
 622  *
 623  * Rather, we just shift the bits off the right.
 624  *
 625  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 626  * value to a scaled second value.
 627  */
 628 static unsigned long
 629 __timespec64_to_jiffies(u64 sec, long nsec)
 630 {
 631         nsec = nsec + TICK_NSEC - 1;
 632 
 633         if (sec >= MAX_SEC_IN_JIFFIES){
 634                 sec = MAX_SEC_IN_JIFFIES;
 635                 nsec = 0;
 636         }
 637         return ((sec * SEC_CONVERSION) +
 638                 (((u64)nsec * NSEC_CONVERSION) >>
 639                  (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 640 
 641 }
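/*
 * Worked example (illustrative only, HZ == 100 so TICK_NSEC is 10 ms):
 * a timespec64 of { 0, 1 } is rounded up to one jiffy by the
 * "+ TICK_NSEC - 1" above, while { 0, 0 } still maps to zero jiffies.
 */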
 642 
 643 static unsigned long
 644 __timespec_to_jiffies(unsigned long sec, long nsec)
 645 {
 646         return __timespec64_to_jiffies((u64)sec, nsec);
 647 }
 648 
 649 unsigned long
 650 timespec64_to_jiffies(const struct timespec64 *value)
 651 {
 652         return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
 653 }
 654 EXPORT_SYMBOL(timespec64_to_jiffies);
 655 
 656 void
 657 jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
 658 {
 659         /*
 660          * Convert jiffies to nanoseconds and separate with
 661          * one divide.
 662          */
 663         u32 rem;
 664         value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
 665                                     NSEC_PER_SEC, &rem);
 666         value->tv_nsec = rem;
 667 }
 668 EXPORT_SYMBOL(jiffies_to_timespec64);
 669 
 670 /*
 671  * We could use a similar algorithm to timespec_to_jiffies (with a
 672  * different multiplier for usec instead of nsec). But this has a
 673  * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 674  * usec value, since it's not necessarily integral.
 675  *
 676  * We could instead round in the intermediate scaled representation
 677  * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 678  * perilous: the scaling introduces a small positive error, which
 679  * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 680  * units to the intermediate before shifting) leads to accidental
 681  * overflow and overestimates.
 682  *
 683  * At the cost of one additional multiplication by a constant, just
 684  * use the timespec implementation.
 685  */
 686 unsigned long
 687 timeval_to_jiffies(const struct timeval *value)
 688 {
 689         return __timespec_to_jiffies(value->tv_sec,
 690                                      value->tv_usec * NSEC_PER_USEC);
 691 }
 692 EXPORT_SYMBOL(timeval_to_jiffies);
 693 
 694 void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
 695 {
 696         /*
 697          * Convert jiffies to nanoseconds and separate with
 698          * one divide.
 699          */
 700         u32 rem;
 701 
 702         value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
 703                                     NSEC_PER_SEC, &rem);
 704         value->tv_usec = rem / NSEC_PER_USEC;
 705 }
 706 EXPORT_SYMBOL(jiffies_to_timeval);
 707 
 708 /*
 709  * Convert jiffies/jiffies_64 to clock_t and back.
 710  */
 711 clock_t jiffies_to_clock_t(unsigned long x)
 712 {
 713 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 714 # if HZ < USER_HZ
 715         return x * (USER_HZ / HZ);
 716 # else
 717         return x / (HZ / USER_HZ);
 718 # endif
 719 #else
 720         return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
 721 #endif
 722 }
 723 EXPORT_SYMBOL(jiffies_to_clock_t);
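/*
 * Worked example (illustrative only): with USER_HZ == 100, the value
 * usually reported to user space, and HZ == 1000, jiffies_to_clock_t(250)
 * returns 25; when HZ == USER_HZ the conversion is simply the identity.
 */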
 724 
 725 unsigned long clock_t_to_jiffies(unsigned long x)
 726 {
 727 #if (HZ % USER_HZ)==0
 728         if (x >= ~0UL / (HZ / USER_HZ))
 729                 return ~0UL;
 730         return x * (HZ / USER_HZ);
 731 #else
 732         /* Don't worry about loss of precision here .. */
 733         if (x >= ~0UL / HZ * USER_HZ)
 734                 return ~0UL;
 735 
 736         /* .. but do try to contain it here */
 737         return div_u64((u64)x * HZ, USER_HZ);
 738 #endif
 739 }
 740 EXPORT_SYMBOL(clock_t_to_jiffies);
 741 
 742 u64 jiffies_64_to_clock_t(u64 x)
 743 {
 744 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 745 # if HZ < USER_HZ
 746         x = div_u64(x * USER_HZ, HZ);
 747 # elif HZ > USER_HZ
 748         x = div_u64(x, HZ / USER_HZ);
 749 # else
 750         /* Nothing to do */
 751 # endif
 752 #else
 753         /*
 754          * There are better ways that don't overflow early,
 755          * but even this doesn't overflow in hundreds of years
 756          * in 64 bits, so..
 757          */
 758         x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
 759 #endif
 760         return x;
 761 }
 762 EXPORT_SYMBOL(jiffies_64_to_clock_t);
 763 
 764 u64 nsec_to_clock_t(u64 x)
 765 {
 766 #if (NSEC_PER_SEC % USER_HZ) == 0
 767         return div_u64(x, NSEC_PER_SEC / USER_HZ);
 768 #elif (USER_HZ % 512) == 0
 769         return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
 770 #else
 771         /*
 772          * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
 773          * overflow after 64.99 years.
 774          * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
 775          */
 776         return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 777 #endif
 778 }
 779 
 780 u64 jiffies64_to_nsecs(u64 j)
 781 {
 782 #if !(NSEC_PER_SEC % HZ)
 783         return (NSEC_PER_SEC / HZ) * j;
  784 #else
 785         return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
 786 #endif
 787 }
 788 EXPORT_SYMBOL(jiffies64_to_nsecs);
 789 
 790 u64 jiffies64_to_msecs(const u64 j)
 791 {
 792 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 793         return (MSEC_PER_SEC / HZ) * j;
 794 #else
 795         return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 796 #endif
 797 }
 798 EXPORT_SYMBOL(jiffies64_to_msecs);
 799 
 800 /**
 801  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 802  *
 803  * @n:  nsecs in u64
 804  *
  805  * Unlike {m,u}secs_to_jiffies, the input type is u64 rather than unsigned int.
  806  * It also doesn't return MAX_JIFFY_OFFSET, since this function is designed
  807  * for the scheduler, not for device drivers calculating a timeout value.
 808  *
 809  * note:
 810  *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 811  *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 812  */
 813 u64 nsecs_to_jiffies64(u64 n)
 814 {
 815 #if (NSEC_PER_SEC % HZ) == 0
 816         /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
 817         return div_u64(n, NSEC_PER_SEC / HZ);
 818 #elif (HZ % 512) == 0
 819         /* overflow after 292 years if HZ = 1024 */
 820         return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
 821 #else
 822         /*
 823          * Generic case - optimized for cases where HZ is a multiple of 3.
 824          * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
 825          */
 826         return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
 827 #endif
 828 }
 829 EXPORT_SYMBOL(nsecs_to_jiffies64);
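/*
 * Worked example (illustrative only, HZ == 1000): the division above
 * truncates, so nsecs_to_jiffies64(1500000) == 1 even though 1.5 ms spans
 * part of a second tick.
 */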
 830 
 831 /**
 832  * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 833  *
 834  * @n:  nsecs in u64
 835  *
  836  * Unlike {m,u}secs_to_jiffies, the input type is u64 rather than unsigned int.
  837  * It also doesn't return MAX_JIFFY_OFFSET, since this function is designed
  838  * for the scheduler, not for device drivers calculating a timeout value.
 839  *
 840  * note:
 841  *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 842  *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 843  */
 844 unsigned long nsecs_to_jiffies(u64 n)
 845 {
 846         return (unsigned long)nsecs_to_jiffies64(n);
 847 }
 848 EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
 849 
 850 /*
 851  * Add two timespec64 values and do a safety check for overflow.
 852  * It's assumed that both values are valid (>= 0).
 853  * And, each timespec64 is in normalized form.
 854  */
 855 struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
 856                                 const struct timespec64 rhs)
 857 {
 858         struct timespec64 res;
 859 
 860         set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
 861                         lhs.tv_nsec + rhs.tv_nsec);
 862 
 863         if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
 864                 res.tv_sec = TIME64_MAX;
 865                 res.tv_nsec = 0;
 866         }
 867 
 868         return res;
 869 }
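/*
 * Worked examples (illustrative only):
 *	{ 1, 800000000 } + { 2, 700000000 }	yields { 4, 500000000 }
 *	{ TIME64_MAX, 0 } + { 1, 0 }		yields { TIME64_MAX, 0 } (saturated)
 */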
 870 
 871 int get_timespec64(struct timespec64 *ts,
 872                    const struct __kernel_timespec __user *uts)
 873 {
 874         struct __kernel_timespec kts;
 875         int ret;
 876 
 877         ret = copy_from_user(&kts, uts, sizeof(kts));
 878         if (ret)
 879                 return -EFAULT;
 880 
 881         ts->tv_sec = kts.tv_sec;
 882 
 883         /* Zero out the padding for 32 bit systems or in compat mode */
 884         if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) ||
 885                                               in_compat_syscall()))
 886                 kts.tv_nsec &= 0xFFFFFFFFUL;
 887 
 888         ts->tv_nsec = kts.tv_nsec;
 889 
 890         return 0;
 891 }
 892 EXPORT_SYMBOL_GPL(get_timespec64);
 893 
 894 int put_timespec64(const struct timespec64 *ts,
 895                    struct __kernel_timespec __user *uts)
 896 {
 897         struct __kernel_timespec kts = {
 898                 .tv_sec = ts->tv_sec,
 899                 .tv_nsec = ts->tv_nsec
 900         };
 901 
 902         return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
 903 }
 904 EXPORT_SYMBOL_GPL(put_timespec64);
 905 
 906 static int __get_old_timespec32(struct timespec64 *ts64,
 907                                    const struct old_timespec32 __user *cts)
 908 {
 909         struct old_timespec32 ts;
 910         int ret;
 911 
 912         ret = copy_from_user(&ts, cts, sizeof(ts));
 913         if (ret)
 914                 return -EFAULT;
 915 
 916         ts64->tv_sec = ts.tv_sec;
 917         ts64->tv_nsec = ts.tv_nsec;
 918 
 919         return 0;
 920 }
 921 
 922 static int __put_old_timespec32(const struct timespec64 *ts64,
 923                                    struct old_timespec32 __user *cts)
 924 {
 925         struct old_timespec32 ts = {
 926                 .tv_sec = ts64->tv_sec,
 927                 .tv_nsec = ts64->tv_nsec
 928         };
 929         return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
 930 }
 931 
 932 int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
 933 {
 934         if (COMPAT_USE_64BIT_TIME)
 935                 return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
 936         else
 937                 return __get_old_timespec32(ts, uts);
 938 }
 939 EXPORT_SYMBOL_GPL(get_old_timespec32);
 940 
 941 int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
 942 {
 943         if (COMPAT_USE_64BIT_TIME)
 944                 return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
 945         else
 946                 return __put_old_timespec32(ts, uts);
 947 }
 948 EXPORT_SYMBOL_GPL(put_old_timespec32);
 949 
 950 int get_itimerspec64(struct itimerspec64 *it,
 951                         const struct __kernel_itimerspec __user *uit)
 952 {
 953         int ret;
 954 
 955         ret = get_timespec64(&it->it_interval, &uit->it_interval);
 956         if (ret)
 957                 return ret;
 958 
 959         ret = get_timespec64(&it->it_value, &uit->it_value);
 960 
 961         return ret;
 962 }
 963 EXPORT_SYMBOL_GPL(get_itimerspec64);
 964 
 965 int put_itimerspec64(const struct itimerspec64 *it,
 966                         struct __kernel_itimerspec __user *uit)
 967 {
 968         int ret;
 969 
 970         ret = put_timespec64(&it->it_interval, &uit->it_interval);
 971         if (ret)
 972                 return ret;
 973 
 974         ret = put_timespec64(&it->it_value, &uit->it_value);
 975 
 976         return ret;
 977 }
 978 EXPORT_SYMBOL_GPL(put_itimerspec64);
 979 
 980 int get_old_itimerspec32(struct itimerspec64 *its,
 981                         const struct old_itimerspec32 __user *uits)
 982 {
 983 
 984         if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
 985             __get_old_timespec32(&its->it_value, &uits->it_value))
 986                 return -EFAULT;
 987         return 0;
 988 }
 989 EXPORT_SYMBOL_GPL(get_old_itimerspec32);
 990 
 991 int put_old_itimerspec32(const struct itimerspec64 *its,
 992                         struct old_itimerspec32 __user *uits)
 993 {
 994         if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
 995             __put_old_timespec32(&its->it_value, &uits->it_value))
 996                 return -EFAULT;
 997         return 0;
 998 }
 999 EXPORT_SYMBOL_GPL(put_old_itimerspec32);
