1
0
Files
kernel-49/lib/random32.c
Eric Biggers cb74d5c7ab Merge 4.9.320 into android-4.9-q
Changes in 4.9.320
	9p: missing chunk of "fs/9p: Don't update file type when updating file attributes"
	random: remove stale maybe_reseed_primary_crng
	random: remove stale urandom_init_wait
	random: remove variable limit
	random: fix comment for unused random_min_urandom_seed
	random: convert get_random_int/long into get_random_u32/u64
	random: move random_min_urandom_seed into CONFIG_SYSCTL ifdef block
	random: invalidate batched entropy after crng init
	random: silence compiler warnings and fix race
	random: add wait_for_random_bytes() API
	random: add get_random_{bytes,u32,u64,int,long,once}_wait family
	random: warn when kernel uses unseeded randomness
	random: do not ignore early device randomness
	random: suppress spammy warnings about unseeded randomness
	random: reorder READ_ONCE() in get_random_uXX
	random: fix warning message on ia64 and parisc
	random: use a different mixing algorithm for add_device_randomness()
	random: set up the NUMA crng instances after the CRNG is fully initialized
	random: fix possible sleeping allocation from irq context
	random: rate limit unseeded randomness warnings
	random: add a spinlock_t to struct batched_entropy
	char/random: silence a lockdep splat with printk()
	Revert "char/random: silence a lockdep splat with printk()"
	random: always use batched entropy for get_random_u{32,64}
	random: fix data race on crng_node_pool
	crypto: chacha20 - Fix keystream alignment for chacha20_block()
	random: always fill buffer in get_random_bytes_wait
	random: optimize add_interrupt_randomness
	drivers/char/random.c: remove unused dont_count_entropy
	random: Fix whitespace pre random-bytes work
	random: Return nbytes filled from hw RNG
	random: add a config option to trust the CPU's hwrng
	random: remove preempt disabled region
	random: Make crng state queryable
	random: make CPU trust a boot parameter
	drivers/char/random.c: constify poolinfo_table
	drivers/char/random.c: remove unused stuct poolinfo::poolbits
	drivers/char/random.c: make primary_crng static
	random: only read from /dev/random after its pool has received 128 bits
	random: move rand_initialize() earlier
	random: document get_random_int() family
	latent_entropy: avoid build error when plugin cflags are not set
	random: fix soft lockup when trying to read from an uninitialized blocking pool
	random: Support freezable kthreads in add_hwgenerator_randomness()
	fdt: add support for rng-seed
	random: Use wait_event_freezable() in add_hwgenerator_randomness()
	char/random: Add a newline at the end of the file
	Revert "hwrng: core - Freeze khwrng thread during suspend"
	crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array()
	crypto: blake2s - generic C library implementation and selftest
	lib/crypto: blake2s: move hmac construction into wireguard
	lib/crypto: sha1: re-roll loops to reduce code size
	random: Don't wake crng_init_wait when crng_init == 1
	random: Add a urandom_read_nowait() for random APIs that don't warn
	random: add GRND_INSECURE to return best-effort non-cryptographic bytes
	random: ignore GRND_RANDOM in getentropy(2)
	random: make /dev/random be almost like /dev/urandom
	random: fix crash on multiple early calls to add_bootloader_randomness()
	random: remove the blocking pool
	random: delete code to pull data into pools
	random: remove kernel.random.read_wakeup_threshold
	random: remove unnecessary unlikely()
	random: convert to ENTROPY_BITS for better code readability
	random: Add and use pr_fmt()
	random: fix typo in add_timer_randomness()
	random: remove some dead code of poolinfo
	random: split primary/secondary crng init paths
	random: avoid warnings for !CONFIG_NUMA builds
	x86: Remove arch_has_random, arch_has_random_seed
	powerpc: Remove arch_has_random, arch_has_random_seed
	linux/random.h: Remove arch_has_random, arch_has_random_seed
	linux/random.h: Use false with bool
	linux/random.h: Mark CONFIG_ARCH_RANDOM functions __must_check
	powerpc: Use bool in archrandom.h
	random: add arch_get_random_*long_early()
	random: avoid arch_get_random_seed_long() when collecting IRQ randomness
	random: remove dead code left over from blocking pool
	MAINTAINERS: co-maintain random.c
	crypto: blake2s - include <linux/bug.h> instead of <asm/bug.h>
	crypto: blake2s - adjust include guard naming
	random: document add_hwgenerator_randomness() with other input functions
	random: remove unused irq_flags argument from add_interrupt_randomness()
	random: use BLAKE2s instead of SHA1 in extraction
	random: do not sign extend bytes for rotation when mixing
	random: do not re-init if crng_reseed completes before primary init
	random: mix bootloader randomness into pool
	random: harmonize "crng init done" messages
	random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
	random: initialize ChaCha20 constants with correct endianness
	random: early initialization of ChaCha constants
	random: avoid superfluous call to RDRAND in CRNG extraction
	random: don't reset crng_init_cnt on urandom_read()
	random: fix typo in comments
	random: cleanup poolinfo abstraction
	crypto: chacha20 - Fix chacha20_block() keystream alignment (again)
	random: cleanup integer types
	random: remove incomplete last_data logic
	random: remove unused extract_entropy() reserved argument
	random: try to actively add entropy rather than passively wait for it
	random: rather than entropy_store abstraction, use global
	random: remove unused OUTPUT_POOL constants
	random: de-duplicate INPUT_POOL constants
	random: prepend remaining pool constants with POOL_
	random: cleanup fractional entropy shift constants
	random: access input_pool_data directly rather than through pointer
	random: simplify arithmetic function flow in account()
	random: continually use hwgenerator randomness
	random: access primary_pool directly rather than through pointer
	random: only call crng_finalize_init() for primary_crng
	random: use computational hash for entropy extraction
	random: simplify entropy debiting
	random: use linear min-entropy accumulation crediting
	random: always wake up entropy writers after extraction
	random: make credit_entropy_bits() always safe
	random: remove use_input_pool parameter from crng_reseed()
	random: remove batched entropy locking
	random: fix locking in crng_fast_load()
	random: use RDSEED instead of RDRAND in entropy extraction
	random: inline leaves of rand_initialize()
	random: ensure early RDSEED goes through mixer on init
	random: do not xor RDRAND when writing into /dev/random
	random: absorb fast pool into input pool after fast load
	random: use hash function for crng_slow_load()
	random: remove outdated INT_MAX >> 6 check in urandom_read()
	random: zero buffer after reading entropy from userspace
	random: tie batched entropy generation to base_crng generation
	random: remove ifdef'd out interrupt bench
	random: remove unused tracepoints
	random: add proper SPDX header
	random: deobfuscate irq u32/u64 contributions
	random: introduce drain_entropy() helper to declutter crng_reseed()
	random: remove useless header comment
	random: remove whitespace and reorder includes
	random: group initialization wait functions
	random: group entropy extraction functions
	random: group entropy collection functions
	random: group userspace read/write functions
	random: group sysctl functions
	random: rewrite header introductory comment
	workqueue: make workqueue available early during boot
	random: defer fast pool mixing to worker
	random: do not take pool spinlock at boot
	random: unify early init crng load accounting
	random: check for crng_init == 0 in add_device_randomness()
	hwrng: core - do not use multiple blank lines
	hwrng: core - rewrite better comparison to NULL
	hwrng: core - Rewrite the header
	hwrng: core - Move hwrng miscdev minor number to include/linux/miscdevice.h
	hwrng: core - remove unused PFX macro
	hwrng: use rng source with best quality
	hwrng: remember rng chosen by user
	random: pull add_hwgenerator_randomness() declaration into random.h
	random: clear fast pool, crng, and batches in cpuhp bring up
	random: round-robin registers as ulong, not u32
	random: only wake up writers after zap if threshold was passed
	random: cleanup UUID handling
	random: unify cycles_t and jiffies usage and types
	random: do crng pre-init loading in worker rather than irq
	random: give sysctl_random_min_urandom_seed a more sensible value
	random: don't let 644 read-only sysctls be written to
	random: replace custom notifier chain with standard one
	random: use SipHash as interrupt entropy accumulator
	random: make consistent usage of crng_ready()
	random: reseed more often immediately after booting
	random: check for signal and try earlier when generating entropy
	random: skip fast_init if hwrng provides large chunk of entropy
	random: treat bootloader trust toggle the same way as cpu trust toggle
	random: re-add removed comment about get_random_{u32,u64} reseeding
	random: mix build-time latent entropy into pool at init
	random: do not split fast init input in add_hwgenerator_randomness()
	random: do not allow user to keep crng key around on stack
	random: check for signal_pending() outside of need_resched() check
	random: check for signals every PAGE_SIZE chunk of /dev/[u]random
	random: make random_get_entropy() return an unsigned long
	random: document crng_fast_key_erasure() destination possibility
	random: fix sysctl documentation nits
	init: call time_init() before rand_initialize()
	ia64: define get_cycles macro for arch-override
	s390: define get_cycles macro for arch-override
	parisc: define get_cycles macro for arch-override
	alpha: define get_cycles macro for arch-override
	powerpc: define get_cycles macro for arch-override
	timekeeping: Add raw clock fallback for random_get_entropy()
	m68k: use fallback for random_get_entropy() instead of zero
	mips: use fallback for random_get_entropy() instead of just c0 random
	arm: use fallback for random_get_entropy() instead of zero
	nios2: use fallback for random_get_entropy() instead of zero
	x86/tsc: Use fallback for random_get_entropy() instead of zero
	um: use fallback for random_get_entropy() instead of zero
	sparc: use fallback for random_get_entropy() instead of zero
	xtensa: use fallback for random_get_entropy() instead of zero
	uapi: rename ext2_swab() to swab() and share globally in swab.h
	random: insist on random_get_entropy() existing in order to simplify
	random: do not use batches when !crng_ready()
	random: do not pretend to handle premature next security model
	random: order timer entropy functions below interrupt functions
	random: do not use input pool from hard IRQs
	random: help compiler out with fast_mix() by using simpler arguments
	siphash: use one source of truth for siphash permutations
	random: use symbolic constants for crng_init states
	random: avoid initializing twice in credit race
	random: remove ratelimiting for in-kernel unseeded randomness
	random: use proper jiffies comparison macro
	random: handle latent entropy and command line from random_init()
	random: credit architectural init the exact amount
	random: use static branch for crng_ready()
	random: remove extern from functions in header
	random: use proper return types on get_random_{int,long}_wait()
	random: move initialization functions out of hot pages
	random: move randomize_page() into mm where it belongs
	random: convert to using fops->write_iter()
	random: wire up fops->splice_{read,write}_iter()
	random: check for signals after page of pool writes
	Revert "random: use static branch for crng_ready()"
	crypto: drbg - add FIPS 140-2 CTRNG for noise source
	crypto: drbg - always seeded with SP800-90B compliant noise source
	crypto: drbg - prepare for more fine-grained tracking of seeding state
	crypto: drbg - track whether DRBG was seeded with !rng_is_initialized()
	crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed()
	crypto: drbg - always try to free Jitter RNG instance
	crypto: drbg - make reseeding from get_random_bytes() synchronous
	random: avoid checking crng_ready() twice in random_init()
	random: mark bootloader randomness code as __init
	random: account for arch randomness in bits
	ASoC: cs42l52: Fix TLV scales for mixer controls
	ASoC: cs53l30: Correct number of volume levels on SX controls
	ASoC: cs42l52: Correct TLV for Bypass Volume
	ASoC: cs42l56: Correct typo in minimum level for SX volume controls
	ata: libata-core: fix NULL pointer deref in ata_host_alloc_pinfo()
	ASoC: wm8962: Fix suspend while playing music
	scsi: vmw_pvscsi: Expand vcpuHint to 16 bits
	scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology
	virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed
	nfc: nfcmrvl: Fix memory leak in nfcmrvl_play_deferred
	ipv6: Fix signed integer overflow in l2tp_ip6_sendmsg
	net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface netdev[napi]_alloc_frag
	random: credit cpu and bootloader seeds by default
	pNFS: Don't keep retrying if the server replied NFS4ERR_LAYOUTUNAVAILABLE
	misc: atmel-ssc: Fix IRQ check in ssc_probe
	irqchip/gic/realview: Fix refcount leak in realview_gic_of_init
	irqchip/gic-v3: Iterate over possible CPUs by for_each_possible_cpu()
	comedi: vmk80xx: fix expression for tx buffer size
	USB: serial: option: add support for Cinterion MV31 with new baseline
	USB: serial: io_ti: add Agilent E5805A support
	usb: gadget: lpc32xx_udc: Fix refcount leak in lpc32xx_udc_probe
	serial: 8250: Store to lsr_save_flags after lsr read
	ext4: fix bug_on ext4_mb_use_inode_pa
	ext4: make variable "count" signed
	ext4: add reserved GDT blocks check
	l2tp: don't use inet_shutdown on ppp session destroy
	l2tp: fix race in pppol2tp_release with session object destroy
	s390/mm: use non-quiescing sske for KVM switch to keyed guest
	xprtrdma: fix incorrect header size calculations
	swiotlb: fix info leak with DMA_FROM_DEVICE
	Reinstate some of "swiotlb: rework "fix info leak with DMA_FROM_DEVICE""
	fuse: fix pipe buffer lifetime for direct_io
	tcp: change source port randomizarion at connect() time
	tcp: add some entropy in __inet_hash_connect()
	secure_seq: use the 64 bits of the siphash for port offset calculation
	tcp: use different parts of the port_offset for index and offset
	tcp: add small random increments to the source port
	tcp: dynamically allocate the perturb table used by source ports
	tcp: increase source port perturb table to 2^16
	tcp: drop the hash_32() part from the index calculation
	Linux 4.9.320

Conflicts:
	crypto/chacha20_generic.c
	drivers/char/random.c
	drivers/of/fdt.c
	include/crypto/chacha20.h
	lib/chacha20.c

Merge resolution notes:
  - Added CHACHA20_KEY_SIZE and CHACHA20_BLOCK_SIZE constants to
    chacha.h, to minimize changes from the 4.9.320 version of random.c

  - Updated lib/vsprintf.c for
    "random: replace custom notifier chain with standard one".

Change-Id: Ia7a12d8883b808f88bbe807d6150552bb084f6b3
Signed-off-by: Eric Biggers <ebiggers@google.com>
2022-07-04 10:26:10 +03:00

574 lines
17 KiB
C

/*
* This is a maximally equidistributed combined Tausworthe generator
* based on code from GNU Scientific Library 1.5 (30 Jun 2004)
*
* lfsr113 version:
*
* x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
*
* s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
* s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
* s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
* s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
*
* The period of this generator is about 2^113 (see erratum paper).
*
* From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
* Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
* http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
* ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
*
* There is an erratum in the paper "Tables of Maximally Equidistributed
* Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
* 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
*
* ... the k_j most significant bits of z_j must be non-zero,
* for each j. (Note: this restriction also applies to the
* computer code given in [4], but was mistakenly not mentioned
* in that paper.)
*
* This affects the seeding procedure by imposing the requirement
* s1 > 1, s2 > 7, s3 > 15, s4 > 127.
*/
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <asm/unaligned.h>
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
*
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_u32().
*/
u32 prandom_u32_state(struct rnd_state *state)
{
	/*
	 * Advance each of the four lfsr113 component generators by one
	 * step (see the recurrences in the header comment), then combine
	 * the four state words with xor to form the output.
	 */
	u32 s1 = state->s1, s2 = state->s2, s3 = state->s3, s4 = state->s4;

	s1 = ((s1 & 4294967294U) << 18) ^ (((s1 << 6) ^ s1) >> 13);
	s2 = ((s2 & 4294967288U) << 2) ^ (((s2 << 2) ^ s2) >> 27);
	s3 = ((s3 & 4294967280U) << 7) ^ (((s3 << 13) ^ s3) >> 21);
	s4 = ((s4 & 4294967168U) << 13) ^ (((s4 << 3) ^ s4) >> 12);

	state->s1 = s1;
	state->s2 = s2;
	state->s3 = s3;
	state->s4 = s4;

	return s1 ^ s2 ^ s3 ^ s4;
}
EXPORT_SYMBOL(prandom_u32_state);
/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_bytes().
*/
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
	u8 *dst = buf;

	/* Emit whole 32-bit words while at least four bytes remain. */
	for (; bytes >= sizeof(u32); bytes -= sizeof(u32)) {
		put_unaligned(prandom_u32_state(state), (u32 *)dst);
		dst += sizeof(u32);
	}

	/* Spend one final word on the 1..3 byte tail, low byte first. */
	if (bytes) {
		u32 tail = prandom_u32_state(state);

		while (bytes--) {
			*dst++ = (u8)tail;
			tail >>= BITS_PER_BYTE;
		}
	}
}
EXPORT_SYMBOL(prandom_bytes_state);
static void prandom_warmup(struct rnd_state *state)
{
	int i;

	/*
	 * Discard the first ten outputs so the generator satisfies the
	 * recurrence condition before any value is handed to a caller.
	 */
	for (i = 0; i < 10; i++)
		prandom_u32_state(state);
}
/*
 * Seed every CPU's rnd_state in @pcpu_state with 128 bits of strong
 * randomness, then warm each generator up before first use.
 */
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct rnd_state *state = per_cpu_ptr(pcpu_state, cpu);
		u32 seeds[4];

		get_random_bytes(&seeds, sizeof(seeds));

		/*
		 * The header comment requires s1 > 1, s2 > 7, s3 > 15,
		 * s4 > 127; __seed() takes the corresponding threshold.
		 */
		state->s1 = __seed(seeds[0], 2U);
		state->s2 = __seed(seeds[1], 8U);
		state->s3 = __seed(seeds[2], 16U);
		state->s4 = __seed(seeds[3], 128U);

		prandom_warmup(state);
	}
}
EXPORT_SYMBOL(prandom_seed_full_state);
#ifdef CONFIG_RANDOM32_SELFTEST
/*
 * Seed-boundary test vectors: for each seed, the expected first output
 * of prandom_u32_state() after prandom_seed_early() + prandom_warmup().
 */
static struct prandom_test1 {
	u32 seed;	/* value passed to prandom_seed_early() */
	u32 result;	/* expected first prandom_u32_state() output */
} test1[] = {
	{ 1U, 3484351685U },
	{ 2U, 2623130059U },
	{ 3U, 3125133893U },
	{ 4U, 984847254U },
};
/*
 * Long-run test vectors: after seeding and warmup, the generator is
 * advanced to the given iteration and its output compared to the
 * reference value recorded from the GSL taus113 implementation.
 */
static struct prandom_test2 {
	u32 seed;	/* value passed to prandom_seed_early() */
	u32 iteration;	/* which output (1-based) to compare */
	u32 result;	/* expected taus113 output at that iteration */
} test2[] = {
	/* Test cases against taus113 from GSL library. */
	{ 931557656U, 959U, 2975593782U },
	{ 1339693295U, 876U, 3887776532U },
	{ 1545556285U, 961U, 1615538833U },
	{ 601730776U, 723U, 1776162651U },
	{ 1027516047U, 687U, 511983079U },
	{ 416526298U, 700U, 916156552U },
	{ 1395522032U, 652U, 2222063676U },
	{ 366221443U, 617U, 2992857763U },
	{ 1539836965U, 714U, 3783265725U },
	{ 556206671U, 994U, 799626459U },
	{ 684907218U, 799U, 367789491U },
	{ 2121230701U, 931U, 2115467001U },
	{ 1668516451U, 644U, 3620590685U },
	{ 768046066U, 883U, 2034077390U },
	{ 1989159136U, 833U, 1195767305U },
	{ 536585145U, 996U, 3577259204U },
	{ 1008129373U, 642U, 1478080776U },
	{ 1740775604U, 939U, 1264980372U },
	{ 1967883163U, 508U, 10734624U },
	{ 1923019697U, 730U, 3821419629U },
	{ 442079932U, 560U, 3440032343U },
	{ 1961302714U, 845U, 841962572U },
	{ 2030205964U, 962U, 1325144227U },
	{ 1160407529U, 507U, 240940858U },
	{ 635482502U, 779U, 4200489746U },
	{ 1252788931U, 699U, 867195434U },
	{ 1961817131U, 719U, 668237657U },
	{ 1071468216U, 983U, 917876630U },
	{ 1281848367U, 932U, 1003100039U },
	{ 582537119U, 780U, 1127273778U },
	{ 1973672777U, 853U, 1071368872U },
	{ 1896756996U, 762U, 1127851055U },
	{ 847917054U, 500U, 1717499075U },
	{ 1240520510U, 951U, 2849576657U },
	{ 1685071682U, 567U, 1961810396U },
	{ 1516232129U, 557U, 3173877U },
	{ 1208118903U, 612U, 1613145022U },
	{ 1817269927U, 693U, 4279122573U },
	{ 1510091701U, 717U, 638191229U },
	{ 365916850U, 807U, 600424314U },
	{ 399324359U, 702U, 1803598116U },
	{ 1318480274U, 779U, 2074237022U },
	{ 697758115U, 840U, 1483639402U },
	{ 1696507773U, 840U, 577415447U },
	{ 2081979121U, 981U, 3041486449U },
	{ 955646687U, 742U, 3846494357U },
	{ 1250683506U, 749U, 836419859U },
	{ 595003102U, 534U, 366794109U },
	{ 47485338U, 558U, 3521120834U },
	{ 619433479U, 610U, 3991783875U },
	{ 704096520U, 518U, 4139493852U },
	{ 1712224984U, 606U, 2393312003U },
	{ 1318233152U, 922U, 3880361134U },
	{ 855572992U, 761U, 1472974787U },
	{ 64721421U, 703U, 683860550U },
	{ 678931758U, 840U, 380616043U },
	{ 692711973U, 778U, 1382361947U },
	{ 677703619U, 530U, 2826914161U },
	{ 92393223U, 586U, 1522128471U },
	{ 1222592920U, 743U, 3466726667U },
	{ 358288986U, 695U, 1091956998U },
	{ 1935056945U, 958U, 514864477U },
	{ 735675993U, 990U, 1294239989U },
	{ 1560089402U, 897U, 2238551287U },
	{ 70616361U, 829U, 22483098U },
	{ 368234700U, 731U, 2913875084U },
	{ 20221190U, 879U, 1564152970U },
	{ 539444654U, 682U, 1835141259U },
	{ 1314987297U, 840U, 1801114136U },
	{ 2019295544U, 645U, 3286438930U },
	{ 469023838U, 716U, 1637918202U },
	{ 1843754496U, 653U, 2562092152U },
	{ 400672036U, 809U, 4264212785U },
	{ 404722249U, 965U, 2704116999U },
	{ 600702209U, 758U, 584979986U },
	{ 519953954U, 667U, 2574436237U },
	{ 1658071126U, 694U, 2214569490U },
	{ 420480037U, 749U, 3430010866U },
	{ 690103647U, 969U, 3700758083U },
	{ 1029424799U, 937U, 3787746841U },
	{ 2012608669U, 506U, 3362628973U },
	{ 1535432887U, 998U, 42610943U },
	{ 1330635533U, 857U, 3040806504U },
	{ 1223800550U, 539U, 3954229517U },
	{ 1322411537U, 680U, 3223250324U },
	{ 1877847898U, 945U, 2915147143U },
	{ 1646356099U, 874U, 965988280U },
	{ 805687536U, 744U, 4032277920U },
	{ 1948093210U, 633U, 1346597684U },
	{ 392609744U, 783U, 1636083295U },
	{ 690241304U, 770U, 1201031298U },
	{ 1360302965U, 696U, 1665394461U },
	{ 1220090946U, 780U, 1316922812U },
	{ 447092251U, 500U, 3438743375U },
	{ 1613868791U, 592U, 828546883U },
	{ 523430951U, 548U, 2552392304U },
	{ 726692899U, 810U, 1656872867U },
	{ 1364340021U, 836U, 3710513486U },
	{ 1986257729U, 931U, 935013962U },
	{ 407983964U, 921U, 728767059U },
};
/*
 * Fetch one word of hardware randomness for seeding: prefer a true
 * hardware seed, fall back to the arch RNG, and return 0 when neither
 * source is available.
 */
static u32 __extract_hwseed(void)
{
	unsigned int val = 0;

	if (!arch_get_random_seed_int(&val))
		arch_get_random_int(&val);

	return val;
}
/*
 * Expand a single 32-bit seed into a full rnd_state using an LCG,
 * optionally xoring in fresh hardware entropy for each word.  The
 * selftests pass mix_with_hwseed == false so results are reproducible.
 */
static void prandom_seed_early(struct rnd_state *state, u32 seed,
bool mix_with_hwseed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
	/*
	 * Each word chains off the previous (already-clamped) one; the
	 * header comment requires s1 > 1, s2 > 7, s3 > 15, s4 > 127,
	 * presumably enforced by __seed() -- see prandom.h to confirm.
	 */
	state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
	state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
	state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
	state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
/*
 * Boot-time selftest: check the seed-boundary vectors in test1[] and
 * the GSL taus113 reference vectors in test2[].
 */
static int __init prandom_state_selftest(void)
{
	int i, j, errors = 0, runs = 0;
	bool error = false;

	/* Phase 1: each boundary seed must yield its recorded first output. */
	for (i = 0; i < ARRAY_SIZE(test1); i++) {
		struct rnd_state state;

		prandom_seed_early(&state, test1[i].seed, false);
		prandom_warmup(&state);

		if (prandom_u32_state(&state) != test1[i].result)
			error = true;
	}

	if (error)
		pr_warn("prandom: seed boundary self test failed\n");
	else
		pr_info("prandom: seed boundary self test passed\n");

	/*
	 * Phase 2: run each generator forward to the recorded iteration
	 * and compare against the reference output.
	 */
	for (i = 0; i < ARRAY_SIZE(test2); i++) {
		struct rnd_state state;

		prandom_seed_early(&state, test2[i].seed, false);
		prandom_warmup(&state);

		for (j = 0; j < test2[i].iteration - 1; j++)
			prandom_u32_state(&state);

		if (prandom_u32_state(&state) != test2[i].result)
			errors++;

		runs++;
		cond_resched();
	}

	if (errors)
		pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
	else
		pr_info("prandom: %d self tests passed\n", runs);

	return 0;
}
core_initcall(prandom_state_selftest);
#endif
/*
* The prandom_u32() implementation is now completely separate from the
* prandom_state() functions, which are retained (for now) for compatibility.
*
* Because of (ab)use in the networking code for choosing random TCP/UDP port
* numbers, which open DoS possibilities if guessable, we want something
* stronger than a standard PRNG. But the performance requirements of
* the network code do not allow robust crypto for this application.
*
* So this is a homebrew Junior Spaceman implementation, based on the
* lowest-latency trustworthy crypto primitive available, SipHash.
* (The authors of SipHash have not been consulted about this abuse of
* their work.)
*
* Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
* one word of output. This abbreviated version uses 2 rounds per word
* of output.
*/
/* The four SipHash internal state words (v0..v3 in the SipHash spec). */
struct siprand_state {
	unsigned long v0;
	unsigned long v1;
	unsigned long v2;
	unsigned long v3;
};

/*
 * Per-CPU generator state; __latent_entropy lets the latent-entropy
 * GCC plugin pre-mix it at build time when that plugin is enabled.
 */
static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
/*
* This is the core CPRNG function. As "pseudorandom", this is not used
* for truly valuable things, just intended to be a PITA to guess.
* For maximum speed, we do just two SipHash rounds per word. This is
* the same rate as 4 rounds per 64 bits that SipHash normally uses,
* so hopefully it's reasonably secure.
*
* There are two changes from the official SipHash finalization:
* - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
* they are there only to make the output rounds distinct from the input
* rounds, and this application has no input rounds.
* - Rather than returning v0^v1^v2^v3, return v1+v3.
* If you look at the SipHash round, the last operation on v3 is
* "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
* Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
* it still cancels out half of the bits in v2 for no benefit.)
* Second, since the last combining operation was xor, continue the
* pattern of alternating xor/add for a tiny bit of extra non-linearity.
*/
static inline u32 siprand_u32(struct siprand_state *s)
{
	unsigned long a = s->v0, b = s->v1, c = s->v2, d = s->v3;

	/* Two SipHash rounds per output word -- see the comment above. */
	PRND_SIPROUND(a, b, c, d);
	PRND_SIPROUND(a, b, c, d);

	s->v0 = a;
	s->v1 = b;
	s->v2 = c;
	s->v3 = d;

	/* v1 + v3 rather than v0^v1^v2^v3, as explained above. */
	return b + d;
}
/**
* prandom_u32 - pseudo random number generator
*
* A 32 bit pseudo-random number is generated using a fast
* algorithm suitable for simulation. This algorithm is NOT
* considered safe for cryptographic use.
*/
u32 prandom_u32(void)
{
	u32 ret;
	struct siprand_state *state = get_cpu_ptr(&net_rand_state);

	ret = siprand_u32(state);
	put_cpu_ptr(&net_rand_state);

	return ret;
}
EXPORT_SYMBOL(prandom_u32);
/**
* prandom_bytes - get the requested number of pseudo-random bytes
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*/
void prandom_bytes(void *buf, size_t bytes)
{
	struct siprand_state *state = get_cpu_ptr(&net_rand_state);
	u8 *dst = buf;

	/* Emit whole 32-bit words while at least four bytes remain. */
	for (; bytes >= sizeof(u32); bytes -= sizeof(u32)) {
		put_unaligned(siprand_u32(state), (u32 *)dst);
		dst += sizeof(u32);
	}

	/* Spend one final word on the 1..3 byte tail, low byte first. */
	if (bytes) {
		u32 tail = siprand_u32(state);

		while (bytes--) {
			*dst++ = (u8)tail;
			tail >>= BITS_PER_BYTE;
		}
	}

	put_cpu_ptr(&net_rand_state);
}
EXPORT_SYMBOL(prandom_bytes);
/**
* prandom_seed - add entropy to pseudo random number generator
* @entropy: entropy value
*
* Add some additional seed material to the prandom pool.
* The "entropy" is actually our IP address (the only caller is
* the network code), not for unpredictability, but to ensure that
* different machines are initialized differently.
*/
void prandom_seed(u32 entropy)
{
	int i;

	/* Also feed the entropy word into the main input pool. */
	add_device_randomness(&entropy, sizeof(entropy));

	for_each_possible_cpu(i) {
		struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
		unsigned long v0 = state->v0, v1 = state->v1;
		unsigned long v2 = state->v2, v3 = state->v3;

		/*
		 * Mix the entropy in with two SipHash rounds; repeat
		 * until no state word is zero, so we can never write
		 * back the all-zero SipHash fixed point.
		 */
		do {
			v3 ^= entropy;
			PRND_SIPROUND(v0, v1, v2, v3);
			PRND_SIPROUND(v0, v1, v2, v3);
			v0 ^= entropy;
		} while (unlikely(!v0 || !v1 || !v2 || !v3));

		/* Plain WRITE_ONCE: concurrent readers just get mixed state. */
		WRITE_ONCE(state->v0, v0);
		WRITE_ONCE(state->v1, v1);
		WRITE_ONCE(state->v2, v2);
		WRITE_ONCE(state->v3, v3);
	}
}
EXPORT_SYMBOL(prandom_seed);
/*
* Generate some initially weak seeding values to allow
* the prandom_u32() engine to be started.
*/
static int __init prandom_init_early(void)
{
	int i;
	unsigned long v0, v1, v2, v3;

	/* Use arch RNG words when available, timing values otherwise. */
	if (!arch_get_random_long(&v0))
		v0 = jiffies;
	if (!arch_get_random_long(&v1))
		v1 = random_get_entropy();

	/* PRND_K0/PRND_K1: key constants -- presumably the SipHash
	 * initialization constants from prandom.h; confirm there. */
	v2 = v0 ^ PRND_K0;
	v3 = v1 ^ PRND_K1;

	for_each_possible_cpu(i) {
		struct siprand_state *state;

		/* Mix the CPU number in so every CPU starts differently. */
		v3 ^= i;
		PRND_SIPROUND(v0, v1, v2, v3);
		PRND_SIPROUND(v0, v1, v2, v3);
		v0 ^= i;

		state = per_cpu_ptr(&net_rand_state, i);
		state->v0 = v0; state->v1 = v1;
		state->v2 = v2; state->v3 = v3;
	}

	return 0;
}
core_initcall(prandom_init_early);
/* Stronger reseeding when available, and periodically thereafter. */

/* Forward declaration so the timer definition can reference the handler. */
static void prandom_reseed(unsigned long dontcare);

static DEFINE_TIMER(seed_timer, prandom_reseed, 0, 0);

/* Timer callback: rekey every CPU's state from get_random_long(),
 * then re-arm itself for roughly a minute later. */
static void prandom_reseed(unsigned long dontcare)
{
	unsigned long expires;
	int i;

	/*
	 * Reinitialize each CPU's PRNG with 128 bits of key.
	 * No locking on the CPUs, but then somewhat random results are,
	 * well, expected.
	 */
	for_each_possible_cpu(i) {
		struct siprand_state *state;
		unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
		unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
#if BITS_PER_LONG == 32
		int j;

		/*
		 * On 32-bit machines, hash in two extra words to
		 * approximate 128-bit key length. Not that the hash
		 * has that much security, but this prevents a trivial
		 * 64-bit brute force.
		 */
		for (j = 0; j < 2; j++) {
			unsigned long m = get_random_long();

			v3 ^= m;
			PRND_SIPROUND(v0, v1, v2, v3);
			PRND_SIPROUND(v0, v1, v2, v3);
			v0 ^= m;
		}
#endif
		/*
		 * Probably impossible in practice, but there is a
		 * theoretical risk that a race between this reseeding
		 * and the target CPU writing its state back could
		 * create the all-zero SipHash fixed point.
		 *
		 * To ensure that never happens, ensure the state
		 * we write contains no zero words.
		 */
		state = per_cpu_ptr(&net_rand_state, i);
		WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
		WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
		WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
		WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
	}

	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
	expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
	mod_timer(&seed_timer, expires);
}
/*
* The random ready callback can be called from almost any interrupt.
* To avoid worrying about whether it's safe to delay that interrupt
* long enough to seed all CPUs, just schedule an immediate timer event.
*/
/* Notifier callback: fire the reseed timer as soon as possible. */
static int prandom_timer_start(struct notifier_block *unused1,
			       unsigned long unused2, void *unused3)
{
	mod_timer(&seed_timer, jiffies);
	return 0;
}
/*
* Start periodic full reseeding as soon as strong
* random numbers are available.
*/
static int __init prandom_init_late(void)
{
	static struct notifier_block random_ready = {
		.notifier_call = prandom_timer_start
	};
	int ret = register_random_ready_notifier(&random_ready);

	/*
	 * -EALREADY means the crng was ready before we registered:
	 * kick off the first reseed ourselves instead of waiting for
	 * a notification that will never come.
	 */
	if (ret != -EALREADY)
		return ret;

	prandom_timer_start(&random_ready, 0, NULL);
	return 0;
}
late_initcall(prandom_init_late);