-rw-r--r--  Documentation/blockdev/zram.txt | 6
-rw-r--r--  Documentation/sysctl/vm.txt | 4
-rw-r--r--  Documentation/vm/ksm.txt | 18
-rw-r--r--  Documentation/vm/userfaultfd.txt | 89
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/alpha/include/asm/Kbuild | 1
-rw-r--r--  arch/alpha/include/asm/current.h | 9
-rw-r--r--  arch/arm/mm/dma-mapping.c | 16
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 4
-rw-r--r--  arch/cris/include/asm/Kbuild | 1
-rw-r--r--  arch/cris/include/asm/current.h | 15
-rw-r--r--  arch/frv/mb93090-mb00/pci-frv.c | 11
-rw-r--r--  arch/m68k/68000/bootlogo-vz.h | 4
-rw-r--r--  arch/m68k/68000/bootlogo.h | 4
-rw-r--r--  arch/m68k/include/asm/MC68328.h | 3
-rw-r--r--  arch/m68k/include/asm/MC68EZ328.h | 3
-rw-r--r--  arch/m68k/include/asm/MC68VZ328.h | 2
-rw-r--r--  arch/m68k/include/asm/natfeat.h | 3
-rw-r--r--  arch/m68k/lib/ashldi3.c | 8
-rw-r--r--  arch/m68k/lib/ashrdi3.c | 8
-rw-r--r--  arch/m68k/lib/lshrdi3.c | 8
-rw-r--r--  arch/m68k/lib/muldi3.c | 8
-rw-r--r--  arch/microblaze/pci/pci-common.c | 1
-rw-r--r--  arch/mips/kernel/vdso.c | 2
-rw-r--r--  arch/mips/mm/dma-default.c | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 52
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 39
-rw-r--r--  arch/s390/kernel/crash_dump.c | 1
-rw-r--r--  arch/tile/mm/elf.c | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/entry/vdso/vma.c | 2
-rw-r--r--  arch/x86/include/asm/paravirt.h | 11
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 17
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 30
-rw-r--r--  arch/x86/include/asm/pgtable.h | 140
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 15
-rw-r--r--  arch/x86/kernel/paravirt.c | 1
-rw-r--r--  arch/x86/kernel/pci-dma.c | 3
-rw-r--r--  arch/x86/mm/gup.c | 28
-rw-r--r--  arch/x86/mm/mpx.c | 4
-rw-r--r--  arch/x86/mm/pgtable.c | 31
-rw-r--r--  arch/xtensa/kernel/pci-dma.c | 3
-rw-r--r--  crypto/lz4.c | 23
-rw-r--r--  crypto/lz4hc.c | 23
-rw-r--r--  crypto/testmgr.h | 142
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/base/core.c | 5
-rw-r--r--  drivers/base/dma-contiguous.c | 5
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/block/zram/zram_drv.c | 127
-rw-r--r--  drivers/block/zram/zram_drv.h | 12
-rw-r--r--  drivers/char/agp/alpha-agp.c | 5
-rw-r--r--  drivers/char/mspec.c | 6
-rw-r--r--  drivers/dax/dax.c | 92
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 9
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 36
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 10
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 3
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 7
-rw-r--r--  drivers/hsi/clients/cmt_speech.c | 4
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-sg.c | 3
-rw-r--r--  drivers/misc/cxl/context.c | 3
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 3
-rw-r--r--  drivers/misc/sgi-gru/grutables.h | 2
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 6
-rw-r--r--  drivers/scsi/sg.c | 3
-rw-r--r--  drivers/staging/android/ion/ion.c | 6
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_mmap.c | 7
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_io.c | 2
-rw-r--r--  drivers/target/target_core_user.c | 6
-rw-r--r--  drivers/uio/uio.c | 6
-rw-r--r--  drivers/usb/mon/mon_bin.c | 4
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 16
-rw-r--r--  drivers/virtio/virtio_balloon.c | 4
-rw-r--r--  drivers/xen/privcmd.c | 4
-rw-r--r--  fs/9p/vfs_file.c | 4
-rw-r--r--  fs/aio.c | 2
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/inode.c | 6
-rw-r--r--  fs/ceph/addr.c | 8
-rw-r--r--  fs/cifs/file.c | 2
-rw-r--r--  fs/dax.c | 59
-rw-r--r--  fs/ext2/file.c | 19
-rw-r--r--  fs/ext4/ext4.h | 4
-rw-r--r--  fs/ext4/file.c | 40
-rw-r--r--  fs/ext4/inode.c | 9
-rw-r--r--  fs/f2fs/file.c | 7
-rw-r--r--  fs/fuse/file.c | 6
-rw-r--r--  fs/gfs2/file.c | 8
-rw-r--r--  fs/iomap.c | 5
-rw-r--r--  fs/kernfs/file.c | 13
-rw-r--r--  fs/ncpfs/mmap.c | 7
-rw-r--r--  fs/nfs/file.c | 4
-rw-r--r--  fs/nilfs2/file.c | 3
-rw-r--r--  fs/ocfs2/mmap.c | 15
-rw-r--r--  fs/proc/base.c | 152
-rw-r--r--  fs/proc/generic.c | 11
-rw-r--r--  fs/proc/inode.c | 2
-rw-r--r--  fs/proc/root.c | 3
-rw-r--r--  fs/proc/vmcore.c | 8
-rw-r--r--  fs/pstore/platform.c | 22
-rw-r--r--  fs/squashfs/lz4_wrapper.c | 12
-rw-r--r--  fs/ubifs/file.c | 5
-rw-r--r--  fs/userfaultfd.c | 109
-rw-r--r--  fs/xfs/xfs_file.c | 38
-rw-r--r--  fs/xfs/xfs_trace.h | 2
-rw-r--r--  include/asm-generic/pgtable.h | 104
-rw-r--r--  include/asm-generic/tlb.h | 14
-rw-r--r--  include/linux/bug.h | 12
-rw-r--r--  include/linux/cma.h | 3
-rw-r--r--  include/linux/compiler-gcc.h | 1
-rw-r--r--  include/linux/dax.h | 12
-rw-r--r--  include/linux/device.h | 1
-rw-r--r--  include/linux/dma-contiguous.h | 4
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/huge_mm.h | 83
-rw-r--r--  include/linux/iomap.h | 3
-rw-r--r--  include/linux/iopoll.h | 2
-rw-r--r--  include/linux/kasan.h | 4
-rw-r--r--  include/linux/kernel.h | 10
-rw-r--r--  include/linux/lz4.h | 701
-rw-r--r--  include/linux/memblock.h | 1
-rw-r--r--  include/linux/memory.h | 3
-rw-r--r--  include/linux/migrate.h | 4
-rw-r--r--  include/linux/mm.h | 82
-rw-r--r--  include/linux/mm_inline.h | 7
-rw-r--r--  include/linux/mmu_notifier.h | 14
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/pfn_t.h | 12
-rw-r--r--  include/linux/pid_namespace.h | 6
-rw-r--r--  include/linux/rbtree_augmented.h | 4
-rw-r--r--  include/linux/rmap.h | 52
-rw-r--r--  include/linux/shmem_fs.h | 7
-rw-r--r--  include/linux/userfaultfd_k.h | 41
-rw-r--r--  include/linux/writeback.h | 2
-rw-r--r--  include/trace/events/writeback.h | 2
-rw-r--r--  include/uapi/linux/mqueue.h | 2
-rw-r--r--  include/uapi/linux/userfaultfd.h | 16
-rw-r--r--  ipc/shm.c | 14
-rw-r--r--  kernel/events/core.c | 6
-rw-r--r--  kernel/events/uprobes.c | 26
-rw-r--r--  kernel/exit.c | 2
-rw-r--r--  kernel/ksysfs.c | 2
-rw-r--r--  kernel/memremap.c | 6
-rw-r--r--  kernel/notifier.c | 2
-rw-r--r--  kernel/relay.c | 4
-rw-r--r--  lib/Kconfig | 6
-rw-r--r--  lib/Kconfig.debug | 13
-rw-r--r--  lib/Makefile | 3
-rw-r--r--  lib/atomic64_test.c | 10
-rw-r--r--  lib/crc32.c | 824
-rw-r--r--  lib/crc32test.c | 856
-rw-r--r--  lib/decompress_unlz4.c | 13
-rw-r--r--  lib/find_bit.c | 4
-rw-r--r--  lib/glob.c | 164
-rw-r--r--  lib/globtest.c | 167
-rw-r--r--  lib/list_debug.c | 45
-rw-r--r--  lib/lz4/Makefile | 2
-rw-r--r--  lib/lz4/lz4_compress.c | 1141
-rw-r--r--  lib/lz4/lz4_decompress.c | 665
-rw-r--r--  lib/lz4/lz4defs.h | 338
-rw-r--r--  lib/lz4/lz4hc_compress.c | 846
-rw-r--r--  lib/rbtree.c | 4
-rw-r--r--  lib/sort.c | 41
-rw-r--r--  lib/test_kasan.c | 34
-rw-r--r--  lib/test_sort.c | 44
-rw-r--r--  mm/Makefile | 6
-rw-r--r--  mm/cma.c | 40
-rw-r--r--  mm/cma_debug.c | 2
-rw-r--r--  mm/compaction.c | 2
-rw-r--r--  mm/dmapool.c | 16
-rw-r--r--  mm/filemap.c | 26
-rw-r--r--  mm/gup.c | 12
-rw-r--r--  mm/huge_memory.c | 282
-rw-r--r--  mm/hugetlb.c | 5
-rw-r--r--  mm/internal.h | 9
-rw-r--r--  mm/kasan/kasan.c | 2
-rw-r--r--  mm/kasan/quarantine.c | 1
-rw-r--r--  mm/ksm.c | 109
-rw-r--r--  mm/madvise.c | 55
-rw-r--r--  mm/memblock.c | 32
-rw-r--r--  mm/memcontrol.c | 1
-rw-r--r--  mm/memory-failure.c | 26
-rw-r--r--  mm/memory.c | 179
-rw-r--r--  mm/memory_hotplug.c | 37
-rw-r--r--  mm/migrate.c | 108
-rw-r--r--  mm/mincore.c | 1
-rw-r--r--  mm/mmap.c | 74
-rw-r--r--  mm/mprotect.c | 2
-rw-r--r--  mm/mremap.c | 23
-rw-r--r--  mm/nommu.c | 11
-rw-r--r--  mm/oom_kill.c | 16
-rw-r--r--  mm/page-writeback.c | 2
-rw-r--r--  mm/page_alloc.c | 322
-rw-r--r--  mm/page_idle.c | 34
-rw-r--r--  mm/page_vma_mapped.c | 218
-rw-r--r--  mm/pagewalk.c | 20
-rw-r--r--  mm/pgtable-generic.c | 14
-rw-r--r--  mm/rmap.c | 574
-rw-r--r--  mm/shmem.c | 5
-rw-r--r--  mm/slab_common.c | 4
-rw-r--r--  mm/swap.c | 9
-rw-r--r--  mm/truncate.c | 1
-rw-r--r--  mm/userfaultfd.c | 58
-rw-r--r--  mm/util.c | 5
-rw-r--r--  mm/vmalloc.c | 7
-rw-r--r--  mm/vmpressure.c | 10
-rw-r--r--  mm/vmscan.c | 78
-rw-r--r--  mm/workingset.c | 1
-rw-r--r--  mm/z3fold.c | 381
-rw-r--r--  mm/zsmalloc.c | 10
-rwxr-xr-x  scripts/checkpatch.pl | 37
-rw-r--r--  security/selinux/selinuxfs.c | 5
-rw-r--r--  sound/core/pcm_native.c | 15
-rw-r--r--  sound/usb/usx2y/us122l.c | 5
-rw-r--r--  sound/usb/usx2y/usX2Yhwdep.c | 7
-rw-r--r--  sound/usb/usx2y/usx2yhwdeppcm.c | 5
-rw-r--r--  tools/lib/find_bit.c | 2
-rw-r--r--  tools/testing/selftests/vm/userfaultfd.c | 22
-rw-r--r--  virt/kvm/kvm_main.c | 4
247 files changed, 7290 insertions, 4008 deletions
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 1c0c08d9206b..4fced8a21307 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -201,8 +201,8 @@ File /sys/block/zram<id>/mm_stat
The stat file represents device's mm statistics. It consists of a single
line of text and contains the following stats separated by whitespace:
orig_data_size uncompressed size of data stored in this disk.
- This excludes zero-filled pages (zero_pages) since no
- memory is allocated for them.
+ This excludes same-element-filled pages (same_pages) since
+ no memory is allocated for them.
Unit: bytes
compr_data_size compressed size of data stored in this disk
mem_used_total the amount of memory allocated for this disk. This
@@ -214,7 +214,7 @@ line of text and contains the following stats separated by whitespace:
the compressed data
mem_used_max the maximum amount of memory zram have consumed to
store the data
- zero_pages the number of zero filled pages written to this disk.
+ same_pages the number of same-element-filled pages written to this disk.
No memory is allocated for such pages.
pages_compacted the number of pages freed during compaction
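
As an aside (not part of the patch): the fields above appear whitespace-separated
on a single line, so they can be read positionally. A minimal userspace sketch,
assuming the seven-column layout documented here and a device named zram0:

    /*
     * Illustrative only -- not part of this patch.  Reads zram0's mm_stat
     * and prints the renamed same_pages counter, assuming the seven
     * whitespace-separated fields described above, in order:
     * orig_data_size compr_data_size mem_used_total mem_limit
     * mem_used_max same_pages pages_compacted.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long v[7];
        FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%llu %llu %llu %llu %llu %llu %llu",
                   &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6]) == 7)
            printf("same_pages: %llu\n", v[5]);
        fclose(f);
        return 0;
    }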
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 95ccbe6d79ce..b4ad97f10b8e 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -376,8 +376,8 @@ max_map_count:
This file contains the maximum number of memory map areas a process
may have. Memory map areas are used as a side-effect of calling
-malloc, directly by mmap and mprotect, and also when loading shared
-libraries.
+malloc, directly by mmap, mprotect, and madvise, and also when loading
+shared libraries.
While most applications need less than a thousand maps, certain
programs, particularly malloc debuggers, may consume lots of them,
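
As an aside (not part of the patch), one way to see how close a process is to
this limit is to compare the number of lines in /proc/<pid>/maps -- one per map
area -- against the sysctl value; a minimal sketch:

    /*
     * Illustrative only -- not part of this patch.  Compares the number
     * of map areas of the current process against vm.max_map_count.
     */
    #include <stdio.h>

    int main(void)
    {
        char line[4096];
        long limit = 0, used = 0;
        FILE *f = fopen("/proc/sys/vm/max_map_count", "r");

        if (!f || fscanf(f, "%ld", &limit) != 1)
            return 1;
        fclose(f);

        f = fopen("/proc/self/maps", "r");
        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            used++;                 /* one line per memory map area */
        fclose(f);

        printf("%ld of %ld map areas in use\n", used, limit);
        return 0;
    }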
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index f34a8ee6f860..6b0ca7feb135 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -38,6 +38,10 @@ the range for whenever the KSM daemon is started; even if the range
cannot contain any pages which KSM could actually merge; even if
MADV_UNMERGEABLE is applied to a range which was never MADV_MERGEABLE.
+If a region of memory must be split into at least one new MADV_MERGEABLE
+or MADV_UNMERGEABLE region, madvise may return ENOMEM if the process
+would exceed vm.max_map_count (see Documentation/sysctl/vm.txt).
+
Like other madvise calls, they are intended for use on mapped areas of
the user address space: they will report ENOMEM if the specified range
includes unmapped gaps (though working on the intervening mapped areas),
@@ -80,6 +84,20 @@ run - set 0 to stop ksmd from running but keep merged pages,
Default: 0 (must be changed to 1 to activate KSM,
except if CONFIG_SYSFS is disabled)
+use_zero_pages - specifies whether empty pages (i.e. allocated pages
+ that only contain zeroes) should be treated specially.
+ When set to 1, empty pages are merged with the kernel
+ zero page(s) instead of with each other as it would
+ happen normally. This can improve the performance on
+ architectures with coloured zero pages, depending on
+ the workload. Care should be taken when enabling this
+ setting, as it can potentially degrade the performance
+ of KSM for some workloads, for example if the checksums
+ of pages that are candidates for merging match the checksum of
+ an empty page. This setting can be changed at any time;
+ it is only effective for pages merged after the change.
+ Default: 0 (normal KSM behaviour as in earlier releases)
+
The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
pages_shared - how many shared pages are being used
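
As an aside (not part of the patch), a minimal sketch of marking an anonymous
region mergeable; ksmd must be running (run set to 1), and merging of empty
pages with the kernel zero page additionally depends on the use_zero_pages
setting described above:

    /*
     * Illustrative only -- not part of this patch.  Maps 64 MB of
     * identical anonymous pages and asks KSM to merge them.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 64 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        memset(p, 0xaa, len);   /* every page gets identical contents */

        /* May fail with ENOMEM if splitting the VMA would exceed
         * vm.max_map_count, as noted above. */
        if (madvise(p, len, MADV_MERGEABLE))
            perror("madvise(MADV_MERGEABLE)");

        pause();        /* let ksmd scan; watch pages_shared in sysfs */
        return 0;
    }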
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
index 70a3c94d1941..fe51a5aa8963 100644
--- a/Documentation/vm/userfaultfd.txt
+++ b/Documentation/vm/userfaultfd.txt
@@ -54,6 +54,26 @@ uffdio_api.features and uffdio_api.ioctls two 64bit bitmasks of
respectively all the available features of the read(2) protocol and
the generic ioctl available.
+The uffdio_api.features bitmask returned by the UFFDIO_API ioctl
+defines what memory types are supported by the userfaultfd and what
+events, except page fault notifications, may be generated.
+
+If the kernel supports registering userfaultfd ranges on hugetlbfs
+virtual memory areas, UFFD_FEATURE_MISSING_HUGETLBFS will be set in
+uffdio_api.features. Similarly, UFFD_FEATURE_MISSING_SHMEM will be
+set if the kernel supports registering userfaultfd ranges on shared
+memory (covering all shmem APIs, i.e. tmpfs, IPCSHM, /dev/zero
+MAP_SHARED, memfd_create, etc).
+
+The userland application that wants to use userfaultfd with hugetlbfs
+or shared memory needs to set the corresponding flag in
+uffdio_api.features to enable those features.
+
+If the userland desires to receive notifications for events other than
+page faults, it has to verify that uffdio_api.features has appropriate
+UFFD_FEATURE_EVENT_* bits set. These events are described in more
+detail below in "Non-cooperative userfaultfd" section.
+
Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should
be invoked (if present in the returned uffdio_api.ioctls bitmask) to
register a memory range in the userfaultfd by setting the
@@ -142,3 +162,72 @@ course the bitmap is updated accordingly. It's also useful to avoid
sending the same page twice (in case the userfault is read by the
postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration
thread).
+
+== Non-cooperative userfaultfd ==
+
+When the userfaultfd is monitored by an external manager, the manager
+must be able to track changes in the process virtual memory
+layout. Userfaultfd can notify the manager about such changes using
+the same read(2) protocol as for the page fault notifications. The
+manager has to explicitly enable these events by setting appropriate
+bits in uffdio_api.features passed to UFFDIO_API ioctl:
+
+UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
+non-cooperative process. When the monitored process exits, the uffd
+manager will get UFFD_EVENT_EXIT.
+
+UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
+this feature is enabled, the userfaultfd context of the parent process
+is duplicated into the newly created process. The manager receives
+UFFD_EVENT_FORK with the file descriptor of the new userfaultfd context
+in uffd_msg.fork.
+
+UFFD_FEATURE_EVENT_REMAP - enable notifications about mremap()
+calls. When the non-cooperative process moves a virtual memory area to
+a different location, the manager will receive UFFD_EVENT_REMAP. The
+uffd_msg.remap will contain the old and new addresses of the area and
+its original length.
+
+UFFD_FEATURE_EVENT_REMOVE - enable notifications about
+madvise(MADV_REMOVE) and madvise(MADV_DONTNEED) calls. The event
+UFFD_EVENT_REMOVE will be generated upon these calls to madvise. The
+uffd_msg.remove will contain start and end addresses of the removed
+area.
+
+UFFD_FEATURE_EVENT_UNMAP - enable notifications about memory
+unmapping. The manager will get UFFD_EVENT_UNMAP with uffd_msg.remove
+containing start and end addresses of the unmapped area.
+
+Although the UFFD_FEATURE_EVENT_REMOVE and UFFD_FEATURE_EVENT_UNMAP
+are quite similar, they differ in the action expected from the
+userfaultfd manager. In the former case, the virtual memory is
+removed, but the area is not: the area remains monitored by the
+userfaultfd, and if a page fault occurs in that area it will be
+delivered to the manager. The proper resolution for such a page fault
+is to zeromap the faulting address. However, in the latter case, when an
+area is unmapped, either explicitly (with the munmap() system call) or
+implicitly (e.g. during mremap()), the area is removed and in turn the
+userfaultfd context for that area disappears too, and the manager will
+not get further userland page faults from the removed area. Still, the
+notification is required in order to prevent the manager from using
+UFFDIO_COPY on the unmapped area.
+
+Unlike userland page faults, which have to be synchronous and require
+an explicit or implicit wakeup, all the events are delivered
+asynchronously and the non-cooperative process resumes execution as
+soon as the manager executes read(). The userfaultfd manager should
+carefully synchronize calls to UFFDIO_COPY with the event
+processing. To aid the synchronization, the UFFDIO_COPY ioctl will
+return -ENOSPC when the monitored process exits at the time of
+UFFDIO_COPY, and -ENOENT when the non-cooperative process has changed
+its virtual memory layout simultaneously with an outstanding
+UFFDIO_COPY operation.
+
+The current asynchronous model of event delivery is optimal for
+single-threaded non-cooperative userfaultfd manager implementations. A
+synchronous event delivery model can be added later as a new
+userfaultfd feature to facilitate multithreading enhancements of the
+non-cooperative manager, for example to allow UFFDIO_COPY ioctls to
+run in parallel with event reception. Single-threaded
+implementations should continue to use the current asynchronous event
+delivery model instead.
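
As an aside (not part of the patch), a minimal sketch of the feature
negotiation described above: the requested event bits are passed in
uffdio_api.features, and the bits reported back tell the application which
memory types and events the running kernel actually supports:

    /*
     * Illustrative only -- not part of this patch.  Opens a userfaultfd
     * and requests the non-cooperative event notifications.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    int main(void)
    {
        struct uffdio_api api = {
            .api = UFFD_API,
            .features = UFFD_FEATURE_EVENT_FORK |
                        UFFD_FEATURE_EVENT_REMAP |
                        UFFD_FEATURE_EVENT_REMOVE |
                        UFFD_FEATURE_EVENT_UNMAP,
        };
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api)) {
            perror("UFFDIO_API");
            return 1;
        }
        if (!(api.features & UFFD_FEATURE_MISSING_HUGETLBFS))
            fprintf(stderr, "no hugetlbfs support\n");
        if (!(api.features & UFFD_FEATURE_MISSING_SHMEM))
            fprintf(stderr, "no shmem support\n");

        /* read(2) on uffd now also returns UFFD_EVENT_FORK/REMAP/
         * REMOVE/UNMAP messages besides page fault notifications. */
        return 0;
    }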
diff --git a/arch/Kconfig b/arch/Kconfig
index f761142976e5..d0012add6b19 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -571,6 +571,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
config HAVE_ARCH_TRANSPARENT_HUGEPAGE
bool
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ bool
+
config HAVE_ARCH_HUGE_VMAP
bool
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index baa152b9348e..46e47c088622 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
+generic-y += current.h
diff --git a/arch/alpha/include/asm/current.h b/arch/alpha/include/asm/current.h
deleted file mode 100644
index 094d285a1b34..000000000000
--- a/arch/alpha/include/asm/current.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ALPHA_CURRENT_H
-#define _ALPHA_CURRENT_H
-
-#include <linux/thread_info.h>
-
-#define get_current() (current_thread_info()->task)
-#define current get_current()
-
-#endif /* _ALPHA_CURRENT_H */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82d3e79ec82b..6ffdf17e0d5c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -349,7 +349,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
- int coherent_flag);
+ int coherent_flag, gfp_t gfp);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
@@ -420,7 +420,8 @@ static int __init atomic_pool_init(void)
*/
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init, true, NORMAL);
+ &page, atomic_pool_init, true, NORMAL,
+ GFP_KERNEL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
@@ -594,14 +595,14 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
- int coherent_flag)
+ int coherent_flag, gfp_t gfp)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
void *ptr = NULL;
- page = dma_alloc_from_contiguous(dev, count, order);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
return NULL;
@@ -655,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp) NULL
#define __free_from_pool(cpu_addr, size) do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
@@ -697,7 +698,8 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
- args->want_vaddr, args->coherent_flag);
+ args->want_vaddr, args->coherent_flag,
+ args->gfp);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -1312,7 +1314,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
unsigned long order = get_order(size);
struct page *page;
- page = dma_alloc_from_contiguous(dev, count, order);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
goto error;
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 351f7595cb3e..aff1d0afeb1e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -107,7 +107,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
void *addr;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size));
+ get_order(size), flags);
if (!page)
return NULL;
@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)
if (dev_get_cma_area(NULL))
page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order);
+ pool_size_order, GFP_KERNEL);
else
page = alloc_pages(GFP_DMA, pool_size_order);
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 9f19e19bff9d..8e4ef321001f 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += barrier.h
generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += cmpxchg.h
+generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += errno.h
diff --git a/arch/cris/include/asm/current.h b/arch/cris/include/asm/current.h
deleted file mode 100644
index 5f5c0efd00be..000000000000
--- a/arch/cris/include/asm/current.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _CRIS_CURRENT_H
-#define _CRIS_CURRENT_H
-
-#include <linux/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct * get_current(void)
-{
- return current_thread_info()->task;
-}
-
-#define current get_current()
-
-#endif /* !(_CRIS_CURRENT_H) */
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 34bb4b13e079..c452ddb5620f 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -147,7 +147,7 @@ static void __init pcibios_allocate_resources(int pass)
static void __init pcibios_assign_resources(void)
{
struct pci_dev *dev = NULL;
- int idx;
+ int idx, err;
struct resource *r;
for_each_pci_dev(dev) {
@@ -172,8 +172,13 @@ static void __init pcibios_assign_resources(void)
* the BIOS forgot to do so or because we have decided the old
* address was unusable for some reason.
*/
- if (!r->start && r->end)
- pci_assign_resource(dev, idx);
+ if (!r->start && r->end) {
+ err = pci_assign_resource(dev, idx);
+ if (err)
+ dev_err(&dev->dev,
+ "Failed to assign new address to %d\n",
+ idx);
+ }
}
}
}
diff --git a/arch/m68k/68000/bootlogo-vz.h b/arch/m68k/68000/bootlogo-vz.h
index b38e2b255142..6ff09beba1ba 100644
--- a/arch/m68k/68000/bootlogo-vz.h
+++ b/arch/m68k/68000/bootlogo-vz.h
@@ -1,6 +1,8 @@
+#include <linux/compiler.h>
+
#define splash_width 640
#define splash_height 480
-unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
+unsigned char __aligned(16) bootlogo_bits[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/68000/bootlogo.h b/arch/m68k/68000/bootlogo.h
index b896c933fafc..c466db3ca3a8 100644
--- a/arch/m68k/68000/bootlogo.h
+++ b/arch/m68k/68000/bootlogo.h
@@ -1,6 +1,8 @@
+#include <linux/compiler.h>
+
#define bootlogo_width 160
#define bootlogo_height 160
-unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
+unsigned char __aligned(16) bootlogo_bits[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h
index 1a8080c4cc40..b61230e74e63 100644
--- a/arch/m68k/include/asm/MC68328.h
+++ b/arch/m68k/include/asm/MC68328.h
@@ -8,6 +8,7 @@
* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
*
*/
+#include <linux/compiler.h>
#ifndef _MC68328_H_
#define _MC68328_H_
@@ -993,7 +994,7 @@ typedef volatile struct {
volatile unsigned short int pad1;
volatile unsigned short int pad2;
volatile unsigned short int pad3;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
/**********
diff --git a/arch/m68k/include/asm/MC68EZ328.h b/arch/m68k/include/asm/MC68EZ328.h
index fedac87c5d13..703331ece328 100644
--- a/arch/m68k/include/asm/MC68EZ328.h
+++ b/arch/m68k/include/asm/MC68EZ328.h
@@ -9,6 +9,7 @@
* The Silver Hammer Group, Ltd.
*
*/
+#include <linux/compiler.h>
#ifndef _MC68EZ328_H_
#define _MC68EZ328_H_
@@ -815,7 +816,7 @@ typedef volatile struct {
volatile unsigned short int nipr;
volatile unsigned short int pad1;
volatile unsigned short int pad2;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
/**********
diff --git a/arch/m68k/include/asm/MC68VZ328.h b/arch/m68k/include/asm/MC68VZ328.h
index 34a51b2c784f..fbaed7ddfb41 100644
--- a/arch/m68k/include/asm/MC68VZ328.h
+++ b/arch/m68k/include/asm/MC68VZ328.h
@@ -909,7 +909,7 @@ typedef struct {
volatile unsigned short int nipr;
volatile unsigned short int hmark;
volatile unsigned short int unused;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
diff --git a/arch/m68k/include/asm/natfeat.h b/arch/m68k/include/asm/natfeat.h
index a3521b80c3b9..2d2424de1d65 100644
--- a/arch/m68k/include/asm/natfeat.h
+++ b/arch/m68k/include/asm/natfeat.h
@@ -6,6 +6,7 @@
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
*/
+#include <linux/compiler.h>
#ifndef _NATFEAT_H
#define _NATFEAT_H
@@ -17,6 +18,6 @@ void nf_init(void);
void nf_shutdown(void);
void nfprint(const char *fmt, ...)
- __attribute__ ((format (printf, 1, 2)));
+ __printf(1, 2);
# endif /* _NATFEAT_H */
diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c
index 8dffd36ec4f2..ac08f8141390 100644
--- a/arch/m68k/lib/ashldi3.c
+++ b/arch/m68k/lib/ashldi3.c
@@ -18,10 +18,10 @@ GNU General Public License for more details. */
#define BITS_PER_UNIT 8
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
struct DIstruct {SItype high, low;};
diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c
index e6565a3ee2c3..5837b1dd3334 100644
--- a/arch/m68k/lib/ashrdi3.c
+++ b/arch/m68k/lib/ashrdi3.c
@@ -18,10 +18,10 @@ GNU General Public License for more details. */
#define BITS_PER_UNIT 8
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
struct DIstruct {SItype high, low;};
diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c
index 039779737c7d..7f40566be6c8 100644
--- a/arch/m68k/lib/lshrdi3.c
+++ b/arch/m68k/lib/lshrdi3.c
@@ -18,10 +18,10 @@ GNU General Public License for more details. */
#define BITS_PER_UNIT 8
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
struct DIstruct {SItype high, low;};
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 6459af5b2af0..3fb05c698c41 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -65,10 +65,10 @@ GNU General Public License for more details. */
umul_ppmm (__w.s.high, __w.s.low, u, v); \
__w.ll; })
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef int SItype __mode(SI);
+typedef unsigned int USItype __mode(SI);
+typedef int DItype __mode(DI);
+typedef int word_type __mode(__word__);
struct DIstruct {SItype high, low;};
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 7f696f97f9dd..13bc93242c0c 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
+#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index f9dbfb14af33..093517e85a6c 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -111,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
VM_READ|VM_WRITE|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- 0);
+ 0, NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index a39c36af97ad..1895a692efd4 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -148,8 +148,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
gfp = massage_gfp_flags(dev, gfp);
if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
- page = dma_alloc_from_contiguous(dev,
- count, get_order(size));
+ page = dma_alloc_from_contiguous(dev, count, get_order(size),
+ gfp);
if (!page)
page = alloc_pages(gfp, get_order(size));
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index fef738229a68..1eeeb72c7015 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1,6 +1,9 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#endif
/*
* Common bits between hash and Radix page table
*/
@@ -434,15 +437,47 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
#ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
static inline int pte_protnone(pte_t pte)
{
- return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
- cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED);
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
+#define pte_mk_savedwrite pte_mk_savedwrite
+static inline pte_t pte_mk_savedwrite(pte_t pte)
+{
+ /*
+ * Used by Autonuma subsystem to preserve the write bit
+ * while marking the pte PROT_NONE. Only allow this
+ * on PROT_NONE pte
+ */
+ VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
+ return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+}
+
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+ /*
+ * Used by KSM subsystem to make a protnone pte readonly.
+ */
+ VM_BUG_ON(!pte_protnone(pte));
+ return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+}
+
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ /*
+ * Saved write ptes are prot none ptes that don't have the
+ * privileged bit set. We mark prot none as one which has
+ * present and privileged bit set and RWX cleared. To mark
+ * protnone which used to have _PAGE_WRITE set we clear
+ * the privileged bit.
+ */
+ VM_BUG_ON(!pte_protnone(pte));
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -873,6 +908,8 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mk_savedwrite(pmd) pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
+#define pmd_clear_savedwrite(pmd) pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
@@ -889,6 +926,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
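
As an aside (not part of the patch), the encoding that the comments above
describe can be summarised as follows; pte_protnone() tests that RWX is
clear, and pte_savedwrite() additionally tests that the privileged bit is
clear:

                              _PAGE_PRESENT  _PAGE_PRIVILEGED  _PAGE_RWX
    ordinary user pte               1               0           per prot
    PROT_NONE                       1               1               0
    PROT_NONE, saved write          1               0               0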
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 74bec5498972..a3f5334f5d8c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -25,6 +25,7 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
+#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 491c5d8120f7..ab9d14c0e460 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -102,9 +102,9 @@ static void release_spapr_tce_table(struct rcu_head *head)
kfree(stt);
}
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kvm_spapr_tce_fault(struct vm_fault *vmf)
{
- struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index c42a7e63b39e..4d6c64b3041c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
- return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
+ return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+ GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index a35e2c29d7ee..e5ec1368f0cd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -233,8 +233,9 @@ spufs_mem_write(struct file *file, const char __user *buffer,
}
static int
-spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mem_mmap_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long pfn, offset;
@@ -311,12 +312,11 @@ static const struct file_operations spufs_mem_fops = {
.mmap = spufs_mem_mmap,
};
-static int spufs_ps_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf,
+static int spufs_ps_fault(struct vm_fault *vmf,
unsigned long ps_offs,
unsigned long ps_size)
{
- struct spu_context *ctx = vma->vm_file->private_data;
+ struct spu_context *ctx = vmf->vma->vm_file->private_data;
unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
int ret = 0;
@@ -354,7 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
down_read(&current->mm->mmap_sem);
} else {
area = ctx->spu->problem_phys + ps_offs;
- vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
+ vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
}
@@ -367,10 +367,9 @@ refault:
}
#if SPUFS_MMAP_4K
-static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}
static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -1067,15 +1066,15 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
}
static int
-spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
- return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
* signal 1 and 2 area
*/
- return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
@@ -1205,15 +1204,15 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
#if SPUFS_MMAP_4K
static int
-spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
- return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
* signal 1 and 2 area
*/
- return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
@@ -1334,9 +1333,9 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
#if SPUFS_MMAP_4K
static int
-spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mss_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}
static const struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -1396,9 +1395,9 @@ static const struct file_operations spufs_mss_fops = {
};
static int
-spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}
static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
@@ -1456,9 +1455,9 @@ static const struct file_operations spufs_psmap_fops = {
#if SPUFS_MMAP_4K
static int
-spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
- return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+ return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}
static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index e2293c662bdf..dd1d5c62c374 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -32,6 +32,7 @@ static struct memblock_type oldmem_type = {
.max = 1,
.total_size = 0,
.regions = &oldmem_region,
+ .name = "oldmem",
};
struct save_area {
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 6225cc998db1..889901824400 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -143,7 +143,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
unsigned long addr = MEM_USER_INTRPT;
addr = mmap_region(NULL, addr, INTRPT_SIZE,
VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0, NULL);
if (addr > (unsigned long) -PAGE_SIZE)
retval = (int) addr;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 874c1238dffd..33007aa74111 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -109,6 +109,7 @@ config X86
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
select HAVE_ARCH_VMAP_STACK if X86_64
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_CC_STACKPROTECTOR
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 10820f6cefbf..572cee3fccff 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -186,7 +186,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- do_munmap(mm, text_start, image->size);
+ do_munmap(mm, text_start, image->size, NULL);
} else {
current->mm->context.vdso = (void __user *)text_start;
current->mm->context.vdso_image = image;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index f75fbfe550f2..0489884fdc44 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -475,6 +475,17 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
native_pmd_val(pmd));
}
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ if (sizeof(pudval_t) > sizeof(long))
+ /* 5 arg words */
+ pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
+ else
+ PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
+ native_pud_val(pud));
+}
+
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
pmdval_t val = native_pmd_val(pmd);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index bb2de45a60f2..b060f962d581 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -249,6 +249,8 @@ struct pv_mmu_ops {
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmdval);
+ void (*set_pud_at)(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pudval);
void (*pte_update)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index fd74a11959de..a8b96e708c2b 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -21,6 +21,10 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
*pmdp = pmd;
}
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+}
+
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
native_set_pte(ptep, pte);
@@ -31,6 +35,10 @@ static inline void native_pmd_clear(pmd_t *pmdp)
native_set_pmd(pmdp, __pmd(0));
}
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+
static inline void native_pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *xp)
{
@@ -55,6 +63,15 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifdef CONFIG_SMP
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+ return __pud(xchg((pudval_t *)xp, 0));
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
/* Bit manipulation helper on pte/pgoff entry */
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
unsigned long mask, unsigned int leftshift)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index cdaa58c9b39e..8f50fb3f04e1 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,6 +121,12 @@ static inline void native_pmd_clear(pmd_t *pmd)
*(tmp + 1) = 0;
}
+#ifndef CONFIG_SMP
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+#endif
+
static inline void pud_clear(pud_t *pudp)
{
set_pud(pudp, __pud(0));
@@ -176,6 +182,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifdef CONFIG_SMP
+union split_pud {
+ struct {
+ u32 pud_low;
+ u32 pud_high;
+ };
+ pud_t pud;
+};
+
+static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
+{
+ union split_pud res, *orig = (union split_pud *)pudp;
+
+ /* xchg acts as a barrier before setting of the high bits */
+ res.pud_low = xchg(&orig->pud_low, 0);
+ res.pud_high = orig->pud_high;
+ orig->pud_high = 0;
+
+ return res.pud;
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 437feb436efa..1cfb36b8c024 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
+#define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)
@@ -128,6 +129,16 @@ static inline int pmd_young(pmd_t pmd)
return pmd_flags(pmd) & _PAGE_ACCESSED;
}
+static inline int pud_dirty(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_DIRTY;
+}
+
+static inline int pud_young(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_ACCESSED;
+}
+
static inline int pte_write(pte_t pte)
{
return pte_flags(pte) & _PAGE_RW;
@@ -181,6 +192,13 @@ static inline int pmd_trans_huge(pmd_t pmd)
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline int pud_trans_huge(pud_t pud)
+{
+ return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+}
+#endif
+
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
@@ -192,6 +210,18 @@ static inline int pmd_devmap(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline int pud_devmap(pud_t pud)
+{
+ return !!(pud_val(pud) & _PAGE_DEVMAP);
+}
+#else
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+#endif
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -333,6 +363,65 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return __pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return __pud(v & ~clear);
+}
+
+static inline pud_t pud_mkold(pud_t pud)
+{
+ return pud_clear_flags(pud, _PAGE_ACCESSED);
+}
+
+static inline pud_t pud_mkclean(pud_t pud)
+{
+ return pud_clear_flags(pud, _PAGE_DIRTY);
+}
+
+static inline pud_t pud_wrprotect(pud_t pud)
+{
+ return pud_clear_flags(pud, _PAGE_RW);
+}
+
+static inline pud_t pud_mkdirty(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pud_t pud_mkdevmap(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_DEVMAP);
+}
+
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_PSE);
+}
+
+static inline pud_t pud_mkyoung(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_ACCESSED);
+}
+
+static inline pud_t pud_mkwrite(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_RW);
+}
+
+static inline pud_t pud_mknotpresent(pud_t pud)
+{
+ return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
@@ -344,6 +433,11 @@ static inline int pmd_soft_dirty(pmd_t pmd)
return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}
+static inline int pud_soft_dirty(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_SOFT_DIRTY;
+}
+
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
@@ -354,6 +448,11 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
+static inline pud_t pud_mksoft_dirty(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
+}
+
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
@@ -364,6 +463,11 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}
+static inline pud_t pud_clear_soft_dirty(pud_t pud)
+{
+ return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
+}
+
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
@@ -392,6 +496,12 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
massage_pgprot(pgprot));
}
+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+{
+ return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
+ massage_pgprot(pgprot));
+}
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pteval_t val = pte_val(pte);
@@ -771,6 +881,14 @@ static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
return res;
}
+static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
+{
+ pud_t res = *pudp;
+
+ native_pud_clear(pudp);
+ return res;
+}
+
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , pte_t pte)
{
@@ -783,6 +901,12 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
native_set_pmd(pmdp, pmd);
}
+static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ native_set_pud(pudp, pud);
+}
+
#ifndef CONFIG_PARAVIRT
/*
* Rules for using pte_update - it must be called after any PTE update which
@@ -861,10 +985,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
+extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pudp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
@@ -884,6 +1013,13 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long
return native_pmdp_get_and_clear(pmdp);
}
+#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp)
+{
+ return native_pudp_get_and_clear(pudp);
+}
+
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
@@ -932,6 +1068,10 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd)
{
}
+static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud)
+{
+}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 62b775926045..73c7ccc38912 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -106,6 +106,21 @@ static inline void native_pud_clear(pud_t *pud)
native_set_pud(pud, native_make_pud(0));
}
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+#ifdef CONFIG_SMP
+ return native_make_pud(xchg(&xp->pud, 0));
+#else
+ /* native_local_pudp_get_and_clear,
+ * but duplicated because of cyclic dependency
+ */
+ pud_t ret = *xp;
+
+ native_pud_clear(xp);
+ return ret;
+#endif
+}
+
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
*pgdp = pgd;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index a1bfba0f7234..4797e87b0fb6 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -425,6 +425,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
.pmd_clear = native_pmd_clear,
#endif
.set_pud = native_set_pud,
+ .set_pud_at = native_set_pud_at,
.pmd_val = PTE_IDENT,
.make_pmd = PTE_IDENT,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index d30c37750765..d5c223c9cf11 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -91,7 +91,8 @@ again:
page = NULL;
/* CMA can be used only in the context which permits sleeping */
if (gfpflags_allow_blocking(flag)) {
- page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ page = dma_alloc_from_contiguous(dev, count, get_order(size),
+ flag);
if (page && page_to_phys(page) + size > dma_mask) {
dma_release_from_contiguous(dev, page, count);
page = NULL;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 0d4fb3ebbbac..99c7805a9693 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -154,14 +154,12 @@ static inline void get_head_page_multiple(struct page *page, int nr)
SetPageReferenced(page);
}
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge(unsigned long pfn, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
int nr_start = *nr;
- unsigned long pfn = pmd_pfn(pmd);
struct dev_pagemap *pgmap = NULL;
- pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
do {
struct page *page = pfn_to_page(pfn);
@@ -180,6 +178,24 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
return 1;
}
+static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+ unsigned long end, struct page **pages, int *nr)
+{
+ unsigned long fault_pfn;
+
+ fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+}
+
+static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+ unsigned long end, struct page **pages, int *nr)
+{
+ unsigned long fault_pfn;
+
+ fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+}
+
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
@@ -251,9 +267,13 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
if (!pte_allows_gup(pud_val(pud), write))
return 0;
+
+ VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+ if (pud_devmap(pud))
+ return __gup_device_huge_pud(pud, addr, end, pages, nr);
+
/* hugepages are never "special" */
VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
- VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
refs = 0;
head = pud_page(pud);
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index aad4ac386f98..c98079684bdb 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -51,7 +51,7 @@ static unsigned long mpx_mmap(unsigned long len)
down_write(&mm->mmap_sem);
addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate);
+ MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
up_write(&mm->mmap_sem);
if (populate)
mm_populate(addr, populate);
@@ -893,7 +893,7 @@ static int unmap_entire_bt(struct mm_struct *mm,
* avoid recursion, do_munmap() will check whether it comes
* from one bounds table through VM_MPX flag.
*/
- return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
+ return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
}
static int try_unmap_single_bt(struct mm_struct *mm,
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 3feec5af4e67..6cbdff26bb96 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -445,6 +445,26 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
return changed;
}
+
+int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp, pud_t entry, int dirty)
+{
+ int changed = !pud_same(*pudp, entry);
+
+ VM_BUG_ON(address & ~HPAGE_PUD_MASK);
+
+ if (changed && dirty) {
+ *pudp = entry;
+ /*
+ * We had a write-protection fault here and changed the pud
+	 * to be more permissive. No need to flush the TLB for that,
+ * #PF is architecturally guaranteed to do that and in the
+ * worst-case we'll generate a spurious fault.
+ */
+ }
+
+ return changed;
+}
#endif
int ptep_test_and_clear_young(struct vm_area_struct *vma,
@@ -474,6 +494,17 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
return ret;
}
+int pudp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pudp)
+{
+ int ret = 0;
+
+ if (pud_young(*pudp))
+ ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+ (unsigned long *)pudp);
+
+ return ret;
+}
#endif
int ptep_clear_flush_young(struct vm_area_struct *vma,
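
pudp_set_access_flags() above intentionally skips the TLB flush when relaxing protections: the #PF that got us here already guarantees the stale entry is gone, so at worst a spurious fault is taken. A hypothetical caller sketch (the example_* name and the vmf->pud field are assumptions from elsewhere in this series, not part of this hunk):

static void example_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	unsigned long haddr = vmf->address & HPAGE_PUD_MASK;
	pud_t entry = pud_mkyoung(pud_mkdirty(orig_pud));

	/* returns whether the entry changed; no TLB flush is issued here */
	pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry,
			      vmf->flags & FAULT_FLAG_WRITE);
}
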
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 70e362e6038e..34c1f9fa6acc 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -158,7 +158,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
flag |= GFP_DMA;
if (gfpflags_allow_blocking(flag))
- page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ page = dma_alloc_from_contiguous(dev, count, get_order(size),
+ flag);
if (!page)
page = alloc_pages(flag, get_order(size));
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 99c1b2cc2976..71eff9b01b12 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -66,15 +66,13 @@ static void lz4_exit(struct crypto_tfm *tfm)
static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- size_t tmp_len = *dlen;
- int err;
+ int out_len = LZ4_compress_default(src, dst,
+ slen, *dlen, ctx);
- err = lz4_compress(src, slen, dst, &tmp_len, ctx);
-
- if (err < 0)
+ if (!out_len)
return -EINVAL;
- *dlen = tmp_len;
+ *dlen = out_len;
return 0;
}
@@ -96,16 +94,13 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int err;
- size_t tmp_len = *dlen;
- size_t __slen = slen;
+ int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
- err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
- if (err < 0)
- return -EINVAL;
+ if (out_len < 0)
+ return out_len;
- *dlen = tmp_len;
- return err;
+ *dlen = out_len;
+ return 0;
}
static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 75ffc4a3f786..03a34a8109c0 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -65,15 +65,13 @@ static void lz4hc_exit(struct crypto_tfm *tfm)
static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- size_t tmp_len = *dlen;
- int err;
+ int out_len = LZ4_compress_HC(src, dst, slen,
+ *dlen, LZ4HC_DEFAULT_CLEVEL, ctx);
- err = lz4hc_compress(src, slen, dst, &tmp_len, ctx);
-
- if (err < 0)
+ if (!out_len)
return -EINVAL;
- *dlen = tmp_len;
+ *dlen = out_len;
return 0;
}
@@ -97,16 +95,13 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int err;
- size_t tmp_len = *dlen;
- size_t __slen = slen;
+ int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
- err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
- if (err < 0)
- return -EINVAL;
+ if (out_len < 0)
+ return out_len;
- *dlen = tmp_len;
- return err;
+ *dlen = out_len;
+ return 0;
}
static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
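
Both the lz4 and lz4hc glue now follow the upstream LZ4 1.7 return convention: the compressors return the compressed size (0 when the output buffer is too small) and LZ4_decompress_safe() returns the decompressed size or a negative error. A userspace sketch of the same convention against liblz4, assuming the library is installed (the in-kernel compressors additionally take a work buffer argument):

/* build with: cc lz4_demo.c -llz4 */
#include <lz4.h>
#include <stdio.h>

int main(void)
{
	const char src[] = "LZ4 is lossless compression algorithm, providing "
			   "compression speed at 400 MB/s per core.";
	char comp[256], decomp[256];

	int clen = LZ4_compress_default(src, comp, sizeof(src), sizeof(comp));
	if (clen == 0)		/* 0: output did not fit */
		return 1;

	int dlen = LZ4_decompress_safe(comp, decomp, clen, sizeof(decomp));
	if (dlen < 0)		/* negative: corrupt stream */
		return 1;

	printf("%zu -> %d -> %d bytes\n", sizeof(src), clen, dlen);
	return 0;
}
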
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index f85e51cf7dcc..006ecc434351 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -34293,61 +34293,123 @@ static struct hash_testvec bfin_crc_tv_template[] = {
static struct comp_testvec lz4_comp_tv_template[] = {
{
- .inlen = 70,
- .outlen = 45,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
+ .inlen = 255,
+ .outlen = 218,
+ .input = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
+ .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00"
+ "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e",
+
},
};
static struct comp_testvec lz4_decomp_tv_template[] = {
{
- .inlen = 45,
- .outlen = 70,
- .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
+ .inlen = 218,
+ .outlen = 255,
+ .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00"
+ "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e",
+ .output = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
},
};
static struct comp_testvec lz4hc_comp_tv_template[] = {
{
- .inlen = 70,
- .outlen = 45,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
+ .inlen = 255,
+ .outlen = 216,
+ .input = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
+ .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20"
+ "\x73\x79\x73\x74\x65\x6d\x73\x2e",
+
},
};
static struct comp_testvec lz4hc_decomp_tv_template[] = {
{
- .inlen = 45,
- .outlen = 70,
- .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
+ .inlen = 216,
+ .outlen = 255,
+ .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20"
+ "\x73\x79\x73\x74\x65\x6d\x73\x2e",
+ .output = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
},
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 15b263a420e8..2bbcdc6fdfee 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3342,7 +3342,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
-static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int binder_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
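
This is the first of many conversions in this series: ->fault() handlers lose the explicit vma argument because struct vm_fault now carries it as vmf->vma. The general shape of a converted handler, with hypothetical example_* names (not taken from any driver in this patch):

static int example_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;	/* vma now comes from vmf */
	struct example_obj *obj = vma->vm_private_data;

	vmf->page = example_obj_get_page(obj, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault = example_vm_fault,
};
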
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8c25e68e67d7..3050e6f99403 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -638,6 +638,11 @@ int lock_device_hotplug_sysfs(void)
return restart_syscall();
}
+void assert_held_device_hotplug(void)
+{
+ lockdep_assert_held(&device_hotplug_lock);
+}
+
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index e167a1e1bccb..b55804cac4c4 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -181,6 +181,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* @dev: Pointer to device for which the allocation is performed.
* @count: Requested number of pages.
* @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @gfp_mask: GFP flags to use for this allocation.
*
* This function allocates memory buffer for specified device. It uses
* device specific contiguous memory area if available or the default
@@ -188,12 +189,12 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* function.
*/
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int align)
+ unsigned int align, gfp_t gfp_mask)
{
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
- return cma_alloc(dev_get_cma_area(dev), count, align);
+ return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
}
/**
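
dma_alloc_from_contiguous() now forwards the caller's gfp mask to cma_alloc(), so the allocation context reaches CMA instead of being assumed. A hypothetical call site sketch (not from this patch) showing the updated signature:

static void *example_alloc(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	/* CMA may sleep; non-blocking callers must use another pool */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;

	page = dma_alloc_from_contiguous(dev, count, get_order(size), gfp);
	return page ? page_address(page) : NULL;
}
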
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fa26ffd25fa6..cc4f1d0cbffe 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -249,7 +249,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
return ret;
}
-int memory_block_change_state(struct memory_block *mem,
+static int memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c73fede582f7..e27d89a36c34 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -74,6 +74,17 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index,
meta->table[index].value &= ~BIT(flag);
}
+static inline void zram_set_element(struct zram_meta *meta, u32 index,
+ unsigned long element)
+{
+ meta->table[index].element = element;
+}
+
+static inline void zram_clear_element(struct zram_meta *meta, u32 index)
+{
+ meta->table[index].element = 0;
+}
+
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -146,31 +157,46 @@ static inline void update_used_max(struct zram *zram,
} while (old_max != cur_max);
}
-static bool page_zero_filled(void *ptr)
+static inline void zram_fill_page(char *ptr, unsigned long len,
+ unsigned long value)
+{
+ int i;
+ unsigned long *page = (unsigned long *)ptr;
+
+ WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
+
+ if (likely(value == 0)) {
+ memset(ptr, 0, len);
+ } else {
+ for (i = 0; i < len / sizeof(*page); i++)
+ page[i] = value;
+ }
+}
+
+static bool page_same_filled(void *ptr, unsigned long *element)
{
unsigned int pos;
unsigned long *page;
page = (unsigned long *)ptr;
- for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos])
+ for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
+ if (page[pos] != page[pos + 1])
return false;
}
+ *element = page[pos];
+
return true;
}
-static void handle_zero_page(struct bio_vec *bvec)
+static void handle_same_page(struct bio_vec *bvec, unsigned long element)
{
struct page *page = bvec->bv_page;
void *user_mem;
user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
- memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- else
- clear_page(user_mem);
+ zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
kunmap_atomic(user_mem);
flush_dcache_page(page);
@@ -363,7 +389,7 @@ static ssize_t mm_stat_show(struct device *dev,
mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
- (u64)atomic64_read(&zram->stats.zero_pages),
+ (u64)atomic64_read(&zram->stats.same_pages),
pool_stats.pages_compacted);
up_read(&zram->init_lock);
@@ -391,18 +417,6 @@ static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
-static inline bool zram_meta_get(struct zram *zram)
-{
- if (atomic_inc_not_zero(&zram->refcount))
- return true;
- return false;
-}
-
-static inline void zram_meta_put(struct zram *zram)
-{
- atomic_dec(&zram->refcount);
-}
-
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -411,8 +425,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++) {
unsigned long handle = meta->table[index].handle;
-
- if (!handle)
+ /*
+ * No memory is allocated for same element filled pages.
+		 * Simply skip them here.
+ */
+ if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
continue;
zs_free(meta->mem_pool, handle);
@@ -462,18 +479,20 @@ static void zram_free_page(struct zram *zram, size_t index)
struct zram_meta *meta = zram->meta;
unsigned long handle = meta->table[index].handle;
- if (unlikely(!handle)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (zram_test_flag(meta, index, ZRAM_ZERO)) {
- zram_clear_flag(meta, index, ZRAM_ZERO);
- atomic64_dec(&zram->stats.zero_pages);
- }
+ /*
+ * No memory is allocated for same element filled pages.
+ * Simply clear same page flag.
+ */
+ if (zram_test_flag(meta, index, ZRAM_SAME)) {
+ zram_clear_flag(meta, index, ZRAM_SAME);
+ zram_clear_element(meta, index);
+ atomic64_dec(&zram->stats.same_pages);
return;
}
+ if (!handle)
+ return;
+
zs_free(meta->mem_pool, handle);
atomic64_sub(zram_get_obj_size(meta, index),
@@ -496,9 +515,9 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
handle = meta->table[index].handle;
size = zram_get_obj_size(meta, index);
- if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- clear_page(mem);
+ zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
return 0;
}
@@ -534,9 +553,9 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
if (unlikely(!meta->table[index].handle) ||
- zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_test_flag(meta, index, ZRAM_SAME)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- handle_zero_page(bvec);
+ handle_same_page(bvec, meta->table[index].element);
return 0;
}
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -584,6 +603,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct zram_meta *meta = zram->meta;
struct zcomp_strm *zstrm = NULL;
unsigned long alloced_pages;
+ unsigned long element;
page = bvec->bv_page;
if (is_partial_io(bvec)) {
@@ -612,16 +632,17 @@ compress_again:
uncmem = user_mem;
}
- if (page_zero_filled(uncmem)) {
+ if (page_same_filled(uncmem, &element)) {
if (user_mem)
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
- zram_set_flag(meta, index, ZRAM_ZERO);
+ zram_set_flag(meta, index, ZRAM_SAME);
+ zram_set_element(meta, index, element);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- atomic64_inc(&zram->stats.zero_pages);
+ atomic64_inc(&zram->stats.same_pages);
ret = 0;
goto out;
}
@@ -859,22 +880,17 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- if (unlikely(!zram_meta_get(zram)))
- goto error;
-
blk_queue_split(queue, &bio, queue->bio_split);
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
- goto put_zram;
+ goto error;
}
__zram_make_request(zram, bio);
- zram_meta_put(zram);
return BLK_QC_T_NONE;
-put_zram:
- zram_meta_put(zram);
+
error:
bio_io_error(bio);
return BLK_QC_T_NONE;
@@ -904,13 +920,11 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct bio_vec bv;
zram = bdev->bd_disk->private_data;
- if (unlikely(!zram_meta_get(zram)))
- goto out;
if (!valid_io_request(zram, sector, PAGE_SIZE)) {
atomic64_inc(&zram->stats.invalid_io);
err = -EINVAL;
- goto put_zram;
+ goto out;
}
index = sector >> SECTORS_PER_PAGE_SHIFT;
@@ -921,8 +935,6 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_offset = 0;
err = zram_bvec_rw(zram, &bv, index, offset, is_write);
-put_zram:
- zram_meta_put(zram);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -955,17 +967,6 @@ static void zram_reset_device(struct zram *zram)
meta = zram->meta;
comp = zram->comp;
disksize = zram->disksize;
- /*
- * Refcount will go down to 0 eventually and r/w handler
- * cannot handle further I/O so it will bail out by
- * check zram_meta_get.
- */
- zram_meta_put(zram);
- /*
- * We want to free zram_meta in process context to avoid
- * deadlock between reclaim path and any other locks.
- */
- wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
/* Reset stats */
memset(&zram->stats, 0, sizeof(zram->stats));
@@ -1013,8 +1014,6 @@ static ssize_t disksize_store(struct device *dev,
goto out_destroy_comp;
}
- init_waitqueue_head(&zram->io_done);
- atomic_set(&zram->refcount, 1);
zram->meta = meta;
zram->comp = comp;
zram->disksize = disksize;
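
zram now deduplicates any page filled with a single repeating machine word, not just zero pages: the word is stored in the table entry instead of a zsmalloc handle, and the page is regenerated on read. A standalone sketch of the detect/reconstruct idea, assuming a 4096-byte page (illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

static bool page_same_filled(const void *ptr, unsigned long *element)
{
	const unsigned long *page = ptr;
	size_t pos;

	for (pos = 0; pos < PAGE_SZ / sizeof(*page) - 1; pos++)
		if (page[pos] != page[pos + 1])
			return false;

	*element = page[pos];	/* the repeating word */
	return true;
}

static void fill_page(void *ptr, unsigned long value)
{
	unsigned long *page = ptr;
	size_t i;

	if (value == 0)
		memset(ptr, 0, PAGE_SZ);
	else
		for (i = 0; i < PAGE_SZ / sizeof(*page); i++)
			page[i] = value;
}

int main(void)
{
	static unsigned long buf[PAGE_SZ / sizeof(unsigned long)];
	unsigned long element;

	fill_page(buf, 0xdeadbeefUL);
	if (page_same_filled(buf, &element))
		printf("page is %#lx repeated; nothing to compress\n", element);
	return 0;
}
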
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74fcf10da374..caeff51f1571 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -61,7 +61,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
/* Page consists entirely of zeros */
- ZRAM_ZERO = ZRAM_FLAG_SHIFT,
+ ZRAM_SAME = ZRAM_FLAG_SHIFT,
ZRAM_ACCESS, /* page is now accessed */
__NR_ZRAM_PAGEFLAGS,
@@ -71,7 +71,10 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct zram_table_entry {
- unsigned long handle;
+ union {
+ unsigned long handle;
+ unsigned long element;
+ };
unsigned long value;
};
@@ -83,7 +86,7 @@ struct zram_stats {
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
- atomic64_t zero_pages; /* no. of zero filled pages */
+ atomic64_t same_pages; /* no. of same element filled pages */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
atomic64_t writestall; /* no. of write slow paths */
@@ -106,9 +109,6 @@ struct zram {
unsigned long limit_pages;
struct zram_stats stats;
- atomic_t refcount; /* refcount for zram_meta */
- /* wait all IO under all of cpu are done */
- wait_queue_head_t io_done;
/*
* This is the limit on amount of *uncompressed* worth of data
* we can store in a disk.
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 737187865269..53fe633df1e8 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,15 +11,14 @@
#include "agp.h"
-static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
unsigned long pa;
struct page *page;
- dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
+ dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index a697ca0cab1e..a9c2fa3c81e5 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -191,12 +191,12 @@ mspec_close(struct vm_area_struct *vma)
* Creates a mspec page and maps it to user space.
*/
static int
-mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+mspec_fault(struct vm_fault *vmf)
{
unsigned long paddr, maddr;
unsigned long pfn;
pgoff_t index = vmf->pgoff;
- struct vma_data *vdata = vma->vm_private_data;
+ struct vma_data *vdata = vmf->vma->vm_private_data;
maddr = (volatile unsigned long) vdata->maddr[index];
if (maddr == 0) {
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* be because another thread has installed the pte first, so it
* is no problem.
*/
- vm_insert_pfn(vma, vmf->address, pfn);
+ vm_insert_pfn(vmf->vma, vmf->address, pfn);
return VM_FAULT_NOPAGE;
}
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 18e9875f6277..b75c77254fdb 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -419,8 +419,7 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
return -1;
}
-static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
struct device *dev = &dax_dev->dev;
struct dax_region *dax_region;
@@ -428,7 +427,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
phys_addr_t phys;
pfn_t pfn;
- if (check_vma(dax_dev, vma, __func__))
+ if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
dax_region = dax_dev->region;
@@ -446,7 +445,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- rc = vm_insert_mixed(vma, vmf->address, pfn);
+ rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
if (rc == -ENOMEM)
return VM_FAULT_OOM;
@@ -456,22 +455,6 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
-static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- int rc;
- struct file *filp = vma->vm_file;
- struct dax_dev *dax_dev = filp->private_data;
-
- dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
- current->comm, (vmf->flags & FAULT_FLAG_WRITE)
- ? "write" : "read", vma->vm_start, vma->vm_end);
- rcu_read_lock();
- rc = __dax_dev_fault(dax_dev, vma, vmf);
- rcu_read_unlock();
-
- return rc;
-}
-
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
@@ -510,7 +493,53 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
vmf->flags & FAULT_FLAG_WRITE);
}
-static int dax_dev_pmd_fault(struct vm_fault *vmf)
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
+{
+ unsigned long pud_addr = vmf->address & PUD_MASK;
+ struct device *dev = &dax_dev->dev;
+ struct dax_region *dax_region;
+ phys_addr_t phys;
+ pgoff_t pgoff;
+ pfn_t pfn;
+
+ if (check_vma(dax_dev, vmf->vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dax_dev->region;
+ if (dax_region->align > PUD_SIZE) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /* dax pud mappings require pfn_t_devmap() */
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pgoff = linear_page_index(vmf->vma, pud_addr);
+ phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+ vmf->flags & FAULT_FLAG_WRITE);
+}
+#else
+static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
+{
+ return VM_FAULT_FALLBACK;
+}
+#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+static int dax_dev_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
{
int rc;
struct file *filp = vmf->vma->vm_file;
@@ -522,15 +551,32 @@ static int dax_dev_pmd_fault(struct vm_fault *vmf)
vmf->vma->vm_start, vmf->vma->vm_end);
rcu_read_lock();
- rc = __dax_dev_pmd_fault(dax_dev, vmf);
+ switch (pe_size) {
+ case PE_SIZE_PTE:
+ rc = __dax_dev_pte_fault(dax_dev, vmf);
+ break;
+ case PE_SIZE_PMD:
+ rc = __dax_dev_pmd_fault(dax_dev, vmf);
+ break;
+ case PE_SIZE_PUD:
+ rc = __dax_dev_pud_fault(dax_dev, vmf);
+ break;
+ default:
+ return VM_FAULT_FALLBACK;
+ }
rcu_read_unlock();
return rc;
}
+static int dax_dev_fault(struct vm_fault *vmf)
+{
+ return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
+}
+
static const struct vm_operations_struct dax_dev_vm_ops = {
.fault = dax_dev_fault,
- .pmd_fault = dax_dev_pmd_fault,
+ .huge_fault = dax_dev_huge_fault,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
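
The dax driver now exposes one ->huge_fault() handler that dispatches on the page entry size, with ->fault() reduced to the PTE case. A hypothetical wiring sketch for a driver that only handles PTE faults and lets the core fall back for larger sizes (example_* names are assumptions):

static int example_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return example_pte_fault(vmf);	/* hypothetical helper */
	default:
		return VM_FAULT_FALLBACK;	/* core retries at a smaller size */
	}
}

static int example_fault(struct vm_fault *vmf)
{
	return example_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= example_fault,
	.huge_fault	= example_huge_fault,
};
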
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 560d416deab2..1597458d884e 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -14,14 +14,15 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int armada_gem_vm_fault(struct vm_fault *vmf)
{
- struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+ struct drm_gem_object *gobj = vmf->vma->vm_private_data;
+ struct armada_gem_object *obj = drm_to_armada_gem(gobj);
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
int ret;
- pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vma, vmf->address, pfn);
+ pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
switch (ret) {
case 0:
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index bd311c77c254..1170b3209a12 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -96,8 +96,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_local_map *map = NULL;
@@ -168,7 +169,7 @@ vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
@@ -184,8 +185,9 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_shm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
unsigned long offset;
unsigned long i;
@@ -280,14 +282,14 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
/**
* \c fault method for DMA virtual memory.
*
- * \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_dma_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_device_dma *dma = dev->dma;
@@ -315,14 +317,14 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/**
* \c fault method for scatter-gather virtual memory.
*
- * \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_sg_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
@@ -347,26 +349,6 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return 0;
}
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_fault(vma, vmf);
-}
-
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_shm_fault(vma, vmf);
-}
-
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_dma_fault(vma, vmf);
-}
-
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- return drm_do_vm_sg_fault(vma, vmf);
-}
-
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
.fault = drm_vm_fault,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index c255eda40526..e41f38667c1c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -73,7 +73,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index aa6e35ddc87f..e78f1406885d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -175,8 +175,9 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int etnaviv_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct page **pages, *page;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 57b81460fec8..4c28f7ffcc4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -447,8 +447,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
return ret;
}
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int exynos_drm_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
unsigned long pfn;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index df7c543d6558..85457255fcd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -116,7 +116,7 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
uint64_t *offset);
/* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int exynos_drm_gem_fault(struct vm_fault *vmf);
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index da42d2e1d397..ffe6b4ffa1a8 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -111,8 +111,9 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int psbfb_vm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 527c62917660..7da061aab729 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -164,8 +164,9 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int psb_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
struct gtt_range *r;
int ret;
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 05d7aaf47eea..83e22fd4cfc0 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -752,7 +752,7 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bcc81912b5e5..0a4b42d31391 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3352,7 +3352,7 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 88f3628b4e29..6908123162d1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1772,7 +1772,6 @@ compute_partial_view(struct drm_i915_gem_object *obj,
/**
* i915_gem_fault - fault a page into the GTT
- * @area: CPU VMA in question
* @vmf: fault info
*
* The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -1789,9 +1788,10 @@ compute_partial_view(struct drm_i915_gem_object *obj,
* The current feature set supported by i915_gem_fault() and thus GTT mmaps
* is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
-int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+ struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index cdd7b2f8e977..c3b14876edaa 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -206,7 +206,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint64_t *iova);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e140b05af134..59811f29607d 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -191,8 +191,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int msm_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 36d93ce84a29..65977982f15f 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -188,7 +188,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_fault(struct vm_fault *vmf);
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 74a9968df421..5d5a9f517c30 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -518,7 +518,6 @@ static int fault_2d(struct drm_gem_object *obj,
/**
* omap_gem_fault - pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
* @vmf: fault detail
*
* Invoked when a fault occurs on an mmap of a GEM managed area. GEM
@@ -529,8 +528,9 @@ static int fault_2d(struct drm_gem_object *obj,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int omap_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct drm_device *dev = obj->dev;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 4e1a40389964..7d1cab57c89e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -105,15 +105,15 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
-static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qxl_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
if (bo == NULL)
return VM_FAULT_NOPAGE;
- r = ttm_vm_ops->fault(vma, vmf);
+ r = ttm_vm_ops->fault(vmf);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7a10b3852970..684f1703aa5c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -979,19 +979,19 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int radeon_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
struct radeon_device *rdev;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
if (bo == NULL) {
return VM_FAULT_NOPAGE;
}
rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- r = ttm_vm_ops->fault(vma, vmf);
+ r = ttm_vm_ops->fault(vmf);
up_read(&rdev->pm.mclk_lock);
return r;
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index b523a5d4a38c..17e62ecb5d4d 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -441,8 +441,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
return 0;
}
-static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tegra_bo_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *gem = vma->vm_private_data;
struct tegra_bo *bo = to_tegra_bo(gem);
struct page *page;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 88169141bef5..35ffb3754feb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -43,7 +43,6 @@
#define TTM_BO_VM_NUM_PREFAULT 16
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
- struct vm_area_struct *vma,
struct vm_fault *vmf)
{
int ret = 0;
@@ -67,7 +66,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
ttm_bo_reference(bo);
- up_read(&vma->vm_mm->mmap_sem);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
ttm_bo_unreserve(bo);
ttm_bo_unref(&bo);
@@ -92,8 +91,9 @@ out_unlock:
return ret;
}
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
@@ -124,7 +124,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_reference(bo);
- up_read(&vma->vm_mm->mmap_sem);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
ttm_bo_unref(&bo);
}
@@ -168,7 +168,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Wait for buffer data in transit, due to a pipelined
* move.
*/
- ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+ ret = ttm_bo_vm_fault_idle(bo, vmf);
if (unlikely(ret != 0)) {
retval = ret;
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 6c4286e57362..2a75ab80527a 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -134,7 +134,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int udl_gem_fault(struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 3c0c4bd3f750..775c50e4f02c 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -100,8 +100,9 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int udl_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 477e07f0ecb6..7ccbb03e98de 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -50,8 +50,9 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
kfree(vgem_obj);
}
-static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vgem_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 9cc7079f7aca..70ec8ca8d9b1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -114,18 +114,17 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
-static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
struct virtio_gpu_device *vgdev;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
if (bo == NULL)
return VM_FAULT_NOPAGE;
vgdev = virtio_gpu_get_vgdev(bo->bdev);
- r = ttm_vm_ops->fault(vma, vmf);
+ r = ttm_vm_ops->fault(vmf);
return r;
}
#endif
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 3deef6cc7d7c..7175e6bedf21 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1098,9 +1098,9 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
kfree(hi);
}
-static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cs_char_vma_fault(struct vm_fault *vmf)
{
- struct cs_char *csdata = vma->vm_private_data;
+ struct cs_char *csdata = vmf->vma->vm_private_data;
struct page *page;
page = virt_to_page(csdata->mmap_base);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index e8d55a153a65..e88afe1a435c 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1188,9 +1188,9 @@ static void msc_mmap_close(struct vm_area_struct *vma)
mutex_unlock(&msc->buf_mutex);
}
-static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int msc_mmap_fault(struct vm_fault *vmf)
{
- struct msc_iter *iter = vma->vm_file->private_data;
+ struct msc_iter *iter = vmf->vma->vm_file->private_data;
struct msc *msc = iter->msc;
vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
@@ -1198,7 +1198,7 @@ static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
get_page(vmf->page);
- vmf->page->mapping = vma->vm_file->f_mapping;
+ vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
return 0;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index bd786b7bd30b..f46033984d07 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -92,7 +92,7 @@ static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
-static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static int vma_fault(struct vm_fault *);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
@@ -695,7 +695,7 @@ done:
* Local (non-chip) user memory is not mapped right away but as it is
* accessed by the user-level code.
*/
-static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vma_fault(struct vm_fault *vmf)
{
struct page *page;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 2d1eacf1dfed..9396c1807cc3 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -893,7 +893,7 @@ bail:
/*
* qib_file_vma_fault - handle a VMA page fault.
*/
-static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qib_file_vma_fault(struct vm_fault *vmf)
{
struct page *page;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1b5b8c5361c5..09bd3b290bb8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2672,7 +2672,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
return NULL;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size));
+ get_order(size), flag);
if (!page)
return NULL;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f5e02f8e7371..a8f7ae0eb7a4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3829,7 +3829,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;
- page = dma_alloc_from_contiguous(dev, count, order);
+ page = dma_alloc_from_contiguous(dev, count, order, flags);
if (page && iommu_no_mapping(dev) &&
page_to_phys(page) + size > dev->coherent_dma_mask) {
dma_release_from_contiguous(dev, page, count);
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index ba63ca57ed7e..36bd904946bd 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -434,8 +434,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
* now ...). Bounce buffers don't work very well for the data rates
* video capture has.
*/
-static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int videobuf_vm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct page *page;
dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 3907387b6d15..062bf6ca2625 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -121,8 +121,9 @@ void cxl_context_set_mapping(struct cxl_context *ctx,
mutex_unlock(&ctx->mapping_lock);
}
-static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cxl_mmap_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct cxl_context *ctx = vma->vm_file->private_data;
u64 area, offset;
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index af2e077da4b8..3641f1334cf0 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -926,8 +926,9 @@ again:
*
* Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries.
*/
-int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int gru_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
unsigned long expires;
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 5c3ce2459675..b5e308b50ed1 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -665,7 +665,7 @@ extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
int dsr_au_count, char *dsmap);
-extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
+extern int gru_fault(struct vm_fault *vmf);
extern struct gru_mm_struct *gru_register_mmu_notifier(void);
extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 90869cee2b20..ef5bf55f08a4 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1053,7 +1053,6 @@ out:
/**
* cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vma: VM area associated with mapping.
* @vmf: VM fault associated with current fault.
*
* To support error notification via MMIO, faults are 'caught' by this routine
@@ -1067,8 +1066,9 @@ out:
*
* Return: 0 on success, VM_FAULT_SIGBUS on failure
*/
-static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
@@ -1097,7 +1097,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (likely(!ctxi->err_recovery_active)) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
+ rc = ctxi->cxl_mmap_vmops->fault(vmf);
} else {
dev_dbg(dev, "%s: err recovery active, use err_page\n",
__func__);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e831e01f9fa6..29b86505f796 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1185,8 +1185,9 @@ sg_fasync(int fd, struct file *filp, int mode)
}
static int
-sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+sg_vma_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 969600779e44..2c3ffbcbd621 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -870,9 +870,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
mutex_unlock(&buffer->lock);
}
-static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_fault *vmf)
{
- struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_buffer *buffer = vmf->vma->vm_private_data;
unsigned long pfn;
int ret;
@@ -881,7 +881,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
- ret = vm_insert_pfn(vma, vmf->address, pfn);
+ ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 9afa6bec3e6f..896196c74cd2 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -321,7 +321,7 @@ out:
return fault_ret;
}
-static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ll_fault(struct vm_fault *vmf)
{
int count = 0;
bool printed = false;
@@ -335,7 +335,7 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
restart:
- result = ll_fault0(vma, vmf);
+ result = ll_fault0(vmf->vma, vmf);
LASSERT(!(result & VM_FAULT_LOCKED));
if (result == 0) {
struct page *vmpage = vmf->page;
@@ -362,8 +362,9 @@ restart:
return result;
}
-static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ll_page_mkwrite(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
int count = 0;
bool printed = false;
bool retry;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 3e9cf710501b..4c57755e06e7 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
struct vm_fault *vmf = cfio->ft_vmf;
- cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+ cfio->ft_flags = filemap_fault(vmf);
cfio->ft_flags_valid = 1;
if (vmf->page) {
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8041710b6972..5c1cb2df3a54 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -783,15 +783,15 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
return -1;
}
-static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tcmu_vma_fault(struct vm_fault *vmf)
{
- struct tcmu_dev *udev = vma->vm_private_data;
+ struct tcmu_dev *udev = vmf->vma->vm_private_data;
struct uio_info *info = &udev->uio_info;
struct page *page;
unsigned long offset;
void *addr;
- int mi = tcmu_find_mem_index(vma);
+ int mi = tcmu_find_mem_index(vmf->vma);
if (mi < 0)
return VM_FAULT_SIGBUS;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index fba021f5736a..31d95dc9c202 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -597,14 +597,14 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
return -1;
}
-static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int uio_vma_fault(struct vm_fault *vmf)
{
- struct uio_device *idev = vma->vm_private_data;
+ struct uio_device *idev = vmf->vma->vm_private_data;
struct page *page;
unsigned long offset;
void *addr;
- int mi = uio_find_mem_index(vma);
+ int mi = uio_find_mem_index(vmf->vma);
if (mi < 0)
return VM_FAULT_SIGBUS;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 91c22276c03b..9fb8b1e6ecc2 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1223,9 +1223,9 @@ static void mon_bin_vma_close(struct vm_area_struct *vma)
/*
* Map ring pages to user space.
*/
-static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int mon_bin_vma_fault(struct vm_fault *vmf)
{
- struct mon_reader_bin *rp = vma->vm_private_data;
+ struct mon_reader_bin *rp = vmf->vma->vm_private_data;
unsigned long offset, chunk_idx;
struct page *pageptr;
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 74b5bcac8bf2..37f69c061210 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -37,12 +37,11 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
}
/* this is to find and return the vmalloc-ed fb pages */
-static int fb_deferred_io_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int fb_deferred_io_fault(struct vm_fault *vmf)
{
unsigned long offset;
struct page *page;
- struct fb_info *info = vma->vm_private_data;
+ struct fb_info *info = vmf->vma->vm_private_data;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= info->fix.smem_len)
@@ -54,8 +53,8 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
get_page(page);
- if (vma->vm_file)
- page->mapping = vma->vm_file->f_mapping;
+ if (vmf->vma->vm_file)
+ page->mapping = vmf->vma->vm_file->f_mapping;
else
printk(KERN_ERR "no mapping available\n");
@@ -91,11 +90,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
/* vm_ops->page_mkwrite handler */
-static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct fb_info *info = vma->vm_private_data;
+ struct fb_info *info = vmf->vma->vm_private_data;
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *cur;
@@ -105,7 +103,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
deferred framebuffer IO. then if userspace touches a page
again, we repeat the same scheme */
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 181793f07852..9d2738e9217f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -615,8 +615,12 @@ static void virtballoon_remove(struct virtio_device *vdev)
cancel_work_sync(&vb->update_balloon_stats_work);
remove_common(vb);
+#ifdef CONFIG_BALLOON_COMPACTION
if (vb->vb_dev_info.inode)
iput(vb->vb_dev_info.inode);
+
+ kern_unmount(balloon_mnt);
+#endif
kfree(vb);
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2077a3ac7c0c..7a92a5e1d40c 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -804,10 +804,10 @@ static void privcmd_close(struct vm_area_struct *vma)
kfree(pages);
}
-static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int privcmd_fault(struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
- vma, vma->vm_start, vma->vm_end,
+ vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
vmf->pgoff, (void *)vmf->address);
return VM_FAULT_SIGBUS;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 6a0f3fa85ef7..3de3b4a89d89 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -534,11 +534,11 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
}
static int
-v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
struct page *page = vmf->page;
- struct file *filp = vma->vm_file;
+ struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
diff --git a/fs/aio.c b/fs/aio.c
index 873b4ca82ccb..7e2ab9c8e39c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -512,7 +512,7 @@ static int aio_setup_ring(struct kioctx *ctx)
ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_SHARED, 0, &unused);
+ MAP_SHARED, 0, &unused, NULL);
up_write(&mm->mmap_sem);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6a823719b6c5..adf16307842a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3147,7 +3147,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
-int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+int btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1e861a063721..ea1d500cfba6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8964,10 +8964,10 @@ again:
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
*/
-int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int btrfs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
@@ -9000,7 +9000,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = btrfs_delalloc_reserve_space(inode, page_start,
reserved_space);
if (!ret) {
- ret = file_update_time(vma->vm_file);
+ ret = file_update_time(vmf->vma->vm_file);
reserved = 1;
}
if (ret) {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e4b066cd912a..09860c0ec7c1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1386,8 +1386,9 @@ static void ceph_restore_sigs(sigset_t *oldset)
/*
* vm ops
*/
-static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ceph_filemap_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
@@ -1416,7 +1417,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
ci->i_inline_version == CEPH_INLINE_NONE) {
current->journal_info = vma->vm_file;
- ret = filemap_fault(vma, vmf);
+ ret = filemap_fault(vmf);
current->journal_info = NULL;
} else
ret = -EAGAIN;
@@ -1477,8 +1478,9 @@ out_restore:
/*
* Reuse write_begin here for simplicity.
*/
-static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ceph_page_mkwrite(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 98dc842e7245..aa3debbba826 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3282,7 +3282,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
* sure that it doesn't change while being written back.
*/
static int
-cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+cifs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
diff --git a/fs/dax.c b/fs/dax.c
index 3f1181563fb1..5ae8b71ebadc 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -925,12 +925,11 @@ static int dax_insert_mapping(struct address_space *mapping,
/**
* dax_pfn_mkwrite - handle first write to DAX page
- * @vma: The virtual memory area where the fault occurred
* @vmf: The description of the fault
*/
-int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int dax_pfn_mkwrite(struct vm_fault *vmf)
{
- struct file *file = vma->vm_file;
+ struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
void *entry, **slot;
pgoff_t index = vmf->pgoff;
@@ -1119,20 +1118,10 @@ static int dax_fault_return(int error)
return VM_FAULT_SIGBUS;
}
-/**
- * dax_iomap_fault - handle a page fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @ops: iomap ops passed from the file system
- *
- * When a page fault occurs, filesystems may call this helper in their fault
- * or mkwrite handler for DAX files. Assumes the caller has done all the
- * necessary locking for the page fault to proceed successfully.
- */
-int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- const struct iomap_ops *ops)
+static int dax_iomap_pte_fault(struct vm_fault *vmf,
+ const struct iomap_ops *ops)
{
- struct address_space *mapping = vma->vm_file->f_mapping;
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
@@ -1205,11 +1194,11 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
case IOMAP_MAPPED:
if (iomap.flags & IOMAP_F_NEW) {
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
major = VM_FAULT_MAJOR;
}
error = dax_insert_mapping(mapping, iomap.bdev, sector,
- PAGE_SIZE, &entry, vma, vmf);
+ PAGE_SIZE, &entry, vmf->vma, vmf);
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error == -EBUSY)
error = 0;
@@ -1247,7 +1236,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}
return vmf_ret;
}
-EXPORT_SYMBOL_GPL(dax_iomap_fault);
#ifdef CONFIG_FS_DAX_PMD
/*
@@ -1338,7 +1326,8 @@ fallback:
return VM_FAULT_FALLBACK;
}
-int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops)
+static int dax_iomap_pmd_fault(struct vm_fault *vmf,
+ const struct iomap_ops *ops)
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1446,5 +1435,33 @@ out:
trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
return result;
}
-EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
+#else
+static int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops)
+{
+ return VM_FAULT_FALLBACK;
+}
#endif /* CONFIG_FS_DAX_PMD */
+
+/**
+ * dax_iomap_fault - handle a page fault on a DAX file
+ * @vmf: The description of the fault
+ * @ops: iomap ops passed from the file system
+ *
+ * When a page fault occurs, filesystems may call this helper in
+ * their fault handler for DAX files. dax_iomap_fault() assumes the caller
+ * has done all the necessary locking for the page fault to proceed
+ * successfully.
+ */
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+ const struct iomap_ops *ops)
+{
+ switch (pe_size) {
+ case PE_SIZE_PTE:
+ return dax_iomap_pte_fault(vmf, ops);
+ case PE_SIZE_PMD:
+ return dax_iomap_pmd_fault(vmf, ops);
+ default:
+ return VM_FAULT_FALLBACK;
+ }
+}
+EXPORT_SYMBOL_GPL(dax_iomap_fault);
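With the PTE and PMD paths folded behind one entry point, a filesystem only needs thin wrappers that supply its iomap_ops. A condensed sketch of the expected wiring (the foofs_* names are illustrative; the real ext2/ext4/xfs conversions follow below and add their own locking):

static int foofs_dax_fault(struct vm_fault *vmf)
{
	return dax_iomap_fault(vmf, PE_SIZE_PTE, &foofs_iomap_ops);
}

static int foofs_dax_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, &foofs_iomap_ops);
}

static const struct vm_operations_struct foofs_dax_vm_ops = {
	.fault		= foofs_dax_fault,
	.huge_fault	= foofs_dax_huge_fault,
	.page_mkwrite	= foofs_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,	/* also takes only vmf now */
};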
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index b0f241528a30..b21891a6bfca 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -87,19 +87,19 @@ out_unlock:
* The default page_lock and i_size verification done by non-DAX fault paths
* is sufficient because ext2 doesn't support hole punching.
*/
-static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ext2_dax_fault(struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct ext2_inode_info *ei = EXT2_I(inode);
int ret;
if (vmf->flags & FAULT_FLAG_WRITE) {
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
}
down_read(&ei->dax_sem);
- ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
+ ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &ext2_iomap_ops);
up_read(&ei->dax_sem);
if (vmf->flags & FAULT_FLAG_WRITE)
@@ -107,16 +107,15 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret;
}
-static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int ext2_dax_pfn_mkwrite(struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct ext2_inode_info *ei = EXT2_I(inode);
loff_t size;
int ret;
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
down_read(&ei->dax_sem);
/* check that the faulting page hasn't raced with truncate */
@@ -124,7 +123,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
if (vmf->pgoff >= size)
ret = VM_FAULT_SIGBUS;
else
- ret = dax_pfn_mkwrite(vma, vmf);
+ ret = dax_pfn_mkwrite(vmf);
up_read(&ei->dax_sem);
sb_end_pagefault(inode->i_sb);
@@ -134,7 +133,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
static const struct vm_operations_struct ext2_dax_vm_ops = {
.fault = ext2_dax_fault,
/*
- * .pmd_fault is not supported for DAX because allocation in ext2
+ * .huge_fault is not supported for DAX because allocation in ext2
* cannot be reliably aligned to huge page sizes and so pmd faults
 * will always fail and fall back to regular faults.
*/
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index cee23b684f47..2fd17e8e4984 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2483,8 +2483,8 @@ extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
-extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int ext4_page_mkwrite(struct vm_fault *vmf);
+extern int ext4_filemap_fault(struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
extern void ext4_da_update_reserve_space(struct inode *inode,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 13021a054fc0..8210c1f43556 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -253,19 +253,20 @@ out:
}
#ifdef CONFIG_FS_DAX
-static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ext4_dax_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
{
int result;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE;
if (write) {
sb_start_pagefault(sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
}
down_read(&EXT4_I(inode)->i_mmap_sem);
- result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
+ result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
up_read(&EXT4_I(inode)->i_mmap_sem);
if (write)
sb_end_pagefault(sb);
@@ -273,25 +274,9 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return result;
}
-static int
-ext4_dax_pmd_fault(struct vm_fault *vmf)
+static int ext4_dax_fault(struct vm_fault *vmf)
{
- int result;
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct super_block *sb = inode->i_sb;
- bool write = vmf->flags & FAULT_FLAG_WRITE;
-
- if (write) {
- sb_start_pagefault(sb);
- file_update_time(vmf->vma->vm_file);
- }
- down_read(&EXT4_I(inode)->i_mmap_sem);
- result = dax_iomap_pmd_fault(vmf, &ext4_iomap_ops);
- up_read(&EXT4_I(inode)->i_mmap_sem);
- if (write)
- sb_end_pagefault(sb);
-
- return result;
+ return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}
/*
@@ -303,22 +288,21 @@ ext4_dax_pmd_fault(struct vm_fault *vmf)
* wp_pfn_shared() fails. Thus fault gets retried and things work out as
* desired.
*/
-static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct super_block *sb = inode->i_sb;
loff_t size;
int ret;
sb_start_pagefault(sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (vmf->pgoff >= size)
ret = VM_FAULT_SIGBUS;
else
- ret = dax_pfn_mkwrite(vma, vmf);
+ ret = dax_pfn_mkwrite(vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb);
@@ -327,7 +311,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
static const struct vm_operations_struct ext4_dax_vm_ops = {
.fault = ext4_dax_fault,
- .pmd_fault = ext4_dax_pmd_fault,
+ .huge_fault = ext4_dax_huge_fault,
.page_mkwrite = ext4_dax_fault,
.pfn_mkwrite = ext4_dax_pfn_mkwrite,
};
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 75212a6e69f8..41d8e53e5a7f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5821,8 +5821,9 @@ static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
return !buffer_mapped(bh);
}
-int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int ext4_page_mkwrite(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct page *page = vmf->page;
loff_t size;
unsigned long len;
@@ -5912,13 +5913,13 @@ out:
return ret;
}
-int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int ext4_filemap_fault(struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
int err;
down_read(&EXT4_I(inode)->i_mmap_sem);
- err = filemap_fault(vma, vmf);
+ err = filemap_fault(vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
return err;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 49f10dce817d..1edc86e874e3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -32,11 +32,10 @@
#include "trace.h"
#include <trace/events/f2fs.h>
-static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
int err;
@@ -58,7 +57,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
f2fs_balance_fs(sbi, dn.node_changed);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping ||
page_offset(page) > i_size_read(inode) ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2401c5dabb2a..e80bfd06daf5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2043,12 +2043,12 @@ static void fuse_vma_close(struct vm_area_struct *vma)
* - sync(2)
* - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
*/
-static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int fuse_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 016c11eaca7c..6fe2a59c6a9a 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -379,10 +379,10 @@ static int gfs2_allocate_page_backing(struct page *page)
* blocks allocated on disk to back that page.
*/
-static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int gfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
@@ -399,7 +399,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out;
- gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
+ gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
@@ -407,7 +407,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
goto out_uninit;
/* Update file times before taking page lock */
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
diff --git a/fs/iomap.c b/fs/iomap.c
index d89f70bbb952..d209f42cdcb8 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -445,11 +445,10 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
return length;
}
-int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- const struct iomap_ops *ops)
+int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
unsigned long length;
loff_t offset, size;
ssize_t ret;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 78219d5644e9..4f0535890b30 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -348,9 +348,9 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
kernfs_put_active(of->kn);
}
-static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kernfs_vma_fault(struct vm_fault *vmf)
{
- struct file *file = vma->vm_file;
+ struct file *file = vmf->vma->vm_file;
struct kernfs_open_file *of = kernfs_of(file);
int ret;
@@ -362,16 +362,15 @@ static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = VM_FAULT_SIGBUS;
if (of->vm_ops->fault)
- ret = of->vm_ops->fault(vma, vmf);
+ ret = of->vm_ops->fault(vmf);
kernfs_put_active(of->kn);
return ret;
}
-static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int kernfs_vma_page_mkwrite(struct vm_fault *vmf)
{
- struct file *file = vma->vm_file;
+ struct file *file = vmf->vma->vm_file;
struct kernfs_open_file *of = kernfs_of(file);
int ret;
@@ -383,7 +382,7 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
ret = 0;
if (of->vm_ops->page_mkwrite)
- ret = of->vm_ops->page_mkwrite(vma, vmf);
+ ret = of->vm_ops->page_mkwrite(vmf);
else
file_update_time(file);
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 39f57bef8531..0c3905e0542e 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -27,10 +27,9 @@
* XXX: how are we excluding truncate/invalidate here? Maybe need to lock
* page?
*/
-static int ncp_file_mmap_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int ncp_file_mmap_fault(struct vm_fault *vmf)
{
- struct inode *inode = file_inode(area->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
char *pg_addr;
unsigned int already_read;
unsigned int count;
@@ -90,7 +89,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
* -- nyc
*/
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
+ mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
return VM_FAULT_MAJOR;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 26dbe8b0c10d..668213984d68 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -528,10 +528,10 @@ const struct address_space_operations nfs_file_aops = {
* writable, implying that someone is about to modify the page through a
* shared-writable mapping
*/
-static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct file *filp = vma->vm_file;
+ struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
unsigned pagelen;
int ret = VM_FAULT_NOPAGE;
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 547381f3ce13..c5fa3dee72fc 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -51,8 +51,9 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
return err;
}
-static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int nilfs_page_mkwrite(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct page *page = vmf->page;
struct inode *inode = file_inode(vma->vm_file);
struct nilfs_transaction_info ti;
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 429088786e93..098f5c712569 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -44,17 +44,18 @@
#include "ocfs2_trace.h"
-static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+static int ocfs2_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
sigset_t oldset;
int ret;
ocfs2_block_signals(&oldset);
- ret = filemap_fault(area, vmf);
+ ret = filemap_fault(vmf);
ocfs2_unblock_signals(&oldset);
- trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno,
- area, vmf->page, vmf->pgoff);
+ trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
+ vma, vmf->page, vmf->pgoff);
return ret;
}
@@ -127,10 +128,10 @@ out:
return ret;
}
-static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ocfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct buffer_head *di_bh = NULL;
sigset_t oldset;
int ret;
@@ -160,7 +161,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
- ret = __ocfs2_page_mkwrite(vma->vm_file, di_bh, page);
+ ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
up_write(&OCFS2_I(inode)->ip_alloc_sem);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b73b4de8fb36..b8f06273353e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -292,101 +292,69 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
}
} else {
/*
- * Command line (1 string) occupies ARGV and maybe
- * extends into ENVP.
- */
- if (len1 + len2 <= *pos)
- goto skip_argv_envp;
- if (len1 <= *pos)
- goto skip_argv;
-
- p = arg_start + *pos;
- len = len1 - *pos;
- while (count > 0 && len > 0) {
- unsigned int _count, l;
- int nr_read;
- bool final;
-
- _count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
- if (nr_read < 0)
- rv = nr_read;
- if (nr_read <= 0)
- goto out_free_page;
-
- /*
- * Command line can be shorter than whole ARGV
- * even if last "marker" byte says it is not.
- */
- final = false;
- l = strnlen(page, nr_read);
- if (l < nr_read) {
- nr_read = l;
- final = true;
- }
-
- if (copy_to_user(buf, page, nr_read)) {
- rv = -EFAULT;
- goto out_free_page;
- }
-
- p += nr_read;
- len -= nr_read;
- buf += nr_read;
- count -= nr_read;
- rv += nr_read;
-
- if (final)
- goto out_free_page;
- }
-skip_argv:
- /*
* Command line (1 string) occupies ARGV and
* extends into ENVP.
*/
- if (len1 <= *pos) {
- p = env_start + *pos - len1;
- len = len1 + len2 - *pos;
- } else {
- p = env_start;
- len = len2;
+ struct {
+ unsigned long p;
+ unsigned long len;
+ } cmdline[2] = {
+ { .p = arg_start, .len = len1 },
+ { .p = env_start, .len = len2 },
+ };
+ loff_t pos1 = *pos;
+ unsigned int i;
+
+ i = 0;
+ while (i < 2 && pos1 >= cmdline[i].len) {
+ pos1 -= cmdline[i].len;
+ i++;
}
- while (count > 0 && len > 0) {
- unsigned int _count, l;
- int nr_read;
- bool final;
-
- _count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
- if (nr_read < 0)
- rv = nr_read;
- if (nr_read <= 0)
- goto out_free_page;
-
- /* Find EOS. */
- final = false;
- l = strnlen(page, nr_read);
- if (l < nr_read) {
- nr_read = l;
- final = true;
- }
-
- if (copy_to_user(buf, page, nr_read)) {
- rv = -EFAULT;
- goto out_free_page;
+ while (i < 2) {
+ p = cmdline[i].p + pos1;
+ len = cmdline[i].len - pos1;
+ while (count > 0 && len > 0) {
+ unsigned int _count, l;
+ int nr_read;
+ bool final;
+
+ _count = min3(count, len, PAGE_SIZE);
+ nr_read = access_remote_vm(mm, p, page, _count, 0);
+ if (nr_read < 0)
+ rv = nr_read;
+ if (nr_read <= 0)
+ goto out_free_page;
+
+ /*
+ * Command line can be shorter than whole ARGV
+ * even if last "marker" byte says it is not.
+ */
+ final = false;
+ l = strnlen(page, nr_read);
+ if (l < nr_read) {
+ nr_read = l;
+ final = true;
+ }
+
+ if (copy_to_user(buf, page, nr_read)) {
+ rv = -EFAULT;
+ goto out_free_page;
+ }
+
+ p += nr_read;
+ len -= nr_read;
+ buf += nr_read;
+ count -= nr_read;
+ rv += nr_read;
+
+ if (final)
+ goto out_free_page;
}
- p += nr_read;
- len -= nr_read;
- buf += nr_read;
- count -= nr_read;
- rv += nr_read;
-
- if (final)
- goto out_free_page;
+ /* Only first chunk can be read partially. */
+ pos1 = 0;
+ i++;
}
-skip_argv_envp:
- ;
}
out_free_page:
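The rewritten read path above replaces the two near-identical ARGV/ENVP loops with a small descriptor array: whole ranges lying entirely before the file position are skipped, then the remaining ranges are read in order, with only the first one starting at a non-zero offset. A standalone userspace-style sketch of that walk (hypothetical helper, simplified to memcpy and without the NUL-terminator handling):

#include <stddef.h>
#include <string.h>

struct range { const char *p; size_t len; };

static size_t read_ranges(const struct range r[2], size_t pos,
			  char *buf, size_t count)
{
	size_t copied = 0;
	unsigned int i = 0;

	/* skip ranges that end before the requested offset */
	while (i < 2 && pos >= r[i].len) {
		pos -= r[i].len;
		i++;
	}
	while (i < 2 && count > 0) {
		size_t n = r[i].len - pos;

		if (n > count)
			n = count;
		memcpy(buf + copied, r[i].p + pos, n);
		copied += n;
		count -= n;
		pos = 0;	/* only the first range is read partially */
		i++;
	}
	return copied;
}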
@@ -729,11 +697,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
task = get_proc_task(inode);
if (!task)
return -ESRCH;
- has_perms = has_pid_permissions(pid, task, 1);
+ has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS);
put_task_struct(task);
if (!has_perms) {
- if (pid->hide_pid == 2) {
+ if (pid->hide_pid == HIDEPID_INVISIBLE) {
/*
* Let's make getdents(), stat(), and open()
* consistent with each other. If a process
@@ -1769,7 +1737,7 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
stat->gid = GLOBAL_ROOT_GID;
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task) {
- if (!has_pid_permissions(pid, task, 2)) {
+ if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
rcu_read_unlock();
/*
* This doesn't prevent learning whether PID exists,
@@ -3200,7 +3168,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
int len;
cond_resched();
- if (!has_pid_permissions(ns, iter.task, 2))
+ if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
continue;
len = snprintf(name, sizeof(name), "%d", iter.tgid);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index f6a01f09f79d..06c73904d497 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -57,9 +57,9 @@ static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir,
struct rb_node *node = dir->subdir.rb_node;
while (node) {
- struct proc_dir_entry *de = container_of(node,
- struct proc_dir_entry,
- subdir_node);
+ struct proc_dir_entry *de = rb_entry(node,
+ struct proc_dir_entry,
+ subdir_node);
int result = proc_match(len, name, de);
if (result < 0)
@@ -80,8 +80,9 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir,
/* Figure out where to put new node */
while (*new) {
- struct proc_dir_entry *this =
- container_of(*new, struct proc_dir_entry, subdir_node);
+ struct proc_dir_entry *this = rb_entry(*new,
+ struct proc_dir_entry,
+ subdir_node);
int result = proc_match(de->namelen, de->name, this);
parent = *new;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 7ad9ed7958af..2cc7a8030275 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -107,7 +107,7 @@ static int proc_show_options(struct seq_file *seq, struct dentry *root)
if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
- if (pid->hide_pid != 0)
+ if (pid->hide_pid != HIDEPID_OFF)
seq_printf(seq, ",hidepid=%u", pid->hide_pid);
return 0;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 1988440b2049..b90da888b81a 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -58,7 +58,8 @@ int proc_parse_options(char *options, struct pid_namespace *pid)
case Opt_hidepid:
if (match_int(&args[0], &option))
return 0;
- if (option < 0 || option > 2) {
+ if (option < HIDEPID_OFF ||
+ option > HIDEPID_INVISIBLE) {
pr_err("proc: hidepid value must be between 0 and 2.\n");
return 0;
}
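The hidepid magic numbers used in fs/proc are replaced by named constants throughout this patch. Their assumed definition (matching the 0/1/2 values the option parser above still enforces) would look roughly like this in include/linux/pid_namespace.h:

enum {
	HIDEPID_OFF	  = 0,	/* proc entries visible to everyone */
	HIDEPID_NO_ACCESS = 1,	/* other users cannot read /proc/<pid>/ contents */
	HIDEPID_INVISIBLE = 2,	/* other users do not see foreign /proc/<pid>/ at all */
};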
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 5105b1599981..885d445afa0d 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -265,10 +265,10 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
* On s390 the fault handler is used for memory regions that can't be mapped
* directly with remap_pfn_range().
*/
-static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
- struct address_space *mapping = vma->vm_file->f_mapping;
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
pgoff_t index = vmf->pgoff;
struct page *page;
loff_t offset;
@@ -388,7 +388,7 @@ static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
}
return 0;
fail:
- do_munmap(vma->vm_mm, from, len);
+ do_munmap(vma->vm_mm, from, len, NULL);
return -EAGAIN;
}
@@ -481,7 +481,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
return 0;
fail:
- do_munmap(vma->vm_mm, vma->vm_start, len);
+ do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
return -EAGAIN;
}
#else
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 729677e18e36..efab7b64925b 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -342,31 +342,35 @@ static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen)
{
int ret;
- ret = lz4_compress(in, inlen, out, &outlen, workspace);
- if (ret) {
- pr_err("lz4_compress error, ret = %d!\n", ret);
+ ret = LZ4_compress_default(in, out, inlen, outlen, workspace);
+ if (!ret) {
+ pr_err("LZ4_compress_default error; compression failed!\n");
return -EIO;
}
- return outlen;
+ return ret;
}
static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen)
{
int ret;
- ret = lz4_decompress_unknownoutputsize(in, inlen, out, &outlen);
- if (ret) {
- pr_err("lz4_decompress error, ret = %d!\n", ret);
+ ret = LZ4_decompress_safe(in, out, inlen, outlen);
+ if (ret < 0) {
+ /*
+ * LZ4_decompress_safe will return an error code
+ * (< 0) if decompression failed
+ */
+ pr_err("LZ4_decompress_safe error, ret = %d!\n", ret);
return -EIO;
}
- return outlen;
+ return ret;
}
static void allocate_lz4(void)
{
- big_oops_buf_sz = lz4_compressbound(psinfo->bufsize);
+ big_oops_buf_sz = LZ4_compressBound(psinfo->bufsize);
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
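The old lz4_compress()/lz4_decompress_unknownoutputsize() wrappers returned 0 on success and reported sizes through a pointer; the updated API above returns the produced size directly, with 0 (compression) or a negative value (decompression) signalling failure. A hedged sketch of that convention, not the pstore code itself:

#include <linux/errno.h>
#include <linux/lz4.h>

static int lz4_roundtrip(const char *src, int src_len,
			 char *comp, int comp_max,
			 char *out, int out_max, void *wrkmem)
{
	int clen, dlen;

	clen = LZ4_compress_default(src, comp, src_len, comp_max, wrkmem);
	if (!clen)		/* 0: output did not fit or compression failed */
		return -EIO;

	dlen = LZ4_decompress_safe(comp, out, clen, out_max);
	if (dlen < 0)		/* < 0: corrupt or truncated input */
		return -EIO;

	return dlen;		/* number of decompressed bytes */
}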
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index ff4468bd18b0..95da65366548 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -97,7 +97,6 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct squashfs_lz4 *stream = strm;
void *buff = stream->input, *data;
int avail, i, bytes = length, res;
- size_t dest_len = output->length;
for (i = 0; i < b; i++) {
avail = min(bytes, msblk->devblksize - offset);
@@ -108,12 +107,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
put_bh(bh[i]);
}
- res = lz4_decompress_unknownoutputsize(stream->input, length,
- stream->output, &dest_len);
- if (res)
+ res = LZ4_decompress_safe(stream->input, stream->output,
+ length, output->length);
+
+ if (res < 0)
return -EIO;
- bytes = dest_len;
+ bytes = res;
data = squashfs_first_page(output);
buff = stream->output;
while (data) {
@@ -128,7 +128,7 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
squashfs_finish_page(output);
- return dest_len;
+ return res;
}
const struct squashfs_decompressor squashfs_lz4_comp_ops = {
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b0d783774c96..d9ae86f96df7 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1506,11 +1506,10 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
* mmap()d file has taken write protection fault and is being made writable.
* UBIFS must ensure page is budgeted for.
*/
-static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct timespec now = ubifs_current_time(inode);
struct ubifs_budget_req req = { .new_page = 1 };
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 18406158e13f..625b7285a37b 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -71,6 +71,13 @@ struct userfaultfd_fork_ctx {
struct list_head list;
};
+struct userfaultfd_unmap_ctx {
+ struct userfaultfd_ctx *ctx;
+ unsigned long start;
+ unsigned long end;
+ struct list_head list;
+};
+
struct userfaultfd_wait_queue {
struct uffd_msg msg;
wait_queue_t wq;
@@ -681,16 +688,16 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
userfaultfd_event_wait_completion(ctx, &ewq);
}
-void madvise_userfault_dontneed(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end)
+void userfaultfd_remove(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue ewq;
ctx = vma->vm_userfaultfd_ctx.ctx;
- if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_MADVDONTNEED))
+ if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
return;
userfaultfd_ctx_get(ctx);
@@ -700,15 +707,101 @@ void madvise_userfault_dontneed(struct vm_area_struct *vma,
msg_init(&ewq.msg);
- ewq.msg.event = UFFD_EVENT_MADVDONTNEED;
- ewq.msg.arg.madv_dn.start = start;
- ewq.msg.arg.madv_dn.end = end;
+ ewq.msg.event = UFFD_EVENT_REMOVE;
+ ewq.msg.arg.remove.start = start;
+ ewq.msg.arg.remove.end = end;
userfaultfd_event_wait_completion(ctx, &ewq);
down_read(&mm->mmap_sem);
}
+static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
+ unsigned long start, unsigned long end)
+{
+ struct userfaultfd_unmap_ctx *unmap_ctx;
+
+ list_for_each_entry(unmap_ctx, unmaps, list)
+ if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
+ unmap_ctx->end == end)
+ return true;
+
+ return false;
+}
+
+int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *unmaps)
+{
+ for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
+ struct userfaultfd_unmap_ctx *unmap_ctx;
+ struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
+
+ if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
+ has_unmap_ctx(ctx, unmaps, start, end))
+ continue;
+
+ unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
+ if (!unmap_ctx)
+ return -ENOMEM;
+
+ userfaultfd_ctx_get(ctx);
+ unmap_ctx->ctx = ctx;
+ unmap_ctx->start = start;
+ unmap_ctx->end = end;
+ list_add_tail(&unmap_ctx->list, unmaps);
+ }
+
+ return 0;
+}
+
+void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
+{
+ struct userfaultfd_unmap_ctx *ctx, *n;
+ struct userfaultfd_wait_queue ewq;
+
+ list_for_each_entry_safe(ctx, n, uf, list) {
+ msg_init(&ewq.msg);
+
+ ewq.msg.event = UFFD_EVENT_UNMAP;
+ ewq.msg.arg.remove.start = ctx->start;
+ ewq.msg.arg.remove.end = ctx->end;
+
+ userfaultfd_event_wait_completion(ctx->ctx, &ewq);
+
+ list_del(&ctx->list);
+ kfree(ctx);
+ }
+}
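These two helpers are intended to bracket an unmap: do_munmap() callers collect affected userfaultfd contexts on a local list via userfaultfd_unmap_prep() while mmap_sem is held, then deliver UFFD_EVENT_UNMAP once the lock is dropped, which is why do_munmap() and do_mmap_pgoff() grow an extra list argument elsewhere in this patch. A sketch of the expected caller pattern (assumed shape, mirroring the mm/mmap.c changes rather than quoting them):

static int munmap_and_notify(struct mm_struct *mm,
			     unsigned long start, size_t len)
{
	LIST_HEAD(uf);	/* filled by userfaultfd_unmap_prep() inside do_munmap() */
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, start, len, &uf);
	up_write(&mm->mmap_sem);

	/* deliver UFFD_EVENT_UNMAP only after mmap_sem is released */
	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}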
+
+void userfaultfd_exit(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma = mm->mmap;
+
+ /*
+ * We can do the vma walk without locking because the caller
+ * (exit_mm) knows it now has exclusive access
+ */
+ while (vma) {
+ struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
+
+ if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) {
+ struct userfaultfd_wait_queue ewq;
+
+ userfaultfd_ctx_get(ctx);
+
+ msg_init(&ewq.msg);
+ ewq.msg.event = UFFD_EVENT_EXIT;
+
+ userfaultfd_event_wait_completion(ctx, &ewq);
+
+ ctx->features &= ~UFFD_FEATURE_EVENT_EXIT;
+ }
+
+ vma = vma->vm_next;
+ }
+}
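On the user side, the new REMOVE/UNMAP/EXIT notifications arrive through the same read() interface as page-fault events. A rough monitor-loop sketch, assuming the uffd_msg field names used by the kernel code above:

#include <linux/userfaultfd.h>
#include <stdio.h>
#include <unistd.h>

static void drain_uffd_events(int uffd)
{
	struct uffd_msg msg;

	while (read(uffd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg)) {
		switch (msg.event) {
		case UFFD_EVENT_REMOVE:	/* madvise(MADV_DONTNEED/MADV_REMOVE) */
		case UFFD_EVENT_UNMAP:	/* munmap() of a registered range */
			printf("range %llx-%llx went away\n",
			       (unsigned long long)msg.arg.remove.start,
			       (unsigned long long)msg.arg.remove.end);
			break;
		case UFFD_EVENT_EXIT:	/* the monitored mm is exiting */
			return;
		}
	}
}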
+
static int userfaultfd_release(struct inode *inode, struct file *file)
{
struct userfaultfd_ctx *ctx = file->private_data;
@@ -1514,6 +1607,8 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
uffdio_copy.len);
mmput(ctx->mm);
+ } else {
+ return -ENOSPC;
}
if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
return -EFAULT;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 022014016d80..a50eca676670 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1379,22 +1379,21 @@ xfs_file_llseek(
*/
STATIC int
xfs_filemap_page_mkwrite(
- struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
int ret;
trace_xfs_filemap_page_mkwrite(XFS_I(inode));
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
- ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
} else {
- ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
+ ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
ret = block_page_mkwrite_return(ret);
}
@@ -1406,23 +1405,22 @@ xfs_filemap_page_mkwrite(
STATIC int
xfs_filemap_fault(
- struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
int ret;
trace_xfs_filemap_fault(XFS_I(inode));
/* DAX can shortcut the normal fault path on write faults! */
if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
- return xfs_filemap_page_mkwrite(vma, vmf);
+ return xfs_filemap_page_mkwrite(vmf);
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode))
- ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
else
- ret = filemap_fault(vma, vmf);
+ ret = filemap_fault(vmf);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
return ret;
@@ -1431,13 +1429,14 @@ xfs_filemap_fault(
/*
* Similar to xfs_filemap_fault(), the DAX fault path can call into here on
* both read and write faults. Hence we need to handle both cases. There is no
- * ->pmd_mkwrite callout for huge pages, so we have a single function here to
+ * ->huge_mkwrite callout for huge pages, so we have a single function here to
 * handle both cases. @flags carries the information on the type of fault
 * occurring.
*/
STATIC int
-xfs_filemap_pmd_fault(
- struct vm_fault *vmf)
+xfs_filemap_huge_fault(
+ struct vm_fault *vmf,
+ enum page_entry_size pe_size)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
struct xfs_inode *ip = XFS_I(inode);
@@ -1446,7 +1445,7 @@ xfs_filemap_pmd_fault(
if (!IS_DAX(inode))
return VM_FAULT_FALLBACK;
- trace_xfs_filemap_pmd_fault(ip);
+ trace_xfs_filemap_huge_fault(ip);
if (vmf->flags & FAULT_FLAG_WRITE) {
sb_start_pagefault(inode->i_sb);
@@ -1454,7 +1453,7 @@ xfs_filemap_pmd_fault(
}
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = dax_iomap_pmd_fault(vmf, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (vmf->flags & FAULT_FLAG_WRITE)
@@ -1471,11 +1470,10 @@ xfs_filemap_pmd_fault(
*/
static int
xfs_filemap_pfn_mkwrite(
- struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
struct xfs_inode *ip = XFS_I(inode);
int ret = VM_FAULT_NOPAGE;
loff_t size;
@@ -1483,7 +1481,7 @@ xfs_filemap_pfn_mkwrite(
trace_xfs_filemap_pfn_mkwrite(ip);
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
/* check if the faulting page hasn't raced with truncate */
xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
@@ -1491,7 +1489,7 @@ xfs_filemap_pfn_mkwrite(
if (vmf->pgoff >= size)
ret = VM_FAULT_SIGBUS;
else if (IS_DAX(inode))
- ret = dax_pfn_mkwrite(vma, vmf);
+ ret = dax_pfn_mkwrite(vmf);
xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
sb_end_pagefault(inode->i_sb);
return ret;
@@ -1500,7 +1498,7 @@ xfs_filemap_pfn_mkwrite(
static const struct vm_operations_struct xfs_file_vm_ops = {
.fault = xfs_filemap_fault,
- .pmd_fault = xfs_filemap_pmd_fault,
+ .huge_fault = xfs_filemap_huge_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = xfs_filemap_page_mkwrite,
.pfn_mkwrite = xfs_filemap_pfn_mkwrite,
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index fb7555e73a62..383ac227ce2c 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -687,7 +687,7 @@ DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
DEFINE_INODE_EVENT(xfs_filemap_fault);
-DEFINE_INODE_EVENT(xfs_filemap_pmd_fault);
+DEFINE_INODE_EVENT(xfs_filemap_huge_fault);
DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite);
DEFINE_INODE_EVENT(xfs_filemap_pfn_mkwrite);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 18af2bcefe6a..f4ca23b158b3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -36,6 +36,9 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma,
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
@@ -44,6 +47,13 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
BUILD_BUG();
return 0;
}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -121,8 +131,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address,
pmd_t *pmdp)
@@ -131,20 +141,40 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_clear(pmdp);
return pmd;
}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud = *pudp;
+
+ pud_clear(pudp);
+ return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp,
int full)
{
return pmdp_huge_get_and_clear(mm, address, pmdp);
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp,
+ int full)
+{
+ return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
@@ -181,6 +211,9 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pud_t *pudp);
#endif
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -192,6 +225,30 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
}
#endif
+#ifndef pte_savedwrite
+#define pte_savedwrite pte_write
+#endif
+
+#ifndef pte_mk_savedwrite
+#define pte_mk_savedwrite pte_mkwrite
+#endif
+
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
+#ifndef pmd_savedwrite
+#define pmd_savedwrite pmd_write
+#endif
+
+#ifndef pmd_mk_savedwrite
+#define pmd_mk_savedwrite pmd_mkwrite
+#endif
+
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
@@ -208,6 +265,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ pud_t old_pud = *pudp;
+
+ set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -273,12 +347,23 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ return pud_val(pud_a) == pud_val(pud_b);
+}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
BUILD_BUG();
return 0;
}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ BUILD_BUG();
+ return 0;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -640,6 +725,15 @@ static inline int pmd_write(pmd_t pmd)
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+static inline int pud_trans_huge(pud_t pud)
+{
+ return 0;
+}
+#endif
+
#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
@@ -785,8 +879,10 @@ static inline int pmd_clear_huge(pmd_t *pmd)
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
#endif
#endif
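Together with the tlb_remove_pud_tlb_entry() macro added to asm-generic/tlb.h in the next hunk, the new pudp_* helpers give huge-pud teardown the same shape as the existing pmd path. A hedged sketch of how they are meant to combine (not the actual mm/huge_memory.c code):

static pud_t zap_huge_pud_sketch(struct mmu_gather *tlb, pud_t *pud,
				 unsigned long addr)
{
	pud_t orig_pud;

	/* clear the entry and keep the old value for the caller */
	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
						tlb->fullmm);
	/* queue the HPAGE_PUD_SIZE range for TLB invalidation */
	tlb_remove_pud_tlb_entry(tlb, pud, addr);

	return orig_pud;
}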
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 7eed8cf3130a..4329bc6ef04b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -232,6 +232,20 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
+/**
+ * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
+ * invalidation. This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pud_tlb_entry
+#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+ __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
+ } while (0)
+
/*
* For things like page tables caches (ie caching addresses "inside" the
* page tables, like x86 does), for legacy reasons, flushing an
diff --git a/include/linux/bug.h b/include/linux/bug.h
index baff2e8fc8a8..5828489309bb 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
/*
* Since detected data corruption should stop operation on the affected
- * structures, this returns false if the corruption condition is found.
+ * structures. Return value must be checked and sanely acted on by caller.
*/
+static inline __must_check bool check_data_corruption(bool v) { return v; }
#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
- do { \
- if (unlikely(condition)) { \
+ check_data_corruption(({ \
+ bool corruption = unlikely(condition); \
+ if (corruption) { \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
pr_err(fmt, ##__VA_ARGS__); \
BUG(); \
} else \
WARN(1, fmt, ##__VA_ARGS__); \
- return false; \
} \
- } while (0)
+ corruption; \
+ }))
#endif /* _LINUX_BUG_H */
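With CHECK_DATA_CORRUPTION() now an expression, the early 'return false' moves out of the macro and into the caller, which must inspect the result itself. A hedged usage sketch modeled on the list debugging checks (names illustrative):

static bool sketch_list_add_valid(struct list_head *new,
				  struct list_head *prev,
				  struct list_head *next)
{
	if (CHECK_DATA_CORRUPTION(next->prev != prev,
			"list_add corruption: next->prev should be prev (%p)\n",
			prev) ||
	    CHECK_DATA_CORRUPTION(new == prev || new == next,
			"list_add double add: new=%p, prev=%p, next=%p\n",
			new, prev, next))
		return false;	/* corruption reported; refuse the operation */

	return true;
}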
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 6f0a91b37f68..03f32d0bd1d8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+ gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index fddd1a5eb322..811f7a915658 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -122,6 +122,7 @@
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
+#define __mode(x) __attribute__((mode(x)))
/* gcc version specific checks */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 1e77ff5818f1..d8a3dc042e1c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -38,8 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- const struct iomap_ops *ops);
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+ const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
@@ -71,19 +71,13 @@ static inline unsigned int dax_radix_order(void *entry)
return PMD_SHIFT - PAGE_SHIFT;
return 0;
}
-int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops);
#else
static inline unsigned int dax_radix_order(void *entry)
{
return 0;
}
-static inline int dax_iomap_pmd_fault(struct vm_fault *vmf,
- const struct iomap_ops *ops)
-{
- return VM_FAULT_FALLBACK;
-}
#endif
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+int dax_pfn_mkwrite(struct vm_fault *vmf);
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
diff --git a/include/linux/device.h b/include/linux/device.h
index bd684fc8ec1d..a48a7ff70164 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1139,6 +1139,7 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
+void assert_held_device_hotplug(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index fec734df1524..b67bf6ac907d 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order);
+ unsigned int order, gfp_t gfp_mask);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order)
+ unsigned int order, gfp_t gfp_mask)
{
return NULL;
}
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0fe0b6295ab5..db373b9d3223 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -541,7 +541,7 @@ static inline bool pm_suspended_storage(void)
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype);
+ unsigned migratetype, gfp_t gfp_mask);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f0029e786205..a3762d49ba39 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+#else
+static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
+{
+}
+#endif
+
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
@@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
extern int zap_huge_pmd(struct mmu_gather *tlb,
struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr);
+extern int zap_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec);
@@ -26,8 +41,10 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
- pfn_t pfn, bool write);
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, pfn_t pfn, bool write);
+int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
@@ -58,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags);
-
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
+#define HPAGE_PUD_SHIFT PUD_SHIFT
+#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
+
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
#define transparent_hugepage_enabled(__vma) \
@@ -118,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct page *page);
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address);
+
+#define split_huge_pud(__vma, __pud, __address) \
+ do { \
+ pud_t *____pud = (__pud); \
+ if (pud_trans_huge(*____pud) \
+ || pud_devmap(*____pud)) \
+ __split_huge_pud(__vma, __pud, __address); \
+ } while (0)
+
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -126,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma);
+extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
+ struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
@@ -136,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
else
return NULL;
}
+static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+ struct vm_area_struct *vma)
+{
+ VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
+ if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ return __pud_trans_huge_lock(pud, vma);
+ else
+ return NULL;
+}
static inline int hpage_nr_pages(struct page *page)
{
if (unlikely(PageTransHuge(page)))
@@ -143,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page)
return 1;
}
+struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, int flags);
+struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, int flags);
+
extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
@@ -157,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return is_huge_zero_page(pmd_page(pmd));
}
+static inline bool is_huge_zero_pud(pud_t pud)
+{
+ return false;
+}
+
struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
@@ -167,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm);
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
+
#define hpage_nr_pages(x) 1
#define transparent_hugepage_enabled(__vma) 0
@@ -195,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
unsigned long address, bool freeze, struct page *page) {}
+#define split_huge_pud(__vma, __pmd, __address) \
+ do { } while (0)
+
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
@@ -212,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
{
return NULL;
}
+static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+ struct vm_area_struct *vma)
+{
+ return NULL;
+}
static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
@@ -223,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pud(pud_t pud)
+{
+ return false;
+}
+
static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
return;
@@ -233,6 +300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
{
return NULL;
}
+
+static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud, int flags)
+{
+ return NULL;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 891459caa278..7291810067eb 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -79,8 +79,7 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
-int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- const struct iomap_ops *ops);
+int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
loff_t start, loff_t len, const struct iomap_ops *ops);
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 1c30014ed176..d29e1e21bf3f 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 820c0ad54a01..c908b25bf5a5 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -52,7 +52,7 @@ void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
unsigned long *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
-void kasan_cache_destroy(struct kmem_cache *cache);
+void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -98,7 +98,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
size_t *size,
unsigned long *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
-static inline void kasan_cache_destroy(struct kmem_cache *cache) {}
+static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cb09238f6d32..4c26dc3a8295 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -100,16 +100,18 @@
)
/*
- * Divide positive or negative dividend by positive divisor and round
- * to closest integer. Result is undefined for negative divisors and
- * for negative dividends if the divisor variable type is unsigned.
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if the dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
*/
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
typeof(x) __x = x; \
typeof(divisor) __d = divisor; \
(((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
+ ((typeof(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
(((__x) + ((__d) / 2)) / (__d)) : \
(((__x) - ((__d) / 2)) / (__d)); \
} \
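Illustrative values (not part of the patch) showing the effect of the added sign test
when both operand types are signed:

	DIV_ROUND_CLOSEST(17, 5);	/*  3				*/
	DIV_ROUND_CLOSEST(-17, 5);	/* -3, as before		*/
	DIV_ROUND_CLOSEST(17, -5);	/* -3, previously undefined	*/
	DIV_ROUND_CLOSEST(-17, -5);	/*  3, previously undefined	*/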
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 6b784c59f321..394e3d9213b8 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -1,87 +1,648 @@
-#ifndef __LZ4_H__
-#define __LZ4_H__
-/*
- * LZ4 Kernel Interface
+/* LZ4 Kernel Interface
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * This file is based on the original header file
+ * for LZ4 - Fast LZ compression algorithm.
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*/
-#define LZ4_MEM_COMPRESS (16384)
-#define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *)))
+#ifndef __LZ4_H__
+#define __LZ4_H__
+
+#include <linux/types.h>
+#include <linux/string.h> /* memset, memcpy */
+
+/*-************************************************************************
+ * CONSTANTS
+ **************************************************************************/
/*
- * lz4_compressbound()
- * Provides the maximum size that LZ4 may output in a "worst case" scenario
- * (input data not compressible)
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes
+ * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
*/
-static inline size_t lz4_compressbound(size_t isize)
-{
- return isize + (isize / 255) + 16;
-}
+#define LZ4_MEMORY_USAGE 14
+
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) (\
+ (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \
+ ? 0 \
+ : (isize) + ((isize)/255) + 16)
+
+#define LZ4_ACCELERATION_DEFAULT 1
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)
+
+#define LZ4HC_MIN_CLEVEL 3
+#define LZ4HC_DEFAULT_CLEVEL 9
+#define LZ4HC_MAX_CLEVEL 16
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1)
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+/*-************************************************************************
+ * STREAMING CONSTANTS AND STRUCTURES
+ **************************************************************************/
+#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4)
+#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
+
+#define LZ4_STREAMHCSIZE 262192
+#define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t))
+
+#define LZ4_STREAMDECODESIZE_U64 4
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \
+ sizeof(unsigned long long))
/*
- * lz4_compress()
- * src : source address of the original data
- * src_len : size of the original data
- * dst : output buffer address of the compressed data
- * This requires 'dst' of size LZ4_COMPRESSBOUND.
- * dst_len : is the output size, which is returned after compress done
- * workmem : address of the working memory.
- * This requires 'workmem' of size LZ4_MEM_COMPRESS.
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer and workmem must be already allocated with
- * the defined size.
- */
-int lz4_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
-
- /*
- * lz4hc_compress()
- * src : source address of the original data
- * src_len : size of the original data
- * dst : output buffer address of the compressed data
- * This requires 'dst' of size LZ4_COMPRESSBOUND.
- * dst_len : is the output size, which is returned after compress done
- * workmem : address of the working memory.
- * This requires 'workmem' of size LZ4HC_MEM_COMPRESS.
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer and workmem must be already allocated with
- * the defined size.
- */
-int lz4hc_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
+ * LZ4_stream_t - information structure to track an LZ4 stream.
+ */
+typedef struct {
+ uint32_t hashTable[LZ4_HASH_SIZE_U32];
+ uint32_t currentOffset;
+ uint32_t initCheck;
+ const uint8_t *dictionary;
+ uint8_t *bufferStart;
+ uint32_t dictSize;
+} LZ4_stream_t_internal;
+typedef union {
+ unsigned long long table[LZ4_STREAMSIZE_U64];
+ LZ4_stream_t_internal internal_donotuse;
+} LZ4_stream_t;
/*
- * lz4_decompress()
- * src : source address of the compressed data
- * src_len : is the input size, whcih is returned after decompress done
- * dest : output buffer address of the decompressed data
- * actual_dest_len: is the size of uncompressed data, supposing it's known
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer must be already allocated.
- * slightly faster than lz4_decompress_unknownoutputsize()
- */
-int lz4_decompress(const unsigned char *src, size_t *src_len,
- unsigned char *dest, size_t actual_dest_len);
+ * LZ4_streamHC_t - information structure to track an LZ4HC stream.
+ */
+typedef struct {
+ unsigned int hashTable[LZ4HC_HASHTABLESIZE];
+ unsigned short chainTable[LZ4HC_MAXD];
+ /* next block to continue on current prefix */
+ const unsigned char *end;
+ /* All index relative to this position */
+ const unsigned char *base;
+ /* alternate base for extDict */
+ const unsigned char *dictBase;
+ /* below that point, need extDict */
+ unsigned int dictLimit;
+ /* below that point, no more dict */
+ unsigned int lowLimit;
+ /* index from which to continue dict update */
+ unsigned int nextToUpdate;
+ unsigned int compressionLevel;
+} LZ4HC_CCtx_internal;
+typedef union {
+ size_t table[LZ4_STREAMHCSIZE_SIZET];
+ LZ4HC_CCtx_internal internal_donotuse;
+} LZ4_streamHC_t;
/*
- * lz4_decompress_unknownoutputsize()
- * src : source address of the compressed data
- * src_len : is the input size, therefore the compressed size
- * dest : output buffer address of the decompressed data
- * dest_len: is the max size of the destination buffer, which is
- * returned with actual size of decompressed data after
- * decompress done
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer must be already allocated.
- */
-int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
- unsigned char *dest, size_t *dest_len);
+ * LZ4_streamDecode_t - information structure to track an
+ * LZ4 stream during decompression.
+ *
+ * init this structure using LZ4_setStreamDecode (or memset()) before first use
+ */
+typedef struct {
+ const uint8_t *externalDict;
+ size_t extDictSize;
+ const uint8_t *prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+typedef union {
+ unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} LZ4_streamDecode_t;
+
+/*-************************************************************************
+ * SIZE OF STATE
+ **************************************************************************/
+#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE
+#define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE
+
+/*-************************************************************************
+ * Compression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_compressBound() - Max. output size in worst case scenarios
+ * @isize: Size of the input data
+ *
+ * Return: Max. size LZ4 may output in a "worst case" scenario
+ * (data not compressible)
+ */
+static inline int LZ4_compressBound(size_t isize)
+{
+ return LZ4_COMPRESSBOUND(isize);
+}
+
+/**
+ * LZ4_compress_default() - Compress data from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Compresses 'inputSize' bytes from buffer 'source'
+ * into already allocated 'dest' buffer of size 'maxOutputSize'.
+ * Compression is guaranteed to succeed if
+ * 'maxOutputSize' >= LZ4_compressBound(inputSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'source' into a more limited 'dest' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * As a consequence, 'dest' content is not valid.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ * (necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem);
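A minimal usage sketch of the new interface (illustrative only; buffer names, allocation
and error handling are assumptions, not part of this patch):

	#include <linux/lz4.h>
	#include <linux/slab.h>

	static int sample_compress(const char *src, int src_len,
				   char *dst, int dst_cap)
	{
		void *wrkmem = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
		int comp_len;

		if (!wrkmem)
			return -ENOMEM;

		/* dst_cap >= LZ4_compressBound(src_len) guarantees success */
		comp_len = LZ4_compress_default(src, dst, src_len, dst_cap, wrkmem);
		kfree(wrkmem);

		return comp_len ? comp_len : -ENOSPC;	/* 0 means 'dst' was too small */
	}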
+
+/**
+ * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @acceleration: acceleration factor
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Same as LZ4_compress_default(), but allows selecting an "acceleration"
+ * factor. The larger the acceleration value, the faster the algorithm,
+ * but also the lesser the compression. It's a trade-off. It can be fine tuned,
+ * with each successive value providing roughly +~3% to speed.
+ * An acceleration value of "1" is the same as regular LZ4_compress_default()
+ * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ * (necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem);
+
+/**
+ * LZ4_compress_destSize() - Compress as much data as possible
+ * from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @sourceSizePtr: will be modified to indicate how many bytes were read
+ * from 'source' to fill 'dest'. New value is necessarily <= old value.
+ * @targetDestSize: Size of buffer 'dest' which must be already allocated
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Reverse the logic, by compressing as much data as possible
+ * from 'source' buffer into already allocated buffer 'dest'
+ * of size 'targetDestSize'.
+ * This function either compresses the entire 'source' content into 'dest'
+ * if it's large enough, or fills the 'dest' buffer completely with as much data as
+ * possible from 'source'.
+ *
+ * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize)
+ * or 0 if compression fails
+ */
+int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr,
+ int targetDestSize, void *wrkmem);
+
+/*-************************************************************************
+ * Decompression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * This function fully respects memory boundaries for properly formed
+ * compressed data.
+ * It is a bit faster than LZ4_decompress_safe().
+ * However, it does not provide any protection against intentionally
+ * modified data stream (malicious input).
+ * Use this function in trusted environment only
+ * (data to decode comes from a trusted source).
+ *
+ * Return: number of bytes read from the source buffer
+ * or a negative result if decompression fails.
+ */
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe() - Decompression protected against buffer overflow
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * If the source stream is detected malformed, the function will
+ * stop decoding and return a negative result.
+ * This function is protected against buffer overflow exploits,
+ * including malicious data packets. It never writes outside output buffer,
+ * nor reads outside input buffer.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize);
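And the matching decompression side, hedged the same way (buffer names are assumptions):

	/* 'comp'/'comp_len' hold the compressed block, 'out'/'out_cap' the target */
	static int sample_decompress(const char *comp, int comp_len,
				     char *out, int out_cap)
	{
		int n = LZ4_decompress_safe(comp, out, comp_len, out_cap);

		return n < 0 ? -EIO : n;	/* negative: malformed/hostile input */
	}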
+
+/**
+ * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize'
+ * at position 'source' into buffer 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the decompressed data which must be
+ * already allocated
+ * @compressedSize: is the precise full size of the compressed block.
+ * @targetOutputSize: the decompression operation will try
+ * to stop as soon as 'targetOutputSize' has been reached
+ * @maxDecompressedSize: is the size of destination buffer
+ *
+ * This function decompresses a compressed block of size 'compressedSize'
+ * at position 'source' into destination buffer 'dest'
+ * of size 'maxDecompressedSize'.
+ * The function tries to stop decompressing operation as soon as
+ * 'targetOutputSize' has been reached, reducing decompression time.
+ * This function never writes outside of output buffer,
+ * and never reads outside of input buffer.
+ * It is therefore protected against malicious data packets.
+ *
+ * Return: the number of bytes decoded in the destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ *
+ */
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+ int compressedSize, int targetOutputSize, int maxDecompressedSize);
+
+/*-************************************************************************
+ * LZ4 HC Compression
+ **************************************************************************/
+
+/**
+ * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @dstCapacity: full or partial size of buffer 'dst',
+ * which must be already allocated
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ * value between 1 and LZ4HC_MAX_CLEVEL will work.
+ * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
+ *
+ * Compress data from 'src' into 'dst', using the more powerful
+ * but slower "HC" algorithm. Compression is guaranteed to succeed if
+ * `dstCapacity >= LZ4_compressBound(srcSize)
+ *
+ * Return : the number of bytes written into 'dst' or 0 if compression fails.
+ */
+int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
+ int compressionLevel, void *wrkmem);
+
+/**
+ * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure
+ * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ * value between 1 and LZ4HC_MAX_CLEVEL will work.
+ * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ *
+ * An LZ4_streamHC_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_streamHC_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel);
+
+/**
+ * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC
+ * @streamHCPtr: pointer to the LZ4HC_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4HC_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary,
+ int dictSize);
+
+/**
+ * LZ4_compress_HC_continue() - Compress 'src' using data from previously
+ * compressed blocks as a dictionary using the HC algorithm
+ * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ * which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ *
+ * These functions compress data in successive blocks of any size, using
+ * previous blocks as dictionary. One key assumption is that previous
+ * blocks (up to 64 KB) remain read-accessible while
+ * compressing next blocks. There is an exception for ring buffers,
+ * which can be smaller than 64 KB.
+ * Ring buffers scenario is automatically detected and handled by
+ * LZ4_compress_HC_continue().
+ * Before starting compression, state must be properly initialized,
+ * using LZ4_resetStreamHC().
+ * A first "fictional block" can then be designated as
+ * initial dictionary, using LZ4_loadDictHC() (Optional).
+ * Then, use LZ4_compress_HC_continue()
+ * to compress each successive block. Previous memory blocks
+ * (including initial dictionary when present) must remain accessible
+ * and unmodified during compression.
+ * 'dst' buffer should be sized to handle worst case scenarios, using
+ * LZ4_compressBound(), to ensure operation success.
+ * If, for any reason, previous data blocks can't be preserved unmodified
+ * in memory during next compression block,
+ * you must save it to a safer memory space, using LZ4_saveDictHC().
+ * Return value of LZ4_saveDictHC() is the size of dictionary
+ * effectively saved into 'safeBuffer'.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src,
+ char *dst, int srcSize, int maxDstSize);
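A sketch of the block-by-block HC workflow described above (illustrative; the block
arrays, sizes and the vmalloc() choice for the ~256 KB context are assumptions):

	LZ4_streamHC_t *ctx = vmalloc(LZ4HC_MEM_COMPRESS);
	int i, n;

	LZ4_resetStreamHC(ctx, LZ4HC_DEFAULT_CLEVEL);
	LZ4_loadDictHC(ctx, dict, dict_len);		/* optional seed dictionary */

	for (i = 0; i < nr_blocks; i++) {
		/* previous blocks (up to 64 KB back) must remain readable here */
		n = LZ4_compress_HC_continue(ctx, block[i], out[i], block_len[i],
					     LZ4_compressBound(block_len[i]));
		if (!n)
			break;				/* output buffer too small */
	}

	LZ4_saveDictHC(ctx, safe_buf, sizeof(safe_buf));	/* if blocks may vanish */
	vfree(ctx);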
+
+/**
+ * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream
+ * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure
+ * @safeBuffer: buffer to save dictionary to, must be already allocated
+ * @maxDictSize: size of 'safeBuffer'
+ *
+ * If previously compressed data block is not guaranteed
+ * to remain available at its memory location,
+ * save it into a safer place (char *safeBuffer).
+ * Note : you don't need to call LZ4_loadDictHC() afterwards,
+ * dictionary is immediately usable, you can therefore call
+ * LZ4_compress_HC_continue().
+ *
+ * Return : saved dictionary size in bytes (necessarily <= maxDictSize),
+ * or 0 if error.
+ */
+int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer,
+ int maxDictSize);
+
+/*-*********************************************
+ * Streaming Compression Functions
+ ***********************************************/
+
+/**
+ * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure
+ * @LZ4_stream: pointer to the 'LZ4_stream_t' structure
+ *
+ * An LZ4_stream_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_stream_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream);
+
+/**
+ * LZ4_loadDict() - Load a static dictionary into LZ4_stream
+ * @streamPtr: pointer to the LZ4_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary,
+ int dictSize);
+
+/**
+ * LZ4_saveDict() - Save static dictionary from LZ4_stream
+ * @streamPtr: pointer to the 'LZ4_stream_t' structure
+ * @safeBuffer: buffer to save dictionary to, must be already allocated
+ * @dictSize: size of 'safeBuffer'
+ *
+ * If previously compressed data block is not guaranteed
+ * to remain available at its memory location,
+ * save it into a safer place (char *safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards,
+ * dictionary is immediately usable, you can therefore call
+ * LZ4_compress_fast_continue().
+ *
+ * Return : saved dictionary size in bytes (necessarily <= dictSize),
+ * or 0 if error.
+ */
+int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize);
+
+/**
+ * LZ4_compress_fast_continue() - Compress 'src' using data from previously
+ * compressed blocks as a dictionary
+ * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ * which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @acceleration: acceleration factor
+ *
+ * Compress buffer content 'src', using data from previously compressed blocks
+ * as dictionary to improve compression ratio.
+ * Important : Previous data blocks are assumed to still
+ * be present and unmodified !
+ * If maxDstSize >= LZ4_compressBound(srcSize),
+ * compression is guaranteed to succeed, and runs faster.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
+ char *dst, int srcSize, int maxDstSize, int acceleration);
+
+/**
+ * LZ4_setStreamDecode() - Instruct where to find dictionary
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @dictionary: dictionary to use
+ * @dictSize: size of dictionary
+ *
+ * Use this function to instruct where to find the dictionary.
+ * Setting a size of 0 is allowed (same effect as reset).
+ *
+ * Return: 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize);
+
+/**
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB)
+ * In the case of ring buffers, the decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ * (block boundaries at same positions) In which case,
+ * the decoding & encoding ring buffer can have any size,
+ * including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * maxBlockSize is implementation dependent.
+ * It's the maximum size you intend to compress into a single block.
+ * In which case, encoding and decoding buffers do not need
+ * to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ * In which case, encoding and decoding buffers do not need to be
+ * synchronized, and encoding ring buffer can have any size,
+ * including larger than the decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode()
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize);
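A corresponding streaming-decode sketch (illustrative; the per-block buffers and
counters are assumptions):

	LZ4_streamDecode_t sd;
	int i, n, pos = 0;

	LZ4_setStreamDecode(&sd, NULL, 0);		/* no initial dictionary */
	for (i = 0; i < nr_blocks; i++) {
		/* previously decoded output (up to 64 KB back) must still be in place */
		n = LZ4_decompress_safe_continue(&sd, in[i], out + pos,
						 in_len[i], out_cap - pos);
		if (n < 0)
			break;				/* malformed block */
		pos += n;
	}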
+
+/**
+ * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB)
+ * In the case of ring buffers, the decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ * (block boundaries at same positions) In which case,
+ * the decoding & encoding ring buffer can have any size,
+ * including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * maxBlockSize is implementation dependent.
+ * It's the maximum size you intend to compress into a single block.
+ * In which case, encoding and decoding buffers do not need
+ * to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ * In which case, encoding and decoding buffers do not need to be
+ * synchronized, and encoding ring buffer can have any size,
+ * including larger than the decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode()
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
+ * followed by LZ4_decompress_safe_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_safe_continue().
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize, const char *dictStart,
+ int dictSize);
+
+/**
+ * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
+ * followed by LZ4_decompress_fast_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_fast_continue().
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart, int dictSize);
+
#endif
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 38bcf00cbed3..bdfc65af4152 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -42,6 +42,7 @@ struct memblock_type {
unsigned long max; /* size of the allocated array */
phys_addr_t total_size; /* size of all regions */
struct memblock_region *regions;
+ char *name;
};
struct memblock {
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 093607f90b91..b723a686fc10 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,9 +109,6 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
-extern int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state,
- unsigned long from_state_req);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ae8d475a9385..fa76b516fa47 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -37,7 +37,7 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
-extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
@@ -56,6 +56,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
free_page_t free, unsigned long private, enum migrate_mode mode,
int reason)
{ return -ENOSYS; }
+static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return -EBUSY; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 574bc157a27c..0d65dd72c0f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -314,6 +314,9 @@ struct vm_fault {
unsigned long address; /* Faulting virtual address */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
+ pud_t *pud; /* Pointer to pud entry matching
+ * the 'address'
+ */
pte_t orig_pte; /* Value of PTE at the time of fault */
struct page *cow_page; /* Page handler may use for COW fault */
@@ -341,6 +344,13 @@ struct vm_fault {
*/
};
+/* page entry size for vm->huge_fault() */
+enum page_entry_size {
+ PE_SIZE_PTE = 0,
+ PE_SIZE_PMD,
+ PE_SIZE_PUD,
+};
+
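For illustration (not from this patch): a DAX-backed filesystem can route all three
entry sizes through the reworked dax_iomap_fault(); the iomap_ops instance and handler
names below are assumptions.

	static int sample_fault(struct vm_fault *vmf)
	{
		return dax_iomap_fault(vmf, PE_SIZE_PTE, &sample_iomap_ops);
	}

	static int sample_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
	{
		return dax_iomap_fault(vmf, pe_size, &sample_iomap_ops);
	}

	static const struct vm_operations_struct sample_vm_ops = {
		.fault		= sample_fault,
		.huge_fault	= sample_huge_fault,	/* PMD and PUD sized faults */
	};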
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -350,17 +360,17 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*mremap)(struct vm_area_struct * area);
- int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- int (*pmd_fault)(struct vm_fault *vmf);
+ int (*fault)(struct vm_fault *vmf);
+ int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
- int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@@ -416,6 +426,10 @@ static inline int pmd_devmap(pmd_t pmd)
{
return 0;
}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
#endif
/*
@@ -1154,16 +1168,6 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
-int shmem_zero_setup(struct vm_area_struct *);
-#ifdef CONFIG_SHMEM
-bool shmem_mapping(struct address_space *mapping);
-#else
-static inline bool shmem_mapping(struct address_space *mapping)
-{
- return false;
-}
-#endif
-
extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
@@ -1191,6 +1195,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
/**
* mm_walk - callbacks for walk_page_range
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+ * this handler should only handle pud_trans_huge() puds.
+ * the pmd_entry or pte_entry callbacks will be used for
+ * regular PUDs.
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
@@ -1210,6 +1218,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* (see the comment on walk_page_range() for more details)
*/
struct mm_walk {
+ int (*pud_entry)(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1793,8 +1803,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
-extern void __init pagecache_init(void);
+/*
+ * No scalability reason to split PUD locks yet, but follow the same pattern
+ * as the PMD locks to make it easier if we decide to. The VM should not be
+ * considered ready to switch to split PUD locks yet; there may be places
+ * which need to be converted from page_table_lock.
+ */
+static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
+{
+ return &mm->page_table_lock;
+}
+static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
+{
+ spinlock_t *ptl = pud_lockptr(mm, pud);
+
+ spin_lock(ptl);
+ return ptl;
+}
+
+extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
@@ -2003,8 +2031,10 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int split_vma(struct mm_struct *,
- struct vm_area_struct *, unsigned long addr, int new_below);
+extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
+ unsigned long addr, int new_below);
+extern int split_vma(struct mm_struct *, struct vm_area_struct *,
+ unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *);
@@ -2052,18 +2082,22 @@ extern int install_special_mapping(struct mm_struct *mm,
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
+ unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+ struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
+extern int do_munmap(struct mm_struct *, unsigned long, size_t,
+ struct list_head *uf);
static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate)
+ unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf)
{
- return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
+ return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}
#ifdef CONFIG_MMU
@@ -2124,10 +2158,10 @@ extern void truncate_inode_pages_range(struct address_space *,
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern int filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
-extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int filemap_page_mkwrite(struct vm_fault *vmf);
/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e7116d..e030a68ead7e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page,
list_add(&page->lru, &lruvec->lists[lru]);
}
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
+{
+ update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index a1a210d59961..51891fb0d3ce 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -381,6 +381,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pmd; \
})
+#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pud_t ___pud; \
+ \
+ ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
+ mmu_notifier_invalidate_range(___mm, ___haddr, \
+ ___haddr + HPAGE_PUD_SIZE); \
+ \
+ ___pud; \
+})
+
#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
@@ -475,6 +488,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 82fc632fd11d..8e02b3750fe0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -236,8 +236,6 @@ struct lruvec {
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 033fc7bbcefa..a49b3259cad7 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -90,6 +90,13 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
{
return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
+{
+ return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
+}
+#endif
#endif
#ifdef __HAVE_ARCH_PTE_DEVMAP
@@ -106,5 +113,10 @@ static inline bool pfn_t_devmap(pfn_t pfn)
}
pte_t pte_mkdevmap(pte_t pte);
pmd_t pmd_mkdevmap(pmd_t pmd);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+pud_t pud_mkdevmap(pud_t pud);
#endif
+#endif /* __HAVE_ARCH_PTE_DEVMAP */
+
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 34cce96741bc..c2a989dee876 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -21,6 +21,12 @@ struct pidmap {
struct fs_pin;
+enum { /* definitions for pid_namespace's hide_pid field */
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+};
+
struct pid_namespace {
struct kref kref;
struct pidmap pidmap[PIDMAP_ENTRIES];
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index d076183e49be..9702b6e183bc 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+ .propagate = rbname ## _propagate, \
+ .copy = rbname ## _copy, \
+ .rotate = rbname ## _rotate \
};
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 15321fb1df6b..8c89e902df3e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
+#include <linux/highmem.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -196,41 +197,30 @@ int page_referenced(struct page *, int is_locked,
int try_to_unmap(struct page *, enum ttu_flags flags);
-/*
- * Used by uprobes to replace a userspace page safely
- */
-pte_t *__page_check_address(struct page *, struct mm_struct *,
- unsigned long, spinlock_t **, int);
-
-static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
- unsigned long address,
- spinlock_t **ptlp, int sync)
-{
- pte_t *ptep;
+/* Avoid racy checks */
+#define PVMW_SYNC (1 << 0)
+/* Look for migration entries rather than present PTEs */
+#define PVMW_MIGRATION (1 << 1)
- __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
- ptlp, sync));
- return ptep;
-}
+struct page_vma_mapped_walk {
+ struct page *page;
+ struct vm_area_struct *vma;
+ unsigned long address;
+ pmd_t *pmd;
+ pte_t *pte;
+ spinlock_t *ptl;
+ unsigned int flags;
+};
-/*
- * Used by idle page tracking to check if a page was referenced via page
- * tables.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
- unsigned long address, pmd_t **pmdp,
- pte_t **ptep, spinlock_t **ptlp);
-#else
-static inline bool page_check_address_transhuge(struct page *page,
- struct mm_struct *mm, unsigned long address,
- pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
+static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- *ptep = page_check_address(page, mm, address, ptlp, 0);
- *pmdp = NULL;
- return !!*ptep;
+ if (pvmw->pte)
+ pte_unmap(pvmw->pte);
+ if (pvmw->ptl)
+ spin_unlock(pvmw->ptl);
}
-#endif
+
+bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
/*
* Used by swapoff to help locate where page is expected in vma.
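The page_vma_mapped_walk() interface above replaces page_check_address() and page_check_address_transhuge(). A minimal caller sketch, not part of the patch; the helper name is hypothetical and the early-exit convention is inferred from page_vma_mapped_walk_done() in the hunk above:

	/* Returns true if @page is mapped at @address in @vma. */
	static bool page_mapped_in_vma_at(struct page *page,
					  struct vm_area_struct *vma,
					  unsigned long address)
	{
		struct page_vma_mapped_walk pvmw = {
			.page = page,
			.vma = vma,
			.address = address,
		};

		/* A successful walk leaves pvmw.pte (or pvmw.pmd for a huge
		 * mapping) pointing at the entry, with pvmw.ptl held. */
		if (page_vma_mapped_walk(&pvmw)) {
			/* Bail out early: unmap the pte and drop the lock. */
			page_vma_mapped_walk_done(&pvmw);
			return true;
		}
		return false;
	}
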
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index fdaac9d4d46d..a7d6bd2a918f 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -57,7 +57,14 @@ extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+#ifdef CONFIG_SHMEM
extern bool shmem_mapping(struct address_space *mapping);
+#else
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return false;
+}
+#endif /* CONFIG_SHMEM */
extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f431861f22f1..0468548acebf 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -61,10 +61,18 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
unsigned long from, unsigned long to,
unsigned long len);
-extern void madvise_userfault_dontneed(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start,
- unsigned long end);
+extern void userfaultfd_remove(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start,
+ unsigned long end);
+
+extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf);
+extern void userfaultfd_unmap_complete(struct mm_struct *mm,
+ struct list_head *uf);
+
+extern void userfaultfd_exit(struct mm_struct *mm);
#else /* CONFIG_USERFAULTFD */
@@ -112,12 +120,29 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
{
}
-static inline void madvise_userfault_dontneed(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start,
- unsigned long end)
+static inline void userfaultfd_remove(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start,
+ unsigned long end)
{
}
+
+static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf)
+{
+ return 0;
+}
+
+static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
+ struct list_head *uf)
+{
+}
+
+static inline void userfaultfd_exit(struct mm_struct *mm)
+{
+}
+
#endif /* CONFIG_USERFAULTFD */
#endif /* _LINUX_USERFAULTFD_K_H */
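The new unmap hooks are meant to bracket munmap: userfaultfd_unmap_prep() records the affected ranges on a caller-supplied list while mmap_sem is held, and userfaultfd_unmap_complete() delivers the UFFD_EVENT_UNMAP messages afterwards. A hedged sketch of the expected calling shape (the wrapper name is hypothetical; do_munmap()'s extra list argument matches the ipc/shm.c hunks below):

	static int munmap_and_notify(struct mm_struct *mm,
				     unsigned long start, size_t len)
	{
		LIST_HEAD(uf);		/* filled via userfaultfd_unmap_prep() */
		int ret;

		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, start, len, &uf);
		up_write(&mm->mmap_sem);

		/* Deliver UFFD_EVENT_UNMAP only after mmap_sem is dropped,
		 * since the monitor may need the lock to handle the event. */
		userfaultfd_unmap_complete(mm, &uf);
		return ret;
	}
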
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 5527d910ba3d..a3c0cbd7c888 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -46,7 +46,7 @@ enum writeback_sync_modes {
*/
enum wb_reason {
WB_REASON_BACKGROUND,
- WB_REASON_TRY_TO_FREE_PAGES,
+ WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2ccd9ccbf9ef..7bd8783a590f 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -31,7 +31,7 @@
#define WB_WORK_REASON \
EM( WB_REASON_BACKGROUND, "background") \
- EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_VMSCAN, "vmscan") \
EM( WB_REASON_SYNC, "sync") \
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
diff --git a/include/uapi/linux/mqueue.h b/include/uapi/linux/mqueue.h
index d0a2b8e89813..bbd5116ea739 100644
--- a/include/uapi/linux/mqueue.h
+++ b/include/uapi/linux/mqueue.h
@@ -18,6 +18,8 @@
#ifndef _LINUX_MQUEUE_H
#define _LINUX_MQUEUE_H
+#include <linux/types.h>
+
#define MQ_PRIO_MAX 32768
/* per-uid limit of kernel memory used by mqueue, in bytes */
#define MQ_BYTES_MAX 819200
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 9ac4b68c54d1..c055947c5c98 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -18,9 +18,11 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
+#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \
+ UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
- UFFD_FEATURE_EVENT_MADVDONTNEED | \
+ UFFD_FEATURE_EVENT_REMOVE | \
+ UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
UFFD_FEATURE_MISSING_SHMEM)
#define UFFD_API_IOCTLS \
@@ -92,7 +94,7 @@ struct uffd_msg {
struct {
__u64 start;
__u64 end;
- } madv_dn;
+ } remove;
struct {
/* unused reserved fields */
@@ -109,7 +111,9 @@ struct uffd_msg {
#define UFFD_EVENT_PAGEFAULT 0x12
#define UFFD_EVENT_FORK 0x13
#define UFFD_EVENT_REMAP 0x14
-#define UFFD_EVENT_MADVDONTNEED 0x15
+#define UFFD_EVENT_REMOVE 0x15
+#define UFFD_EVENT_UNMAP 0x16
+#define UFFD_EVENT_EXIT 0x17
/* flags for UFFD_EVENT_PAGEFAULT */
#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
@@ -155,9 +159,11 @@ struct uffdio_api {
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
#define UFFD_FEATURE_EVENT_REMAP (1<<2)
-#define UFFD_FEATURE_EVENT_MADVDONTNEED (1<<3)
+#define UFFD_FEATURE_EVENT_REMOVE (1<<3)
#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
#define UFFD_FEATURE_MISSING_SHMEM (1<<5)
+#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
+#define UFFD_FEATURE_EVENT_EXIT (1<<7)
__u64 features;
__u64 ioctls;
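Userspace opts in to the renamed and new events through the features field of UFFDIO_API and then distinguishes them via uffd_msg.event. A small monitor-side sketch (illustrative only, error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/userfaultfd.h>

	static int open_uffd(void)
	{
		struct uffdio_api api = {
			.api = UFFD_API,
			.features = UFFD_FEATURE_EVENT_REMOVE |
				    UFFD_FEATURE_EVENT_UNMAP |
				    UFFD_FEATURE_EVENT_EXIT,
		};
		int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

		if (fd < 0 || ioctl(fd, UFFDIO_API, &api) < 0)
			return -1;
		return fd;
	}

	static void read_one_event(int fd)
	{
		struct uffd_msg msg;

		if (read(fd, &msg, sizeof(msg)) != sizeof(msg))
			return;
		if (msg.event == UFFD_EVENT_REMOVE)	/* was MADVDONTNEED */
			printf("removed %llx-%llx\n",
			       (unsigned long long)msg.arg.remove.start,
			       (unsigned long long)msg.arg.remove.end);
	}
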
diff --git a/ipc/shm.c b/ipc/shm.c
index 81203e8ba013..d7805acb44fd 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -374,12 +374,12 @@ void exit_shm(struct task_struct *task)
up_write(&shm_ids(ns).rwsem);
}
-static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int shm_fault(struct vm_fault *vmf)
{
- struct file *file = vma->vm_file;
+ struct file *file = vmf->vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
- return sfd->vm_ops->fault(vma, vmf);
+ return sfd->vm_ops->fault(vmf);
}
#ifdef CONFIG_NUMA
@@ -1222,7 +1222,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
goto invalid;
}
- addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
+ addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
*raddr = addr;
err = 0;
if (IS_ERR_VALUE(addr))
@@ -1329,7 +1329,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
*/
file = vma->vm_file;
size = i_size_read(file_inode(vma->vm_file));
- do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
@@ -1356,7 +1356,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
if ((vma->vm_ops == &shm_vm_ops) &&
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
(vma->vm_file == file))
- do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
vma = next;
}
@@ -1365,7 +1365,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
* given
*/
if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
- do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
retval = 0;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 77a932b54a64..b2eb3542e829 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4925,9 +4925,9 @@ unlock:
rcu_read_unlock();
}
-static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int perf_mmap_fault(struct vm_fault *vmf)
{
- struct perf_event *event = vma->vm_file->private_data;
+ struct perf_event *event = vmf->vma->vm_file->private_data;
struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
@@ -4950,7 +4950,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
get_page(vmf->page);
- vmf->page->mapping = vma->vm_file->f_mapping;
+ vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
ret = 0;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d416f3baf392..18c6b23edd3c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -153,14 +153,19 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
struct mm_struct *mm = vma->vm_mm;
- spinlock_t *ptl;
- pte_t *ptep;
+ struct page_vma_mapped_walk pvmw = {
+ .page = old_page,
+ .vma = vma,
+ .address = addr,
+ };
int err;
/* For mmu_notifiers */
const unsigned long mmun_start = addr;
const unsigned long mmun_end = addr + PAGE_SIZE;
struct mem_cgroup *memcg;
+ VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
+
err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
false);
if (err)
@@ -171,11 +176,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
- ptep = page_check_address(old_page, mm, addr, &ptl, 0);
- if (!ptep) {
+ if (!page_vma_mapped_walk(&pvmw)) {
mem_cgroup_cancel_charge(new_page, memcg, false);
goto unlock;
}
+ VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
get_page(new_page);
page_add_new_anon_rmap(new_page, vma, addr, false);
@@ -187,14 +192,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
inc_mm_counter(mm, MM_ANONPAGES);
}
- flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush_notify(vma, addr, ptep);
- set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
+ flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
+ ptep_clear_flush_notify(vma, addr, pvmw.pte);
+ set_pte_at_notify(mm, addr, pvmw.pte,
+ mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, false);
if (!page_mapped(old_page))
try_to_free_swap(old_page);
- pte_unmap_unlock(ptep, ptl);
+ page_vma_mapped_walk_done(&pvmw);
if (vma->vm_flags & VM_LOCKED)
munlock_vma_page(old_page);
@@ -300,8 +306,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
- &vma, NULL);
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1,
+ FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
if (ret <= 0)
return ret;
diff --git a/kernel/exit.c b/kernel/exit.c
index 9960accbf2ab..90b09ca35c84 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -45,6 +45,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
+#include <linux/userfaultfd_k.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
@@ -547,6 +548,7 @@ static void exit_mm(void)
enter_lazy_tlb(mm, current);
task_unlock(current);
mm_update_next_owner(mm);
+ userfaultfd_exit(mm);
mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index ee1bc1bb8feb..0999679d6f26 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -195,7 +195,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute notes_attr = {
+static struct bin_attribute notes_attr __ro_after_init = {
.attr = {
.name = "notes",
.mode = S_IRUGO,
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 9ecedc28b928..06123234f118 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -246,9 +246,13 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
/* pages are dead and unused, undo the arch mapping */
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(resource_size(res), SECTION_SIZE);
+
+ lock_device_hotplug();
mem_hotplug_begin();
arch_remove_memory(align_start, align_size);
mem_hotplug_done();
+ unlock_device_hotplug();
+
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
pgmap_radix_release(res);
dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
@@ -360,9 +364,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (error)
goto err_pfn_remap;
+ lock_device_hotplug();
mem_hotplug_begin();
error = arch_add_memory(nid, align_start, align_size, true);
mem_hotplug_done();
+ unlock_device_hotplug();
if (error)
goto err_add_memory;
diff --git a/kernel/notifier.c b/kernel/notifier.c
index fd2c9acbcc19..6196af8a8223 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -95,7 +95,7 @@ static int notifier_call_chain(struct notifier_block **nl,
if (nr_calls)
(*nr_calls)++;
- if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+ if (ret & NOTIFY_STOP_MASK)
break;
nb = next_nb;
nr_to_call--;
diff --git a/kernel/relay.c b/kernel/relay.c
index 8f18d314a96a..8f8dc91db680 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -39,10 +39,10 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
/*
* fault() vm_op implementation for relay file mapping.
*/
-static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int relay_buf_fault(struct vm_fault *vmf)
{
struct page *page;
- struct rchan_buf *buf = vma->vm_private_data;
+ struct rchan_buf *buf = vmf->vma->vm_private_data;
pgoff_t pgoff = vmf->pgoff;
if (!buf)
diff --git a/lib/Kconfig b/lib/Kconfig
index 87ecd41031bd..fe7e8e175db8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -103,8 +103,7 @@ config CRC32
functions require M here.
config CRC32_SELFTEST
- bool "CRC32 perform self test on init"
- default n
+ tristate "CRC32 perform self test on init"
depends on CRC32
help
This option enables the CRC32 library functions to perform a
@@ -432,8 +431,7 @@ config GLOB
depends on this.
config GLOB_SELFTEST
- bool "glob self-test on init"
- default n
+ tristate "glob self-test on init"
depends on GLOB
help
This option enables a simple self-test of the glob_match
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 66fb4389f05c..55735c9bdb75 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1739,6 +1739,14 @@ config TEST_LIST_SORT
If unsure, say N.
+config TEST_SORT
+ bool "Array-based sort test"
+ depends on DEBUG_KERNEL
+ help
+ This option enables the self-test function of 'sort()' at boot.
+
+ If unsure, say N.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -1790,9 +1798,10 @@ config PERCPU_TEST
If unsure, say N.
config ATOMIC64_SELFTEST
- bool "Perform an atomic64_t self-test at boot"
+ tristate "Perform an atomic64_t self-test"
help
- Enable this option to test the atomic64_t functions at boot.
+ Enable this option to test the atomic64_t functions at boot or
+ at module load time.
If unsure, say N.
diff --git a/lib/Makefile b/lib/Makefile
index f1a0364af377..445a39c21f46 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
@@ -92,6 +93,7 @@ obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
@@ -161,6 +163,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
+obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_SIGNATURE) += digsig.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 46042901130f..fd70c0e0e673 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
+#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h> /* for boot_cpu_has below */
@@ -241,7 +242,7 @@ static __init void test_atomic64(void)
BUG_ON(v.counter != r);
}
-static __init int test_atomics(void)
+static __init int test_atomics_init(void)
{
test_atomic();
test_atomic64();
@@ -264,4 +265,9 @@ static __init int test_atomics(void)
return 0;
}
-core_initcall(test_atomics);
+static __exit void test_atomics_exit(void) {}
+
+module_init(test_atomics_init);
+module_exit(test_atomics_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crc32.c b/lib/crc32.c
index 7fbd1a112b9d..6ddc92bc1460 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -340,827 +340,3 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#endif
EXPORT_SYMBOL(crc32_be);
-
-#ifdef CONFIG_CRC32_SELFTEST
-
-/* 4096 random bytes */
-static u8 const __aligned(8) test_buf[] __initconst =
-{
- 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
- 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
- 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
- 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
- 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
- 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
- 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
- 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
- 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
- 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
- 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
- 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
- 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
- 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
- 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
- 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
- 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
- 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
- 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
- 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
- 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
- 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
- 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
- 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
- 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
- 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
- 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
- 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
- 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
- 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
- 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
- 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
- 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
- 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
- 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
- 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
- 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
- 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
- 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
- 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
- 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
- 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
- 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
- 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
- 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
- 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
- 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
- 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
- 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
- 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
- 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
- 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
- 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
- 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
- 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
- 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
- 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
- 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
- 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
- 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
- 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
- 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
- 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
- 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
- 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
- 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
- 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
- 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
- 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
- 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
- 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
- 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
- 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
- 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
- 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
- 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
- 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
- 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
- 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
- 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
- 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
- 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
- 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
- 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
- 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
- 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
- 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
- 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
- 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
- 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
- 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
- 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
- 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
- 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
- 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
- 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
- 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
- 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
- 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
- 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
- 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
- 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
- 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
- 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
- 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
- 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
- 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
- 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
- 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
- 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
- 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
- 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
- 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
- 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
- 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
- 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
- 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
- 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
- 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
- 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
- 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
- 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
- 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
- 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
- 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
- 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
- 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
- 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
- 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
- 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
- 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
- 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
- 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
- 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
- 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
- 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
- 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
- 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
- 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
- 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
- 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
- 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
- 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
- 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
- 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
- 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
- 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
- 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
- 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
- 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
- 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
- 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
- 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
- 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
- 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
- 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
- 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
- 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
- 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
- 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
- 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
- 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
- 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
- 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
- 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
- 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
- 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
- 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
- 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
- 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
- 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
- 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
- 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
- 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
- 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
- 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
- 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
- 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
- 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
- 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
- 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
- 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
- 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
- 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
- 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
- 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
- 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
- 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
- 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
- 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
- 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
- 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
- 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
- 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
- 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
- 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
- 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
- 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
- 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
- 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
- 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
- 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
- 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
- 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
- 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
- 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
- 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
- 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
- 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
- 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
- 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
- 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
- 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
- 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
- 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
- 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
- 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
- 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
- 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
- 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
- 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
- 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
- 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
- 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
- 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
- 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
- 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
- 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
- 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
- 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
- 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
- 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
- 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
- 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
- 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
- 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
- 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
- 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
- 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
- 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
- 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
- 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
- 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
- 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
- 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
- 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
- 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
- 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
- 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
- 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
- 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
- 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
- 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
- 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
- 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
- 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
- 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
- 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
- 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
- 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
- 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
- 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
- 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
- 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
- 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
- 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
- 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
- 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
- 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
- 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
- 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
- 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
- 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
- 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
- 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
- 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
- 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
- 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
- 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
- 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
- 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
- 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
- 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
- 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
- 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
- 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
- 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
- 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
- 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
- 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
- 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
- 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
- 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
- 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
- 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
- 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
- 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
- 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
- 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
- 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
- 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
- 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
- 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
- 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
- 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
- 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
- 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
- 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
- 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
- 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
- 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
- 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
- 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
- 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
- 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
- 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
- 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
- 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
- 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
- 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
- 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
- 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
- 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
- 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
- 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
- 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
- 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
- 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
- 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
- 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
- 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
- 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
- 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
- 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
- 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
- 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
- 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
- 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
- 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
- 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
- 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
- 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
- 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
- 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
- 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
- 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
- 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
- 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
- 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
- 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
- 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
- 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
- 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
- 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
- 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
- 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
- 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
- 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
- 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
- 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
- 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
- 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
- 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
- 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
- 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
- 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
- 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
- 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
- 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
- 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
- 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
- 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
- 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
- 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
- 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
- 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
- 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
- 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
- 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
- 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
- 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
- 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
- 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
- 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
- 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
- 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
- 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
- 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
- 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
- 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
- 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
- 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
- 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
- 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
- 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
- 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
- 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
- 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
- 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
- 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
- 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
- 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
- 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
- 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
- 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
- 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
- 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
- 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
- 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
- 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
- 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
- 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
- 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
- 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
- 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
- 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
- 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
- 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
- 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
- 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
- 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
- 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
- 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
- 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
- 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
- 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
- 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
- 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
- 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
- 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
- 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
- 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
- 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
- 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
- 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
- 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
- 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
- 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
- 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
- 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
- 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
- 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
- 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
- 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
- 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
- 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
- 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
- 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
- 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
- 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
- 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
- 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
- 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
- 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
- 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
- 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
- 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
- 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
- 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
- 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
- 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
- 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
- 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
- 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
- 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
- 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
- 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
- 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
- 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
- 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
- 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
- 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
- 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
- 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
- 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
- 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
- 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
- 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
- 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
- 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
- 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
- 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
- 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
- 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
- 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
- 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
- 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
- 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
- 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
- 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
- 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
- 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
- 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
- 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
- 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
- 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
- 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
- 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
- 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
- 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
- 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
- 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
- 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
- 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
- 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
- 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
- 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
- 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
- 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
- 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
- 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
- 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
-};
-
-/* 100 test cases */
-static struct crc_test {
- u32 crc; /* random starting crc */
- u32 start; /* random 6 bit offset in buf */
- u32 length; /* random 11 bit length of test */
- u32 crc_le; /* expected crc32_le result */
- u32 crc_be; /* expected crc32_be result */
- u32 crc32c_le; /* expected crc32c_le result */
-} const test[] __initconst =
-{
- {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
- {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
- {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
- {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
- {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
- {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
- {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
- {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
- {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
- {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
- {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
- {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
- {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
- {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
- {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
- {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
- {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
- {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
- {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
- {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
- {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
- {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
- {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
- {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
- {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
- {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
- {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
- {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
- {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
- {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
- {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
- {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
- {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
- {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
- {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
- {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
- {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
- {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
- {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
- {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
- {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
- {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
- {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
- {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
- {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
- {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
- {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
- {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
- {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
- {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
- {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
- {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
- {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
- {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
- {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
- {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
- {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
- {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
- {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
- {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
- {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
- {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
- {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
- {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
- {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
- {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
- {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
- {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
- {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
- {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
- {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
- {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
- {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
- {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
- {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
- {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
- {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
- {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
- {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
- {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
- {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
- {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
- {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
- {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
- {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
- {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
- {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
- {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
- {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
- {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
- {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
- {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
- {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
- {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
- {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
- {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
- {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
- {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
- {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
- {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
-};
-
-#include <linux/time.h>
-
-static int __init crc32c_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
-
- crc ^= __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
- local_irq_disable();
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
- local_irq_enable();
-
- pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
-
- if (errors)
- pr_warn("crc32c: %d self tests failed\n", errors);
- else {
- pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32c_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = __crc32c_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = __crc32c_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc32c_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32c_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
-
- crc ^= crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
-
- crc ^= crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
- local_irq_disable();
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
-
- if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
- local_irq_enable();
-
- pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
- CRC_LE_BITS, CRC_BE_BITS);
-
- if (errors)
- pr_warn("crc32: %d self tests failed\n", errors);
- else {
- pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = crc32_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = crc32_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32test_init(void)
-{
- crc32_test();
- crc32c_test();
-
- crc32_combine_test();
- crc32c_combine_test();
-
- return 0;
-}
-
-static void __exit crc32_exit(void)
-{
-}
-
-module_init(crc32test_init);
-module_exit(crc32_exit);
-#endif /* CONFIG_CRC32_SELFTEST */
diff --git a/lib/crc32test.c b/lib/crc32test.c
new file mode 100644
index 000000000000..97d6a57cefcc
--- /dev/null
+++ b/lib/crc32test.c
@@ -0,0 +1,856 @@
+/*
+ * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
+ * cleaned up code to current version of sparse and added the slicing-by-8
+ * algorithm to the closely similar existing slicing-by-4 algorithm.
+ *
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ * Code was from the public domain, copyright abandoned. Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
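As a hedged aside on the seed convention spelled out in the header comment above: since the generic crc32_le() takes the seed as an argument and does no final xor, the conventional CRC-32 (the ~0 seed, ~0 final xor variant mentioned for fs/partitions/efi.c) can be recovered from it as sketched below. The helper name is purely illustrative and is not part of this file.

	/* illustrative only: conventional CRC-32 = seed ~0, final xor with ~0 */
	static u32 example_conventional_crc32(const u8 *buf, size_t len)
	{
		return crc32_le(~0U, buf, len) ^ ~0U;
	}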
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include "crc32defs.h"
+
+/* 4096 random bytes */
+static u8 const __aligned(8) test_buf[] __initconst =
+{
+ 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
+ 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
+ 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
+ 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
+ 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
+ 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
+ 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
+ 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
+ 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
+ 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
+ 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
+ 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
+ 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
+ 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
+ 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
+ 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
+ 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
+ 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
+ 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
+ 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
+ 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
+ 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
+ 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
+ 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
+ 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
+ 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
+ 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
+ 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
+ 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
+ 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
+ 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
+ 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
+ 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
+ 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
+ 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
+ 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
+ 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
+ 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
+ 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
+ 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
+ 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
+ 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
+ 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
+ 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
+ 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
+ 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
+ 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
+ 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
+ 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
+ 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
+ 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
+ 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
+ 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
+ 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
+ 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
+ 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
+ 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
+ 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
+ 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
+ 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
+ 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
+ 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
+ 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
+ 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
+ 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
+ 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
+ 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
+ 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
+ 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
+ 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
+ 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
+ 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
+ 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
+ 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
+ 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
+ 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
+ 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
+ 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
+ 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
+ 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
+ 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
+ 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
+ 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
+ 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
+ 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
+ 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
+ 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
+ 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
+ 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
+ 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
+ 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
+ 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
+ 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
+ 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
+ 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
+ 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
+ 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
+ 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
+ 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
+ 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
+ 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
+ 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
+ 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
+ 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
+ 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
+ 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
+ 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
+ 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
+ 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
+ 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
+ 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
+ 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
+ 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
+ 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
+ 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
+ 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
+ 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
+ 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
+ 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
+ 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
+ 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
+ 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
+ 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
+ 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
+ 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
+ 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
+ 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
+ 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
+ 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
+ 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
+ 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
+ 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
+ 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
+ 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
+ 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
+ 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
+ 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
+ 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
+ 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
+ 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
+ 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
+ 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
+ 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
+ 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
+ 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
+ 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
+ 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
+ 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
+ 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
+ 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
+ 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
+ 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
+ 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
+ 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
+ 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
+ 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
+ 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
+ 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
+ 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
+ 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
+ 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
+ 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
+ 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
+ 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
+ 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
+ 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
+ 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
+ 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
+ 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
+ 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
+ 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
+ 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
+ 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
+ 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
+ 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
+ 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
+ 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
+ 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
+ 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
+ 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
+ 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
+ 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
+ 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
+ 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
+ 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
+ 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
+ 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
+ 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
+ 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
+ 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
+ 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
+ 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
+ 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
+ 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
+ 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
+ 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
+ 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
+ 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
+ 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
+ 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
+ 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
+ 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
+ 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
+ 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
+ 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
+ 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
+ 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
+ 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
+ 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
+ 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
+ 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
+ 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
+ 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
+ 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
+ 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
+ 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
+ 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
+ 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
+ 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
+ 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
+ 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
+ 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
+ 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
+ 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
+ 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
+ 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
+ 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
+ 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
+ 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
+ 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
+ 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
+ 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
+ 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
+ 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
+ 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
+ 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
+ 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
+ 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
+ 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
+ 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
+ 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
+ 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
+ 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
+ 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
+ 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
+ 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
+ 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
+ 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
+ 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
+ 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
+ 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
+ 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
+ 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
+ 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
+ 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
+ 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
+ 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
+ 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
+ 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
+ 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
+ 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
+ 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
+ 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
+ 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
+ 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
+ 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
+ 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
+ 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
+ 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
+ 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
+ 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
+ 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
+ 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
+ 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
+ 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
+ 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
+ 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
+ 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
+ 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
+ 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
+ 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
+ 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
+ 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
+ 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
+ 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
+ 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
+ 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
+ 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
+ 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
+ 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
+ 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
+ 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
+ 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
+ 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
+ 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
+ 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
+ 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
+ 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
+ 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
+ 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
+ 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
+ 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
+ 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
+ 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
+ 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
+ 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
+ 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
+ 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
+ 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
+ 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
+ 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
+ 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
+ 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
+ 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
+ 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
+ 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
+ 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
+ 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
+ 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
+ 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
+ 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
+ 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
+ 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
+ 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
+ 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
+ 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
+ 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
+ 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
+ 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
+ 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
+ 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
+ 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
+ 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
+ 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
+ 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
+ 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
+ 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
+ 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
+ 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
+ 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
+ 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
+ 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
+ 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
+ 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
+ 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
+ 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
+ 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
+ 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
+ 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
+ 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
+ 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
+ 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
+ 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
+ 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
+ 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
+ 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
+ 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
+ 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
+ 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
+ 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
+ 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
+ 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
+ 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
+ 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
+ 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
+ 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
+ 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
+ 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
+ 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
+ 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
+ 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
+ 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
+ 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
+ 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
+ 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
+ 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
+ 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
+ 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
+ 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
+ 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
+ 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
+ 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
+ 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
+ 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
+ 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
+ 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
+ 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
+ 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
+ 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
+ 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
+ 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
+ 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
+ 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
+ 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
+ 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
+ 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
+ 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
+ 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
+ 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
+ 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
+ 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
+ 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
+ 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
+ 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
+ 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
+ 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
+ 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
+ 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
+ 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
+ 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
+ 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
+ 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
+ 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
+ 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
+ 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
+ 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
+ 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
+ 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
+ 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
+ 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
+ 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
+ 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
+ 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
+ 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
+ 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
+ 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
+ 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
+ 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
+ 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
+ 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
+ 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
+ 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
+ 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
+ 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
+ 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
+ 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
+ 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
+ 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
+ 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
+ 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
+ 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
+ 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
+ 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
+ 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
+ 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
+ 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
+ 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
+ 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
+ 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
+ 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
+ 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
+ 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
+ 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
+ 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
+ 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
+ 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
+ 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
+ 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
+ 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
+ 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
+ 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
+ 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
+ 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
+ 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
+ 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
+ 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
+ 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
+ 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
+ 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
+ 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
+ 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
+ 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
+ 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
+ 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
+ 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
+ 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
+ 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
+ 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
+ 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
+ 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
+ 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
+ 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
+ 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
+ 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
+ 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
+ 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
+ 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
+ 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
+ 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
+ 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
+ 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
+ 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
+ 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
+ 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
+ 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
+ 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
+ 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
+ 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
+ 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
+ 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
+ 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
+ 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
+ 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
+ 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
+ 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
+ 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
+ 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
+ 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
+ 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
+ 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
+ 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
+ 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
+};
+
+/* 100 test cases */
+static struct crc_test {
+ u32 crc; /* random starting crc */
+ u32 start; /* random 6 bit offset in buf */
+ u32 length; /* random 11 bit length of test */
+ u32 crc_le; /* expected crc32_le result */
+ u32 crc_be; /* expected crc32_be result */
+ u32 crc32c_le; /* expected crc32c_le result */
+} const test[] __initconst =
+{
+ {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
+ {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
+ {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
+ {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
+ {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
+ {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
+ {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
+ {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
+ {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
+ {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
+ {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
+ {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
+ {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
+ {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
+ {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
+ {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
+ {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
+ {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
+ {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
+ {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
+ {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
+ {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
+ {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
+ {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
+ {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
+ {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
+ {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
+ {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
+ {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
+ {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
+ {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
+ {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
+ {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
+ {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
+ {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
+ {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
+ {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
+ {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
+ {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
+ {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
+ {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
+ {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
+ {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
+ {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
+ {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
+ {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
+ {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
+ {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
+ {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
+ {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
+ {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
+ {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
+ {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
+ {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
+ {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
+ {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
+ {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
+ {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
+ {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
+ {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
+ {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
+ {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
+ {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
+ {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
+ {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
+ {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
+ {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
+ {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
+ {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
+ {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
+ {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
+ {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
+ {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
+ {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
+ {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
+ {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
+ {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
+ {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
+ {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
+ {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
+ {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
+ {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
+ {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
+ {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
+ {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
+ {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
+ {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
+ {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
+ {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
+ {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
+ {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
+ {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
+ {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
+ {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
+ {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
+ {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
+ {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
+ {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
+ {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
+ {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
+};
+
+#include <linux/time.h>
+
+static int __init crc32c_test(void)
+{
+ int i;
+ int errors = 0;
+ int bytes = 0;
+ u64 nsec;
+ unsigned long flags;
+
+ /* keep static to prevent cache warming code from
+ * getting eliminated by the compiler */
+ static u32 crc;
+
+ /* pre-warm the cache */
+ for (i = 0; i < 100; i++) {
+ bytes += 2*test[i].length;
+
+ crc ^= __crc32c_le(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+ }
+
+ /* reduce OS noise */
+ local_irq_save(flags);
+ local_irq_disable();
+
+ nsec = ktime_get_ns();
+ for (i = 0; i < 100; i++) {
+ if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+ }
+ nsec = ktime_get_ns() - nsec;
+
+ local_irq_restore(flags);
+ local_irq_enable();
+
+ pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
+
+ if (errors)
+ pr_warn("crc32c: %d self tests failed\n", errors);
+ else {
+ pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
+ bytes, nsec);
+ }
+
+ return 0;
+}
+
+static int __init crc32c_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = __crc32c_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = __crc32c_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc32c_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32c_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
+
+static int __init crc32_test(void)
+{
+ int i;
+ int errors = 0;
+ int bytes = 0;
+ u64 nsec;
+ unsigned long flags;
+
+ /* keep static to prevent cache warming code from
+ * getting eliminated by the compiler */
+ static u32 crc;
+
+ /* pre-warm the cache */
+ for (i = 0; i < 100; i++) {
+ bytes += 2*test[i].length;
+
+ crc ^= crc32_le(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+
+ crc ^= crc32_be(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+ }
+
+ /* reduce OS noise */
+ local_irq_save(flags);
+ local_irq_disable();
+
+ nsec = ktime_get_ns();
+ for (i = 0; i < 100; i++) {
+ if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+
+ if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+ }
+ nsec = ktime_get_ns() - nsec;
+
+ local_irq_restore(flags);
+ local_irq_enable();
+
+ pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
+ CRC_LE_BITS, CRC_BE_BITS);
+
+ if (errors)
+ pr_warn("crc32: %d self tests failed\n", errors);
+ else {
+ pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
+ bytes, nsec);
+ }
+
+ return 0;
+}
+
+static int __init crc32_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = crc32_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = crc32_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
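Both combine tests above exercise the same identity: split a buffer at an arbitrary point, CRC the two halves separately (the second half with a zero seed), and the combined result must equal the CRC of the whole run. A minimal sketch of that property, using only the calls already present in the tests; the helper name is made up for illustration:

	static bool combine_identity_holds(u32 seed, const u8 *buf,
					   u32 len1, u32 len2)
	{
		u32 full = crc32_le(seed, buf, len1 + len2);
		u32 crc1 = crc32_le(seed, buf, len1);
		u32 crc2 = crc32_le(0, buf + len1, len2);

		/* this is what the j-loop checks for every split point */
		return full == crc32_le_combine(crc1, crc2, len2);
	}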
+
+static int __init crc32test_init(void)
+{
+ crc32_test();
+ crc32c_test();
+
+ crc32_combine_test();
+ crc32c_combine_test();
+
+ return 0;
+}
+
+static void __exit crc32_exit(void)
+{
+}
+
+module_init(crc32test_init);
+module_exit(crc32_exit);
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("CRC32 selftest");
+MODULE_LICENSE("GPL");
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 036fc882cd72..1b0baf3008ea 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -72,7 +72,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
- inp = large_malloc(lz4_compressbound(uncomp_chunksize));
+ inp = large_malloc(LZ4_compressBound(uncomp_chunksize));
if (!inp) {
error("Could not allocate input buffer");
goto exit_1;
@@ -136,7 +136,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
inp += 4;
size -= 4;
} else {
- if (chunksize > lz4_compressbound(uncomp_chunksize)) {
+ if (chunksize > LZ4_compressBound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
@@ -152,11 +152,14 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
out_len -= dest_len;
} else
dest_len = out_len;
- ret = lz4_decompress(inp, &chunksize, outp, dest_len);
+
+ ret = LZ4_decompress_fast(inp, outp, dest_len);
+ chunksize = ret;
#else
dest_len = uncomp_chunksize;
- ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp,
- &dest_len);
+
+ ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len);
+ dest_len = ret;
#endif
if (ret < 0) {
error("Decoding failed");
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 18072ea9c20e..6ed74f78380c 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -33,7 +33,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
{
unsigned long tmp;
- if (!nbits || start >= nbits)
+ if (unlikely(start >= nbits))
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
@@ -151,7 +151,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
{
unsigned long tmp;
- if (!nbits || start >= nbits)
+ if (unlikely(start >= nbits))
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
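A brief editorial note on the find_bit change above, not part of the patch: dropping the !nbits test is safe because it is subsumed by the remaining comparison.

	/*
	 * with nbits == 0, start >= nbits already holds for every start,
	 * so an empty bitmap still takes the early return and yields nbits
	 * ("no bit found"), exactly as before.
	 */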
diff --git a/lib/glob.c b/lib/glob.c
index 500fc80d23e1..0ba3ea86b546 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -121,167 +121,3 @@ backtrack:
}
}
EXPORT_SYMBOL(glob_match);
-
-
-#ifdef CONFIG_GLOB_SELFTEST
-
-#include <linux/printk.h>
-#include <linux/moduleparam.h>
-
-/* Boot with "glob.verbose=1" to show successful tests, too */
-static bool verbose = false;
-module_param(verbose, bool, 0);
-
-struct glob_test {
- char const *pat, *str;
- bool expected;
-};
-
-static bool __pure __init test(char const *pat, char const *str, bool expected)
-{
- bool match = glob_match(pat, str);
- bool success = match == expected;
-
- /* Can't get string literals into a particular section, so... */
- static char const msg_error[] __initconst =
- KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
- static char const msg_ok[] __initconst =
- KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
- static char const mismatch[] __initconst = "mismatch";
- char const *message;
-
- if (!success)
- message = msg_error;
- else if (verbose)
- message = msg_ok;
- else
- return success;
-
- printk(message, pat, str, mismatch + 3*match);
- return success;
-}
-
-/*
- * The tests are all jammed together in one array to make it simpler
- * to place that array in the .init.rodata section. The obvious
- * "array of structures containing char *" has no way to force the
- * pointed-to strings to be in a particular section.
- *
- * Anyway, a test consists of:
- * 1. Expected glob_match result: '1' or '0'.
- * 2. Pattern to match: null-terminated string
- * 3. String to match against: null-terminated string
- *
- * The list of tests is terminated with a final '\0' instead of
- * a glob_match result character.
- */
-static char const glob_tests[] __initconst =
- /* Some basic tests */
- "1" "a\0" "a\0"
- "0" "a\0" "b\0"
- "0" "a\0" "aa\0"
- "0" "a\0" "\0"
- "1" "\0" "\0"
- "0" "\0" "a\0"
- /* Simple character class tests */
- "1" "[a]\0" "a\0"
- "0" "[a]\0" "b\0"
- "0" "[!a]\0" "a\0"
- "1" "[!a]\0" "b\0"
- "1" "[ab]\0" "a\0"
- "1" "[ab]\0" "b\0"
- "0" "[ab]\0" "c\0"
- "1" "[!ab]\0" "c\0"
- "1" "[a-c]\0" "b\0"
- "0" "[a-c]\0" "d\0"
- /* Corner cases in character class parsing */
- "1" "[a-c-e-g]\0" "-\0"
- "0" "[a-c-e-g]\0" "d\0"
- "1" "[a-c-e-g]\0" "f\0"
- "1" "[]a-ceg-ik[]\0" "a\0"
- "1" "[]a-ceg-ik[]\0" "]\0"
- "1" "[]a-ceg-ik[]\0" "[\0"
- "1" "[]a-ceg-ik[]\0" "h\0"
- "0" "[]a-ceg-ik[]\0" "f\0"
- "0" "[!]a-ceg-ik[]\0" "h\0"
- "0" "[!]a-ceg-ik[]\0" "]\0"
- "1" "[!]a-ceg-ik[]\0" "f\0"
- /* Simple wild cards */
- "1" "?\0" "a\0"
- "0" "?\0" "aa\0"
- "0" "??\0" "a\0"
- "1" "?x?\0" "axb\0"
- "0" "?x?\0" "abx\0"
- "0" "?x?\0" "xab\0"
- /* Asterisk wild cards (backtracking) */
- "0" "*??\0" "a\0"
- "1" "*??\0" "ab\0"
- "1" "*??\0" "abc\0"
- "1" "*??\0" "abcd\0"
- "0" "??*\0" "a\0"
- "1" "??*\0" "ab\0"
- "1" "??*\0" "abc\0"
- "1" "??*\0" "abcd\0"
- "0" "?*?\0" "a\0"
- "1" "?*?\0" "ab\0"
- "1" "?*?\0" "abc\0"
- "1" "?*?\0" "abcd\0"
- "1" "*b\0" "b\0"
- "1" "*b\0" "ab\0"
- "0" "*b\0" "ba\0"
- "1" "*b\0" "bb\0"
- "1" "*b\0" "abb\0"
- "1" "*b\0" "bab\0"
- "1" "*bc\0" "abbc\0"
- "1" "*bc\0" "bc\0"
- "1" "*bc\0" "bbc\0"
- "1" "*bc\0" "bcbc\0"
- /* Multiple asterisks (complex backtracking) */
- "1" "*ac*\0" "abacadaeafag\0"
- "1" "*ac*ae*ag*\0" "abacadaeafag\0"
- "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
- "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
- "1" "*abcd*\0" "abcabcabcabcdefg\0"
- "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
- "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
- "0" "*abcd*\0" "abcabcabcabcefg\0"
- "0" "*ab*cd*\0" "abcabcabcabcefg\0";
-
-static int __init glob_init(void)
-{
- unsigned successes = 0;
- unsigned n = 0;
- char const *p = glob_tests;
- static char const message[] __initconst =
- KERN_INFO "glob: %u self-tests passed, %u failed\n";
-
- /*
- * Tests are jammed together in a string. The first byte is '1'
- * or '0' to indicate the expected outcome, or '\0' to indicate the
- * end of the tests. Then come two null-terminated strings: the
- * pattern and the string to match it against.
- */
- while (*p) {
- bool expected = *p++ & 1;
- char const *pat = p;
-
- p += strlen(p) + 1;
- successes += test(pat, p, expected);
- p += strlen(p) + 1;
- n++;
- }
-
- n -= successes;
- printk(message, successes, n);
-
- /* What's the errno for "kernel bug detected"? Guess... */
- return n ? -ECANCELED : 0;
-}
-
-/* We need a dummy exit function to allow unload */
-static void __exit glob_fini(void) { }
-
-module_init(glob_init);
-module_exit(glob_fini);
-
-#endif /* CONFIG_GLOB_SELFTEST */
diff --git a/lib/globtest.c b/lib/globtest.c
new file mode 100644
index 000000000000..d8e97d43b905
--- /dev/null
+++ b/lib/globtest.c
@@ -0,0 +1,167 @@
+/*
+ * Extracted from glob.c
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/glob.h>
+#include <linux/printk.h>
+
+/* Boot with "glob.verbose=1" to show successful tests, too */
+static bool verbose = false;
+module_param(verbose, bool, 0);
+
+struct glob_test {
+ char const *pat, *str;
+ bool expected;
+};
+
+static bool __pure __init test(char const *pat, char const *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* Can't get string literals into a particular section, so... */
+ static char const msg_error[] __initconst =
+ KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
+ static char const msg_ok[] __initconst =
+ KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
+ static char const mismatch[] __initconst = "mismatch";
+ char const *message;
+
+ if (!success)
+ message = msg_error;
+ else if (verbose)
+ message = msg_ok;
+ else
+ return success;
+
+ printk(message, pat, str, mismatch + 3*match);
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to place that array in the .init.rodata section. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static char const glob_tests[] __initconst =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
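To make the packed test format concrete, the first entry of the array above decodes as follows; the local variable names are illustrative only:

	/* "1" "a\0" "a\0"  =>  glob_match("a", "a") is expected to succeed */
	bool expected = '1' & 1;		/* leading byte: 1 = match expected */
	bool result   = glob_match("a", "a");	/* pattern, then string */
	/* the test() helper above then reports whether result == expected */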
+
+static int __init glob_init(void)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const *p = glob_tests;
+ static char const message[] __initconst =
+ KERN_INFO "glob: %u self-tests passed, %u failed\n";
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const *pat = p;
+
+ p += strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ printk(message, successes, n);
+
+ /* What's the errno for "kernel bug detected"? Guess... */
+ return n ? -ECANCELED : 0;
+}
+
+/* We need a dummy exit function to allow unload */
+static void __exit glob_fini(void) { }
+
+module_init(glob_init);
+module_exit(glob_fini);
+
+MODULE_DESCRIPTION("glob(7) matching tests");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 7f7bfa55eb6d..a34db8d27667 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,15 +20,16 @@
bool __list_add_valid(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- CHECK_DATA_CORRUPTION(next->prev != prev,
- "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
- prev, next->prev, next);
- CHECK_DATA_CORRUPTION(prev->next != next,
- "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
- next, prev->next, prev);
- CHECK_DATA_CORRUPTION(new == prev || new == next,
- "list_add double add: new=%p, prev=%p, next=%p.\n",
- new, prev, next);
+ if (CHECK_DATA_CORRUPTION(next->prev != prev,
+ "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+ prev, next->prev, next) ||
+ CHECK_DATA_CORRUPTION(prev->next != next,
+ "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+ next, prev->next, prev) ||
+ CHECK_DATA_CORRUPTION(new == prev || new == next,
+ "list_add double add: new=%p, prev=%p, next=%p.\n",
+ new, prev, next))
+ return false;
return true;
}
@@ -41,18 +42,20 @@ bool __list_del_entry_valid(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- CHECK_DATA_CORRUPTION(next == LIST_POISON1,
- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
- entry, LIST_POISON1);
- CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
- entry, LIST_POISON2);
- CHECK_DATA_CORRUPTION(prev->next != entry,
- "list_del corruption. prev->next should be %p, but was %p\n",
- entry, prev->next);
- CHECK_DATA_CORRUPTION(next->prev != entry,
- "list_del corruption. next->prev should be %p, but was %p\n",
- entry, next->prev);
+ if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+ entry, LIST_POISON1) ||
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+ "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+ entry, LIST_POISON2) ||
+ CHECK_DATA_CORRUPTION(prev->next != entry,
+ "list_del corruption. prev->next should be %p, but was %p\n",
+ entry, prev->next) ||
+ CHECK_DATA_CORRUPTION(next->prev != entry,
+ "list_del corruption. next->prev should be %p, but was %p\n",
+ entry, next->prev))
+ return false;
+
return true;
}
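With this change the two validators report corruption back to their callers rather than only warning, so a caller can refuse to touch the list. A hedged sketch of the intended caller pattern (the real call sites live in include/linux/list.h and are not part of this patch):

	static inline void __list_add(struct list_head *new,
				      struct list_head *prev,
				      struct list_head *next)
	{
		if (!__list_add_valid(new, prev, next))
			return;	/* refuse to link into a corrupted list */

		next->prev = new;
		new->next = next;
		new->prev = prev;
		prev->next = new;
	}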
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
index 8085d04e9309..f7b113271d13 100644
--- a/lib/lz4/Makefile
+++ b/lib/lz4/Makefile
@@ -1,3 +1,5 @@
+ccflags-y += -O3
+
obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index 28321d8f75ef..cc7b6d4cc7c7 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -1,19 +1,16 @@
/*
* LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,419 +22,919 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * Changed for kernel use by:
- * Chanho Min <chanho.min@lge.com>
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lz4.h>
#include <asm/unaligned.h>
-#include "lz4defs.h"
-/*
- * LZ4_compressCtx :
- * -----------------
- * Compress 'isize' bytes from 'source' into an output buffer 'dest' of
- * maximum size 'maxOutputSize'. * If it cannot achieve it, compression
- * will stop, and result of the function will be zero.
- * return : the number of bytes written in buffer 'dest', or 0 if the
- * compression fails
- */
-static inline int lz4_compressctx(void *ctx,
- const char *source,
- char *dest,
- int isize,
- int maxoutputsize)
+static const int LZ4_minLength = (MFLIMIT + 1);
+static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
+
+/*-******************************
+ * Compression functions
+ ********************************/
+static FORCE_INLINE U32 LZ4_hash4(
+ U32 sequence,
+ tableType_t const tableType)
{
- HTYPE *hashtable = (HTYPE *)ctx;
- const u8 *ip = (u8 *)source;
-#if LZ4_ARCH64
- const BYTE * const base = ip;
+ if (tableType == byU16)
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+ else
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - LZ4_HASHLOG));
+}
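LZ4_hash4() above is a multiplicative (Knuth-style) hash: multiply the 4-byte sequence by an odd 32-bit constant and keep only the top bits. Assuming MINMATCH == 4 as defined in lz4defs.h (an assumption, since that header is not shown in this hunk), the byU32 case reduces to:

	/* (MINMATCH * 8) - LZ4_HASHLOG == 32 - LZ4_HASHLOG when MINMATCH == 4 */
	hash = (sequence * 2654435761U) >> (32 - LZ4_HASHLOG);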
+
+static FORCE_INLINE U32 LZ4_hash5(
+ U64 sequence,
+ tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16)
+ ? LZ4_HASHLOG + 1
+ : LZ4_HASHLOG;
+
+#if LZ4_LITTLE_ENDIAN
+ static const U64 prime5bytes = 889523592379ULL;
+
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
#else
- const int base = 0;
+ static const U64 prime8bytes = 11400714785074694791ULL;
+
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
#endif
- const u8 *anchor = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- #define MATCHLIMIT (iend - LASTLITERALS)
-
- u8 *op = (u8 *) dest;
- u8 *const oend = op + maxoutputsize;
- int length;
- const int skipstrength = SKIPSTRENGTH;
- u32 forwardh;
- int lastrun;
-
- /* Init */
- if (isize < MINLENGTH)
- goto _last_literals;
+}
+
+static FORCE_INLINE U32 LZ4_hashPosition(
+ const void *p,
+ tableType_t const tableType)
+{
+#if LZ4_ARCH64
+ if (tableType == byU32)
+ return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+#endif
+
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+static void LZ4_putPositionOnHash(
+ const BYTE *p,
+ U32 h,
+ void *tableBase,
+ tableType_t const tableType,
+ const BYTE *srcBase)
+{
+ switch (tableType) {
+ case byPtr:
+ {
+ const BYTE **hashTable = (const BYTE **)tableBase;
+
+ hashTable[h] = p;
+ return;
+ }
+ case byU32:
+ {
+ U32 *hashTable = (U32 *) tableBase;
+
+ hashTable[h] = (U32)(p - srcBase);
+ return;
+ }
+ case byU16:
+ {
+ U16 *hashTable = (U16 *) tableBase;
+
+ hashTable[h] = (U16)(p - srcBase);
+ return;
+ }
+ }
+}
+
+static FORCE_INLINE void LZ4_putPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+static const BYTE *LZ4_getPositionOnHash(
+ U32 h,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ if (tableType == byPtr) {
+ const BYTE **hashTable = (const BYTE **) tableBase;
+
+ return hashTable[h];
+ }
+
+ if (tableType == byU32) {
+ const U32 * const hashTable = (U32 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+
+ {
+ /* default, to ensure a return */
+ const U16 * const hashTable = (U16 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+}
+
+static FORCE_INLINE const BYTE *LZ4_getPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
- memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
+
+/*
+ * LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time
+ */
+static FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal * const dictPtr,
+ const char * const source,
+ char * const dest,
+ const int inputSize,
+ const int maxOutputSize,
+ const limitedOutput_directive outputLimited,
+ const tableType_t tableType,
+ const dict_directive dict,
+ const dictIssue_directive dictIssue,
+ const U32 acceleration)
+{
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *base;
+ const BYTE *lowLimit;
+ const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
+ const BYTE * const dictionary = dictPtr->dictionary;
+ const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
+ const size_t dictDelta = dictEnd - (const BYTE *)source;
+ const BYTE *anchor = (const BYTE *) source;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dest;
+ BYTE * const olimit = op + maxOutputSize;
+
+ U32 forwardH;
+ size_t refDelta = 0;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
+ /* Unsupported inputSize, too large (or negative) */
+ return 0;
+ }
+
+ switch (dict) {
+ case noDict:
+ default:
+ base = (const BYTE *)source;
+ lowLimit = (const BYTE *)source;
+ break;
+ case withPrefix64k:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source - dictPtr->dictSize;
+ break;
+ case usingExtDict:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source;
+ break;
+ }
+
+ if ((tableType == byU16)
+ && (inputSize >= LZ4_64Klimit)) {
+ /* Size too large (not within 64K limit) */
+ return 0;
+ }
+
+ if (inputSize < LZ4_minLength) {
+ /* Input too small, no compression (all literals) */
+ goto _last_literals;
+ }
/* First Byte */
- hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
ip++;
- forwardh = LZ4_HASH_VALUE(ip);
+ forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
- for (;;) {
- int findmatchattempts = (1U << skipstrength) + 3;
- const u8 *forwardip = ip;
- const u8 *ref;
- u8 *token;
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
/* Find a match */
- do {
- u32 h = forwardh;
- int step = findmatchattempts++ >> skipstrength;
- ip = forwardip;
- forwardip = ip + step;
-
- if (unlikely(forwardip > mflimit))
- goto _last_literals;
-
- forwardh = LZ4_HASH_VALUE(forwardip);
- ref = base + hashtable[h];
- hashtable[h] = ip - base;
- } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 const h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h,
+ dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ } }
+
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+
+ LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
+ tableType, base);
+ } while (((dictIssue == dictSmall)
+ ? (match < lowRefLimit)
+ : 0)
+ || ((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match + refDelta)
+ != LZ4_read32(ip)));
+ }
/* Catch up */
- while ((ip > anchor) && (ref > (u8 *)source) &&
- unlikely(ip[-1] == ref[-1])) {
+ while (((ip > anchor) & (match + refDelta > lowLimit))
+ && (unlikely(ip[-1] == match[refDelta - 1]))) {
ip--;
- ref--;
+ match--;
}
- /* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- /* check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
- (length >> 8) > oend))
- return 0;
+ /* Encode Literals */
+ {
+ unsigned const int litLength = (unsigned int)(ip - anchor);
- if (length >= (int)RUN_MASK) {
- int len;
- *token = (RUN_MASK << ML_BITS);
- len = length - RUN_MASK;
- for (; len > 254 ; len -= 255)
- *op++ = 255;
- *op++ = (u8)len;
- } else
- *token = (length << ML_BITS);
+ token = op++;
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ (unlikely(op + litLength +
+ (2 + 1 + LASTLITERALS) +
+ (litLength / 255) > olimit)))
+ return 0;
+
+ if (litLength >= RUN_MASK) {
+ int len = (int)litLength - RUN_MASK;
+
+ *token = (RUN_MASK << ML_BITS);
+
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
- /* Copy Literals */
- LZ4_BLINDCOPY(anchor, op, length);
_next_match:
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+ LZ4_writeLE16(op, (U16)(ip - match));
+ op += 2;
- /* Start Counting */
- ip += MINMATCH;
- /* MinMatch verified */
- ref += MINMATCH;
- anchor = ip;
- while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
- #if LZ4_ARCH64
- u64 diff = A64(ref) ^ A64(ip);
- #else
- u32 diff = A32(ref) ^ A32(ip);
- #endif
- if (!diff) {
- ip += STEPSIZE;
- ref += STEPSIZE;
- continue;
- }
- ip += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
- ip += 4;
- ref += 4;
- }
- #endif
- if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
- ip += 2;
- ref += 2;
- }
- if ((ip < MATCHLIMIT) && (*ref == *ip))
- ip++;
-_endcount:
/* Encode MatchLength */
- length = (int)(ip - anchor);
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend))
- return 0;
- if (length >= (int)ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
- for (; length > 509 ; length -= 510) {
- *op++ = 255;
- *op++ = 255;
- }
- if (length > 254) {
- length -= 255;
- *op++ = 255;
+ {
+ unsigned int matchCode;
+
+ if ((dict == usingExtDict)
+ && (lowLimit == dictionary)) {
+ const BYTE *limit;
+
+ match += refDelta;
+ limit = ip + (dictEnd - match);
+
+ if (limit > matchlimit)
+ limit = matchlimit;
+
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, limit);
+
+ ip += MINMATCH + matchCode;
+
+ if (ip == limit) {
+ unsigned const int more = LZ4_count(ip,
+ (const BYTE *)source,
+ matchlimit);
+
+ matchCode += more;
+ ip += more;
+ }
+ } else {
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
+ ip += MINMATCH + matchCode;
}
- *op++ = (u8)length;
- } else
- *token += length;
+
+ if (outputLimited &&
+ /* Check output buffer overflow */
+ (unlikely(op +
+ (1 + LASTLITERALS) +
+ (matchCode >> 8) > olimit)))
+ return 0;
+
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+
+ while (matchCode >= 4 * 255) {
+ op += 4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4 * 255;
+ }
+
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+
+ anchor = ip;
/* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
+ if (ip > mflimit)
break;
- }
/* Fill table */
- hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
+ LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
/* Test next position */
- ref = base + hashtable[LZ4_HASH_VALUE(ip)];
- hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
- if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
+ match = LZ4_getPosition(ip, dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ }
+ }
+
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+
+ if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
+ && (match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
token = op++;
*token = 0;
goto _next_match;
}
/* Prepare next loop */
- anchor = ip++;
- forwardh = LZ4_HASH_VALUE(ip);
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (((char *)op - dest) + lastrun + 1
- + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
- return 0;
+ {
+ size_t const lastRun = (size_t)(iend - anchor);
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ ((op - (BYTE *)dest) + lastRun + 1 +
+ ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
+ return 0;
+
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun << ML_BITS);
+ }
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8)lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ memcpy(op, anchor, lastRun);
+
+ op += lastRun;
+ }
/* End */
- return (int)(((char *)op) - dest);
+ return (int) (((char *)op) - dest);
}
-static inline int lz4_compress64kctx(void *ctx,
- const char *source,
- char *dest,
- int isize,
- int maxoutputsize)
+static int LZ4_compress_fast_extState(
+ void *state,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ int acceleration)
{
- u16 *hashtable = (u16 *)ctx;
- const u8 *ip = (u8 *) source;
- const u8 *anchor = ip;
- const u8 *const base = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- #define MATCHLIMIT (iend - LASTLITERALS)
-
- u8 *op = (u8 *) dest;
- u8 *const oend = op + maxoutputsize;
- int len, length;
- const int skipstrength = SKIPSTRENGTH;
- u32 forwardh;
- int lastrun;
-
- /* Init */
- if (isize < MINLENGTH)
- goto _last_literals;
+ LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
+
+ LZ4_resetStream((LZ4_stream_t *)state);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, tableType, noDict,
+ noDictIssue, acceleration);
+ } else {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, tableType, noDict,
+ noDictIssue, acceleration);
+ }
+}
+
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem)
+{
+ return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
+ maxOutputSize, acceleration);
+}
+EXPORT_SYMBOL(LZ4_compress_fast);
- memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem)
+{
+ return LZ4_compress_fast(source, dest, inputSize,
+ maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
+}
+EXPORT_SYMBOL(LZ4_compress_default);
+
+/*-******************************
+ * *_destSize() variant
+ ********************************/
+static int LZ4_compress_destSize_generic(
+ LZ4_stream_t_internal * const ctx,
+ const char * const src,
+ char * const dst,
+ int * const srcSizePtr,
+ const int targetDstSize,
+ const tableType_t tableType)
+{
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE *base = (const BYTE *) src;
+ const BYTE *lowLimit = (const BYTE *) src;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + *srcSizePtr;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dst;
+ BYTE * const oend = op + targetDstSize;
+ BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
+ - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
+ BYTE * const oMaxMatch = op + targetDstSize
+ - (LASTLITERALS + 1 /* token */);
+ BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
+
+ U32 forwardH;
+
+ /* Init conditions */
+ /* Impossible to store anything */
+ if (targetDstSize < 1)
+ return 0;
+ /* Unsupported input size, too large (or negative) */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
+ return 0;
+ /* Size too large (not within 64K limit) */
+ if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
+ return 0;
+ /* Input too small, no compression (all literals) */
+ if (*srcSizePtr < LZ4_minLength)
+ goto _last_literals;
/* First Byte */
- ip++;
- forwardh = LZ4_HASH64K_VALUE(ip);
+ *srcSizePtr = 0;
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
- for (;;) {
- int findmatchattempts = (1U << skipstrength) + 3;
- const u8 *forwardip = ip;
- const u8 *ref;
- u8 *token;
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
/* Find a match */
- do {
- u32 h = forwardh;
- int step = findmatchattempts++ >> skipstrength;
- ip = forwardip;
- forwardip = ip + step;
-
- if (forwardip > mflimit)
- goto _last_literals;
-
- forwardh = LZ4_HASH64K_VALUE(forwardip);
- ref = base + hashtable[h];
- hashtable[h] = (u16)(ip - base);
- } while (A32(ref) != A32(ip));
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h, ctx->hashTable,
+ tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+ LZ4_putPositionOnHash(ip, h,
+ ctx->hashTable, tableType,
+ base);
+
+ } while (((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match) != LZ4_read32(ip)));
+ }
/* Catch up */
- while ((ip > anchor) && (ref > (u8 *)source)
- && (ip[-1] == ref[-1])) {
+ while ((ip > anchor)
+ && (match > lowLimit)
+ && (unlikely(ip[-1] == match[-1]))) {
ip--;
- ref--;
+ match--;
}
/* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- /* Check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS)
- + (length >> 8) > oend))
- return 0;
- if (length >= (int)RUN_MASK) {
- *token = (RUN_MASK << ML_BITS);
- len = length - RUN_MASK;
- for (; len > 254 ; len -= 255)
- *op++ = 255;
- *op++ = (u8)len;
- } else
- *token = (length << ML_BITS);
+ {
+ unsigned int litLength = (unsigned int)(ip - anchor);
- /* Copy Literals */
- LZ4_BLINDCOPY(anchor, op, length);
+ token = op++;
+ if (op + ((litLength + 240) / 255)
+ + litLength > oMaxLit) {
+ /* Not enough space for a last match */
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ unsigned int len = litLength - RUN_MASK;
+ *token = (RUN_MASK<<ML_BITS);
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
_next_match:
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
- /* Start Counting */
- ip += MINMATCH;
- /* MinMatch verified */
- ref += MINMATCH;
- anchor = ip;
+ /* Encode MatchLength */
+ {
+ size_t matchLength = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
- while (ip < MATCHLIMIT - (STEPSIZE - 1)) {
- #if LZ4_ARCH64
- u64 diff = A64(ref) ^ A64(ip);
- #else
- u32 diff = A32(ref) ^ A32(ip);
- #endif
-
- if (!diff) {
- ip += STEPSIZE;
- ref += STEPSIZE;
- continue;
+ if (op + ((matchLength + 240)/255) > oMaxMatch) {
+ /* Match description too long : reduce it */
+ matchLength = (15 - 1) + (oMaxMatch - op) * 255;
}
- ip += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
- ip += 4;
- ref += 4;
+ ip += MINMATCH + matchLength;
+
+ if (matchLength >= ML_MASK) {
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ while (matchLength >= 255) {
+ matchLength -= 255;
+ *op++ = 255;
+ }
+ *op++ = (BYTE)matchLength;
+ } else
+ *token += (BYTE)(matchLength);
}
- #endif
- if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
- ip += 2;
- ref += 2;
- }
- if ((ip < MATCHLIMIT) && (*ref == *ip))
- ip++;
-_endcount:
- /* Encode MatchLength */
- len = (int)(ip - anchor);
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
- return 0;
- if (len >= (int)ML_MASK) {
- *token += ML_MASK;
- len -= ML_MASK;
- for (; len > 509 ; len -= 510) {
- *op++ = 255;
- *op++ = 255;
- }
- if (len > 254) {
- len -= 255;
- *op++ = 255;
- }
- *op++ = (u8)len;
- } else
- *token += len;
+ anchor = ip;
- /* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
+ /* Test end of block */
+ if (ip > mflimit)
+ break;
+ if (op > oMaxSeq)
break;
- }
/* Fill table */
- hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base);
+ LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
/* Test next position */
- ref = base + hashtable[LZ4_HASH64K_VALUE(ip)];
- hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base);
- if (A32(ref) == A32(ip)) {
- token = op++;
- *token = 0;
+ match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+
+ if ((match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++; *token = 0;
goto _next_match;
}
/* Prepare next loop */
- anchor = ip++;
- forwardh = LZ4_HASH64K_VALUE(ip);
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend)
- return 0;
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8)lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ {
+ size_t lastRunSize = (size_t)(iend - anchor);
+
+ if (op + 1 /* token */
+ + ((lastRunSize + 240) / 255) /* litLength */
+ + lastRunSize /* literals */ > oend) {
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (oend - op) - 1;
+ lastRunSize -= (lastRunSize + 240) / 255;
+ }
+ ip = anchor + lastRunSize;
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize<<ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
/* End */
- return (int)(((char *)op) - dest);
+ *srcSizePtr = (int) (((const char *)ip) - src);
+ return (int) (((char *)op) - dst);
}
-int lz4_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem)
+static int LZ4_compress_destSize_extState(
+ LZ4_stream_t *state,
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize)
{
- int ret = -1;
- int out_len = 0;
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
- if (src_len < LZ4_64KLIMIT)
- out_len = lz4_compress64kctx(wrkmem, src, dst, src_len,
- lz4_compressbound(src_len));
- else
- out_len = lz4_compressctx(wrkmem, src, dst, src_len,
- lz4_compressbound(src_len));
+ LZ4_resetStream(state);
+
+ if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
+ /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(
+ state, src, dst, *srcSizePtr,
+ targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit)
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, byU16);
+ else
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, tableType);
+ }
+}
+
+
+int LZ4_compress_destSize(
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize,
+ void *wrkmem)
+{
+ return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
+ targetDstSize);
+}
+EXPORT_SYMBOL(LZ4_compress_destSize);
+
+/*-******************************
+ * Streaming functions
+ ********************************/
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
+{
+ memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
+}
+
+int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
+ const char *dictionary, int dictSize)
+{
+ LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
+ const BYTE *p = (const BYTE *)dictionary;
+ const BYTE * const dictEnd = p + dictSize;
+ const BYTE *base;
+
+ if ((dict->initCheck)
+ || (dict->currentOffset > 1 * GB)) {
+ /* Uninitialized structure, or reuse overflow */
+ LZ4_resetStream(LZ4_dict);
+ }
+
+ if (dictSize < (int)HASH_UNIT) {
+ dict->dictionary = NULL;
+ dict->dictSize = 0;
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 * KB)
+ p = dictEnd - 64 * KB;
+ dict->currentOffset += 64 * KB;
+ base = p - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->currentOffset += dict->dictSize;
+
+ while (p <= dictEnd - HASH_UNIT) {
+ LZ4_putPosition(p, dict->hashTable, byU32, base);
+ p += 3;
+ }
+
+ return dict->dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDict);
+
+static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
+ const BYTE *src)
+{
+ if ((LZ4_dict->currentOffset > 0x80000000) ||
+ ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
+ /* address space overflow */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 * KB;
+ const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+
+ for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta)
+ LZ4_dict->hashTable[i] = 0;
+ else
+ LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 * KB;
+ if (LZ4_dict->dictSize > 64 * KB)
+ LZ4_dict->dictSize = 64 * KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
+ const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 * KB) {
+ /* useless to define a dictionary > 64 * KB */
+ dictSize = 64 * KB;
+ }
+ if ((U32)dictSize > dict->dictSize)
+ dictSize = dict->dictSize;
+
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE *)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDict);
+
+int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
+ char *dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
+ const BYTE * const dictEnd = streamPtr->dictionary
+ + streamPtr->dictSize;
- if (out_len < 0)
- goto exit;
+ const BYTE *smallest = (const BYTE *) source;
- *dst_len = out_len;
+ if (streamPtr->initCheck) {
+ /* Uninitialized structure detected */
+ return 0;
+ }
+
+ if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
+ smallest = dictEnd;
+
+ LZ4_renormDictT(streamPtr, smallest);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+
+ if ((sourceEnd > streamPtr->dictionary)
+ && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 * KB)
+ streamPtr->dictSize = 64 * KB;
+ if (streamPtr->dictSize < 4)
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ }
+ }
- return 0;
-exit:
- return ret;
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE *)source) {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, noDictIssue, acceleration);
+ }
+ streamPtr->dictSize += (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, noDictIssue, acceleration);
+ }
+ streamPtr->dictionary = (const BYTE *)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
}
-EXPORT_SYMBOL(lz4_compress);
+EXPORT_SYMBOL(LZ4_compress_fast_continue);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
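The compress-side change above retires the old lz4_compress() wrapper in favour of the upstream-style entry points exported here: LZ4_compress_fast() and LZ4_compress_default(), both of which take a caller-supplied workspace and return the compressed length directly (0 means the output did not fit). A minimal caller sketch, assuming only the patched <linux/lz4.h> and a vmalloc()'d LZ4_MEM_COMPRESS workspace; the helper name and buffer sizing are illustrative, not part of the patch:

#include <linux/errno.h>
#include <linux/lz4.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: compress src[0..src_len) into dst, returning the compressed size. */
static int example_lz4_compress(const char *src, int src_len, char *dst, int dst_cap)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* hash-table scratch state */
	int out_len;

	if (!wrkmem)
		return -ENOMEM;

	/* With dst_cap >= LZ4_COMPRESSBOUND(src_len) the call cannot fail for size reasons. */
	out_len = LZ4_compress_default(src, dst, src_len, dst_cap, wrkmem);

	vfree(wrkmem);
	return out_len ? out_len : -E2BIG;	/* 0 from LZ4 means "did not fit in dst_cap" */
}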
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 6d940c72b5fc..bd3574312b82 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -1,25 +1,16 @@
/*
- * LZ4 Decompressor for Linux kernel
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
- *
- * Based on LZ4 implementation by Yann Collet.
- *
* LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -31,313 +22,487 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-#ifndef STATIC
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#endif
-#include <linux/lz4.h>
-
#include <asm/unaligned.h>
-#include "lz4defs.h"
-
-static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
-#if LZ4_ARCH64
-static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
-#endif
-
-static int lz4_uncompress(const char *source, char *dest, int osize)
+/*-*****************************
+ * Decompression functions
+ *******************************/
+/* LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important that this generic function is really inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+static FORCE_INLINE int LZ4_decompress_generic(
+ const char * const source,
+ char * const dest,
+ int inputSize,
+ /*
+ * If endOnInput == endOnInputSize,
+ * this value is the max size of Output Buffer.
+ */
+ int outputSize,
+ /* endOnOutputSize, endOnInputSize */
+ int endOnInput,
+ /* full, partial */
+ int partialDecoding,
+ /* only used if partialDecoding == partial */
+ int targetOutputSize,
+ /* noDict, withPrefix64k, usingExtDict */
+ int dict,
+ /* == dest when no prefix */
+ const BYTE * const lowPrefix,
+ /* only if dict == usingExtDict */
+ const BYTE * const dictStart,
+ /* note : = 0 if noDict */
+ const size_t dictSize
+ )
{
+ /* Local Variables */
const BYTE *ip = (const BYTE *) source;
- const BYTE *ref;
+ const BYTE * const iend = ip + inputSize;
+
BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + osize;
+ BYTE * const oend = op + outputSize;
BYTE *cpy;
- unsigned token;
- size_t length;
+ BYTE *oexit = op + targetOutputSize;
+ const BYTE * const lowLimit = lowPrefix - dictSize;
+ const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
+ const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
+ const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+
+ const int safeDecode = (endOnInput == endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+
+ /* Special cases */
+ /* targetOutputSize too high => decode everything */
+ if ((partialDecoding) && (oexit > oend - MFLIMIT))
+ oexit = oend - MFLIMIT;
+
+ /* Empty output buffer */
+ if ((endOnInput) && (unlikely(outputSize == 0)))
+ return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+
+ if ((!endOnInput) && (unlikely(outputSize == 0)))
+ return (*ip == 0 ? 1 : -1);
+
+ /* Main Loop : decode sequences */
while (1) {
+ size_t length;
+ const BYTE *match;
+ size_t offset;
+
+ /* get literal length */
+ unsigned int const token = *ip++;
+
+ length = token>>ML_BITS;
- /* get runlength */
- token = *ip++;
- length = (token >> ML_BITS);
if (length == RUN_MASK) {
- size_t len;
+ unsigned int s;
- len = *ip++;
- for (; len == 255; length += 255)
- len = *ip++;
- if (unlikely(length > (size_t)(length + len)))
+ do {
+ s = *ip++;
+ length += s;
+ } while (likely(endOnInput
+ ? ip < iend - RUN_MASK
+ : 1) & (s == 255));
+
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(op + length) < (size_t)(op))) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(ip + length) < (size_t)(ip))) {
+ /* overflow detection */
goto _output_error;
- length += len;
+ }
}
/* copy literals */
cpy = op + length;
- if (unlikely(cpy > oend - COPYLENGTH)) {
- /*
- * Error: not enough place for another match
- * (min 4) + 5 literals
- */
- if (cpy != oend)
- goto _output_error;
+ if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ || (ip + length > iend - (2 + 1 + LASTLITERALS))))
+ || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
+ if (partialDecoding) {
+ if (cpy > oend) {
+ /*
+ * Error :
+ * write attempt beyond end of output buffer
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && (ip + length > iend)) {
+ /*
+ * Error :
+ * read attempt beyond
+ * end of input buffer
+ */
+ goto _output_error;
+ }
+ } else {
+ if ((!endOnInput)
+ && (cpy != oend)) {
+ /*
+ * Error :
+ * block decoding must
+ * stop exactly there
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && ((ip + length != iend)
+ || (cpy > oend))) {
+ /*
+ * Error :
+ * input must be consumed
+ */
+ goto _output_error;
+ }
+ }
memcpy(op, ip, length);
ip += length;
- break; /* EOF */
+ op += length;
+ /* Necessarily EOF, due to parsing restrictions */
+ break;
}
- LZ4_WILDCOPY(ip, op, cpy);
- ip -= (op - cpy);
+
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
op = cpy;
/* get offset */
- LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
+ offset = LZ4_readLE16(ip);
ip += 2;
+ match = op - offset;
- /* Error: offset create reference outside destination buffer */
- if (unlikely(ref < (BYTE *const) dest))
+ if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* Error : offset outside buffers */
goto _output_error;
+ }
+
+ /* costs ~1%; silence an msan warning when offset == 0 */
+ LZ4_write32(op, (U32)offset);
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
- for (; *ip == 255; length += 255)
- ip++;
- if (unlikely(length > (size_t)(length + *ip)))
+ unsigned int s;
+
+ do {
+ s = *ip++;
+
+ if ((endOnInput) && (ip > iend - LASTLITERALS))
+ goto _output_error;
+
+ length += s;
+ } while (s == 255);
+
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(op + length) < (size_t)op)) {
+ /* overflow detection */
goto _output_error;
- length += *ip++;
+ }
}
- /* copy repeated sequence */
- if (unlikely((op - ref) < STEPSIZE)) {
-#if LZ4_ARCH64
- int dec64 = dec64table[op - ref];
-#else
- const int dec64 = 0;
-#endif
- op[0] = ref[0];
- op[1] = ref[1];
- op[2] = ref[2];
- op[3] = ref[3];
- op += 4;
- ref += 4;
- ref -= dec32table[op-ref];
- PUT4(ref, op);
- op += STEPSIZE - 4;
- ref -= dec64;
+ length += MINMATCH;
+
+ /* check external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op + length > oend - LASTLITERALS)) {
+ /* doesn't respect parsing restriction */
+ goto _output_error;
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /*
+ * match can be copied as a single segment
+ * from external dictionary
+ */
+ memmove(op, dictEnd - (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /*
+				 * match encompasses both the external
+				 * dictionary and the current block
+ */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+
+ memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+
+ if (restSize > (size_t)(op - lowPrefix)) {
+ /* overlap copy */
+ BYTE * const endOfMatch = op + restSize;
+ const BYTE *copyFrom = lowPrefix;
+
+ while (op < endOfMatch)
+ *op++ = *copyFrom++;
+ } else {
+ memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ }
+ }
+
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ if (unlikely(offset < 8)) {
+ const int dec64 = dec64table[offset];
+
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[offset];
+ memcpy(op + 4, match, 4);
+ match -= dec64;
} else {
- LZ4_COPYSTEP(ref, op);
+ LZ4_copy8(op, match);
+ match += 8;
}
- cpy = op + length - (STEPSIZE - 4);
- if (cpy > (oend - COPYLENGTH)) {
- /* Error: request to write beyond destination buffer */
- if (cpy > oend)
- goto _output_error;
-#if LZ4_ARCH64
- if ((ref + COPYLENGTH) > oend)
-#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-#endif
+ op += 8;
+
+ if (unlikely(cpy > oend - 12)) {
+ BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
+
+ if (cpy > oend - LASTLITERALS) {
+ /*
+ * Error : last LASTLITERALS bytes
+ * must be literals (uncompressed)
+ */
goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+ }
+
+ if (op < oCopyLimit) {
+ LZ4_wildCopy(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+
while (op < cpy)
- *op++ = *ref++;
- op = cpy;
- /*
- * Check EOF (should never happen, since last 5 bytes
- * are supposed to be literals)
- */
- if (op == oend)
- goto _output_error;
- continue;
+ *op++ = *match++;
+ } else {
+ LZ4_copy8(op, match);
+
+ if (length > 16)
+ LZ4_wildCopy(op + 8, match + 8, cpy);
}
- LZ4_SECURECOPY(ref, op, cpy);
+
op = cpy; /* correction */
}
+
/* end of decoding */
- return (int) (((char *)ip) - source);
+ if (endOnInput) {
+ /* Nb of output bytes decoded */
+ return (int) (((char *)op) - dest);
+ } else {
+ /* Nb of input bytes read */
+ return (int) (((const char *)ip) - source);
+ }
- /* write overflow error detected */
+ /* Overflow error detected */
_output_error:
return -1;
}
-static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
- int isize, size_t maxoutputsize)
+int LZ4_decompress_safe(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize)
{
- const BYTE *ip = (const BYTE *) source;
- const BYTE *const iend = ip + isize;
- const BYTE *ref;
-
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, endOnInputSize, full, 0,
+ noDict, (BYTE *)dest, NULL, 0);
+}
- BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + maxoutputsize;
- BYTE *cpy;
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+ int compressedSize, int targetOutputSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, endOnInputSize, partial,
+ targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+}
- /* Main Loop */
- while (ip < iend) {
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0, withPrefix64k,
+ (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+}
- unsigned token;
- size_t length;
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
- /* get runlength */
- token = *ip++;
- length = (token >> ML_BITS);
- if (length == RUN_MASK) {
- int s = 255;
- while ((ip < iend) && (s == 255)) {
- s = *ip++;
- if (unlikely(length > (size_t)(length + s)))
- goto _output_error;
- length += s;
- }
- }
- /* copy literals */
- cpy = op + length;
- if ((cpy > oend - COPYLENGTH) ||
- (ip + length > iend - COPYLENGTH)) {
-
- if (cpy > oend)
- goto _output_error;/* writes beyond buffer */
-
- if (ip + length != iend)
- goto _output_error;/*
- * Error: LZ4 format requires
- * to consume all input
- * at this stage
- */
- memcpy(op, ip, length);
- op += length;
- break;/* Necessarily EOF, due to parsing restrictions */
- }
- LZ4_WILDCOPY(ip, op, cpy);
- ip -= (op - cpy);
- op = cpy;
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
- /* get offset */
- LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
- ip += 2;
- if (ref < (BYTE * const) dest)
- goto _output_error;
- /*
- * Error : offset creates reference
- * outside of destination buffer
- */
+/*
+ * *_continue() :
+ * These decoding functions allow decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks must still be available at the memory
+ * position where they were decoded.
+ * If it's not possible, save the relevant part of
+ *	If that is not possible, save the relevant part of the
+ *	decoded data into a safe buffer,
+ *	and indicate where it stands using LZ4_setStreamDecode().
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE *)dest) {
+ result = LZ4_decompress_generic(source, dest,
+ compressedSize,
+ maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
+ lz4sd->externalDict,
+ lz4sd->extDictSize);
+
+ if (result <= 0)
+ return result;
+
+ lz4sd->prefixSize += result;
+ lz4sd->prefixEnd += result;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, (BYTE *)dest,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ }
- /* get matchlength */
- length = (token & ML_MASK);
- if (length == ML_MASK) {
- while (ip < iend) {
- int s = *ip++;
- if (unlikely(length > (size_t)(length + s)))
- goto _output_error;
- length += s;
- if (s == 255)
- continue;
- break;
- }
- }
+ return result;
+}
- /* copy repeated sequence */
- if (unlikely((op - ref) < STEPSIZE)) {
-#if LZ4_ARCH64
- int dec64 = dec64table[op - ref];
-#else
- const int dec64 = 0;
-#endif
- op[0] = ref[0];
- op[1] = ref[1];
- op[2] = ref[2];
- op[3] = ref[3];
- op += 4;
- ref += 4;
- ref -= dec32table[op - ref];
- PUT4(ref, op);
- op += STEPSIZE - 4;
- ref -= dec64;
- } else {
- LZ4_COPYSTEP(ref, op);
- }
- cpy = op + length - (STEPSIZE-4);
- if (cpy > oend - COPYLENGTH) {
- if (cpy > oend)
- goto _output_error; /* write outside of buf */
-#if LZ4_ARCH64
- if ((ref + COPYLENGTH) > oend)
-#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-#endif
- goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
- while (op < cpy)
- *op++ = *ref++;
- op = cpy;
- /*
- * Check EOF (should never happen, since last 5 bytes
- * are supposed to be literals)
- */
- if (op == oend)
- goto _output_error;
- continue;
- }
- LZ4_SECURECOPY(ref, op, cpy);
- op = cpy; /* correction */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE *)dest) {
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict,
+ lz4sd->prefixEnd - lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+
+ if (result <= 0)
+ return result;
+
+ lz4sd->prefixSize += originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict, (BYTE *)dest,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
- /* end of decoding */
- return (int) (((char *) op) - dest);
- /* write overflow error detected */
-_output_error:
- return -1;
+ return result;
}
-int lz4_decompress(const unsigned char *src, size_t *src_len,
- unsigned char *dest, size_t actual_dest_len)
+/*
+ * Advanced decoding functions :
+ * *_usingDict() :
+ *	These decoding functions work the same as the "_continue" ones,
+ *	except that the dictionary must be explicitly provided as a parameter.
+ */
+static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
+ char *dest, int compressedSize, int maxOutputSize, int safe,
+ const char *dictStart, int dictSize)
{
- int ret = -1;
- int input_len = 0;
-
- input_len = lz4_uncompress(src, dest, actual_dest_len);
- if (input_len < 0)
- goto exit_0;
- *src_len = input_len;
+ if (dictSize == 0)
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize, safe, full, 0,
+ noDict, (BYTE *)dest, NULL, 0);
+ if (dictStart + dictSize == dest) {
+ if (dictSize >= (int)(64 * KB - 1))
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize, safe, full, 0,
+ withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, safe, full, 0, noDict,
+ (BYTE *)dest - dictSize, NULL, 0);
+ }
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, safe, full, 0, usingExtDict,
+ (BYTE *)dest, (const BYTE *)dictStart, dictSize);
+}
- return 0;
-exit_0:
- return ret;
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
+{
+ return LZ4_decompress_usingDict_generic(source, dest,
+ compressedSize, maxOutputSize, 1, dictStart, dictSize);
}
-#ifndef STATIC
-EXPORT_SYMBOL(lz4_decompress);
-#endif
-int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
- unsigned char *dest, size_t *dest_len)
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart, int dictSize)
{
- int ret = -1;
- int out_len = 0;
-
- out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
- *dest_len);
- if (out_len < 0)
- goto exit_0;
- *dest_len = out_len;
-
- return 0;
-exit_0:
- return ret;
+ return LZ4_decompress_usingDict_generic(source, dest, 0,
+ originalSize, 0, dictStart, dictSize);
}
+
#ifndef STATIC
-EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(LZ4_decompress_safe);
+EXPORT_SYMBOL(LZ4_decompress_safe_partial);
+EXPORT_SYMBOL(LZ4_decompress_fast);
+EXPORT_SYMBOL(LZ4_setStreamDecode);
+EXPORT_SYMBOL(LZ4_decompress_safe_continue);
+EXPORT_SYMBOL(LZ4_decompress_fast_continue);
+EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
+EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 Decompressor");
+MODULE_DESCRIPTION("LZ4 decompressor");
#endif
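The decompressor is likewise rebuilt around a single inlined LZ4_decompress_generic(), from which the exported variants are instantiated: LZ4_decompress_safe() validates against both the compressed size and the output capacity and returns the number of bytes decoded (negative on corrupt input), while LZ4_decompress_fast() trusts a caller-supplied originalSize and returns the number of input bytes consumed. A short caller sketch of the safe variant, assuming the patched <linux/lz4.h>; the helper name is illustrative:

#include <linux/errno.h>
#include <linux/lz4.h>

/* Hypothetical helper: decode comp[0..comp_len) into out, returning the decoded length. */
static int example_lz4_decompress(const char *comp, int comp_len, char *out, int out_cap)
{
	/* No workspace is needed on the decode path; all bounds come from the arguments. */
	int ret = LZ4_decompress_safe(comp, out, comp_len, out_cap);

	return ret < 0 ? -EINVAL : ret;	/* negative return == malformed or truncated stream */
}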
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index c79d7ea8a38e..00a0b58a0871 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -1,157 +1,227 @@
+#ifndef __LZ4DEFS_H__
+#define __LZ4DEFS_H__
+
/*
- * lz4defs.h -- architecture specific defines
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ * lz4defs.h -- common and architecture specific defines for the kernel usage
+
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-/*
- * Detects 64 bits mode
- */
+#include <asm/unaligned.h>
+#include <linux/string.h> /* memset, memcpy */
+
+#define FORCE_INLINE __always_inline
+
+/*-************************************
+ * Basic Types
+ **************************************/
+#include <linux/types.h>
+
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+
+/*-************************************
+ * Architecture specifics
+ **************************************/
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
-/*
- * Architecture-specific macros
- */
-#define BYTE u8
-typedef struct _U16_S { u16 v; } U16_S;
-typedef struct _U32_S { u32 v; } U32_S;
-typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-
-#define A16(x) (((U16_S *)(x))->v)
-#define A32(x) (((U32_S *)(x))->v)
-#define A64(x) (((U64_S *)(x))->v)
-
-#define PUT4(s, d) (A32(d) = A32(s))
-#define PUT8(s, d) (A64(d) = A64(s))
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - A16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- A16(p) = v; \
- p += 2; \
- } while (0)
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-
-#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
-#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
-#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
-
-#define PUT4(s, d) \
- put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
-#define PUT8(s, d) \
- put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned_le16(v, (u16 *)(p)); \
- p += 2; \
- } while (0)
+#if defined(__LITTLE_ENDIAN)
+#define LZ4_LITTLE_ENDIAN 1
+#else
+#define LZ4_LITTLE_ENDIAN 0
#endif
-#define COPYLENGTH 8
-#define ML_BITS 4
-#define ML_MASK ((1U << ML_BITS) - 1)
+/*-************************************
+ * Constants
+ **************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+
+/* Increase this value ==> compression runs slower on incompressible data */
+#define LZ4_SKIPTRIGGER 6
+
+#define HASH_UNIT sizeof(size_t)
+
+#define KB (1 << 10)
+#define MB (1 << 20)
+#define GB (1U << 30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define STEPSIZE sizeof(size_t)
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
-#define MEMORY_USAGE 14
-#define MINMATCH 4
-#define SKIPSTRENGTH 6
-#define LASTLITERALS 5
-#define MFLIMIT (COPYLENGTH + MINMATCH)
-#define MINLENGTH (MFLIMIT + 1)
-#define MAXD_LOG 16
-#define MAXD (1 << MAXD_LOG)
-#define MAXD_MASK (u32)(MAXD - 1)
-#define MAX_DISTANCE (MAXD - 1)
-#define HASH_LOG (MAXD_LOG - 1)
-#define HASHTABLESIZE (1 << HASH_LOG)
-#define MAX_NB_ATTEMPTS 256
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
-#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1))
-#define HASHLOG64K ((MEMORY_USAGE - 2) + 1)
-#define HASH64KTABLESIZE (1U << HASHLOG64K)
-#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - (MEMORY_USAGE-2)))
-#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - HASHLOG64K))
-#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - HASH_LOG))
-
-#if LZ4_ARCH64/* 64-bit */
-#define STEPSIZE 8
-
-#define LZ4_COPYSTEP(s, d) \
- do { \
- PUT8(s, d); \
- d += 8; \
- s += 8; \
- } while (0)
-
-#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
-
-#define LZ4_SECURECOPY(s, d, e) \
- do { \
- if (d < e) { \
- LZ4_WILDCOPY(s, d, e); \
- } \
- } while (0)
-#define HTYPE u32
-
-#ifdef __BIG_ENDIAN
-#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
+
+/*-************************************
+ * Reading and writing into memory
+ **************************************/
+static FORCE_INLINE U16 LZ4_read16(const void *ptr)
+{
+ return get_unaligned((const U16 *)ptr);
+}
+
+static FORCE_INLINE U32 LZ4_read32(const void *ptr)
+{
+ return get_unaligned((const U32 *)ptr);
+}
+
+static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
+{
+ return get_unaligned((const size_t *)ptr);
+}
+
+static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+{
+ return put_unaligned_le16(value, memPtr);
+}
+
+static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+{
+#if LZ4_ARCH64
+ U64 a = get_unaligned((const U64 *)src);
+
+ put_unaligned(a, (U64 *)dst);
+#else
+ U32 a = get_unaligned((const U32 *)src);
+ U32 b = get_unaligned((const U32 *)src + 1);
+
+ put_unaligned(a, (U32 *)dst);
+ put_unaligned(b, (U32 *)dst + 1);
+#endif
+}
+
+/*
+ * customized variant of memcpy,
+ * which can overwrite up to 7 bytes beyond dstEnd
+ */
+static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
+ const void *srcPtr, void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_copy8(d, s);
+ d += 8;
+ s += 8;
+ } while (d < e);
+}
+
+static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
+{
+#if LZ4_LITTLE_ENDIAN
+ return __ffs(val) >> 3;
#else
-#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
+ return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
+#endif
+}
+
+static FORCE_INLINE unsigned int LZ4_count(
+ const BYTE *pIn,
+ const BYTE *pMatch,
+ const BYTE *pInLimit)
+{
+ const BYTE *const pStart = pIn;
+
+ while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ continue;
+ }
+
+ pIn += LZ4_NbCommonBytes(diff);
+
+ return (unsigned int)(pIn - pStart);
+ }
+
+#if LZ4_ARCH64
+ if ((pIn < (pInLimit - 3))
+ && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ pIn += 4;
+ pMatch += 4;
+ }
#endif
-#else /* 32-bit */
-#define STEPSIZE 4
+ if ((pIn < (pInLimit - 1))
+ && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ pIn += 2;
+ pMatch += 2;
+ }
-#define LZ4_COPYSTEP(s, d) \
- do { \
- PUT4(s, d); \
- d += 4; \
- s += 4; \
- } while (0)
+ if ((pIn < pInLimit) && (*pMatch == *pIn))
+ pIn++;
-#define LZ4_COPYPACKET(s, d) \
- do { \
- LZ4_COPYSTEP(s, d); \
- LZ4_COPYSTEP(s, d); \
- } while (0)
+ return (unsigned int)(pIn - pStart);
+}
-#define LZ4_SECURECOPY LZ4_WILDCOPY
-#define HTYPE const u8*
+typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
-#ifdef __BIG_ENDIAN
-#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
-#else
-#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
-#endif
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-#endif
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-#define LZ4_WILDCOPY(s, d, e) \
- do { \
- LZ4_COPYPACKET(s, d); \
- } while (d < e)
-
-#define LZ4_BLINDCOPY(s, d, l) \
- do { \
- u8 *e = (d) + l; \
- LZ4_WILDCOPY(s, d, e); \
- d = e; \
- } while (0)
+#endif
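The rewritten lz4defs.h also centralises the word-at-a-time match counter used by both compressors: LZ4_count() XORs sizeof(size_t) bytes at a time and, on the first mismatch, converts the lowest differing bit into a byte offset via LZ4_NbCommonBytes() (__ffs on little-endian, __fls otherwise). The same idea in a freestanding sketch, using __builtin_ctzll in place of the kernel's __ffs and assuming a little-endian 64-bit target; this is an illustration of the technique, not code from the patch:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Count how many leading bytes of a and b match, scanning eight bytes per iteration. */
static size_t count_common_bytes(const uint8_t *a, const uint8_t *b, size_t limit)
{
	size_t n = 0;

	while (n + sizeof(uint64_t) <= limit) {
		uint64_t wa, wb;

		memcpy(&wa, a + n, sizeof(wa));	/* unaligned-safe loads */
		memcpy(&wb, b + n, sizeof(wb));
		if (wa != wb)
			/* on little-endian, trailing zero bits of the XOR / 8 == first differing byte */
			return n + (__builtin_ctzll(wa ^ wb) >> 3);
		n += sizeof(uint64_t);
	}
	/* byte-wise tail (the kernel version finishes with 4/2/1-byte steps instead) */
	while (n < limit && a[n] == b[n])
		n++;
	return n;
}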
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index f344f76b6559..176f03b83e56 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -1,19 +1,17 @@
/*
* LZ4 HC - High Compression Mode of LZ4
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Copyright (C) 2011-2015, Yann Collet.
*
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,323 +23,361 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * Changed for kernel use by:
- * Chanho Min <chanho.min@lge.com>
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+/*-************************************
+ * Dependencies
+ **************************************/
#include <linux/lz4.h>
-#include <asm/unaligned.h>
#include "lz4defs.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h> /* memset */
-struct lz4hc_data {
- const u8 *base;
- HTYPE hashtable[HASHTABLESIZE];
- u16 chaintable[MAXD];
- const u8 *nexttoupdate;
-} __attribute__((__packed__));
+/* *************************************
+ * Local Constants and types
+ ***************************************/
-static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base)
+#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
+
+#define HASH_FUNCTION(i) (((i) * 2654435761U) \
+ >> ((MINMATCH*8) - LZ4HC_HASH_LOG))
+#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
+
+static U32 LZ4HC_hashPtr(const void *ptr)
{
- memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable));
- memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable));
-
-#if LZ4_ARCH64
- hc4->nexttoupdate = base + 1;
-#else
- hc4->nexttoupdate = base;
-#endif
- hc4->base = base;
- return 1;
+ return HASH_FUNCTION(LZ4_read32(ptr));
+}
+
+/**************************************
+ * HC Compression
+ **************************************/
+static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
+{
+ memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
+ memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+ hc4->nextToUpdate = 64 * KB;
+ hc4->base = start - 64 * KB;
+ hc4->end = start;
+ hc4->dictBase = start - 64 * KB;
+ hc4->dictLimit = 64 * KB;
+ hc4->lowLimit = 64 * KB;
}
/* Update chains up to ip (excluded) */
-static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip)
+static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
+ const BYTE *ip)
{
- u16 *chaintable = hc4->chaintable;
- HTYPE *hashtable = hc4->hashtable;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const hashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base + idx);
+ size_t delta = idx - hashTable[h];
- while (hc4->nexttoupdate < ip) {
- const u8 *p = hc4->nexttoupdate;
- size_t delta = p - (hashtable[HASH_VALUE(p)] + base);
if (delta > MAX_DISTANCE)
delta = MAX_DISTANCE;
- chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta;
- hashtable[HASH_VALUE(p)] = (p) - base;
- hc4->nexttoupdate++;
- }
-}
-static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2,
- const u8 *const matchlimit)
-{
- const u8 *p1t = p1;
-
- while (p1t < matchlimit - (STEPSIZE - 1)) {
-#if LZ4_ARCH64
- u64 diff = A64(p2) ^ A64(p1t);
-#else
- u32 diff = A32(p2) ^ A32(p1t);
-#endif
- if (!diff) {
- p1t += STEPSIZE;
- p2 += STEPSIZE;
- continue;
- }
- p1t += LZ4_NBCOMMONBYTES(diff);
- return p1t - p1;
- }
-#if LZ4_ARCH64
- if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) {
- p1t += 4;
- p2 += 4;
- }
-#endif
+ DELTANEXTU16(idx) = (U16)delta;
- if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) {
- p1t += 2;
- p2 += 2;
+ hashTable[h] = idx;
+ idx++;
}
- if ((p1t < matchlimit) && (*p2 == *p1t))
- p1t++;
- return p1t - p1;
+
+ hc4->nextToUpdate = target;
}
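
To make the bookkeeping in LZ4HC_hashPtr()/LZ4HC_Insert() above more concrete: hashTable[] remembers the newest position seen for each hash bucket, while chainTable[] stores, per position (mod 64 KB), the 16-bit distance back to the previous position with the same hash, capped at MAX_DISTANCE. The userspace sketch below mimics that structure; the hash-log value and names are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASH_LOG	15		/* stand-in for LZ4HC_HASH_LOG */
#define MAX_DISTANCE	65535u		/* 64 KB window, as in the patch */

static uint32_t hash4(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, 4);		/* same role as LZ4_read32() */
	/* Knuth-style multiplicative hash, keep the top HASH_LOG bits */
	return (v * 2654435761U) >> (32 - HASH_LOG);
}

int main(void)
{
	static uint32_t hash_table[1 << HASH_LOG];	/* newest pos per bucket */
	static uint16_t chain_table[1 << 16];		/* delta to previous pos */
	const uint8_t buf[] = "abcabcabcabcabc";
	uint32_t pos;

	memset(chain_table, 0xFF, sizeof(chain_table));

	for (pos = 0; pos + 4 <= sizeof(buf) - 1; pos++) {
		uint32_t h = hash4(buf + pos);
		uint32_t delta = pos - hash_table[h];

		if (delta > MAX_DISTANCE)
			delta = MAX_DISTANCE;
		chain_table[(uint16_t)pos] = (uint16_t)delta;
		hash_table[h] = pos;
	}

	/* Walk the chain of earlier "abca" occurrences, newest first.  A real
	 * match finder re-checks the 4 bytes at each candidate (LZ4_read32
	 * compare) and bounds the walk with nbAttempts/lowLimit. */
	pos = hash_table[hash4((const uint8_t *)"abca")];
	for (;;) {
		uint16_t delta = chain_table[(uint16_t)pos];

		printf("candidate match at %u\n", pos);
		if (delta == 0 || delta > pos)
			break;
		pos -= delta;
	}
	return 0;
}
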
-static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4,
- const u8 *ip, const u8 *const matchlimit, const u8 **matchpos)
+static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
+ LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
+ const BYTE *ip,
+ const BYTE * const iLimit,
+ const BYTE **matchpos,
+ const int maxNbAttempts)
{
- u16 *const chaintable = hc4->chaintable;
- HTYPE *const hashtable = hc4->hashtable;
- const u8 *ref;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
- int nbattempts = MAX_NB_ATTEMPTS;
- size_t repl = 0, ml = 0;
- u16 delta;
+ const BYTE * const dictBase = hc4->dictBase;
+ const U32 dictLimit = hc4->dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ size_t ml = 0;
/* HC4 match finder */
- lz4hc_insert(hc4, ip);
- ref = hashtable[HASH_VALUE(ip)] + base;
-
- /* potential repetition */
- if (ref >= ip-4) {
- /* confirmed */
- if (A32(ref) == A32(ip)) {
- delta = (u16)(ip-ref);
- repl = ml = lz4hc_commonlength(ip + MINMATCH,
- ref + MINMATCH, matchlimit) + MINMATCH;
- *matchpos = ref;
- }
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
- }
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE * const match = base + matchIndex;
+
+ if (*(match + ml) == *(ip + ml)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ size_t const mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, iLimit) + MINMATCH;
- while ((ref >= ip - MAX_DISTANCE) && nbattempts) {
- nbattempts--;
- if (*(ref + ml) == *(ip + ml)) {
- if (A32(ref) == A32(ip)) {
- size_t mlt =
- lz4hc_commonlength(ip + MINMATCH,
- ref + MINMATCH, matchlimit) + MINMATCH;
if (mlt > ml) {
ml = mlt;
- *matchpos = ref;
+ *matchpos = match;
+ }
+ }
+ } else {
+ const BYTE * const match = dictBase + matchIndex;
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ size_t mlt;
+ const BYTE *vLimit = ip
+ + (dictLimit - matchIndex);
+
+ if (vLimit > iLimit)
+ vLimit = iLimit;
+ mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, vLimit) + MINMATCH;
+ if ((ip + mlt == vLimit)
+ && (vLimit < iLimit))
+ mlt += LZ4_count(ip + mlt,
+ base + dictLimit,
+ iLimit);
+ if (mlt > ml) {
+ /* virtual matchpos */
+ ml = mlt;
+ *matchpos = base + matchIndex;
}
}
}
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
- }
-
- /* Complete table */
- if (repl) {
- const BYTE *ptr = ip;
- const BYTE *end;
- end = ip + repl - (MINMATCH-1);
- /* Pre-Load */
- while (ptr < end - delta) {
- chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
- ptr++;
- }
- do {
- chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
- /* Head of chain */
- hashtable[HASH_VALUE(ptr)] = (ptr) - base;
- ptr++;
- } while (ptr < end);
- hc4->nexttoupdate = end;
+ matchIndex -= DELTANEXTU16(matchIndex);
}
return (int)ml;
}
-static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4,
- const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest,
- const u8 **matchpos, const u8 **startpos)
+static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
+ LZ4HC_CCtx_internal *hc4,
+ const BYTE * const ip,
+ const BYTE * const iLowLimit,
+ const BYTE * const iHighLimit,
+ int longest,
+ const BYTE **matchpos,
+ const BYTE **startpos,
+ const int maxNbAttempts)
{
- u16 *const chaintable = hc4->chaintable;
- HTYPE *const hashtable = hc4->hashtable;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
- const u8 *ref;
- int nbattempts = MAX_NB_ATTEMPTS;
- int delta = (int)(ip - startlimit);
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE * const lowPrefixPtr = base + dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ const BYTE * const dictBase = hc4->dictBase;
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ int delta = (int)(ip - iLowLimit);
/* First Match */
- lz4hc_insert(hc4, ip);
- ref = hashtable[HASH_VALUE(ip)] + base;
-
- while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base)
- && (nbattempts)) {
- nbattempts--;
- if (*(startlimit + longest) == *(ref - delta + longest)) {
- if (A32(ref) == A32(ip)) {
- const u8 *reft = ref + MINMATCH;
- const u8 *ipt = ip + MINMATCH;
- const u8 *startt = ip;
-
- while (ipt < matchlimit-(STEPSIZE - 1)) {
- #if LZ4_ARCH64
- u64 diff = A64(reft) ^ A64(ipt);
- #else
- u32 diff = A32(reft) ^ A32(ipt);
- #endif
-
- if (!diff) {
- ipt += STEPSIZE;
- reft += STEPSIZE;
- continue;
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE *matchPtr = base + matchIndex;
+
+ if (*(iLowLimit + longest)
+ == *(matchPtr - delta + longest)) {
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ int mlt = MINMATCH + LZ4_count(
+ ip + MINMATCH,
+ matchPtr + MINMATCH,
+ iHighLimit);
+ int back = 0;
+
+ while ((ip + back > iLowLimit)
+ && (matchPtr + back > lowPrefixPtr)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if (mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
}
- ipt += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ipt < (matchlimit - 3))
- && (A32(reft) == A32(ipt))) {
- ipt += 4;
- reft += 4;
- }
- ipt += 2;
- #endif
- if ((ipt < (matchlimit - 1))
- && (A16(reft) == A16(ipt))) {
- reft += 2;
}
- if ((ipt < matchlimit) && (*reft == *ipt))
- ipt++;
-_endcount:
- reft = ref;
-
- while ((startt > startlimit)
- && (reft > hc4->base)
- && (startt[-1] == reft[-1])) {
- startt--;
- reft--;
- }
-
- if ((ipt - startt) > longest) {
- longest = (int)(ipt - startt);
- *matchpos = reft;
- *startpos = startt;
+ }
+ } else {
+ const BYTE * const matchPtr = dictBase + matchIndex;
+
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ size_t mlt;
+ int back = 0;
+ const BYTE *vLimit = ip + (dictLimit - matchIndex);
+
+ if (vLimit > iHighLimit)
+ vLimit = iHighLimit;
+
+ mlt = LZ4_count(ip + MINMATCH,
+ matchPtr + MINMATCH, vLimit) + MINMATCH;
+
+ if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
+ mlt += LZ4_count(ip + mlt, base + dictLimit,
+ iHighLimit);
+ while ((ip + back > iLowLimit)
+ && (matchIndex + back > lowLimit)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if ((int)mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
}
}
}
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
+
+ matchIndex -= DELTANEXTU16(matchIndex);
}
+
return longest;
}
-static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor,
- int ml, const u8 *ref)
+static FORCE_INLINE int LZ4HC_encodeSequence(
+ const BYTE **ip,
+ BYTE **op,
+ const BYTE **anchor,
+ int matchLength,
+ const BYTE * const match,
+ limitedOutput_directive limitedOutputBuffer,
+ BYTE *oend)
{
- int length, len;
- u8 *token;
+ int length;
+ BYTE *token;
/* Encode Literal length */
length = (int)(*ip - *anchor);
token = (*op)++;
+
+ if ((limitedOutputBuffer)
+ && ((*op + (length>>8)
+ + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
if (length >= (int)RUN_MASK) {
- *token = (RUN_MASK << ML_BITS);
+ int len;
+
+ *token = (RUN_MASK<<ML_BITS);
len = length - RUN_MASK;
for (; len > 254 ; len -= 255)
*(*op)++ = 255;
- *(*op)++ = (u8)len;
+ *(*op)++ = (BYTE)len;
} else
- *token = (length << ML_BITS);
+ *token = (BYTE)(length<<ML_BITS);
/* Copy Literals */
- LZ4_BLINDCOPY(*anchor, *op, length);
+ LZ4_wildCopy(*op, *anchor, (*op) + length);
+ *op += length;
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref));
+ LZ4_writeLE16(*op, (U16)(*ip - match));
+ *op += 2;
/* Encode MatchLength */
- len = (int)(ml - MINMATCH);
- if (len >= (int)ML_MASK) {
+ length = (int)(matchLength - MINMATCH);
+
+ if ((limitedOutputBuffer)
+ && (*op + (length>>8)
+ + (1 + LASTLITERALS) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
+
+ if (length >= (int)ML_MASK) {
*token += ML_MASK;
- len -= ML_MASK;
- for (; len > 509 ; len -= 510) {
+ length -= ML_MASK;
+
+ for (; length > 509 ; length -= 510) {
*(*op)++ = 255;
*(*op)++ = 255;
}
- if (len > 254) {
- len -= 255;
+
+ if (length > 254) {
+ length -= 255;
*(*op)++ = 255;
}
- *(*op)++ = (u8)len;
+
+ *(*op)++ = (BYTE)length;
} else
- *token += len;
+ *token += (BYTE)(length);
/* Prepare next loop */
- *ip += ml;
+ *ip += matchLength;
*anchor = *ip;
return 0;
}
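
LZ4HC_encodeSequence() above emits one LZ4 sequence: a token byte whose high nibble holds the literal length and whose low nibble holds the match length (15 acting as an escape to additional 255-valued bytes), followed by the literals, a 2-byte little-endian offset, and any extended match length. The userspace sketch below writes a single sequence in that wire format; it is an illustration of the encoding, not the kernel routine, and the constant names are copied for readability only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MINMATCH 4
#define ML_BITS  4
#define ML_MASK  ((1U << ML_BITS) - 1)	/* 15 */
#define RUN_MASK 15U

static size_t emit_sequence(uint8_t *out, const uint8_t *literals,
			    unsigned lit_len, unsigned match_len,
			    unsigned offset)
{
	uint8_t *op = out;
	uint8_t *token = op++;
	unsigned ml = match_len - MINMATCH;

	/* literal length: 4 high bits of the token, then 255-bytes */
	if (lit_len >= RUN_MASK) {
		unsigned len = lit_len - RUN_MASK;

		*token = RUN_MASK << ML_BITS;
		for (; len >= 255; len -= 255)
			*op++ = 255;
		*op++ = (uint8_t)len;
	} else {
		*token = (uint8_t)(lit_len << ML_BITS);
	}

	memcpy(op, literals, lit_len);
	op += lit_len;

	/* match offset, 2 bytes little endian (LZ4_writeLE16 in the patch) */
	*op++ = (uint8_t)offset;
	*op++ = (uint8_t)(offset >> 8);

	/* match length: 4 low bits of the token, then 255-bytes */
	if (ml >= ML_MASK) {
		*token += ML_MASK;
		ml -= ML_MASK;
		for (; ml >= 255; ml -= 255)
			*op++ = 255;
		*op++ = (uint8_t)ml;
	} else {
		*token += (uint8_t)ml;
	}

	return (size_t)(op - out);
}

int main(void)
{
	uint8_t buf[64];
	size_t n = emit_sequence(buf, (const uint8_t *)"hello", 5, 20, 6);

	printf("sequence is %zu bytes, token 0x%02x\n", n, buf[0]);
	return 0;
}
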
-static int lz4_compresshcctx(struct lz4hc_data *ctx,
- const char *source,
- char *dest,
- int isize)
+static int LZ4HC_compress_generic(
+ LZ4HC_CCtx_internal *const ctx,
+ const char * const source,
+ char * const dest,
+ int const inputSize,
+ int const maxOutputSize,
+ int compressionLevel,
+ limitedOutput_directive limit
+ )
{
- const u8 *ip = (const u8 *)source;
- const u8 *anchor = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- const u8 *const matchlimit = (iend - LASTLITERALS);
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = (iend - LASTLITERALS);
- u8 *op = (u8 *)dest;
+ BYTE *op = (BYTE *) dest;
+ BYTE * const oend = op + maxOutputSize;
+ unsigned int maxNbAttempts;
int ml, ml2, ml3, ml0;
- const u8 *ref = NULL;
- const u8 *start2 = NULL;
- const u8 *ref2 = NULL;
- const u8 *start3 = NULL;
- const u8 *ref3 = NULL;
- const u8 *start0;
- const u8 *ref0;
- int lastrun;
+ const BYTE *ref = NULL;
+ const BYTE *start2 = NULL;
+ const BYTE *ref2 = NULL;
+ const BYTE *start3 = NULL;
+ const BYTE *ref3 = NULL;
+ const BYTE *start0;
+ const BYTE *ref0;
+
+ /* init */
+ if (compressionLevel > LZ4HC_MAX_CLEVEL)
+ compressionLevel = LZ4HC_MAX_CLEVEL;
+ if (compressionLevel < 1)
+ compressionLevel = LZ4HC_DEFAULT_CLEVEL;
+ maxNbAttempts = 1 << (compressionLevel - 1);
+ ctx->end += inputSize;
ip++;
/* Main Loop */
while (ip < mflimit) {
- ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref));
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
+ matchlimit, (&ref), maxNbAttempts);
if (!ml) {
ip++;
continue;
@@ -351,51 +387,59 @@ static int lz4_compresshcctx(struct lz4hc_data *ctx,
start0 = ip;
ref0 = ref;
ml0 = ml;
-_search2:
- if (ip+ml < mflimit)
- ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2,
- ip + 1, matchlimit, ml, &ref2, &start2);
+
+_Search2:
+ if (ip + ml < mflimit)
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0,
+ matchlimit, ml, &ref2,
+ &start2, maxNbAttempts);
else
ml2 = ml;
- /* No better match */
+
if (ml2 == ml) {
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ /* No better match */
+ if (LZ4HC_encodeSequence(&ip, &op,
+ &anchor, ml, ref, limit, oend))
+ return 0;
continue;
}
if (start0 < ip) {
- /* empirical */
if (start2 < ip + ml0) {
+ /* empirical */
ip = start0;
ref = ref0;
ml = ml0;
}
}
- /*
- * Here, start0==ip
- * First Match too small : removed
- */
+
+ /* Here, start0 == ip */
if ((start2 - ip) < 3) {
+ /* First Match too small : removed */
ml = ml2;
ip = start2;
ref = ref2;
- goto _search2;
+ goto _Search2;
}
-_search3:
+_Search3:
/*
- * Currently we have :
- * ml2 > ml1, and
- * ip1+3 <= ip2 (usually < ip1+ml1)
- */
+ * Currently we have :
+ * ml2 > ml1, and
+ * ip1 + 3 <= ip2 (usually < ip1 + ml1)
+ */
if ((start2 - ip) < OPTIMAL_ML) {
int correction;
int new_ml = ml;
+
if (new_ml > OPTIMAL_ML)
new_ml = OPTIMAL_ML;
if (ip + new_ml > start2 + ml2 - MINMATCH)
new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+
correction = new_ml - (int)(start2 - ip);
+
if (correction > 0) {
start2 += correction;
ref2 += correction;
@@ -403,39 +447,44 @@ _search3:
}
}
/*
- * Now, we have start2 = ip+new_ml,
- * with new_ml=min(ml, OPTIMAL_ML=18)
+ * Now, we have start2 = ip + new_ml,
+ * with new_ml = min(ml, OPTIMAL_ML = 18)
*/
+
if (start2 + ml2 < mflimit)
- ml3 = lz4hc_insertandgetwidermatch(ctx,
- start2 + ml2 - 3, start2, matchlimit,
- ml2, &ref3, &start3);
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2,
+ matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts);
else
ml3 = ml2;
- /* No better match : 2 sequences to encode */
if (ml3 == ml2) {
+ /* No better match : 2 sequences to encode */
/* ip & ref are known; Now for ml */
- if (start2 < ip+ml)
+ if (start2 < ip + ml)
ml = (int)(start2 - ip);
-
/* Now, encode 2 sequences */
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
ip = start2;
- lz4_encodesequence(&ip, &op, &anchor, ml2, ref2);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml2, ref2, limit, oend))
+ return 0;
continue;
}
- /* Not enough space for match 2 : remove it */
if (start3 < ip + ml + 3) {
- /*
- * can write Seq1 immediately ==> Seq2 is removed,
- * so Seq3 becomes Seq1
- */
+ /* Not enough space for match 2 : remove it */
if (start3 >= (ip + ml)) {
+ /* can write Seq1 immediately
+ * ==> Seq2 is removed,
+ * so Seq3 becomes Seq1
+ */
if (start2 < ip + ml) {
- int correction =
- (int)(ip + ml - start2);
+ int correction = (int)(ip + ml - start2);
+
start2 += correction;
ref2 += correction;
ml2 -= correction;
@@ -446,35 +495,38 @@ _search3:
}
}
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
- ip = start3;
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
+ ip = start3;
ref = ref3;
- ml = ml3;
+ ml = ml3;
start0 = start2;
ref0 = ref2;
ml0 = ml2;
- goto _search2;
+ goto _Search2;
}
start2 = start3;
ref2 = ref3;
ml2 = ml3;
- goto _search3;
+ goto _Search3;
}
/*
- * OK, now we have 3 ascending matches; let's write at least
- * the first one ip & ref are known; Now for ml
- */
+ * OK, now we have 3 ascending matches;
+ * let's write at least the first one
+ * ip & ref are known; Now for ml
+ */
if (start2 < ip + ml) {
if ((start2 - ip) < (int)ML_MASK) {
int correction;
+
if (ml > OPTIMAL_ML)
ml = OPTIMAL_ML;
if (ip + ml > start2 + ml2 - MINMATCH)
- ml = (int)(start2 - ip) + ml2
- - MINMATCH;
+ ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = ml - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
@@ -484,7 +536,9 @@ _search3:
} else
ml = (int)(start2 - ip);
}
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
+ ref, limit, oend))
+ return 0;
ip = start2;
ref = ref2;
@@ -494,46 +548,222 @@ _search3:
ref2 = ref3;
ml2 = ml3;
- goto _search3;
+ goto _Search3;
}
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8) lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ {
+ int lastRun = (int)(iend - anchor);
+
+ if ((limit)
+ && (((char *)op - dest) + lastRun + 1
+ + ((lastRun + 255 - RUN_MASK)/255)
+ > (U32)maxOutputSize)) {
+ /* Check output limit */
+ return 0;
+ }
+ if (lastRun >= (int)RUN_MASK) {
+ *op++ = (RUN_MASK<<ML_BITS);
+ lastRun -= RUN_MASK;
+ for (; lastRun > 254 ; lastRun -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) lastRun;
+ } else
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ memcpy(op, anchor, iend - anchor);
+ op += iend - anchor;
+ }
+
/* End */
return (int) (((char *)op) - dest);
}
-int lz4hc_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem)
+static int LZ4_compress_HC_extStateHC(
+ void *state,
+ const char *src,
+ char *dst,
+ int srcSize,
+ int maxDstSize,
+ int compressionLevel)
{
- int ret = -1;
- int out_len = 0;
+ LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
- struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem;
- lz4hc_init(hc4, (const u8 *)src);
- out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src,
- (char *)dst, (int)src_len);
+ if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
+ /* Error : state is not aligned
+ * for pointers (32 or 64 bits)
+ */
+ return 0;
+ }
- if (out_len < 0)
- goto exit;
+ LZ4HC_init(ctx, (const BYTE *)src);
- *dst_len = out_len;
- return 0;
+ if (maxDstSize < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, noLimit);
+}
+
+int LZ4_compress_HC(const char *src, char *dst, int srcSize,
+ int maxDstSize, int compressionLevel, void *wrkmem)
+{
+ return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
+ srcSize, maxDstSize, compressionLevel);
+}
+EXPORT_SYMBOL(LZ4_compress_HC);
+
+/**************************************
+ * Streaming Functions
+ **************************************/
+void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+	LZ4_streamHCPtr->internal_donotuse.compressionLevel =
+		(unsigned int)compressionLevel;
+}
+
+int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *dictionary,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+ if (dictSize > 64 * KB) {
+ dictionary += dictSize - 64 * KB;
+ dictSize = 64 * KB;
+ }
+ LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
+ if (dictSize >= 4)
+ LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
+ ctxPtr->end = (const BYTE *)dictionary + dictSize;
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDictHC);
-exit:
- return ret;
+/* compression */
+
+static void LZ4HC_setExternalDict(
+ LZ4HC_CCtx_internal *ctxPtr,
+ const BYTE *newBlock)
+{
+ if (ctxPtr->end >= ctxPtr->base + 4) {
+ /* Referencing remaining dictionary content */
+ LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
+ }
+
+ /*
+ * Only one memory segment for extDict,
+ * so any previous extDict is lost at this stage
+ */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ /* match referencing will resume from there */
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit;
+}
+EXPORT_SYMBOL(LZ4HC_setExternalDict);
+
+static int LZ4_compressHC_continue_generic(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+	/* auto-init if forgotten */
+ if (ctxPtr->base == NULL)
+ LZ4HC_init(ctxPtr, (const BYTE *) source);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
+ - ctxPtr->dictLimit;
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr,
+ (const char *)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE *)source != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+ const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+
+ if ((sourceEnd > dictBegin)
+ && ((const BYTE *)source < dictEnd)) {
+ if (sourceEnd > dictEnd)
+ sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ }
+ }
+
+ return LZ4HC_compress_generic(ctxPtr, source, dest,
+ inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize)
+{
+ if (maxOutputSize < LZ4_compressBound(inputSize))
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, noLimit);
+}
+EXPORT_SYMBOL(LZ4_compress_HC_continue);
+
+/* dictionary saving */
+
+int LZ4_saveDictHC(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ char *safeBuffer,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end
+ - (streamPtr->base + streamPtr->dictLimit));
+
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ if (dictSize < 4)
+ dictSize = 0;
+ if (dictSize > prefixSize)
+ dictSize = prefixSize;
+
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+
+ {
+ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+
+ streamPtr->end = (const BYTE *)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->dictLimit = endIndex - dictSize;
+ streamPtr->lowLimit = endIndex - dictSize;
+
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
}
-EXPORT_SYMBOL(lz4hc_compress);
+EXPORT_SYMBOL(LZ4_saveDictHC);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4HC compressor");
+MODULE_DESCRIPTION("LZ4 HC compressor");
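
For orientation, a minimal sketch of how a kernel caller might drive the new interface exported above. It assumes the updated <linux/lz4.h> provides LZ4HC_MEM_COMPRESS (workspace size) and LZ4HC_DEFAULT_CLEVEL, as referenced by this series; example_hc_compress() is a made-up name and the error handling is deliberately minimal.

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int example_hc_compress(const char *src, int src_len,
			       char *dst, int dst_len)
{
	void *wrkmem;
	int out_len;

	wrkmem = vzalloc(LZ4HC_MEM_COMPRESS);
	if (!wrkmem)
		return -ENOMEM;

	/* returns the compressed size, or 0 if dst was too small */
	out_len = LZ4_compress_HC(src, dst, src_len, dst_len,
				  LZ4HC_DEFAULT_CLEVEL, wrkmem);

	vfree(wrkmem);
	return out_len ? out_len : -ENOSPC;
}
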
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1f8b112a7c35..4ba2828a67c0 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -427,7 +427,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
- dummy_propagate, dummy_copy, dummy_rotate
+ .propagate = dummy_propagate,
+ .copy = dummy_copy,
+ .rotate = dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
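
The rbtree hunk above only changes initializer style, but the motivation is worth a small illustration: designated initializers keep the callback-to-member association explicit even if the struct layout is ever reordered. The struct below is a stand-in, not the rbtree one.

#include <stdio.h>

struct ops {
	void (*propagate)(int);
	void (*copy)(int);
	void (*rotate)(int);
};

static void noop(int x)
{
	(void)x;
}

/* positional form: correctness silently depends on member order */
static const struct ops positional = { noop, noop, noop };

/* designated form, the style the hunk above switches lib/rbtree.c to */
static const struct ops designated = {
	.propagate	= noop,
	.copy		= noop,
	.rotate		= noop,
};

int main(void)
{
	positional.copy(0);
	designated.rotate(0);
	printf("both initializer styles produce the same object\n");
	return 0;
}
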
diff --git a/lib/sort.c b/lib/sort.c
index fc20df42aa6f..975c6ef6fec7 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -4,6 +4,8 @@
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
@@ -101,42 +103,3 @@ void sort(void *base, size_t num, size_t size,
}
EXPORT_SYMBOL(sort);
-
-#if 0
-#include <linux/slab.h>
-/* a simple boot-time regression test */
-
-int cmpint(const void *a, const void *b)
-{
- return *(int *)a - *(int *)b;
-}
-
-static int sort_test(void)
-{
- int *a, i, r = 1;
-
- a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
- BUG_ON(!a);
-
- printk("testing sort()\n");
-
- for (i = 0; i < 1000; i++) {
- r = (r * 725861) % 6599;
- a[i] = r;
- }
-
- sort(a, 1000, sizeof(int), cmpint, NULL);
-
- for (i = 0; i < 999; i++)
- if (a[i] > a[i+1]) {
- printk("sort() failed!\n");
- break;
- }
-
- kfree(a);
-
- return 0;
-}
-
-module_init(sort_test);
-#endif
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index fbdf87920093..0b1d3140fbb8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
@@ -331,6 +332,38 @@ static noinline void __init kmem_cache_oob(void)
kmem_cache_destroy(cache);
}
+static noinline void __init memcg_accounted_kmem_cache(void)
+{
+ int i;
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
+ if (!cache) {
+ pr_err("Cache allocation failed\n");
+ return;
+ }
+
+ pr_info("allocate memcg accounted object\n");
+ /*
+ * Several allocations with a delay to allow for lazy per memcg kmem
+ * cache creation.
+ */
+ for (i = 0; i < 5; i++) {
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ pr_err("Allocation failed\n");
+ goto free_cache;
+ }
+ kmem_cache_free(cache, p);
+ msleep(100);
+ }
+
+free_cache:
+ kmem_cache_destroy(cache);
+}
+
static char global_array[10];
static noinline void __init kasan_global_oob(void)
@@ -460,6 +493,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_uaf_memset();
kmalloc_uaf2();
kmem_cache_oob();
+ memcg_accounted_kmem_cache();
kasan_stack_oob();
kasan_global_oob();
ksize_unpoisons_memory();
diff --git a/lib/test_sort.c b/lib/test_sort.c
new file mode 100644
index 000000000000..4db3911db50a
--- /dev/null
+++ b/lib/test_sort.c
@@ -0,0 +1,44 @@
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+/*
+ * A simple boot-time regression test
+ * License: GPL
+ */
+
+#define TEST_LEN 1000
+
+static int __init cmpint(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int __init test_sort_init(void)
+{
+ int *a, i, r = 1, err = -ENOMEM;
+
+ a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return err;
+
+ for (i = 0; i < TEST_LEN; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
+
+ err = -EINVAL;
+ for (i = 0; i < TEST_LEN-1; i++)
+ if (a[i] > a[i+1]) {
+ pr_err("test has failed\n");
+ goto exit;
+ }
+ err = 0;
+ pr_info("test passed\n");
+exit:
+ kfree(a);
+ return err;
+}
+subsys_initcall(test_sort_init);
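
The same regression check can be tried outside the kernel. Below is a hedged userspace analogue of test_sort_init() using qsort(); it keeps the same toy PRNG and pass/fail rule, but holds the multiplier in unsigned arithmetic so the sketch stays well-defined C.

#include <stdio.h>
#include <stdlib.h>

#define TEST_LEN 1000

static int cmpint(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	int a[TEST_LEN], i;
	unsigned int r = 1;	/* unsigned keeps the toy PRNG well-defined */

	for (i = 0; i < TEST_LEN; i++) {
		r = (r * 725861u) % 6599u;
		a[i] = (int)r;
	}

	qsort(a, TEST_LEN, sizeof(a[0]), cmpint);

	for (i = 0; i < TEST_LEN - 1; i++)
		if (a[i] > a[i + 1]) {
			fprintf(stderr, "test has failed\n");
			return 1;
		}
	printf("test passed\n");
	return 0;
}
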
diff --git a/mm/Makefile b/mm/Makefile
index 433eaf9a876e..aa0aa17cb413 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,8 +23,10 @@ KCOV_INSTRUMENT_vmstat.o := n
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
- mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o pagewalk.o pgtable-generic.o
+ mlock.o mmap.o mprotect.o mremap.o msync.o \
+ page_vma_mapped.o pagewalk.o pgtable-generic.o \
+ rmap.o vmalloc.o
+
ifdef CONFIG_CROSS_MEMORY_ATTACH
mmu-$(CONFIG_MMU) += process_vm_access.o
diff --git a/mm/cma.c b/mm/cma.c
index 94b3460cd608..a6033e344430 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -348,6 +348,32 @@ err:
return ret;
}
+#ifdef CONFIG_CMA_DEBUG
+static void cma_debug_show_areas(struct cma *cma)
+{
+ unsigned long next_zero_bit, next_set_bit;
+ unsigned long start = 0;
+ unsigned int nr_zero, nr_total = 0;
+
+ mutex_lock(&cma->lock);
+ pr_info("number of available pages: ");
+ for (;;) {
+ next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
+ if (next_zero_bit >= cma->count)
+ break;
+ next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
+ nr_zero = next_set_bit - next_zero_bit;
+ pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
+ nr_total += nr_zero;
+ start = next_zero_bit + nr_zero;
+ }
+ pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
+ mutex_unlock(&cma->lock);
+}
+#else
+static inline void cma_debug_show_areas(struct cma *cma) { }
+#endif
+
/**
* cma_alloc() - allocate pages from contiguous area
* @cma: Contiguous memory region for which the allocation is performed.
@@ -357,14 +383,15 @@ err:
* This function allocates part of contiguous memory on specific
* contiguous memory area.
*/
-struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+ gfp_t gfp_mask)
{
unsigned long mask, offset;
unsigned long pfn = -1;
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
- int ret;
+ int ret = -ENOMEM;
if (!cma || !cma->count)
return NULL;
@@ -402,7 +429,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
+ gfp_mask);
mutex_unlock(&cma_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
@@ -421,6 +449,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
trace_cma_alloc(pfn, page, count, align);
+ if (ret) {
+ pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+ __func__, count, ret);
+ cma_debug_show_areas(cma);
+ }
+
pr_debug("%s(): returned %p\n", __func__, page);
return page;
}
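
A short sketch (not part of the patch) of what a cma_alloc() caller looks like after the interface change above, now that the gfp mask comes from the caller. The function and variable names are made up for illustration.

#include <linux/cma.h>
#include <linux/gfp.h>

static struct page *grab_cma_buffer(struct cma *area, size_t nr_pages)
{
	/* GFP_KERNEL permits the reclaim/compaction done inside
	 * alloc_contig_range(); atomic callers would have to pass
	 * something more restrictive instead. */
	return cma_alloc(area, nr_pages, 0 /* align order */, GFP_KERNEL);
}
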
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index f8e4b60db167..ffc0c3d0ae64 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -138,7 +138,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
if (!mem)
return -ENOMEM;
- p = cma_alloc(cma, count, 0);
+ p = cma_alloc(cma, count, 0, GFP_KERNEL);
if (!p) {
kfree(mem);
return -ENOMEM;
diff --git a/mm/compaction.c b/mm/compaction.c
index 0aa2757399ee..0fdfde016ee2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -802,7 +802,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
locked = false;
}
- if (isolate_movable_page(page, isolate_mode))
+ if (!isolate_movable_page(page, isolate_mode))
goto isolate_success;
}
diff --git a/mm/dmapool.c b/mm/dmapool.c
index abcbfe86c25a..cef82b8a9291 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -434,11 +434,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
dev_err(pool->dev,
- "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long)dma);
+ "dma_pool_free %s, %p (bad vaddr)/%pad\n",
+ pool->name, vaddr, &dma);
else
- pr_err("dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long)dma);
+ pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
+ pool->name, vaddr, &dma);
return;
}
{
@@ -450,11 +450,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
}
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
- pool->name, (unsigned long long)dma);
+ dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
+ pool->name, &dma);
else
- pr_err("dma_pool_free %s, dma %Lx already free\n",
- pool->name, (unsigned long long)dma);
+ pr_err("dma_pool_free %s, dma %pad already free\n",
+ pool->name, &dma);
return;
}
}
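
The dmapool hunks above switch to the %pad printk extension, which takes a pointer to a dma_addr_t so the value prints correctly whether dma_addr_t is 32 or 64 bits wide. A minimal sketch of the calling convention (report_dma() is a made-up helper):

#include <linux/device.h>
#include <linux/types.h>

static void report_dma(struct device *dev, void *vaddr, dma_addr_t dma)
{
	/* note &dma, not dma: %pad expects the address of the variable */
	dev_info(dev, "buffer %p mapped at %pad\n", vaddr, &dma);
}
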
diff --git a/mm/filemap.c b/mm/filemap.c
index 416d563468a3..1944c631e3e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1008,9 +1008,12 @@ void page_endio(struct page *page, bool is_write, int err)
unlock_page(page);
} else {
if (err) {
+ struct address_space *mapping;
+
SetPageError(page);
- if (page->mapping)
- mapping_set_error(page->mapping, err);
+ mapping = page_mapping(page);
+ if (mapping)
+ mapping_set_error(mapping, err);
}
end_page_writeback(page);
}
@@ -2169,7 +2172,6 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
/**
* filemap_fault - read in file data for page fault handling
- * @vma: vma in which the fault was taken
* @vmf: struct vm_fault containing details of the fault
*
* filemap_fault() is invoked via the vma operations vector for a
@@ -2191,10 +2193,10 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
*
* We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
*/
-int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_fault(struct vm_fault *vmf)
{
int error;
- struct file *file = vma->vm_file;
+ struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
@@ -2216,12 +2218,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* We found the page, so try async readahead before
* waiting for the lock.
*/
- do_async_mmap_readahead(vma, ra, file, page, offset);
+ do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
} else if (!page) {
/* No page in the page cache at all */
- do_sync_mmap_readahead(vma, ra, file, offset);
+ do_sync_mmap_readahead(vmf->vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
retry_find:
page = find_get_page(mapping, offset);
@@ -2229,7 +2231,7 @@ retry_find:
goto no_cached_page;
}
- if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+ if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
put_page(page);
return ret | VM_FAULT_RETRY;
}
@@ -2396,14 +2398,14 @@ next:
}
EXPORT_SYMBOL(filemap_map_pages);
-int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
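
A hedged sketch (not in the patch) of what a filesystem fault handler looks like after the prototype change above: the VMA is reached through vmf->vma instead of being passed as a separate argument. example_fault() and example_vm_ops are made-up names; filemap_fault() and filemap_page_mkwrite() are the helpers converted by this hunk.

#include <linux/mm.h>
#include <linux/fs.h>

static int example_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;	/* was vma->vm_file */

	if (!file)
		return VM_FAULT_SIGBUS;

	return filemap_fault(vmf);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= example_fault,
	.page_mkwrite	= filemap_page_mkwrite,
};
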
diff --git a/mm/gup.c b/mm/gup.c
index 40abe4c90383..94fab8fa432b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -253,6 +253,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (pud_devmap(*pud)) {
+ ptl = pud_lock(mm, pud);
+ page = follow_devmap_pud(vma, address, pud, flags);
+ spin_unlock(ptl);
+ if (page)
+ return page;
+ }
if (unlikely(pud_bad(*pud)))
return no_page_table(vma, flags);
@@ -265,8 +272,6 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
- if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
- return no_page_table(vma, flags);
if (pmd_devmap(*pmd)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -277,6 +282,9 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (likely(!pmd_trans_huge(*pmd)))
return follow_page_pte(vma, address, pmd, flags);
+ if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
+ return no_page_table(vma, flags);
+
ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f9ecc2aeadfc..71e3dede95b4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -757,6 +757,60 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_WRITE))
+ pud = pud_mkwrite(pud);
+ return pud;
+}
+
+static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pud_t entry;
+ spinlock_t *ptl;
+
+ ptl = pud_lock(mm, pud);
+ entry = pud_mkhuge(pfn_t_pud(pfn, prot));
+ if (pfn_t_devmap(pfn))
+ entry = pud_mkdevmap(entry);
+ if (write) {
+ entry = pud_mkyoung(pud_mkdirty(entry));
+ entry = maybe_pud_mkwrite(entry, vma);
+ }
+ set_pud_at(mm, addr, pud, entry);
+ update_mmu_cache_pud(vma, addr, pud);
+ spin_unlock(ptl);
+}
+
+int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, pfn_t pfn, bool write)
+{
+ pgprot_t pgprot = vma->vm_page_prot;
+ /*
+ * If we had pud_special, we could avoid all these restrictions,
+ * but we need to be consistent with PTEs and architectures that
+ * can't support a 'special' bit.
+ */
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+ (VM_PFNMAP|VM_MIXEDMAP));
+ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+ BUG_ON(!pfn_t_devmap(pfn));
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ track_pfn_insert(vma, &pgprot, pfn);
+
+ insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
+ return VM_FAULT_NOPAGE;
+}
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd)
{
@@ -887,6 +941,123 @@ out:
return ret;
}
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud)
+{
+ pud_t _pud;
+
+ /*
+ * We should set the dirty bit only for FOLL_WRITE but for now
+ * the dirty bit in the pud is meaningless. And if the dirty
+ * bit will become meaningful and we'll only set it with
+ * FOLL_WRITE, an atomic set_bit will be required on the pud to
+ * set the young bit, instead of the current set_pud_at.
+ */
+ _pud = pud_mkyoung(pud_mkdirty(*pud));
+ if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
+ pud, _pud, 1))
+ update_mmu_cache_pud(vma, addr, pud);
+}
+
+struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, int flags)
+{
+ unsigned long pfn = pud_pfn(*pud);
+ struct mm_struct *mm = vma->vm_mm;
+ struct dev_pagemap *pgmap;
+ struct page *page;
+
+ assert_spin_locked(pud_lockptr(mm, pud));
+
+ if (flags & FOLL_WRITE && !pud_write(*pud))
+ return NULL;
+
+ if (pud_present(*pud) && pud_devmap(*pud))
+ /* pass */;
+ else
+ return NULL;
+
+ if (flags & FOLL_TOUCH)
+ touch_pud(vma, addr, pud);
+
+ /*
+ * device mapped pages can only be returned if the
+ * caller will manage the page reference count.
+ */
+ if (!(flags & FOLL_GET))
+ return ERR_PTR(-EEXIST);
+
+ pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
+ pgmap = get_dev_pagemap(pfn, NULL);
+ if (!pgmap)
+ return ERR_PTR(-EFAULT);
+ page = pfn_to_page(pfn);
+ get_page(page);
+ put_dev_pagemap(pgmap);
+
+ return page;
+}
+
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma)
+{
+ spinlock_t *dst_ptl, *src_ptl;
+ pud_t pud;
+ int ret;
+
+ dst_ptl = pud_lock(dst_mm, dst_pud);
+ src_ptl = pud_lockptr(src_mm, src_pud);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+
+ ret = -EAGAIN;
+ pud = *src_pud;
+ if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+ goto out_unlock;
+
+ /*
+ * When page table lock is held, the huge zero pud should not be
+ * under splitting since we don't split the page itself, only pud to
+ * a page table.
+ */
+ if (is_huge_zero_pud(pud)) {
+ /* No huge zero pud yet */
+ }
+
+ pudp_set_wrprotect(src_mm, addr, src_pud);
+ pud = pud_mkold(pud_wrprotect(pud));
+ set_pud_at(dst_mm, addr, dst_pud, pud);
+
+ ret = 0;
+out_unlock:
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
+ return ret;
+}
+
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
+{
+ pud_t entry;
+ unsigned long haddr;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
+ if (unlikely(!pud_same(*vmf->pud, orig_pud)))
+ goto unlock;
+
+ entry = pud_mkyoung(orig_pud);
+ if (write)
+ entry = pud_mkdirty(entry);
+ haddr = vmf->address & HPAGE_PUD_MASK;
+ if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
+ update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
+
+unlock:
+ spin_unlock(vmf->ptl);
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
pmd_t entry;
@@ -1255,7 +1426,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
}
/* See similar comment in do_numa_page for explanation */
- if (!pmd_write(pmd))
+ if (!pmd_savedwrite(pmd))
flags |= TNF_NO_GROUP;
/*
@@ -1318,7 +1489,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
goto out;
clear_pmdnuma:
BUG_ON(!PageLocked(page));
- was_writable = pmd_write(pmd);
+ was_writable = pmd_savedwrite(pmd);
pmd = pmd_modify(pmd, vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (was_writable)
@@ -1335,7 +1506,7 @@ out:
if (page_nid != -1)
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
- vmf->flags);
+ flags);
return 0;
}
@@ -1573,7 +1744,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
if (preserve_write)
- entry = pmd_mkwrite(entry);
+ entry = pmd_mk_savedwrite(entry);
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
@@ -1601,6 +1772,84 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
return NULL;
}
+/*
+ * Returns the page table lock pointer if a given pud maps a thp,
+ * NULL otherwise.
+ *
+ * Note that if it returns the lock pointer, this routine does not unlock
+ * the page table lock. So callers must unlock it.
+ */
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
+{
+ spinlock_t *ptl;
+
+ ptl = pud_lock(vma->vm_mm, pud);
+ if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
+ return ptl;
+ spin_unlock(ptl);
+ return NULL;
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr)
+{
+ pud_t orig_pud;
+ spinlock_t *ptl;
+
+ ptl = __pud_trans_huge_lock(pud, vma);
+ if (!ptl)
+ return 0;
+ /*
+ * For architectures like ppc64 we look at deposited pgtable
+ * when calling pudp_huge_get_and_clear. So do the
+ * pgtable_trans_huge_withdraw after finishing pudp related
+ * operations.
+ */
+ orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
+ tlb->fullmm);
+ tlb_remove_pud_tlb_entry(tlb, pud, addr);
+ if (vma_is_dax(vma)) {
+ spin_unlock(ptl);
+ /* No zero page support yet */
+ } else {
+ /* No support for anonymous PUD pages yet */
+ BUG();
+ }
+ return 1;
+}
+
+static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long haddr)
+{
+ VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
+ VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
+ VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
+ VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
+
+	count_vm_event(THP_SPLIT_PUD);
+
+ pudp_huge_clear_flush_notify(vma, haddr, pud);
+}
+
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+ spinlock_t *ptl;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long haddr = address & HPAGE_PUD_MASK;
+
+ mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
+ ptl = pud_lock(mm, pud);
+ if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
+ goto out;
+ __split_huge_pud_locked(vma, pud, haddr);
+
+out:
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd)
{
@@ -1857,32 +2106,27 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void freeze_page(struct page *page)
{
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
- TTU_RMAP_LOCKED;
- int i, ret;
+ TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
+ int ret;
VM_BUG_ON_PAGE(!PageHead(page), page);
if (PageAnon(page))
ttu_flags |= TTU_MIGRATION;
- /* We only need TTU_SPLIT_HUGE_PMD once */
- ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
- for (i = 1; !ret && i < HPAGE_PMD_NR; i++) {
- /* Cut short if the page is unmapped */
- if (page_count(page) == 1)
- return;
-
- ret = try_to_unmap(page + i, ttu_flags);
- }
- VM_BUG_ON_PAGE(ret, page + i - 1);
+ ret = try_to_unmap(page, ttu_flags);
+ VM_BUG_ON_PAGE(ret, page);
}
static void unfreeze_page(struct page *page)
{
int i;
-
- for (i = 0; i < HPAGE_PMD_NR; i++)
- remove_migration_ptes(page + i, page + i, true);
+ if (PageTransHuge(page)) {
+ remove_migration_ptes(page, page, true);
+ } else {
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+ remove_migration_ptes(page + i, page + i, true);
+ }
}
static void __split_huge_page_tail(struct page *head, int tail,
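
Before moving on to hugetlb, a small sketch of the usage pattern the new __pud_trans_huge_lock() expects, mirroring zap_huge_pud() above: on success the pud page-table lock is held and must be dropped by the caller. This is an illustration only; example_touch_huge_pud() is a made-up name and the declaration is assumed to live in <linux/huge_mm.h> alongside __pmd_trans_huge_lock().

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/spinlock.h>

static bool example_touch_huge_pud(struct vm_area_struct *vma, pud_t *pud)
{
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return false;	/* not a huge/devmap pud, or it changed */

	/* ... operate on the pud while the page-table lock is held ... */

	spin_unlock(ptl);
	return true;
}
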
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 30e7709a5121..2e0e8159ce8e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1052,7 +1052,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn,
unsigned long nr_pages)
{
unsigned long end_pfn = start_pfn + nr_pages;
- return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+ GFP_KERNEL);
}
static bool pfn_range_valid_gigantic(struct zone *z,
@@ -3142,7 +3143,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
-static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int hugetlb_vm_op_fault(struct vm_fault *vmf)
{
BUG();
return 0;
diff --git a/mm/internal.h b/mm/internal.h
index 8ab72f4374e0..ccfc2a2969f4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -335,12 +335,15 @@ __vma_address(struct page *page, struct vm_area_struct *vma)
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
- unsigned long address = __vma_address(page, vma);
+ unsigned long start, end;
+
+ start = __vma_address(page, vma);
+ end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
/* page should be within @vma mapping range */
- VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+ VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
- return address;
+ return max(start, vma->vm_start);
}
#else /* !CONFIG_MMU */
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index b2a0cff2bb35..25f0e6521f36 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -435,7 +435,7 @@ void kasan_cache_shrink(struct kmem_cache *cache)
quarantine_remove_cache(cache);
}
-void kasan_cache_destroy(struct kmem_cache *cache)
+void kasan_cache_shutdown(struct kmem_cache *cache)
{
quarantine_remove_cache(cache);
}
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index dae929c02bbb..6f1ed1630873 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -274,6 +274,7 @@ static void per_cpu_remove_cache(void *arg)
qlist_free_all(&to_free, cache);
}
+/* Free all quarantined objects belonging to cache. */
void quarantine_remove_cache(struct kmem_cache *cache)
{
unsigned long flags, i;
diff --git a/mm/ksm.c b/mm/ksm.c
index 9ae6011a41f8..cf211c01ceac 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -223,6 +223,12 @@ static unsigned int ksm_thread_pages_to_scan = 100;
/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;
+/* Checksum of an empty (zeroed) page */
+static unsigned int zero_checksum __read_mostly;
+
+/* Whether to merge empty (zeroed) pages with actual zero pages */
+static bool ksm_use_zero_pages __read_mostly;
+
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
@@ -850,33 +856,36 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
pte_t *orig_pte)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long addr;
- pte_t *ptep;
- spinlock_t *ptl;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ };
int swapped;
int err = -EFAULT;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
- addr = page_address_in_vma(page, vma);
- if (addr == -EFAULT)
+ pvmw.address = page_address_in_vma(page, vma);
+ if (pvmw.address == -EFAULT)
goto out;
BUG_ON(PageTransCompound(page));
- mmun_start = addr;
- mmun_end = addr + PAGE_SIZE;
+ mmun_start = pvmw.address;
+ mmun_end = pvmw.address + PAGE_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
- ptep = page_check_address(page, mm, addr, &ptl, 0);
- if (!ptep)
+ if (!page_vma_mapped_walk(&pvmw))
goto out_mn;
+ if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
+ goto out_unlock;
- if (pte_write(*ptep) || pte_dirty(*ptep)) {
+ if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+ (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
pte_t entry;
swapped = PageSwapCache(page);
- flush_cache_page(vma, addr, page_to_pfn(page));
+ flush_cache_page(vma, pvmw.address, page_to_pfn(page));
/*
* Ok this is tricky, when get_user_pages_fast() run it doesn't
* take any lock, therefore the check that we are going to make
@@ -886,25 +895,29 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* this assure us that no O_DIRECT can happen after the check
* or in the middle of the check.
*/
- entry = ptep_clear_flush_notify(vma, addr, ptep);
+ entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
/*
* Check that no O_DIRECT or similar I/O is in progress on the
* page
*/
if (page_mapcount(page) + 1 + swapped != page_count(page)) {
- set_pte_at(mm, addr, ptep, entry);
+ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
}
if (pte_dirty(entry))
set_page_dirty(page);
- entry = pte_mkclean(pte_wrprotect(entry));
- set_pte_at_notify(mm, addr, ptep, entry);
+
+ if (pte_protnone(entry))
+ entry = pte_mkclean(pte_clear_savedwrite(entry));
+ else
+ entry = pte_mkclean(pte_wrprotect(entry));
+ set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
}
- *orig_pte = *ptep;
+ *orig_pte = *pvmw.pte;
err = 0;
out_unlock:
- pte_unmap_unlock(ptep, ptl);
+ page_vma_mapped_walk_done(&pvmw);
out_mn:
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
@@ -926,6 +939,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
pte_t *ptep;
+ pte_t newpte;
spinlock_t *ptl;
unsigned long addr;
int err = -EFAULT;
@@ -950,12 +964,22 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
goto out_mn;
}
- get_page(kpage);
- page_add_anon_rmap(kpage, vma, addr, false);
+ /*
+ * No need to check ksm_use_zero_pages here: we can only have a
+	 * zero_page here if ksm_use_zero_pages was enabled already.
+ */
+ if (!is_zero_pfn(page_to_pfn(kpage))) {
+ get_page(kpage);
+ page_add_anon_rmap(kpage, vma, addr, false);
+ newpte = mk_pte(kpage, vma->vm_page_prot);
+ } else {
+ newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
+ vma->vm_page_prot));
+ }
flush_cache_page(vma, addr, pte_pfn(*ptep));
ptep_clear_flush_notify(vma, addr, ptep);
- set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
+ set_pte_at_notify(mm, addr, ptep, newpte);
page_remove_rmap(page, false);
if (!page_mapped(page))
@@ -1467,6 +1491,23 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
return;
}
+ /*
+ * Same checksum as an empty page. We attempt to merge it with the
+ * appropriate zero page if the user enabled this via sysfs.
+ */
+ if (ksm_use_zero_pages && (checksum == zero_checksum)) {
+ struct vm_area_struct *vma;
+
+ vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
+ err = try_to_merge_one_page(vma, page,
+ ZERO_PAGE(rmap_item->address));
+ /*
+ * In case of failure, the page was not really empty, so we
+ * need to continue. Otherwise we're done.
+ */
+ if (!err)
+ return;
+ }
tree_rmap_item =
unstable_tree_search_insert(rmap_item, page, &tree_page);
if (tree_rmap_item) {
@@ -2233,6 +2274,28 @@ static ssize_t merge_across_nodes_store(struct kobject *kobj,
KSM_ATTR(merge_across_nodes);
#endif
+static ssize_t use_zero_pages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_use_zero_pages);
+}
+static ssize_t use_zero_pages_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ bool value;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_use_zero_pages = value;
+
+ return count;
+}
+KSM_ATTR(use_zero_pages);
+
static ssize_t pages_shared_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -2290,6 +2353,7 @@ static struct attribute *ksm_attrs[] = {
#ifdef CONFIG_NUMA
&merge_across_nodes_attr.attr,
#endif
+ &use_zero_pages_attr.attr,
NULL,
};
@@ -2304,6 +2368,11 @@ static int __init ksm_init(void)
struct task_struct *ksm_thread;
int err;
+ /* The correct value depends on page size and endianness */
+ zero_checksum = calc_checksum(ZERO_PAGE(0));
+ /* Default to false for backwards compatibility */
+ ksm_use_zero_pages = false;
+
err = ksm_slab_init();
if (err)
goto out;
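
The new use_zero_pages knob is toggled through KSM's sysfs directory. As a rough userspace sketch (assuming the attribute appears at the usual /sys/kernel/mm/ksm/use_zero_pages location, which this diff does not spell out):

#include <fcntl.h>
#include <unistd.h>

/* Enable KSM zero-page deduplication; the sysfs path is assumed, see note above. */
int enable_ksm_zero_pages(void)
{
        int fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_WRONLY);

        if (fd < 0)
                return -1;
        /* The store handler parses the value with kstrtobool(), so "1"/"0" work. */
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}
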
diff --git a/mm/madvise.c b/mm/madvise.c
index b530a4986035..dc5927c812d3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -21,6 +21,7 @@
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>
@@ -92,14 +93,28 @@ static long madvise_behavior(struct vm_area_struct *vma,
case MADV_MERGEABLE:
case MADV_UNMERGEABLE:
error = ksm_madvise(vma, start, end, behavior, &new_flags);
- if (error)
+ if (error) {
+ /*
+ * madvise() returns EAGAIN if kernel resources, such as
+ * slab, are temporarily unavailable.
+ */
+ if (error == -ENOMEM)
+ error = -EAGAIN;
goto out;
+ }
break;
case MADV_HUGEPAGE:
case MADV_NOHUGEPAGE:
error = hugepage_madvise(vma, &new_flags, behavior);
- if (error)
+ if (error) {
+ /*
+ * madvise() returns EAGAIN if kernel resources, such as
+ * slab, are temporarily unavailable.
+ */
+ if (error == -ENOMEM)
+ error = -EAGAIN;
goto out;
+ }
break;
}
@@ -120,15 +135,37 @@ static long madvise_behavior(struct vm_area_struct *vma,
*prev = vma;
if (start != vma->vm_start) {
- error = split_vma(mm, vma, start, 1);
- if (error)
+ if (unlikely(mm->map_count >= sysctl_max_map_count)) {
+ error = -ENOMEM;
goto out;
+ }
+ error = __split_vma(mm, vma, start, 1);
+ if (error) {
+ /*
+ * madvise() returns EAGAIN if kernel resources, such as
+ * slab, are temporarily unavailable.
+ */
+ if (error == -ENOMEM)
+ error = -EAGAIN;
+ goto out;
+ }
}
if (end != vma->vm_end) {
- error = split_vma(mm, vma, end, 0);
- if (error)
+ if (unlikely(mm->map_count >= sysctl_max_map_count)) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = __split_vma(mm, vma, end, 0);
+ if (error) {
+ /*
+ * madvise() returns EAGAIN if kernel resources, such as
+ * slab, are temporarily unavailable.
+ */
+ if (error == -ENOMEM)
+ error = -EAGAIN;
goto out;
+ }
}
success:
@@ -136,10 +173,7 @@ success:
* vm_flags is protected by the mmap_sem held in write mode.
*/
vma->vm_flags = new_flags;
-
out:
- if (error == -ENOMEM)
- error = -EAGAIN;
return error;
}
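
Since madvise_behavior() now reports transient failures from these helpers as EAGAIN rather than ENOMEM, callers are expected to retry. A minimal userspace sketch of such a retry loop (illustrative only; the wrapper name is made up):

#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>

/* Retry an madvise() request that fails transiently with EAGAIN. */
static int madvise_retry(void *addr, size_t len, int advice)
{
        int ret;

        do {
                ret = madvise(addr, len, advice);
        } while (ret < 0 && errno == EAGAIN);

        return ret;
}
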
@@ -479,7 +513,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
if (!can_madv_dontneed_vma(vma))
return -EINVAL;
- madvise_userfault_dontneed(vma, prev, start, end);
+ userfaultfd_remove(vma, prev, start, end);
zap_page_range(vma, start, end - start);
return 0;
}
@@ -520,6 +554,7 @@ static long madvise_remove(struct vm_area_struct *vma,
* mmap_sem.
*/
get_file(f);
+ userfaultfd_remove(vma, prev, start, end);
up_read(&current->mm->mmap_sem);
error = vfs_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
diff --git a/mm/memblock.c b/mm/memblock.c
index c004f52be419..b64b47803e52 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -35,15 +35,18 @@ struct memblock memblock __initdata_memblock = {
.memory.regions = memblock_memory_init_regions,
.memory.cnt = 1, /* empty dummy entry */
.memory.max = INIT_MEMBLOCK_REGIONS,
+ .memory.name = "memory",
.reserved.regions = memblock_reserved_init_regions,
.reserved.cnt = 1, /* empty dummy entry */
.reserved.max = INIT_MEMBLOCK_REGIONS,
+ .reserved.name = "reserved",
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
.physmem.regions = memblock_physmem_init_regions,
.physmem.cnt = 1, /* empty dummy entry */
.physmem.max = INIT_PHYSMEM_REGIONS,
+ .physmem.name = "physmem",
#endif
.bottom_up = false,
@@ -64,18 +67,6 @@ ulong __init_memblock choose_memblock_flags(void)
return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
-/* inline so we don't get a warning when pr_debug is compiled out */
-static __init_memblock const char *
-memblock_type_name(struct memblock_type *type)
-{
- if (type == &memblock.memory)
- return "memory";
- else if (type == &memblock.reserved)
- return "reserved";
- else
- return "unknown";
-}
-
/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
@@ -402,12 +393,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
}
if (!addr) {
pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
- memblock_type_name(type), type->max, type->max * 2);
+ type->name, type->max, type->max * 2);
return -1;
}
memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
- memblock_type_name(type), type->max * 2, (u64)addr,
+ type->name, type->max * 2, (u64)addr,
(u64)addr + new_size - 1);
/*
@@ -1693,14 +1684,14 @@ phys_addr_t __init_memblock memblock_get_current_limit(void)
return memblock.current_limit;
}
-static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
+static void __init_memblock memblock_dump(struct memblock_type *type)
{
phys_addr_t base, end, size;
unsigned long flags;
int idx;
struct memblock_region *rgn;
- pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);
+ pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
for_each_memblock_type(type, rgn) {
char nid_buf[32] = "";
@@ -1715,7 +1706,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
memblock_get_region_node(rgn));
#endif
pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
- name, idx, &base, &end, &size, nid_buf, flags);
+ type->name, idx, &base, &end, &size, nid_buf, flags);
}
}
@@ -1726,8 +1717,11 @@ void __init_memblock __memblock_dump_all(void)
&memblock.memory.total_size,
&memblock.reserved.total_size);
- memblock_dump(&memblock.memory, "memory");
- memblock_dump(&memblock.reserved, "reserved");
+ memblock_dump(&memblock.memory);
+ memblock_dump(&memblock.reserved);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+ memblock_dump(&memblock.physmem);
+#endif
}
void __init memblock_allow_resize(void)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1fd6affcdde7..45867e439d31 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -35,6 +35,7 @@
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
+#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index f283c7e0a2a3..3d0f2fd4bf73 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1527,7 +1527,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
{
int ret = __get_any_page(page, pfn, flags);
- if (ret == 1 && !PageHuge(page) && !PageLRU(page)) {
+ if (ret == 1 && !PageHuge(page) &&
+ !PageLRU(page) && !__PageMovable(page)) {
/*
* Try to free it.
*/
@@ -1649,7 +1650,10 @@ static int __soft_offline_page(struct page *page, int flags)
* Try to migrate to a new page instead. migrate.c
* handles a large number of cases for us.
*/
- ret = isolate_lru_page(page);
+ if (PageLRU(page))
+ ret = isolate_lru_page(page);
+ else
+ ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
/*
* Drop the page reference that came from get_any_page();
* a successful isolate_lru_page() already took another one.
@@ -1657,18 +1661,20 @@ static int __soft_offline_page(struct page *page, int flags)
put_hwpoison_page(page);
if (!ret) {
LIST_HEAD(pagelist);
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
+ /*
+ * After an lru page has been isolated, PageLRU is cleared,
+ * so test !__PageMovable instead: an lru page's mapping
+ * cannot have PAGE_MAPPING_MOVABLE set.
+ */
+ if (!__PageMovable(page))
+ inc_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
- if (!list_empty(&pagelist)) {
- list_del(&page->lru);
- dec_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- putback_lru_page(page);
- }
+ if (!list_empty(&pagelist))
+ putback_movable_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
diff --git a/mm/memory.c b/mm/memory.c
index 7663068a33c6..14fc0b40f0bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -30,7 +30,7 @@
/*
* 05.04.94 - Multi-page memory management added for v1.1.
- * Idea by Alex Bligh (alex@cconcepts.co.uk)
+ * Idea by Alex Bligh (alex@cconcepts.co.uk)
*
* 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
* (Gerhard.Wichert@pdb.siemens.de)
@@ -82,9 +82,9 @@
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
-struct page *mem_map;
-
EXPORT_SYMBOL(max_mapnr);
+
+struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif
@@ -95,8 +95,7 @@ EXPORT_SYMBOL(mem_map);
* highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
* and ZONE_HIGHMEM.
*/
-void * high_memory;
-
+void *high_memory;
EXPORT_SYMBOL(high_memory);
/*
@@ -120,10 +119,10 @@ static int __init disable_randmaps(char *s)
__setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
-
EXPORT_SYMBOL(zero_pfn);
+unsigned long highest_memmap_pfn __read_mostly;
+
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
@@ -556,7 +555,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (is_vm_hugetlb_page(vma)) {
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
- floor, next? next->vm_start: ceiling);
+ floor, next ? next->vm_start : ceiling);
} else {
/*
* Optimization: gather nearby vmas into one call down
@@ -569,7 +568,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
unlink_file_vma(vma);
}
free_pgd_range(tlb, addr, vma->vm_end,
- floor, next? next->vm_start: ceiling);
+ floor, next ? next->vm_start : ceiling);
}
vma = next;
}
@@ -1001,7 +1000,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
int err;
- VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
+ VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
err = copy_huge_pmd(dst_mm, src_mm,
dst_pmd, src_pmd, addr, vma);
if (err == -ENOMEM)
@@ -1032,6 +1031,18 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
src_pud = pud_offset(src_pgd, addr);
do {
next = pud_addr_end(addr, end);
+ if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+ int err;
+
+ VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
+ err = copy_huge_pud(dst_mm, src_mm,
+ dst_pud, src_pud, addr, vma);
+ if (err == -ENOMEM)
+ return -ENOMEM;
+ if (!err)
+ continue;
+ /* fall through */
+ }
if (pud_none_or_clear_bad(src_pud))
continue;
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
@@ -1129,9 +1140,8 @@ again:
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
- if (pte_none(ptent)) {
+ if (pte_none(ptent))
continue;
- }
if (pte_present(ptent)) {
struct page *page;
@@ -1263,9 +1273,19 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
+ if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
+ if (next - addr != HPAGE_PUD_SIZE) {
+ VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+ split_huge_pud(vma, pud, addr);
+ } else if (zap_huge_pud(tlb, vma, pud, addr))
+ goto next;
+ /* fall through */
+ }
if (pud_none_or_clear_bad(pud))
continue;
next = zap_pmd_range(tlb, vma, pud, addr, next, details);
+next:
+ cond_resched();
} while (pud++, addr = next, addr != end);
return addr;
@@ -1441,10 +1461,10 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
- pgd_t * pgd = pgd_offset(mm, addr);
- pud_t * pud = pud_alloc(mm, pgd, addr);
+ pgd_t *pgd = pgd_offset(mm, addr);
+ pud_t *pud = pud_alloc(mm, pgd, addr);
if (pud) {
- pmd_t * pmd = pmd_alloc(mm, pud, addr);
+ pmd_t *pmd = pmd_alloc(mm, pud, addr);
if (pmd) {
VM_BUG_ON(pmd_trans_huge(*pmd));
return pte_alloc_map_lock(mm, pmd, addr, ptl);
@@ -2035,7 +2055,7 @@ static int do_page_mkwrite(struct vm_fault *vmf)
vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
- ret = vmf->vma->vm_ops->page_mkwrite(vmf->vma, vmf);
+ ret = vmf->vma->vm_ops->page_mkwrite(vmf);
/* Restore original flags so that caller is not surprised */
vmf->flags = old_flags;
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
@@ -2307,7 +2327,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf->flags |= FAULT_FLAG_MKWRITE;
- ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
+ ret = vma->vm_ops->pfn_mkwrite(vmf);
if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
return ret;
return finish_mkwrite_fault(vmf);
@@ -2503,7 +2523,7 @@ void unmap_mapping_range(struct address_space *mapping,
hlen = ULONG_MAX - hba + 1;
}
- details.check_mapping = even_cows? NULL: mapping;
+ details.check_mapping = even_cows ? NULL : mapping;
details.first_index = hba;
details.last_index = hba + hlen - 1;
if (details.last_index < details.first_index)
@@ -2861,7 +2881,7 @@ static int __do_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
int ret;
- ret = vma->vm_ops->fault(vma, vmf);
+ ret = vma->vm_ops->fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
VM_FAULT_DONE_COW)))
return ret;
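
The ->fault(), ->page_mkwrite() and ->pfn_mkwrite() callbacks now receive only the struct vm_fault; the VMA is reached through vmf->vma. A minimal sketch of a driver fault handler under the new prototype (my_fault and my_lookup_page are hypothetical names, not from this patch):

#include <linux/mm.h>

/* Sketch of a driver .fault handler with the vmf-only prototype. */
static int my_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;  /* VMA now comes from vmf */
        struct page *page;

        page = my_lookup_page(vma->vm_private_data, vmf->pgoff); /* hypothetical */
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;       /* hand the referenced page back to the fault core */
        return 0;
}
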
@@ -2898,7 +2918,7 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
atomic_long_inc(&vma->vm_mm->nr_ptes);
pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
spin_unlock(vmf->ptl);
- vmf->prealloc_pte = 0;
+ vmf->prealloc_pte = NULL;
} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
return VM_FAULT_OOM;
}
@@ -2946,7 +2966,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
* count that as nr_ptes.
*/
atomic_long_inc(&vma->vm_mm->nr_ptes);
- vmf->prealloc_pte = 0;
+ vmf->prealloc_pte = NULL;
}
static int do_set_pmd(struct vm_fault *vmf, struct page *page)
@@ -3352,7 +3372,7 @@ static int do_fault(struct vm_fault *vmf)
/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
pte_free(vma->vm_mm, vmf->prealloc_pte);
- vmf->prealloc_pte = 0;
+ vmf->prealloc_pte = NULL;
}
return ret;
}
@@ -3380,32 +3400,32 @@ static int do_numa_page(struct vm_fault *vmf)
int last_cpupid;
int target_nid;
bool migrated = false;
- pte_t pte = vmf->orig_pte;
- bool was_writable = pte_write(pte);
+ pte_t pte;
+ bool was_writable = pte_savedwrite(vmf->orig_pte);
int flags = 0;
/*
- * The "pte" at this point cannot be used safely without
- * validation through pte_unmap_same(). It's of NUMA type but
- * the pfn may be screwed if the read is non atomic.
- *
- * We can safely just do a "set_pte_at()", because the old
- * page table entry is not accessible, so there would be no
- * concurrent hardware modifications to the PTE.
- */
+ * The "pte" at this point cannot be used safely without
+ * validation through pte_unmap_same(). It's of NUMA type but
+ * the pfn may be screwed if the read is non atomic.
+ */
vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
spin_lock(vmf->ptl);
- if (unlikely(!pte_same(*vmf->pte, pte))) {
+ if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
}
- /* Make it present again */
+ /*
+ * Make it present again. Depending on how the arch implements
+ * non-accessible ptes, some may allow access from kernel mode.
+ */
+ pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
pte = pte_modify(pte, vma->vm_page_prot);
pte = pte_mkyoung(pte);
if (was_writable)
pte = pte_mkwrite(pte);
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+ ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
update_mmu_cache(vma, vmf->address, vmf->pte);
page = vm_normal_page(vma, vmf->address, pte);
@@ -3466,8 +3486,8 @@ static int create_huge_pmd(struct vm_fault *vmf)
{
if (vma_is_anonymous(vmf->vma))
return do_huge_pmd_anonymous_page(vmf);
- if (vmf->vma->vm_ops->pmd_fault)
- return vmf->vma->vm_ops->pmd_fault(vmf);
+ if (vmf->vma->vm_ops->huge_fault)
+ return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
return VM_FAULT_FALLBACK;
}
@@ -3475,8 +3495,8 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
{
if (vma_is_anonymous(vmf->vma))
return do_huge_pmd_wp_page(vmf, orig_pmd);
- if (vmf->vma->vm_ops->pmd_fault)
- return vmf->vma->vm_ops->pmd_fault(vmf);
+ if (vmf->vma->vm_ops->huge_fault)
+ return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
/* COW handled on pte level: split pmd */
VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
@@ -3490,6 +3510,30 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
}
+static int create_huge_pud(struct vm_fault *vmf)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* No support for anonymous transparent PUD pages yet */
+ if (vma_is_anonymous(vmf->vma))
+ return VM_FAULT_FALLBACK;
+ if (vmf->vma->vm_ops->huge_fault)
+ return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* No support for anonymous transparent PUD pages yet */
+ if (vma_is_anonymous(vmf->vma))
+ return VM_FAULT_FALLBACK;
+ if (vmf->vma->vm_ops->huge_fault)
+ return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ return VM_FAULT_FALLBACK;
+}
+
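
With ->huge_fault() taking an explicit enum page_entry_size, one callback can now serve both PMD- and PUD-sized faults. A hedged sketch of how a driver might dispatch on the entry size (my_insert_pmd and my_insert_pud are hypothetical helpers):

#include <linux/mm.h>

/* Sketch only: dispatch a huge fault by entry size, falling back to PTEs. */
static int my_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
{
        switch (pe_size) {
        case PE_SIZE_PMD:
                return my_insert_pmd(vmf);      /* hypothetical helper */
        case PE_SIZE_PUD:
                return my_insert_pud(vmf);      /* hypothetical helper */
        default:
                return VM_FAULT_FALLBACK;       /* let the core retry at PTE level */
        }
}
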
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
@@ -3605,22 +3649,46 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
};
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
- pud_t *pud;
+ int ret;
pgd = pgd_offset(mm, address);
- pud = pud_alloc(mm, pgd, address);
- if (!pud)
+
+ vmf.pud = pud_alloc(mm, pgd, address);
+ if (!vmf.pud)
return VM_FAULT_OOM;
- vmf.pmd = pmd_alloc(mm, pud, address);
+ if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
+ ret = create_huge_pud(&vmf);
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
+ } else {
+ pud_t orig_pud = *vmf.pud;
+
+ barrier();
+ if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
+ unsigned int dirty = flags & FAULT_FLAG_WRITE;
+
+ /* NUMA case for anonymous PUDs would go here */
+
+ if (dirty && !pud_write(orig_pud)) {
+ ret = wp_huge_pud(&vmf, orig_pud);
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
+ } else {
+ huge_pud_set_accessed(&vmf, orig_pud);
+ return 0;
+ }
+ }
+ }
+
+ vmf.pmd = pmd_alloc(mm, vmf.pud, address);
if (!vmf.pmd)
return VM_FAULT_OOM;
if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
- int ret = create_huge_pmd(&vmf);
+ ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pmd_t orig_pmd = *vmf.pmd;
- int ret;
barrier();
if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
@@ -3680,14 +3748,14 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
if (flags & FAULT_FLAG_USER) {
mem_cgroup_oom_disable();
- /*
- * The task may have entered a memcg OOM situation but
- * if the allocation error was handled gracefully (no
- * VM_FAULT_OOM), there is no need to kill anything.
- * Just clean up the OOM state peacefully.
- */
- if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
- mem_cgroup_oom_synchronize(false);
+ /*
+ * The task may have entered a memcg OOM situation but
+ * if the allocation error was handled gracefully (no
+ * VM_FAULT_OOM), there is no need to kill anything.
+ * Just clean up the OOM state peacefully.
+ */
+ if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+ mem_cgroup_oom_synchronize(false);
}
/*
@@ -3737,13 +3805,14 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
*/
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
+ spinlock_t *ptl;
pmd_t *new = pmd_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
- spin_lock(&mm->page_table_lock);
+ ptl = pud_lock(mm, pud);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (!pud_present(*pud)) {
mm_inc_nr_pmds(mm);
@@ -3757,7 +3826,7 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
} else /* Another has populated it */
pmd_free(mm, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(ptl);
return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index d67787d10ff0..1d3ed58f92ab 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -126,6 +126,8 @@ void put_online_mems(void)
void mem_hotplug_begin(void)
{
+ assert_held_device_hotplug();
+
mem_hotplug.active_writer = current;
memhp_lock_acquire();
@@ -862,7 +864,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
return ret;
}
-EXPORT_SYMBOL_GPL(__remove_pages);
#endif /* CONFIG_MEMORY_HOTREMOVE */
int set_online_page_callback(online_page_callback_t callback)
@@ -1336,7 +1337,7 @@ int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
static int online_memory_block(struct memory_block *mem, void *arg)
{
- return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
+ return device_online(&mem->dev);
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
@@ -1508,7 +1509,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
while ((i < MAX_ORDER_NR_PAGES) &&
!pfn_valid_within(pfn + i))
i++;
- if (i == MAX_ORDER_NR_PAGES)
+ if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
continue;
page = pfn_to_page(pfn + i);
if (zone && page_zone(page) != zone)
@@ -1522,7 +1523,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
if (zone) {
*valid_start = start;
- *valid_end = end;
+ *valid_end = min(end, end_pfn);
return 1;
} else {
return 0;
@@ -1530,10 +1531,10 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
}
/*
- * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
- * and hugepages). We scan pfn because it's much easier than scanning over
- * linked list. This function returns the pfn of the first found movable
- * page if it's found, otherwise 0.
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
+ * non-lru movable pages and hugepages). We scan pfn because it's much
+ * easier than scanning a linked list. This function returns the pfn
+ * of the first found movable page if it's found, otherwise 0.
*/
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
@@ -1544,6 +1545,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
page = pfn_to_page(pfn);
if (PageLRU(page))
return pfn;
+ if (__PageMovable(page))
+ return pfn;
if (PageHuge(page)) {
if (page_huge_active(page))
return pfn;
@@ -1620,21 +1623,25 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (!get_page_unless_zero(page))
continue;
/*
- * We can skip free pages. And we can only deal with pages on
- * LRU.
+ * We can skip free pages. And we can handle pages on the
+ * LRU as well as non-lru movable pages.
*/
- ret = isolate_lru_page(page);
+ if (PageLRU(page))
+ ret = isolate_lru_page(page);
+ else
+ ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
if (!ret) { /* Success */
put_page(page);
list_add_tail(&page->lru, &source);
move_pages--;
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
+ if (!__PageMovable(page))
+ inc_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
} else {
#ifdef CONFIG_DEBUG_VM
- pr_alert("removing pfn %lx from LRU failed\n", pfn);
- dump_page(page, "failed to remove from LRU");
+ pr_alert("failed to isolate pfn %lx\n", pfn);
+ dump_page(page, "isolation failed");
#endif
put_page(page);
/* Because we don't have big zone->lock. we should
diff --git a/mm/migrate.c b/mm/migrate.c
index 87f4d0f81819..2c63ac06791b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -74,7 +74,7 @@ int migrate_prep_local(void)
return 0;
}
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
struct address_space *mapping;
@@ -125,14 +125,14 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
__SetPageIsolated(page);
unlock_page(page);
- return true;
+ return 0;
out_no_isolated:
unlock_page(page);
out_putpage:
put_page(page);
out:
- return false;
+ return -EBUSY;
}
/* It should be called on page which is PG_movable */
@@ -193,82 +193,62 @@ void putback_movable_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
-static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
{
- struct mm_struct *mm = vma->vm_mm;
+ struct page_vma_mapped_walk pvmw = {
+ .page = old,
+ .vma = vma,
+ .address = addr,
+ .flags = PVMW_SYNC | PVMW_MIGRATION,
+ };
+ struct page *new;
+ pte_t pte;
swp_entry_t entry;
- pmd_t *pmd;
- pte_t *ptep, pte;
- spinlock_t *ptl;
- if (unlikely(PageHuge(new))) {
- ptep = huge_pte_offset(mm, addr);
- if (!ptep)
- goto out;
- ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
- } else {
- pmd = mm_find_pmd(mm, addr);
- if (!pmd)
- goto out;
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ while (page_vma_mapped_walk(&pvmw)) {
+ new = page - pvmw.page->index +
+ linear_page_index(vma, pvmw.address);
- ptep = pte_offset_map(pmd, addr);
+ get_page(new);
+ pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+ if (pte_swp_soft_dirty(*pvmw.pte))
+ pte = pte_mksoft_dirty(pte);
/*
- * Peek to check is_swap_pte() before taking ptlock? No, we
- * can race mremap's move_ptes(), which skips anon_vma lock.
+ * Recheck VMA as permissions can change since migration started
*/
-
- ptl = pte_lockptr(mm, pmd);
- }
-
- spin_lock(ptl);
- pte = *ptep;
- if (!is_swap_pte(pte))
- goto unlock;
-
- entry = pte_to_swp_entry(pte);
-
- if (!is_migration_entry(entry) ||
- migration_entry_to_page(entry) != old)
- goto unlock;
-
- get_page(new);
- pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
- if (pte_swp_soft_dirty(*ptep))
- pte = pte_mksoft_dirty(pte);
-
- /* Recheck VMA as permissions can change since migration started */
- if (is_write_migration_entry(entry))
- pte = maybe_mkwrite(pte, vma);
+ entry = pte_to_swp_entry(*pvmw.pte);
+ if (is_write_migration_entry(entry))
+ pte = maybe_mkwrite(pte, vma);
#ifdef CONFIG_HUGETLB_PAGE
- if (PageHuge(new)) {
- pte = pte_mkhuge(pte);
- pte = arch_make_huge_pte(pte, vma, new, 0);
- }
+ if (PageHuge(new)) {
+ pte = pte_mkhuge(pte);
+ pte = arch_make_huge_pte(pte, vma, new, 0);
+ }
#endif
- flush_dcache_page(new);
- set_pte_at(mm, addr, ptep, pte);
+ flush_dcache_page(new);
+ set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
- if (PageHuge(new)) {
- if (PageAnon(new))
- hugepage_add_anon_rmap(new, vma, addr);
+ if (PageHuge(new)) {
+ if (PageAnon(new))
+ hugepage_add_anon_rmap(new, vma, pvmw.address);
+ else
+ page_dup_rmap(new, true);
+ } else if (PageAnon(new))
+ page_add_anon_rmap(new, vma, pvmw.address, false);
else
- page_dup_rmap(new, true);
- } else if (PageAnon(new))
- page_add_anon_rmap(new, vma, addr, false);
- else
- page_add_file_rmap(new, false);
+ page_add_file_rmap(new, false);
- if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
- mlock_vma_page(new);
+ if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
+ mlock_vma_page(new);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, pvmw.address, pvmw.pte);
+ }
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, addr, ptep);
-unlock:
- pte_unmap_unlock(ptep, ptl);
-out:
return SWAP_AGAIN;
}
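
remove_migration_pte() now leans on the page_vma_mapped_walk() iterator instead of open-coding the pte/pmd lookup. For reference, the general shape of such a walker looks roughly like this (my_rmap_one and my_should_stop are hypothetical; the return codes follow the rmap_walk conventions used above):

#include <linux/rmap.h>

/* Sketch of an rmap callback built on page_vma_mapped_walk(). */
static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
                       unsigned long address, void *arg)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                /* pvmw.pte (or pvmw.pmd) is mapped and its lock is held here */
                if (my_should_stop(pvmw.pte)) {                 /* hypothetical predicate */
                        page_vma_mapped_walk_done(&pvmw);       /* drops the lock */
                        return SWAP_FAIL;
                }
        }

        return SWAP_AGAIN;
}
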
diff --git a/mm/mincore.c b/mm/mincore.c
index ddb872da3f5b..c5687c45c326 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,6 +14,7 @@
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
diff --git a/mm/mmap.c b/mm/mmap.c
index b729084eea90..499b988b1639 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -176,7 +176,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
return next;
}
-static int do_brk(unsigned long addr, unsigned long len);
+static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
@@ -185,6 +185,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
struct mm_struct *mm = current->mm;
unsigned long min_brk;
bool populate;
+ LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
@@ -222,7 +223,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
- if (!do_munmap(mm, newbrk, oldbrk-newbrk))
+ if (!do_munmap(mm, newbrk, oldbrk-newbrk, &uf))
goto set_brk;
goto out;
}
@@ -232,13 +233,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out;
/* Ok, looks good - let it rip. */
- if (do_brk(oldbrk, newbrk-oldbrk) < 0)
+ if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
goto out;
set_brk:
mm->brk = brk;
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
up_write(&mm->mmap_sem);
+ userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
@@ -1304,7 +1306,8 @@ static inline int mlock_future_check(struct mm_struct *mm,
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, vm_flags_t vm_flags,
- unsigned long pgoff, unsigned long *populate)
+ unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf)
{
struct mm_struct *mm = current->mm;
int pkey = 0;
@@ -1447,7 +1450,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
- addr = mmap_region(file, addr, len, vm_flags, pgoff);
+ addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
(flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
@@ -1583,7 +1586,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
}
unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
+ unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+ struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -1609,7 +1613,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
&rb_parent)) {
- if (do_munmap(mm, addr, len))
+ if (do_munmap(mm, addr, len, uf))
return -ENOMEM;
}
@@ -2495,11 +2499,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
- * munmap path where it doesn't make sense to fail.
+ * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
+ * has already been checked or doesn't make sense to fail.
*/
-static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
+int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
{
struct vm_area_struct *new;
int err;
@@ -2579,7 +2583,8 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+ struct list_head *uf)
{
unsigned long end;
struct vm_area_struct *vma, *prev, *last;
@@ -2603,6 +2608,13 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
if (vma->vm_start >= end)
return 0;
+ if (uf) {
+ int error = userfaultfd_unmap_prep(vma, start, end, uf);
+
+ if (error)
+ return error;
+ }
+
/*
* If we need to split any vma, do it now to save pain later.
*
@@ -2668,27 +2680,22 @@ int vm_munmap(unsigned long start, size_t len)
{
int ret;
struct mm_struct *mm = current->mm;
+ LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
- ret = do_munmap(mm, start, len);
+ ret = do_munmap(mm, start, len, &uf);
up_write(&mm->mmap_sem);
+ userfaultfd_unmap_complete(mm, &uf);
return ret;
}
EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
- int ret;
- struct mm_struct *mm = current->mm;
-
profile_munmap(addr);
- if (down_write_killable(&mm->mmap_sem))
- return -EINTR;
- ret = do_munmap(mm, addr, len);
- up_write(&mm->mmap_sem);
- return ret;
+ return vm_munmap(addr, len);
}
@@ -2780,7 +2787,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
file = get_file(vma->vm_file);
ret = do_mmap_pgoff(vma->vm_file, start, size,
- prot, flags, pgoff, &populate);
+ prot, flags, pgoff, &populate, NULL);
fput(file);
out:
up_write(&mm->mmap_sem);
@@ -2806,7 +2813,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -2845,7 +2852,7 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long
*/
while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
&rb_parent)) {
- if (do_munmap(mm, addr, len))
+ if (do_munmap(mm, addr, len, uf))
return -ENOMEM;
}
@@ -2892,9 +2899,9 @@ out:
return 0;
}
-static int do_brk(unsigned long addr, unsigned long len)
+static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
{
- return do_brk_flags(addr, len, 0);
+ return do_brk_flags(addr, len, 0, uf);
}
int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
@@ -2902,13 +2909,15 @@ int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
struct mm_struct *mm = current->mm;
int ret;
bool populate;
+ LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
- ret = do_brk_flags(addr, len, flags);
+ ret = do_brk_flags(addr, len, flags, &uf);
populate = ((mm->def_flags & VM_LOCKED) != 0);
up_write(&mm->mmap_sem);
+ userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
return ret;
@@ -3125,8 +3134,7 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
mm->data_vm += npages;
}
-static int special_mapping_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf);
+static int special_mapping_fault(struct vm_fault *vmf);
/*
* Having a close hook prevents vma merging regardless of flags.
@@ -3161,9 +3169,9 @@ static const struct vm_operations_struct legacy_special_mapping_vmops = {
.fault = special_mapping_fault,
};
-static int special_mapping_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int special_mapping_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
struct page **pages;
@@ -3173,7 +3181,7 @@ static int special_mapping_fault(struct vm_area_struct *vma,
struct vm_special_mapping *sm = vma->vm_private_data;
if (sm->fault)
- return sm->fault(sm, vma, vmf);
+ return sm->fault(sm, vmf->vma, vmf);
pages = sm->pages;
}
@@ -3447,7 +3455,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
}
/*
- * initialise the VMA slab
+ * initialise the percpu counter for VM
*/
void __init mmap_init(void)
{
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a45b4dc6a7f5..848e946b08e5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -99,7 +99,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
ptent = ptep_modify_prot_start(mm, addr, pte);
ptent = pte_modify(ptent, newprot);
if (preserve_write)
- ptent = pte_mkwrite(ptent);
+ ptent = pte_mk_savedwrite(ptent);
/* Avoid taking write faults for known dirty pages */
if (dirty_accountable && pte_dirty(ptent) &&
diff --git a/mm/mremap.c b/mm/mremap.c
index 8779928d6a70..8233b0105c82 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -252,7 +252,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long old_addr, unsigned long old_len,
unsigned long new_len, unsigned long new_addr,
- bool *locked, struct vm_userfaultfd_ctx *uf)
+ bool *locked, struct vm_userfaultfd_ctx *uf,
+ struct list_head *uf_unmap)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma;
@@ -341,7 +342,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (unlikely(vma->vm_flags & VM_PFNMAP))
untrack_pfn_moved(vma);
- if (do_munmap(mm, old_addr, old_len) < 0) {
+ if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
/* OOM: unable to split vma, just get accounts right */
vm_unacct_memory(excess >> PAGE_SHIFT);
excess = 0;
@@ -417,7 +418,8 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
unsigned long new_addr, unsigned long new_len, bool *locked,
- struct vm_userfaultfd_ctx *uf)
+ struct vm_userfaultfd_ctx *uf,
+ struct list_head *uf_unmap)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -435,12 +437,12 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if (addr + old_len > new_addr && new_addr + new_len > addr)
goto out;
- ret = do_munmap(mm, new_addr, new_len);
+ ret = do_munmap(mm, new_addr, new_len, NULL);
if (ret)
goto out;
if (old_len >= new_len) {
- ret = do_munmap(mm, addr+new_len, old_len - new_len);
+ ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
if (ret && old_len != new_len)
goto out;
old_len = new_len;
@@ -462,7 +464,8 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if (offset_in_page(ret))
goto out1;
- ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf);
+ ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
+ uf_unmap);
if (!(offset_in_page(ret)))
goto out;
out1:
@@ -502,6 +505,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
unsigned long charged = 0;
bool locked = false;
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
+ LIST_HEAD(uf_unmap);
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
return ret;
@@ -528,7 +532,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (flags & MREMAP_FIXED) {
ret = mremap_to(addr, old_len, new_addr, new_len,
- &locked, &uf);
+ &locked, &uf, &uf_unmap);
goto out;
}
@@ -538,7 +542,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
* do_munmap does all the needed commit accounting
*/
if (old_len >= new_len) {
- ret = do_munmap(mm, addr+new_len, old_len - new_len);
+ ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
if (ret && old_len != new_len)
goto out;
ret = addr;
@@ -598,7 +602,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
ret = move_vma(vma, addr, old_len, new_len, new_addr,
- &locked, &uf);
+ &locked, &uf, &uf_unmap);
}
out:
if (offset_in_page(ret)) {
@@ -609,5 +613,6 @@ out:
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
+ userfaultfd_unmap_complete(mm, &uf_unmap);
return ret;
}
diff --git a/mm/nommu.c b/mm/nommu.c
index bc964c26be8c..fe9f4fa4a7a7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -517,7 +517,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}
/*
- * initialise the VMA and region record slabs
+ * initialise the percpu counter for VM and region record slabs
*/
void __init mmap_init(void)
{
@@ -1205,7 +1205,8 @@ unsigned long do_mmap(struct file *file,
unsigned long flags,
vm_flags_t vm_flags,
unsigned long pgoff,
- unsigned long *populate)
+ unsigned long *populate,
+ struct list_head *uf)
{
struct vm_area_struct *vma;
struct vm_region *region;
@@ -1577,7 +1578,7 @@ static int shrink_vma(struct mm_struct *mm,
* - under NOMMU conditions the chunk to be unmapped must be backed by a single
* VMA, though it need not cover the whole VMA
*/
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
struct vm_area_struct *vma;
unsigned long end;
@@ -1643,7 +1644,7 @@ int vm_munmap(unsigned long addr, size_t len)
int ret;
down_write(&mm->mmap_sem);
- ret = do_munmap(mm, addr, len);
+ ret = do_munmap(mm, addr, len, NULL);
up_write(&mm->mmap_sem);
return ret;
}
@@ -1794,7 +1795,7 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);
-int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_fault(struct vm_fault *vmf)
{
BUG();
return 0;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 8256788ac119..578321f1c070 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -403,12 +403,14 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
- nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed;
-
- pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
- current->comm, oc->gfp_mask, &oc->gfp_mask,
- nodemask_pr_args(nm), oc->order,
- current->signal->oom_score_adj);
+ pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
+ current->comm, oc->gfp_mask, &oc->gfp_mask);
+ if (oc->nodemask)
+ pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
+ else
+ pr_cont("(null)");
+ pr_cont(", order=%d, oom_score_adj=%hd\n",
+ oc->order, current->signal->oom_score_adj);
if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
pr_warn("COMPACTION is disabled!!!\n");
@@ -417,7 +419,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
if (oc->memcg)
mem_cgroup_print_oom_info(oc->memcg, p);
else
- show_mem(SHOW_MEM_FILTER_NODES, nm);
+ show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
if (sysctl_oom_dump_tasks)
dump_tasks(oc->memcg, oc->nodemask);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 216449825859..ae6e601f0a58 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -580,7 +580,7 @@ static void wb_domain_writeout_inc(struct wb_domain *dom,
__fprop_inc_percpu_max(&dom->completions, completions,
max_prop_frac);
/* First event after period switching was turned off? */
- if (!unlikely(dom->period_time)) {
+ if (unlikely(!dom->period_time)) {
/*
* We can race with other __bdi_writeout_inc calls here but
* it does not cause any harm since the resulting time when
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c21b33668133..9f9623d690d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -59,7 +59,6 @@
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
-#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
@@ -92,6 +91,10 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif
+/* work_structs for global per-cpu drains */
+DEFINE_MUTEX(pcpu_drain_mutex);
+DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
@@ -1085,10 +1088,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
{
int migratetype = 0;
int batch_free = 0;
- unsigned long nr_scanned;
+ unsigned long nr_scanned, flags;
bool isolated_pageblocks;
- spin_lock(&zone->lock);
+ spin_lock_irqsave(&zone->lock, flags);
isolated_pageblocks = has_isolate_pageblock(zone);
nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
@@ -1137,7 +1140,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
trace_mm_page_pcpu_drain(page, 0, mt);
} while (--count && --batch_free && !list_empty(list));
}
- spin_unlock(&zone->lock);
+ spin_unlock_irqrestore(&zone->lock, flags);
}
static void free_one_page(struct zone *zone,
@@ -1145,8 +1148,9 @@ static void free_one_page(struct zone *zone,
unsigned int order,
int migratetype)
{
- unsigned long nr_scanned;
- spin_lock(&zone->lock);
+ unsigned long nr_scanned, flags;
+ spin_lock_irqsave(&zone->lock, flags);
+ __count_vm_events(PGFREE, 1 << order);
nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1156,7 +1160,7 @@ static void free_one_page(struct zone *zone,
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
- spin_unlock(&zone->lock);
+ spin_unlock_irqrestore(&zone->lock, flags);
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1234,7 +1238,6 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
static void __free_pages_ok(struct page *page, unsigned int order)
{
- unsigned long flags;
int migratetype;
unsigned long pfn = page_to_pfn(page);
@@ -1242,10 +1245,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;
migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
- __count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2217,8 +2217,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
int migratetype, bool cold)
{
int i, alloced = 0;
+ unsigned long flags;
- spin_lock(&zone->lock);
+ spin_lock_irqsave(&zone->lock, flags);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
@@ -2254,7 +2255,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
* pages added to the pcp list.
*/
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
- spin_unlock(&zone->lock);
+ spin_unlock_irqrestore(&zone->lock, flags);
return alloced;
}
@@ -2339,16 +2340,26 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}
+static void drain_local_pages_wq(struct work_struct *work)
+{
+ /*
+ * drain_all_pages doesn't use proper cpu hotplug protection so
+ * we can race with cpu offline when the WQ can move this from
+ * a cpu pinned worker to an unbound one. We can operate on a different
+ * cpu, which is all right, but we also have to make sure not to move to
+ * a different one.
+ */
+ preempt_disable();
+ drain_local_pages(NULL);
+ preempt_enable();
+}
+
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
*
* When zone parameter is non-NULL, spill just the single zone's pages.
*
- * Note that this code is protected against sending an IPI to an offline
- * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
- * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
- * nothing keeps CPUs from showing up after we populated the cpumask and
- * before the call to on_each_cpu_mask().
+ * Note that this can be extremely slow as the draining happens in a workqueue.
*/
void drain_all_pages(struct zone *zone)
{
@@ -2360,6 +2371,21 @@ void drain_all_pages(struct zone *zone)
*/
static cpumask_t cpus_with_pcps;
+ /* Workqueues cannot recurse */
+ if (current->flags & PF_WQ_WORKER)
+ return;
+
+ /*
+ * Do not drain if one is already in progress unless it's specific to
+ * a zone. Such callers are primarily CMA and memory hotplug and need
+ * the drain to be complete when the call returns.
+ */
+ if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
+ if (!zone)
+ return;
+ mutex_lock(&pcpu_drain_mutex);
+ }
+
/*
* We don't care about racing with CPU hotplug event
* as offline notification will cause the notified
@@ -2390,8 +2416,16 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
- on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
- zone, 1);
+
+ for_each_cpu(cpu, &cpus_with_pcps) {
+ struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+ INIT_WORK(work, drain_local_pages_wq);
+ schedule_work_on(cpu, work);
+ }
+ for_each_cpu(cpu, &cpus_with_pcps)
+ flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+
+ mutex_unlock(&pcpu_drain_mutex);
}
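
The IPI-based drain is replaced by per-cpu work items that are queued on each CPU with pages on its pcp lists and then flushed synchronously. Stripped of the pcp specifics, the pattern is roughly the following (my_percpu_work and run_on_cpus_and_wait are made-up names; the real code uses pcpu_drain and drain_local_pages_wq):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, my_percpu_work);

/* Sketch only: run @fn on every CPU in @mask via pinned works, then wait. */
static void run_on_cpus_and_wait(const struct cpumask *mask, work_func_t fn)
{
        int cpu;

        for_each_cpu(cpu, mask) {
                struct work_struct *work = per_cpu_ptr(&my_percpu_work, cpu);

                INIT_WORK(work, fn);
                schedule_work_on(cpu, work);    /* queue on that CPU's worker */
        }
        for_each_cpu(cpu, mask)
                flush_work(per_cpu_ptr(&my_percpu_work, cpu));
}
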
#ifdef CONFIG_HIBERNATION
@@ -2442,17 +2476,20 @@ void free_hot_cold_page(struct page *page, bool cold)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
- unsigned long flags;
unsigned long pfn = page_to_pfn(page);
int migratetype;
+ if (in_interrupt()) {
+ __free_pages_ok(page, 0);
+ return;
+ }
+
if (!free_pcp_prepare(page))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
- local_irq_save(flags);
- __count_vm_event(PGFREE);
+ preempt_disable();
/*
* We only track unmovable, reclaimable and movable on pcp lists.
@@ -2469,6 +2506,7 @@ void free_hot_cold_page(struct page *page, bool cold)
migratetype = MIGRATE_MOVABLE;
}
+ __count_vm_event(PGFREE);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
if (!cold)
list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2482,7 +2520,7 @@ void free_hot_cold_page(struct page *page, bool cold)
}
out:
- local_irq_restore(flags);
+ preempt_enable();
}
/*
@@ -2600,74 +2638,105 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
#endif
}
+/* Remove page from the per-cpu list, caller must protect the list */
+static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
+ bool cold, struct per_cpu_pages *pcp,
+ struct list_head *list)
+{
+ struct page *page;
+
+ VM_BUG_ON(in_interrupt());
+
+ do {
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, 0,
+ pcp->batch, list,
+ migratetype, cold);
+ if (unlikely(list_empty(list)))
+ return NULL;
+ }
+
+ if (cold)
+ page = list_last_entry(list, struct page, lru);
+ else
+ page = list_first_entry(list, struct page, lru);
+
+ list_del(&page->lru);
+ pcp->count--;
+ } while (check_new_pcp(page));
+
+ return page;
+}
+
+/* Lock and remove page from the per-cpu list */
+static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ struct zone *zone, unsigned int order,
+ gfp_t gfp_flags, int migratetype)
+{
+ struct per_cpu_pages *pcp;
+ struct list_head *list;
+ bool cold = ((gfp_flags & __GFP_COLD) != 0);
+ struct page *page;
+
+ preempt_disable();
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
+ if (page) {
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ zone_statistics(preferred_zone, zone);
+ }
+ preempt_enable();
+ return page;
+}
+
/*
* Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
static inline
-struct page *buffered_rmqueue(struct zone *preferred_zone,
+struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, unsigned int alloc_flags,
int migratetype)
{
unsigned long flags;
struct page *page;
- bool cold = ((gfp_flags & __GFP_COLD) != 0);
- if (likely(order == 0)) {
- struct per_cpu_pages *pcp;
- struct list_head *list;
-
- local_irq_save(flags);
- do {
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold);
- if (unlikely(list_empty(list)))
- goto failed;
- }
-
- if (cold)
- page = list_last_entry(list, struct page, lru);
- else
- page = list_first_entry(list, struct page, lru);
-
- list_del(&page->lru);
- pcp->count--;
+ if (likely(order == 0) && !in_interrupt()) {
+ page = rmqueue_pcplist(preferred_zone, zone, order,
+ gfp_flags, migratetype);
+ goto out;
+ }
- } while (check_new_pcp(page));
- } else {
- /*
- * We most definitely don't want callers attempting to
- * allocate greater than order-1 page units with __GFP_NOFAIL.
- */
- WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
- spin_lock_irqsave(&zone->lock, flags);
+ /*
+ * We most definitely don't want callers attempting to
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+ spin_lock_irqsave(&zone->lock, flags);
- do {
- page = NULL;
- if (alloc_flags & ALLOC_HARDER) {
- page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
- if (page)
- trace_mm_page_alloc_zone_locked(page, order, migratetype);
- }
- if (!page)
- page = __rmqueue(zone, order, migratetype);
- } while (page && check_new_pages(page, order));
- spin_unlock(&zone->lock);
+ do {
+ page = NULL;
+ if (alloc_flags & ALLOC_HARDER) {
+ page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+ if (page)
+ trace_mm_page_alloc_zone_locked(page, order, migratetype);
+ }
if (!page)
- goto failed;
- __mod_zone_freepage_state(zone, -(1 << order),
- get_pcppage_migratetype(page));
- }
+ page = __rmqueue(zone, order, migratetype);
+ } while (page && check_new_pages(page, order));
+ spin_unlock(&zone->lock);
+ if (!page)
+ goto failed;
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_pcppage_migratetype(page));
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
- VM_BUG_ON_PAGE(bad_range(zone, page), page);
+out:
+ VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
failed:
@@ -2875,7 +2944,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
#ifdef CONFIG_NUMA
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
- return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
+ return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
RECLAIM_DISTANCE;
}
#else /* CONFIG_NUMA */
@@ -2972,7 +3041,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
}
try_this_zone:
- page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
+ page = rmqueue(ac->preferred_zoneref->zone, zone, order,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -3825,76 +3894,76 @@ got_pg:
return page;
}
-/*
- * This is the 'heart' of the zoned buddy allocator.
- */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, nodemask_t *nodemask)
+static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nodemask,
+ struct alloc_context *ac, gfp_t *alloc_mask,
+ unsigned int *alloc_flags)
{
- struct page *page;
- unsigned int alloc_flags = ALLOC_WMARK_LOW;
- gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
- struct alloc_context ac = {
- .high_zoneidx = gfp_zone(gfp_mask),
- .zonelist = zonelist,
- .nodemask = nodemask,
- .migratetype = gfpflags_to_migratetype(gfp_mask),
- };
+ ac->high_zoneidx = gfp_zone(gfp_mask);
+ ac->zonelist = zonelist;
+ ac->nodemask = nodemask;
+ ac->migratetype = gfpflags_to_migratetype(gfp_mask);
if (cpusets_enabled()) {
- alloc_mask |= __GFP_HARDWALL;
- alloc_flags |= ALLOC_CPUSET;
- if (!ac.nodemask)
- ac.nodemask = &cpuset_current_mems_allowed;
+ *alloc_mask |= __GFP_HARDWALL;
+ if (!ac->nodemask)
+ ac->nodemask = &cpuset_current_mems_allowed;
+ else
+ *alloc_flags |= ALLOC_CPUSET;
}
- gfp_mask &= gfp_allowed_mask;
-
lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
if (should_fail_alloc_page(gfp_mask, order))
- return NULL;
+ return false;
- /*
- * Check the zones suitable for the gfp_mask contain at least one
- * valid zone. It's possible to have an empty zonelist as a result
- * of __GFP_THISNODE and a memoryless node
- */
- if (unlikely(!zonelist->_zonerefs->zone))
- return NULL;
+ if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
+ *alloc_flags |= ALLOC_CMA;
- if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
- alloc_flags |= ALLOC_CMA;
+ return true;
+}
+/* Determine whether to spread dirty pages and what the first usable zone is */
+static inline void finalise_ac(gfp_t gfp_mask,
+ unsigned int order, struct alloc_context *ac)
+{
/* Dirty zone balancing only done in the fast path */
- ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+ ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
/*
* The preferred zone is used for statistics but crucially it is
* also used as the starting point for the zonelist iterator. It
* may get reset for allocations that ignore memory policies.
*/
- ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
- ac.high_zoneidx, ac.nodemask);
- if (!ac.preferred_zoneref->zone) {
- page = NULL;
- /*
- * This might be due to race with cpuset_current_mems_allowed
- * update, so make sure we retry with original nodemask in the
- * slow path.
- */
- goto no_zone;
- }
+ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, ac->nodemask);
+}
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nodemask)
+{
+ struct page *page;
+ unsigned int alloc_flags = ALLOC_WMARK_LOW;
+ gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
+ struct alloc_context ac = { };
+
+ gfp_mask &= gfp_allowed_mask;
+ if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
+ return NULL;
+
+ finalise_ac(gfp_mask, order, &ac);
/* First allocation attempt */
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
if (likely(page))
goto out;
-no_zone:
/*
* Runtime PM, block IO and its error handling path can deadlock
* because I/O on the device might not complete.
@@ -6420,8 +6489,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
start_pfn = end_pfn;
}
- arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
- arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
/* Find the PFNs that ZONE_MOVABLE begins at in each node */
memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
@@ -7157,8 +7224,9 @@ void *__init alloc_large_system_hash(const char *tablename,
 * If @count is not zero, it is okay to include fewer than @count unmovable pages
*
* PageLRU check without isolation or lru_lock could race so that
- * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
- * expect this function should be exact.
+ * MIGRATE_MOVABLE block might include unmovable pages. Also, a __PageMovable
+ * check without lock_page may miss some movable non-LRU pages under racy
+ * conditions. So you can't expect this function to be exact.
*/
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
bool skip_hwpoisoned_pages)
@@ -7214,6 +7282,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
if (skip_hwpoisoned_pages && PageHWPoison(page))
continue;
+ if (__PageMovable(page))
+ continue;
+
if (!PageLRU(page))
found++;
/*
@@ -7325,6 +7396,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
* #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
* in range must have the same migratetype and it must
* be either of the two.
+ * @gfp_mask: GFP mask to use during compaction
*
* The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
* aligned, however it's the caller's responsibility to guarantee that
@@ -7338,7 +7410,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
* need to be freed with free_contig_range().
*/
int alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype)
+ unsigned migratetype, gfp_t gfp_mask)
{
unsigned long outer_start, outer_end;
unsigned int order;
@@ -7350,7 +7422,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
.zone = page_zone(pfn_to_page(start)),
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
- .gfp_mask = GFP_KERNEL,
+ .gfp_mask = memalloc_noio_flags(gfp_mask),
};
INIT_LIST_HEAD(&cc.migratepages);
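
The alloc_contig_range() change above adds a gfp_mask argument so callers can state how aggressively the internal compaction may allocate. A minimal caller sketch, assuming a CMA-style user; reserve_pfn_range() and its bookkeeping are illustrative, only alloc_contig_range()/free_contig_range() and MIGRATE_CMA come from the kernel:

	/* Hypothetical helper: grab nr_pages physically contiguous pages
	 * starting at start_pfn, allowing blocking GFP_KERNEL compaction. */
	static int reserve_pfn_range(unsigned long start_pfn, unsigned long nr_pages)
	{
		int ret;

		ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
					 MIGRATE_CMA, GFP_KERNEL);
		if (ret)
			return ret;	/* e.g. -EBUSY: range could not be isolated */

		/* ... use the range, then give it back ... */
		free_contig_range(start_pfn, nr_pages);
		return 0;
	}
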
diff --git a/mm/page_idle.c b/mm/page_idle.c
index ae11aa914e55..b0ee56c56b58 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -54,27 +54,27 @@ static int page_idle_clear_pte_refs_one(struct page *page,
struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
- struct mm_struct *mm = vma->vm_mm;
- pmd_t *pmd;
- pte_t *pte;
- spinlock_t *ptl;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = addr,
+ };
bool referenced = false;
- if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl))
- return SWAP_AGAIN;
-
- if (pte) {
- referenced = ptep_clear_young_notify(vma, addr, pte);
- pte_unmap(pte);
- } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- referenced = pmdp_clear_young_notify(vma, addr, pmd);
- } else {
- /* unexpected pmd-mapped page? */
- WARN_ON_ONCE(1);
+ while (page_vma_mapped_walk(&pvmw)) {
+ addr = pvmw.address;
+ if (pvmw.pte) {
+ referenced = ptep_clear_young_notify(vma, addr,
+ pvmw.pte);
+ } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ referenced = pmdp_clear_young_notify(vma, addr,
+ pvmw.pmd);
+ } else {
+ /* unexpected pmd-mapped page? */
+ WARN_ON_ONCE(1);
+ }
}
- spin_unlock(ptl);
-
if (referenced) {
clear_page_idle(page);
/*
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
new file mode 100644
index 000000000000..a23001a22c15
--- /dev/null
+++ b/mm/page_vma_mapped.c
@@ -0,0 +1,218 @@
+#include <linux/mm.h>
+#include <linux/rmap.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
+#include "internal.h"
+
+static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
+{
+ pmd_t pmde;
+ /*
+ * Make sure we don't re-load pmd between present and !trans_huge check.
+ * We need a consistent view.
+ */
+ pmde = READ_ONCE(*pvmw->pmd);
+ return pmd_present(pmde) && !pmd_trans_huge(pmde);
+}
+
+static inline bool not_found(struct page_vma_mapped_walk *pvmw)
+{
+ page_vma_mapped_walk_done(pvmw);
+ return false;
+}
+
+static bool map_pte(struct page_vma_mapped_walk *pvmw)
+{
+ pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
+ if (!(pvmw->flags & PVMW_SYNC)) {
+ if (pvmw->flags & PVMW_MIGRATION) {
+ if (!is_swap_pte(*pvmw->pte))
+ return false;
+ } else {
+ if (!pte_present(*pvmw->pte))
+ return false;
+ }
+ }
+ pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
+ spin_lock(pvmw->ptl);
+ return true;
+}
+
+static bool check_pte(struct page_vma_mapped_walk *pvmw)
+{
+ if (pvmw->flags & PVMW_MIGRATION) {
+#ifdef CONFIG_MIGRATION
+ swp_entry_t entry;
+ if (!is_swap_pte(*pvmw->pte))
+ return false;
+ entry = pte_to_swp_entry(*pvmw->pte);
+ if (!is_migration_entry(entry))
+ return false;
+ if (migration_entry_to_page(entry) - pvmw->page >=
+ hpage_nr_pages(pvmw->page)) {
+ return false;
+ }
+ if (migration_entry_to_page(entry) < pvmw->page)
+ return false;
+#else
+ WARN_ON_ONCE(1);
+#endif
+ } else {
+ if (!pte_present(*pvmw->pte))
+ return false;
+
+ /* THP can be referenced by any subpage */
+ if (pte_page(*pvmw->pte) - pvmw->page >=
+ hpage_nr_pages(pvmw->page)) {
+ return false;
+ }
+ if (pte_page(*pvmw->pte) < pvmw->page)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
+ * @pvmw->address
+ * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
+ * must be set. pmd, pte and ptl must be NULL.
+ *
+ * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
+ * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
+ * adjusted if needed (for PTE-mapped THPs).
+ *
+ * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
+ * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
+ * a loop to find all PTEs that map the THP.
+ *
+ * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
+ * regardless of which page table level the page is mapped at. @pvmw->pmd is
+ * NULL.
+ *
+ * Returns false if there are no more page table entries for the page in
+ * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
+ *
+ * If you need to stop the walk before page_vma_mapped_walk() has returned false,
+ * use page_vma_mapped_walk_done(). It will do the housekeeping.
+ */
+bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+{
+ struct mm_struct *mm = pvmw->vma->vm_mm;
+ struct page *page = pvmw->page;
+ pgd_t *pgd;
+ pud_t *pud;
+
+ /* The only possible pmd mapping has been handled on the last iteration */
+ if (pvmw->pmd && !pvmw->pte)
+ return not_found(pvmw);
+
+ /* Seeking to the next pte entry only makes sense for THP */
+ if (pvmw->pte) {
+ if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+ return not_found(pvmw);
+ goto next_pte;
+ }
+
+ if (unlikely(PageHuge(pvmw->page))) {
+ /* when pud is not present, pte will be NULL */
+ pvmw->pte = huge_pte_offset(mm, pvmw->address);
+ if (!pvmw->pte)
+ return false;
+
+ pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
+ spin_lock(pvmw->ptl);
+ if (!check_pte(pvmw))
+ return not_found(pvmw);
+ return true;
+ }
+restart:
+ pgd = pgd_offset(mm, pvmw->address);
+ if (!pgd_present(*pgd))
+ return false;
+ pud = pud_offset(pgd, pvmw->address);
+ if (!pud_present(*pud))
+ return false;
+ pvmw->pmd = pmd_offset(pud, pvmw->address);
+ if (pmd_trans_huge(*pvmw->pmd)) {
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+ if (!pmd_present(*pvmw->pmd))
+ return not_found(pvmw);
+ if (likely(pmd_trans_huge(*pvmw->pmd))) {
+ if (pvmw->flags & PVMW_MIGRATION)
+ return not_found(pvmw);
+ if (pmd_page(*pvmw->pmd) != page)
+ return not_found(pvmw);
+ return true;
+ } else {
+ /* THP pmd was split under us: handle on pte level */
+ spin_unlock(pvmw->ptl);
+ pvmw->ptl = NULL;
+ }
+ } else {
+ if (!check_pmd(pvmw))
+ return false;
+ }
+ if (!map_pte(pvmw))
+ goto next_pte;
+ while (1) {
+ if (check_pte(pvmw))
+ return true;
+next_pte: do {
+ pvmw->address += PAGE_SIZE;
+ if (pvmw->address >=
+ __vma_address(pvmw->page, pvmw->vma) +
+ hpage_nr_pages(pvmw->page) * PAGE_SIZE)
+ return not_found(pvmw);
+ /* Did we cross page table boundary? */
+ if (pvmw->address % PMD_SIZE == 0) {
+ pte_unmap(pvmw->pte);
+ if (pvmw->ptl) {
+ spin_unlock(pvmw->ptl);
+ pvmw->ptl = NULL;
+ }
+ goto restart;
+ } else {
+ pvmw->pte++;
+ }
+ } while (pte_none(*pvmw->pte));
+
+ if (!pvmw->ptl) {
+ pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+ spin_lock(pvmw->ptl);
+ }
+ }
+}
+
+/**
+ * page_mapped_in_vma - check whether a page is really mapped in a VMA
+ * @page: the page to test
+ * @vma: the VMA to test
+ *
+ * Returns 1 if the page is mapped into the page tables of the VMA, 0
+ * if the page is not mapped into the page tables of this VMA. Only
+ * valid for normal file or anonymous VMAs.
+ */
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+{
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .flags = PVMW_SYNC,
+ };
+ unsigned long start, end;
+
+ start = __vma_address(page, vma);
+ end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+
+ if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+ return 0;
+ pvmw.address = max(start, vma->vm_start);
+ if (!page_vma_mapped_walk(&pvmw))
+ return 0;
+ page_vma_mapped_walk_done(&pvmw);
+ return 1;
+}
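
The new walker is meant to be driven in a loop, the way the converted callers in mm/page_idle.c and mm/rmap.c below do it. A condensed usage sketch (the reference-test body is illustrative; the struct fields, page_vma_mapped_walk() and page_vma_mapped_walk_done() are the API introduced here):

	static bool page_is_referenced_in_vma(struct page *page,
					      struct vm_area_struct *vma,
					      unsigned long address)
	{
		struct page_vma_mapped_walk pvmw = {
			.page = page,
			.vma = vma,
			.address = address,
		};
		bool referenced = false;

		while (page_vma_mapped_walk(&pvmw)) {
			if (pvmw.pte)		/* PTE-mapped (sub)page */
				referenced |= ptep_clear_young_notify(vma,
						pvmw.address, pvmw.pte);
			else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
				referenced |= pmdp_clear_young_notify(vma,
						pvmw.address, pvmw.pmd);

			if (referenced) {
				/* stopping early: let the walker drop its locks */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}
		return referenced;
	}
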
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 207244489a68..03761577ae86 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -78,14 +78,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = pud_offset(pgd, addr);
do {
+ again:
next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud)) {
+ if (pud_none(*pud) || !walk->vma) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
+
+ if (walk->pud_entry) {
+ spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
+
+ if (ptl) {
+ err = walk->pud_entry(pud, addr, next, walk);
+ spin_unlock(ptl);
+ if (err)
+ break;
+ continue;
+ }
+ }
+
+ split_huge_pud(walk->vma, pud, addr);
+ if (pud_none(*pud))
+ goto again;
+
if (walk->pmd_entry || walk->pte_entry)
err = walk_pmd_range(pud, addr, next, walk);
if (err)
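
With walk_pud_range() now invoking ->pud_entry under the PUD lock, a pagewalk user can observe huge PUD mappings directly. A rough sketch, assuming the struct mm_walk layout used above; count_huge_pud()/count_huge_puds() and the counting logic are made up for illustration:

	static int count_huge_pud(pud_t *pud, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
	{
		unsigned long *nr = walk->private;

		/* only reached when pud_trans_huge_lock() succeeded in
		 * walk_pud_range() above, i.e. for a huge PUD, lock held */
		(*nr)++;
		return 0;
	}

	static unsigned long count_huge_puds(struct mm_struct *mm,
					     unsigned long start, unsigned long end)
	{
		unsigned long nr = 0;
		struct mm_walk walk = {
			.pud_entry = count_huge_pud,
			.mm = mm,
			.private = &nr,
		};

		down_read(&mm->mmap_sem);
		walk_page_range(start, end, &walk);
		up_read(&mm->mmap_sem);
		return nr;
	}
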
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 71c5f9109f2a..4ed5908c65b0 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -123,6 +123,20 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud;
+
+ VM_BUG_ON(address & ~HPAGE_PUD_MASK);
+ VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
+ pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
+ flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+ return pud;
+}
+#endif
#endif
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
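
For scale, the flush range of the new pudp_huge_clear_flush() is one huge PUD. A quick standalone arithmetic check, assuming x86-64-style 4K pages with 9 bits per table level (the shifts are assumptions, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 12;		   /* 4 KiB base pages */
		unsigned long pmd_shift  = page_shift + 9; /* 512 PTEs per table */
		unsigned long pud_shift  = pmd_shift + 9;  /* 512 PMDs per table */

		printf("HPAGE_PMD_SIZE = %lu KiB\n", (1UL << pmd_shift) >> 10);
		printf("HPAGE_PUD_SIZE = %lu MiB\n", (1UL << pud_shift) >> 20);
		return 0;
	}
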
diff --git a/mm/rmap.c b/mm/rmap.c
index 91619fd70939..8774791e2809 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -607,8 +607,7 @@ void try_to_unmap_flush_dirty(void)
try_to_unmap_flush();
}
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
- struct page *page, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
@@ -643,8 +642,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
return should_defer;
}
#else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
- struct page *page, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}
@@ -710,170 +708,6 @@ out:
return pmd;
}
-/*
- * Check that @page is mapped at @address into @mm.
- *
- * If @sync is false, page_check_address may perform a racy check to avoid
- * the page table lock when the pte is not present (helpful when reclaiming
- * highly shared pages).
- *
- * On success returns with pte mapped and locked.
- */
-pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
- unsigned long address, spinlock_t **ptlp, int sync)
-{
- pmd_t *pmd;
- pte_t *pte;
- spinlock_t *ptl;
-
- if (unlikely(PageHuge(page))) {
- /* when pud is not present, pte will be NULL */
- pte = huge_pte_offset(mm, address);
- if (!pte)
- return NULL;
-
- ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
- goto check;
- }
-
- pmd = mm_find_pmd(mm, address);
- if (!pmd)
- return NULL;
-
- pte = pte_offset_map(pmd, address);
- /* Make a quick check before getting the lock */
- if (!sync && !pte_present(*pte)) {
- pte_unmap(pte);
- return NULL;
- }
-
- ptl = pte_lockptr(mm, pmd);
-check:
- spin_lock(ptl);
- if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
- *ptlp = ptl;
- return pte;
- }
- pte_unmap_unlock(pte, ptl);
- return NULL;
-}
-
-/**
- * page_mapped_in_vma - check whether a page is really mapped in a VMA
- * @page: the page to test
- * @vma: the VMA to test
- *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA. Only
- * valid for normal file or anonymous VMAs.
- */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
-{
- unsigned long address;
- pte_t *pte;
- spinlock_t *ptl;
-
- address = __vma_address(page, vma);
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
- return 0;
- pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
- if (!pte) /* the page is not in this mm */
- return 0;
- pte_unmap_unlock(pte, ptl);
-
- return 1;
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * Check that @page is mapped at @address into @mm. In contrast to
- * page_check_address(), this function can handle transparent huge pages.
- *
- * On success returns true with pte mapped and locked. For PMD-mapped
- * transparent huge pages *@ptep is set to NULL.
- */
-bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
- unsigned long address, pmd_t **pmdp,
- pte_t **ptep, spinlock_t **ptlp)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- spinlock_t *ptl;
-
- if (unlikely(PageHuge(page))) {
- /* when pud is not present, pte will be NULL */
- pte = huge_pte_offset(mm, address);
- if (!pte)
- return false;
-
- ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
- pmd = NULL;
- goto check_pte;
- }
-
- pgd = pgd_offset(mm, address);
- if (!pgd_present(*pgd))
- return false;
- pud = pud_offset(pgd, address);
- if (!pud_present(*pud))
- return false;
- pmd = pmd_offset(pud, address);
-
- if (pmd_trans_huge(*pmd)) {
- ptl = pmd_lock(mm, pmd);
- if (!pmd_present(*pmd))
- goto unlock_pmd;
- if (unlikely(!pmd_trans_huge(*pmd))) {
- spin_unlock(ptl);
- goto map_pte;
- }
-
- if (pmd_page(*pmd) != page)
- goto unlock_pmd;
-
- pte = NULL;
- goto found;
-unlock_pmd:
- spin_unlock(ptl);
- return false;
- } else {
- pmd_t pmde = *pmd;
-
- barrier();
- if (!pmd_present(pmde) || pmd_trans_huge(pmde))
- return false;
- }
-map_pte:
- pte = pte_offset_map(pmd, address);
- if (!pte_present(*pte)) {
- pte_unmap(pte);
- return false;
- }
-
- ptl = pte_lockptr(mm, pmd);
-check_pte:
- spin_lock(ptl);
-
- if (!pte_present(*pte)) {
- pte_unmap_unlock(pte, ptl);
- return false;
- }
-
- /* THP can be referenced by any subpage */
- if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
- pte_unmap_unlock(pte, ptl);
- return false;
- }
-found:
- *ptep = pte;
- *pmdp = pmd;
- *ptlp = ptl;
- return true;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
struct page_referenced_arg {
int mapcount;
int referenced;
@@ -886,45 +720,48 @@ struct page_referenced_arg {
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct mm_struct *mm = vma->vm_mm;
struct page_referenced_arg *pra = arg;
- pmd_t *pmd;
- pte_t *pte;
- spinlock_t *ptl;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ };
int referenced = 0;
- if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
- return SWAP_AGAIN;
+ while (page_vma_mapped_walk(&pvmw)) {
+ address = pvmw.address;
- if (vma->vm_flags & VM_LOCKED) {
- if (pte)
- pte_unmap(pte);
- spin_unlock(ptl);
- pra->vm_flags |= VM_LOCKED;
- return SWAP_FAIL; /* To break the loop */
- }
+ if (vma->vm_flags & VM_LOCKED) {
+ page_vma_mapped_walk_done(&pvmw);
+ pra->vm_flags |= VM_LOCKED;
+ return SWAP_FAIL; /* To break the loop */
+ }
- if (pte) {
- if (ptep_clear_flush_young_notify(vma, address, pte)) {
- /*
- * Don't treat a reference through a sequentially read
- * mapping as such. If the page has been used in
- * another mapping, we will catch it; if this other
- * mapping is already gone, the unmap path will have
- * set PG_referenced or activated the page.
- */
- if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+ if (pvmw.pte) {
+ if (ptep_clear_flush_young_notify(vma, address,
+ pvmw.pte)) {
+ /*
+ * Don't treat a reference through
+ * a sequentially read mapping as such.
+ * If the page has been used in another mapping,
+ * we will catch it; if this other mapping is
+ * already gone, the unmap path will have set
+ * PG_referenced or activated the page.
+ */
+ if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+ referenced++;
+ }
+ } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ if (pmdp_clear_flush_young_notify(vma, address,
+ pvmw.pmd))
referenced++;
+ } else {
+ /* unexpected pmd-mapped page? */
+ WARN_ON_ONCE(1);
}
- pte_unmap(pte);
- } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- if (pmdp_clear_flush_young_notify(vma, address, pmd))
- referenced++;
- } else {
- /* unexpected pmd-mapped page? */
- WARN_ON_ONCE(1);
+
+ pra->mapcount--;
}
- spin_unlock(ptl);
if (referenced)
clear_page_idle(page);
@@ -936,7 +773,6 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
pra->vm_flags |= vma->vm_flags;
}
- pra->mapcount--;
if (!pra->mapcount)
return SWAP_SUCCESS; /* To break the loop */
@@ -1015,34 +851,56 @@ int page_referenced(struct page *page,
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct mm_struct *mm = vma->vm_mm;
- pte_t *pte;
- spinlock_t *ptl;
- int ret = 0;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ .flags = PVMW_SYNC,
+ };
int *cleaned = arg;
- pte = page_check_address(page, mm, address, &ptl, 1);
- if (!pte)
- goto out;
-
- if (pte_dirty(*pte) || pte_write(*pte)) {
- pte_t entry;
+ while (page_vma_mapped_walk(&pvmw)) {
+ int ret = 0;
+ address = pvmw.address;
+ if (pvmw.pte) {
+ pte_t entry;
+ pte_t *pte = pvmw.pte;
+
+ if (!pte_dirty(*pte) && !pte_write(*pte))
+ continue;
+
+ flush_cache_page(vma, address, pte_pfn(*pte));
+ entry = ptep_clear_flush(vma, address, pte);
+ entry = pte_wrprotect(entry);
+ entry = pte_mkclean(entry);
+ set_pte_at(vma->vm_mm, address, pte, entry);
+ ret = 1;
+ } else {
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+ pmd_t *pmd = pvmw.pmd;
+ pmd_t entry;
+
+ if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
+ continue;
+
+ flush_cache_page(vma, address, page_to_pfn(page));
+ entry = pmdp_huge_clear_flush(vma, address, pmd);
+ entry = pmd_wrprotect(entry);
+ entry = pmd_mkclean(entry);
+ set_pmd_at(vma->vm_mm, address, pmd, entry);
+ ret = 1;
+#else
+ /* unexpected pmd-mapped page? */
+ WARN_ON_ONCE(1);
+#endif
+ }
- flush_cache_page(vma, address, pte_pfn(*pte));
- entry = ptep_clear_flush(vma, address, pte);
- entry = pte_wrprotect(entry);
- entry = pte_mkclean(entry);
- set_pte_at(mm, address, pte, entry);
- ret = 1;
+ if (ret) {
+ mmu_notifier_invalidate_page(vma->vm_mm, address);
+ (*cleaned)++;
+ }
}
- pte_unmap_unlock(pte, ptl);
-
- if (ret) {
- mmu_notifier_invalidate_page(mm, address);
- (*cleaned)++;
- }
-out:
return SWAP_AGAIN;
}
@@ -1435,155 +1293,163 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- pte_t *pte;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ };
pte_t pteval;
- spinlock_t *ptl;
+ struct page *subpage;
int ret = SWAP_AGAIN;
struct rmap_private *rp = arg;
enum ttu_flags flags = rp->flags;
/* munlock has nothing to gain from examining un-locked vmas */
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
- goto out;
+ return SWAP_AGAIN;
if (flags & TTU_SPLIT_HUGE_PMD) {
split_huge_pmd_address(vma, address,
flags & TTU_MIGRATION, page);
- /* check if we have anything to do after split */
- if (page_mapcount(page) == 0)
- goto out;
}
- pte = page_check_address(page, mm, address, &ptl,
- PageTransCompound(page));
- if (!pte)
- goto out;
+ while (page_vma_mapped_walk(&pvmw)) {
+ subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ address = pvmw.address;
- /*
- * If the page is mlock()d, we cannot swap it out.
- * If it's recently referenced (perhaps page_referenced
- * skipped over this mm) then we should reactivate it.
- */
- if (!(flags & TTU_IGNORE_MLOCK)) {
- if (vma->vm_flags & VM_LOCKED) {
- /* PTE-mapped THP are never mlocked */
- if (!PageTransCompound(page)) {
- /*
- * Holding pte lock, we do *not* need
- * mmap_sem here
- */
- mlock_vma_page(page);
- }
- ret = SWAP_MLOCK;
- goto out_unmap;
- }
- if (flags & TTU_MUNLOCK)
- goto out_unmap;
- }
- if (!(flags & TTU_IGNORE_ACCESS)) {
- if (ptep_clear_flush_young_notify(vma, address, pte)) {
- ret = SWAP_FAIL;
- goto out_unmap;
- }
- }
+ /* Unexpected PMD-mapped THP? */
+ VM_BUG_ON_PAGE(!pvmw.pte, page);
- /* Nuke the page table entry. */
- flush_cache_page(vma, address, page_to_pfn(page));
- if (should_defer_flush(mm, flags)) {
/*
- * We clear the PTE but do not flush so potentially a remote
- * CPU could still be writing to the page. If the entry was
- * previously clean then the architecture must guarantee that
- * a clear->dirty transition on a cached TLB entry is written
- * through and traps if the PTE is unmapped.
+ * If the page is mlock()d, we cannot swap it out.
+ * If it's recently referenced (perhaps page_referenced
+ * skipped over this mm) then we should reactivate it.
*/
- pteval = ptep_get_and_clear(mm, address, pte);
-
- set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
- } else {
- pteval = ptep_clear_flush(vma, address, pte);
- }
+ if (!(flags & TTU_IGNORE_MLOCK)) {
+ if (vma->vm_flags & VM_LOCKED) {
+ /* PTE-mapped THP are never mlocked */
+ if (!PageTransCompound(page)) {
+ /*
+ * Holding pte lock, we do *not* need
+ * mmap_sem here
+ */
+ mlock_vma_page(page);
+ }
+ ret = SWAP_MLOCK;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ if (flags & TTU_MUNLOCK)
+ continue;
+ }
- /* Move the dirty bit to the physical page now the pte is gone. */
- if (pte_dirty(pteval))
- set_page_dirty(page);
+ if (!(flags & TTU_IGNORE_ACCESS)) {
+ if (ptep_clear_flush_young_notify(vma, address,
+ pvmw.pte)) {
+ ret = SWAP_FAIL;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ }
- /* Update high watermark before we lower rss */
- update_hiwater_rss(mm);
+ /* Nuke the page table entry. */
+ flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+ if (should_defer_flush(mm, flags)) {
+ /*
+ * We clear the PTE but do not flush so potentially
+ * a remote CPU could still be writing to the page.
+ * If the entry was previously clean then the
+ * architecture must guarantee that a clear->dirty
+ * transition on a cached TLB entry is written through
+ * and traps if the PTE is unmapped.
+ */
+ pteval = ptep_get_and_clear(mm, address, pvmw.pte);
- if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
- if (PageHuge(page)) {
- hugetlb_count_sub(1 << compound_order(page), mm);
+ set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
} else {
- dec_mm_counter(mm, mm_counter(page));
+ pteval = ptep_clear_flush(vma, address, pvmw.pte);
}
- set_pte_at(mm, address, pte,
- swp_entry_to_pte(make_hwpoison_entry(page)));
- } else if (pte_unused(pteval)) {
- /*
- * The guest indicated that the page content is of no
- * interest anymore. Simply discard the pte, vmscan
- * will take care of the rest.
- */
- dec_mm_counter(mm, mm_counter(page));
- } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) {
- swp_entry_t entry;
- pte_t swp_pte;
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- entry = make_migration_entry(page, pte_write(pteval));
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pte, swp_pte);
- } else if (PageAnon(page)) {
- swp_entry_t entry = { .val = page_private(page) };
- pte_t swp_pte;
- /*
- * Store the swap location in the pte.
- * See handle_pte_fault() ...
- */
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
- if (!PageDirty(page) && (flags & TTU_LZFREE)) {
- /* It's a freeable page by MADV_FREE */
- dec_mm_counter(mm, MM_ANONPAGES);
- rp->lazyfreed++;
- goto discard;
- }
+ /* Move the dirty bit to the page. Now the pte is gone. */
+ if (pte_dirty(pteval))
+ set_page_dirty(page);
- if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pte, pteval);
- ret = SWAP_FAIL;
- goto out_unmap;
- }
- if (list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- if (list_empty(&mm->mmlist))
- list_add(&mm->mmlist, &init_mm.mmlist);
- spin_unlock(&mmlist_lock);
- }
- dec_mm_counter(mm, MM_ANONPAGES);
- inc_mm_counter(mm, MM_SWAPENTS);
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pte, swp_pte);
- } else
- dec_mm_counter(mm, mm_counter_file(page));
+ /* Update high watermark before we lower rss */
+ update_hiwater_rss(mm);
-discard:
- page_remove_rmap(page, PageHuge(page));
- put_page(page);
+ if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+ if (PageHuge(page)) {
+ int nr = 1 << compound_order(page);
+ hugetlb_count_sub(nr, mm);
+ } else {
+ dec_mm_counter(mm, mm_counter(page));
+ }
+
+ pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
+ set_pte_at(mm, address, pvmw.pte, pteval);
+ } else if (pte_unused(pteval)) {
+ /*
+ * The guest indicated that the page content is of no
+ * interest anymore. Simply discard the pte, vmscan
+ * will take care of the rest.
+ */
+ dec_mm_counter(mm, mm_counter(page));
+ } else if (IS_ENABLED(CONFIG_MIGRATION) &&
+ (flags & TTU_MIGRATION)) {
+ swp_entry_t entry;
+ pte_t swp_pte;
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ entry = make_migration_entry(subpage,
+ pte_write(pteval));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pvmw.pte, swp_pte);
+ } else if (PageAnon(page)) {
+ swp_entry_t entry = { .val = page_private(subpage) };
+ pte_t swp_pte;
+ /*
+ * Store the swap location in the pte.
+ * See handle_pte_fault() ...
+ */
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+
+ if (!PageDirty(page) && (flags & TTU_LZFREE)) {
+ /* It's a freeable page by MADV_FREE */
+ dec_mm_counter(mm, MM_ANONPAGES);
+ rp->lazyfreed++;
+ goto discard;
+ }
-out_unmap:
- pte_unmap_unlock(pte, ptl);
- if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
+ if (swap_duplicate(entry) < 0) {
+ set_pte_at(mm, address, pvmw.pte, pteval);
+ ret = SWAP_FAIL;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ if (list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&mm->mmlist))
+ list_add(&mm->mmlist, &init_mm.mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ dec_mm_counter(mm, MM_ANONPAGES);
+ inc_mm_counter(mm, MM_SWAPENTS);
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pvmw.pte, swp_pte);
+ } else
+ dec_mm_counter(mm, mm_counter_file(page));
+discard:
+ page_remove_rmap(subpage, PageHuge(page));
+ put_page(page);
mmu_notifier_invalidate_page(mm, address);
-out:
+ }
return ret;
}
@@ -1608,7 +1474,7 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
static int page_mapcount_is_zero(struct page *page)
{
- return !page_mapcount(page);
+ return !total_mapcount(page);
}
/**
@@ -1755,7 +1621,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
bool locked)
{
struct anon_vma *anon_vma;
- pgoff_t pgoff;
+ pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
@@ -1769,8 +1635,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
if (!anon_vma)
return ret;
- pgoff = page_to_pgoff(page);
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
+ pgoff_start = page_to_pgoff(page);
+ pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+ anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
+ pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
@@ -1808,7 +1676,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
bool locked)
{
struct address_space *mapping = page_mapping(page);
- pgoff_t pgoff;
+ pgoff_t pgoff_start, pgoff_end;
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
@@ -1823,10 +1691,12 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
if (!mapping)
return ret;
- pgoff = page_to_pgoff(page);
+ pgoff_start = page_to_pgoff(page);
+ pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
if (!locked)
i_mmap_lock_read(mapping);
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+ vma_interval_tree_foreach(vma, &mapping->i_mmap,
+ pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);
cond_resched();
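
The widened interval-tree lookups above matter for THP: a compound page spans several page offsets, so the rmap walk must cover all of them. A small worked example of the range, assuming 4K base pages and a 2M THP (the numbers are illustrative, the expressions mirror the code above):

	/* pgoff range visited by rmap_walk_{anon,file}() for one page */
	pgoff_t pgoff_start = page_to_pgoff(page);
	pgoff_t pgoff_end   = pgoff_start + hpage_nr_pages(page) - 1;

	/* 4K page:  hpage_nr_pages() == 1   -> pgoff_end == pgoff_start
	 * 2M THP:   hpage_nr_pages() == 512 -> pgoff_end == pgoff_start + 511,
	 * so a VMA mapping only the THP's tail pages is still visited. */
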
diff --git a/mm/shmem.c b/mm/shmem.c
index 9c6d22ff44e2..a26649a6633f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1908,8 +1908,9 @@ static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync
return ret;
}
-static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int shmem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
enum sgp_type sgp;
@@ -2330,7 +2331,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index = pos >> PAGE_SHIFT;
/* i_mutex is held by caller */
- if (unlikely(info->seals)) {
+ if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
if (info->seals & F_SEAL_WRITE)
return -EPERM;
if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
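
The narrowed check above only takes the slow path when a write-relevant seal (F_SEAL_WRITE or F_SEAL_GROW) is set; sealed writes still fail the same way from userspace. A runnable sketch, assuming a glibc new enough to expose memfd_create() and MFD_ALLOW_SEALING directly (otherwise go through syscall(2)):

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <string.h>
	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = memfd_create("sealdemo", MFD_ALLOW_SEALING);
		if (fd < 0) { perror("memfd_create"); return 1; }

		if (write(fd, "before", 6) != 6)		/* allowed */
			perror("early write");

		if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE | F_SEAL_GROW) < 0) {
			perror("F_ADD_SEALS"); return 1;
		}

		if (write(fd, "after", 5) < 0)			/* now rejected */
			printf("write after sealing: %s\n", strerror(errno));

		close(fd);
		return 0;
	}
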
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23ff74e61838..09d0e849b07f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -528,6 +528,9 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
static int shutdown_cache(struct kmem_cache *s)
{
+ /* free asan quarantined objects */
+ kasan_cache_shutdown(s);
+
if (__kmem_cache_shutdown(s) != 0)
return -EBUSY;
@@ -816,7 +819,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
get_online_cpus();
get_online_mems();
- kasan_cache_destroy(s);
mutex_lock(&slab_mutex);
s->refcount--;
diff --git a/mm/swap.c b/mm/swap.c
index aabf2e90fe32..c4910f14f957 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -209,9 +209,10 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
{
int *pgmoved = arg;
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- enum lru_list lru = page_lru_base_type(page);
- list_move_tail(&page->lru, &lruvec->lists[lru]);
+ if (PageLRU(page) && !PageUnevictable(page)) {
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+ ClearPageActive(page);
+ add_page_to_lru_list_tail(page, lruvec, page_lru(page));
(*pgmoved)++;
}
}
@@ -235,7 +236,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
*/
void rotate_reclaimable_page(struct page *page)
{
- if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+ if (!PageLocked(page) && !PageDirty(page) &&
!PageUnevictable(page) && PageLRU(page)) {
struct pagevec *pvec;
unsigned long flags;
diff --git a/mm/truncate.c b/mm/truncate.c
index dd7b24e083c5..f2db67465495 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -20,6 +20,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h> /* grr. try_to_release_page,
do_invalidatepage */
+#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1e5c2f94e8a3..9f0ad2a4f102 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -197,22 +197,25 @@ retry:
* retry, dst_vma will be set to NULL and we must lookup again.
*/
if (!dst_vma) {
- err = -EINVAL;
+ err = -ENOENT;
dst_vma = find_vma(dst_mm, dst_start);
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
goto out_unlock;
-
- if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
- goto out_unlock;
-
/*
- * Make sure the remaining dst range is both valid and
- * fully within a single existing vma.
+ * Only allow __mcopy_atomic_hugetlb on userfaultfd
+ * registered ranges.
*/
+ if (!dst_vma->vm_userfaultfd_ctx.ctx)
+ goto out_unlock;
+
if (dst_start < dst_vma->vm_start ||
dst_start + len > dst_vma->vm_end)
goto out_unlock;
+ err = -EINVAL;
+ if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
+ goto out_unlock;
+
vm_shared = dst_vma->vm_flags & VM_SHARED;
}
@@ -221,12 +224,6 @@ retry:
goto out_unlock;
/*
- * Only allow __mcopy_atomic_hugetlb on userfaultfd registered ranges.
- */
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
- goto out_unlock;
-
- /*
 * If not shared, ensure the dst_vma has an anon_vma.
*/
err = -ENOMEM;
@@ -404,22 +401,35 @@ retry:
* Make sure the vma is not shared, that the dst range is
* both valid and fully within a single existing vma.
*/
- err = -EINVAL;
+ err = -ENOENT;
dst_vma = find_vma(dst_mm, dst_start);
if (!dst_vma)
goto out_unlock;
/*
- * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
- * it will overwrite vm_ops, so vma_is_anonymous must return false.
+ * Be strict and only allow __mcopy_atomic on userfaultfd
+ * registered ranges to prevent userland errors going
+ * unnoticed. As far as the VM consistency is concerned, it
+ * would be perfectly safe to remove this check, but there's
+ * no useful usage for __mcopy_atomic outside of userfaultfd
+ * registered ranges. This is after all why these are ioctls
+ * belonging to the userfaultfd and not syscalls.
*/
- if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
- dst_vma->vm_flags & VM_SHARED))
+ if (!dst_vma->vm_userfaultfd_ctx.ctx)
goto out_unlock;
if (dst_start < dst_vma->vm_start ||
dst_start + len > dst_vma->vm_end)
goto out_unlock;
+ err = -EINVAL;
+ /*
+ * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
+ * it will overwrite vm_ops, so vma_is_anonymous must return false.
+ */
+ if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
+ dst_vma->vm_flags & VM_SHARED))
+ goto out_unlock;
+
/*
* If this is a HUGETLB vma, pass off to appropriate routine
*/
@@ -427,18 +437,6 @@ retry:
return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
src_start, len, zeropage);
- /*
- * Be strict and only allow __mcopy_atomic on userfaultfd
- * registered ranges to prevent userland errors going
- * unnoticed. As far as the VM consistency is concerned, it
- * would be perfectly safe to remove this check, but there's
- * no useful usage for __mcopy_atomic ouside of userfaultfd
- * registered ranges. This is after all why these are ioctls
- * belonging to the userfaultfd and not syscalls.
- */
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
- goto out_unlock;
-
if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
goto out_unlock;
diff --git a/mm/util.c b/mm/util.c
index 3cb2164f4099..b8f538863b5a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,6 +11,7 @@
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
+#include <linux/userfaultfd_k.h>
#include <asm/sections.h>
#include <linux/uaccess.h>
@@ -297,14 +298,16 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long ret;
struct mm_struct *mm = current->mm;
unsigned long populate;
+ LIST_HEAD(uf);
ret = security_mmap_file(file, prot, flag);
if (!ret) {
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
- &populate);
+ &populate, &uf);
up_write(&mm->mmap_sem);
+ userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(ret, populate);
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d89034a393f2..be93949b4885 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1642,6 +1642,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
+ if (fatal_signal_pending(current)) {
+ area->nr_pages = i;
+ goto fail;
+ }
+
if (node == NUMA_NO_NODE)
page = alloc_page(alloc_mask);
else
@@ -2654,7 +2659,7 @@ static int s_show(struct seq_file *m, void *p)
seq_printf(m, " pages=%d", v->nr_pages);
if (v->phys_addr)
- seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
+ seq_printf(m, " phys=%pa", &v->phys_addr);
if (v->flags & VM_IOREMAP)
seq_puts(m, " ioremap");
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 149fdf6c5c56..6063581f705c 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -112,9 +112,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
unsigned long reclaimed)
{
unsigned long scale = scanned + reclaimed;
- unsigned long pressure;
+ unsigned long pressure = 0;
/*
+ * reclaimed can be greater than scanned in cases
+ * like THP, where scanned may be 1 while reclaimed
+ * is 512
+ */
+ if (reclaimed >= scanned)
+ goto out;
+ /*
* We calculate the ratio (in percents) of how many pages were
* scanned vs. reclaimed in a given time frame (window). Note that
* time is in VM reclaimer's "ticks", i.e. number of pages
@@ -124,6 +131,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
pressure = scale - (reclaimed * scale / scanned);
pressure = pressure * 100 / scale;
+out:
pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
scanned, reclaimed);
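
To see what the new guard protects against, here is the same integer arithmetic as a standalone program; calc_pressure() is just a transcription of the lines above, the sample numbers are made up:

	#include <stdio.h>

	/* unsigned long math, same order of operations as vmpressure_calc_level() */
	static unsigned long calc_pressure(unsigned long scanned, unsigned long reclaimed)
	{
		unsigned long scale = scanned + reclaimed;
		unsigned long pressure = 0;

		if (reclaimed >= scanned)	/* new THP guard: report no pressure */
			return 0;

		pressure = scale - (reclaimed * scale / scanned);
		pressure = pressure * 100 / scale;
		return pressure;
	}

	int main(void)
	{
		/* 1000 pages scanned, 200 reclaimed -> 80 (percent) */
		printf("%lu\n", calc_pressure(1000, 200));
		/* THP case: 1 scanned, 512 reclaimed -> 0 instead of an underflow */
		printf("%lu\n", calc_pressure(1, 512));
		return 0;
	}
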
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7bb23ff229b6..70aa739c6b68 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,6 +87,7 @@ struct scan_control {
/* The highest zone to isolate pages for reclaim from */
enum zone_type reclaim_idx;
+ /* Writepage batching in laptop mode; RECLAIM_WRITE */
unsigned int may_writepage:1;
/* Can mapped pages be reclaimed? */
@@ -1055,6 +1056,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* throttling so we could easily OOM just because too many
* pages are in writeback and there is nothing else to
* reclaim. Wait for the writeback to complete.
+ *
+ * In cases 1) and 2) we activate the pages to get them out of
+ * the way while we continue scanning for clean pages on the
+ * inactive list and refilling from the active list. The
+ * observation here is that waiting for disk writes is more
+ * expensive than potentially causing reloads down the line.
+ * Since they're marked for immediate reclaim, they won't put
+ * memory pressure on the cache working set any longer than it
+ * takes to write them to disk.
*/
if (PageWriteback(page)) {
/* Case 1 above */
@@ -1062,7 +1072,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
PageReclaim(page) &&
test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
nr_immediate++;
- goto keep_locked;
+ goto activate_locked;
/* Case 2 above */
} else if (sane_reclaim(sc) ||
@@ -1080,7 +1090,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
SetPageReclaim(page);
nr_writeback++;
- goto keep_locked;
+ goto activate_locked;
/* Case 3 above */
} else {
@@ -1152,13 +1162,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageDirty(page)) {
/*
- * Only kswapd can writeback filesystem pages to
- * avoid risk of stack overflow but only writeback
- * if many dirty pages have been encountered.
+ * Only kswapd can writeback filesystem pages
+ * to avoid risk of stack overflow. But avoid
+ * injecting inefficient single-page IO into
+ * flusher writeback as much as possible: only
+ * write pages when we've encountered many
+ * dirty pages, and when we've already scanned
+ * the rest of the LRU for clean pages and see
+ * the same dirty pages again (PageReclaim).
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() ||
- !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+ (!current_is_kswapd() || !PageReclaim(page) ||
+ !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -1168,7 +1183,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
SetPageReclaim(page);
- goto keep_locked;
+ goto activate_locked;
}
if (references == PAGEREF_RECLAIM_CLEAN)
@@ -1373,13 +1388,10 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
* wants to isolate pages it will be able to operate on without
* blocking - clean pages for the most part.
*
- * ISOLATE_CLEAN means that only clean pages should be isolated. This
- * is used by reclaim when it is cannot write to backing storage
- *
* ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
* that it is possible to migrate without blocking
*/
- if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+ if (mode & ISOLATE_ASYNC_MIGRATE) {
/* All the caller can do on PageWriteback is block */
if (PageWriteback(page))
return ret;
@@ -1387,10 +1399,6 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
if (PageDirty(page)) {
struct address_space *mapping;
- /* ISOLATE_CLEAN means only clean pages */
- if (mode & ISOLATE_CLEAN)
- return ret;
-
/*
* Only pages without mappings or that have a
* ->migratepage callback are possible to migrate
@@ -1731,8 +1739,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
spin_lock_irq(&pgdat->lru_lock);
@@ -1806,12 +1812,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
/*
* If dirty pages are scanned that are not queued for IO, it
- * implies that flushers are not keeping up. In this case, flag
- * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
- * reclaim context.
+ * implies that flushers are not doing their job. This can
+ * happen when memory pressure pushes dirty pages to the end of
+ * the LRU before the dirty limits are breached and the dirty
+ * data has expired. It can also happen when the proportion of
+ * dirty pages grows not through writes but through memory
+ * pressure reclaiming all the clean cache. And in some cases,
+ * the flushers simply cannot keep up with the allocation
+ * rate. Nudge the flusher threads in case they are asleep, but
+ * also allow kswapd to start writing pages during reclaim.
*/
- if (stat.nr_unqueued_dirty == nr_taken)
+ if (stat.nr_unqueued_dirty == nr_taken) {
+ wakeup_flusher_threads(0, WB_REASON_VMSCAN);
set_bit(PGDAT_DIRTY, &pgdat->flags);
+ }
/*
 * If kswapd scans pages marked for immediate
@@ -1929,8 +1943,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
spin_lock_irq(&pgdat->lru_lock);
@@ -2759,8 +2771,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc)
{
int initial_priority = sc->priority;
- unsigned long total_scanned = 0;
- unsigned long writeback_threshold;
retry:
delayacct_freepages_start();
@@ -2773,7 +2783,6 @@ retry:
sc->nr_scanned = 0;
shrink_zones(zonelist, sc);
- total_scanned += sc->nr_scanned;
if (sc->nr_reclaimed >= sc->nr_to_reclaim)
break;
@@ -2786,20 +2795,6 @@ retry:
*/
if (sc->priority < DEF_PRIORITY - 2)
sc->may_writepage = 1;
-
- /*
- * Try to write back as many pages as we just scanned. This
- * tends to cause slow streaming writers to write data to the
- * disk smoothly, at the dirtying rate, which is nice. But
- * that's undesirable in laptop mode, where we *want* lumpy
- * writeout. So in laptop mode, write out the whole world.
- */
- writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
- if (total_scanned > writeback_threshold) {
- wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
- WB_REASON_TRY_TO_FREE_PAGES);
- sc->may_writepage = 1;
- }
} while (--sc->priority >= 0);
delayacct_freepages_end();
@@ -3101,6 +3096,7 @@ static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
*/
clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
+ clear_bit(PGDAT_WRITEBACK, &zone->zone_pgdat->flags);
return true;
}
diff --git a/mm/workingset.c b/mm/workingset.c
index a67f5796b995..79ed5364375d 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -6,6 +6,7 @@
#include <linux/memcontrol.h>
#include <linux/writeback.h>
+#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 207e5ddc87a2..8970a2fd3b1a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -34,29 +34,62 @@
/*****************
* Structures
*****************/
+struct z3fold_pool;
+struct z3fold_ops {
+ int (*evict)(struct z3fold_pool *pool, unsigned long handle);
+};
+
+enum buddy {
+ HEADLESS = 0,
+ FIRST,
+ MIDDLE,
+ LAST,
+ BUDDIES_MAX
+};
+
+/*
+ * struct z3fold_header - z3fold page metadata occupying the first chunk of each
+ * z3fold page, except for HEADLESS pages
+ * @buddy: links the z3fold page into the relevant list in the pool
+ * @page_lock: per-page lock
+ * @refcount: reference count for the z3fold page
+ * @first_chunks: the size of the first buddy in chunks, 0 if free
+ * @middle_chunks: the size of the middle buddy in chunks, 0 if free
+ * @last_chunks: the size of the last buddy in chunks, 0 if free
+ * @start_middle: index (in chunks) at which the middle buddy starts
+ * @first_num: the starting number (for the first handle)
+ */
+struct z3fold_header {
+ struct list_head buddy;
+ spinlock_t page_lock;
+ struct kref refcount;
+ unsigned short first_chunks;
+ unsigned short middle_chunks;
+ unsigned short last_chunks;
+ unsigned short start_middle;
+ unsigned short first_num:2;
+};
+
/*
* NCHUNKS_ORDER determines the internal allocation granularity, effectively
* adjusting internal fragmentation. It also determines the number of
* freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
- * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
- * in allocated page is occupied by z3fold header, NCHUNKS will be calculated
- * to 63 which shows the max number of free chunks in z3fold page, also there
- * will be 63 freelists per pool.
+ * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
+ * at the beginning of an allocated page are occupied by the z3fold header, so
+ * NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is the
+ * maximum number of free chunks in a z3fold page; there are likewise 63 (or
+ * 62, respectively) freelists per pool.
*/
#define NCHUNKS_ORDER 6
#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
+#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
+#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
+#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
#define BUDDY_MASK (0x3)
-struct z3fold_pool;
-struct z3fold_ops {
- int (*evict)(struct z3fold_pool *pool, unsigned long handle);
-};
-
/**
* struct z3fold_pool - stores metadata for each z3fold pool
* @lock: protects all pool fields and first|last_chunk fields of any
@@ -64,8 +97,6 @@ struct z3fold_ops {
* @unbuddied: array of lists tracking z3fold pages that contain 2- buddies;
* the lists each z3fold page is added to depends on the size of
* its free region.
- * @buddied: list tracking the z3fold pages that contain 3 buddies;
- * these z3fold pages are full
* @lru: list tracking the z3fold pages in LRU order by most recently
* added buddy.
* @pages_nr: number of z3fold pages in the pool.
@@ -78,49 +109,22 @@ struct z3fold_ops {
struct z3fold_pool {
spinlock_t lock;
struct list_head unbuddied[NCHUNKS];
- struct list_head buddied;
struct list_head lru;
- u64 pages_nr;
+ atomic64_t pages_nr;
const struct z3fold_ops *ops;
struct zpool *zpool;
const struct zpool_ops *zpool_ops;
};
-enum buddy {
- HEADLESS = 0,
- FIRST,
- MIDDLE,
- LAST,
- BUDDIES_MAX
-};
-
-/*
- * struct z3fold_header - z3fold page metadata occupying the first chunk of each
- * z3fold page, except for HEADLESS pages
- * @buddy: links the z3fold page into the relevant list in the pool
- * @first_chunks: the size of the first buddy in chunks, 0 if free
- * @middle_chunks: the size of the middle buddy in chunks, 0 if free
- * @last_chunks: the size of the last buddy in chunks, 0 if free
- * @first_num: the starting number (for the first handle)
- */
-struct z3fold_header {
- struct list_head buddy;
- unsigned short first_chunks;
- unsigned short middle_chunks;
- unsigned short last_chunks;
- unsigned short start_middle;
- unsigned short first_num:2;
-};
-
/*
* Internal z3fold page flags
*/
enum z3fold_page_flags {
- UNDER_RECLAIM = 0,
- PAGE_HEADLESS,
+ PAGE_HEADLESS = 0,
MIDDLE_CHUNK_MAPPED,
};
+
/*****************
* Helpers
*****************/
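
The chunk macros earlier in this file determine how much of a page is really usable. A standalone rerun of that arithmetic, assuming PAGE_SIZE of 4096 and a 64-byte header (the header size is an assumption; with CONFIG_DEBUG_SPINLOCK it grows and a second chunk is lost):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_SHIFT	12
	#define NCHUNKS_ORDER	6
	#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
	#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)

	static unsigned long round_up_to(unsigned long x, unsigned long a)
	{
		return (x + a - 1) / a * a;
	}

	int main(void)
	{
		unsigned long hdr = 64;	/* assumed sizeof(struct z3fold_header) */
		unsigned long zhdr_aligned = round_up_to(hdr, CHUNK_SIZE);
		unsigned long zhdr_chunks = zhdr_aligned >> CHUNK_SHIFT;
		unsigned long total = PAGE_SIZE >> CHUNK_SHIFT;
		unsigned long nchunks = (PAGE_SIZE - zhdr_aligned) >> CHUNK_SHIFT;

		printf("chunk size %lu, %lu total chunks, %lu header chunk(s), %lu usable\n",
		       CHUNK_SIZE, total, zhdr_chunks, nchunks);
		return 0;
	}
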
@@ -140,10 +144,11 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
struct z3fold_header *zhdr = page_address(page);
INIT_LIST_HEAD(&page->lru);
- clear_bit(UNDER_RECLAIM, &page->private);
clear_bit(PAGE_HEADLESS, &page->private);
clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+ spin_lock_init(&zhdr->page_lock);
+ kref_init(&zhdr->refcount);
zhdr->first_chunks = 0;
zhdr->middle_chunks = 0;
zhdr->last_chunks = 0;
@@ -154,9 +159,36 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
}
/* Resets the struct page fields and frees the page */
-static void free_z3fold_page(struct z3fold_header *zhdr)
+static void free_z3fold_page(struct page *page)
+{
+ __free_page(page);
+}
+
+static void release_z3fold_page(struct kref *ref)
+{
+ struct z3fold_header *zhdr;
+ struct page *page;
+
+ zhdr = container_of(ref, struct z3fold_header, refcount);
+ page = virt_to_page(zhdr);
+
+ if (!list_empty(&zhdr->buddy))
+ list_del(&zhdr->buddy);
+ if (!list_empty(&page->lru))
+ list_del(&page->lru);
+ free_z3fold_page(page);
+}
+
+/* Lock a z3fold page */
+static inline void z3fold_page_lock(struct z3fold_header *zhdr)
+{
+ spin_lock(&zhdr->page_lock);
+}
+
+/* Unlock a z3fold page */
+static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
- __free_page(virt_to_page(zhdr));
+ spin_unlock(&zhdr->page_lock);
}
/*
@@ -204,9 +236,10 @@ static int num_free_chunks(struct z3fold_header *zhdr)
*/
if (zhdr->middle_chunks != 0) {
int nfree_before = zhdr->first_chunks ?
- 0 : zhdr->start_middle - 1;
+ 0 : zhdr->start_middle - ZHDR_CHUNKS;
int nfree_after = zhdr->last_chunks ?
- 0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;
+ 0 : TOTAL_CHUNKS -
+ (zhdr->start_middle + zhdr->middle_chunks);
nfree = max(nfree_before, nfree_after);
} else
nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
@@ -236,9 +269,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
spin_lock_init(&pool->lock);
for_each_unbuddied_list(i, 0)
INIT_LIST_HEAD(&pool->unbuddied[i]);
- INIT_LIST_HEAD(&pool->buddied);
INIT_LIST_HEAD(&pool->lru);
- pool->pages_nr = 0;
+ atomic64_set(&pool->pages_nr, 0);
pool->ops = ops;
return pool;
}
@@ -254,25 +286,58 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
kfree(pool);
}
+static inline void *mchunk_memmove(struct z3fold_header *zhdr,
+ unsigned short dst_chunk)
+{
+ void *beg = zhdr;
+ return memmove(beg + (dst_chunk << CHUNK_SHIFT),
+ beg + (zhdr->start_middle << CHUNK_SHIFT),
+ zhdr->middle_chunks << CHUNK_SHIFT);
+}
+
+#define BIG_CHUNK_GAP 3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
struct page *page = virt_to_page(zhdr);
- void *beg = zhdr;
+ if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
+ return 0; /* can't move middle chunk, it's used */
+
+ if (zhdr->middle_chunks == 0)
+ return 0; /* nothing to compact */
- if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
- zhdr->middle_chunks != 0 &&
- zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
- memmove(beg + ZHDR_SIZE_ALIGNED,
- beg + (zhdr->start_middle << CHUNK_SHIFT),
- zhdr->middle_chunks << CHUNK_SHIFT);
+ if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+ /* move to the beginning */
+ mchunk_memmove(zhdr, ZHDR_CHUNKS);
zhdr->first_chunks = zhdr->middle_chunks;
zhdr->middle_chunks = 0;
zhdr->start_middle = 0;
zhdr->first_num++;
return 1;
}
+
+ /*
+ * moving data is expensive, so let's only do that if
+ * there's substantial gain (at least BIG_CHUNK_GAP chunks)
+ */
+ if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+ zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
+ BIG_CHUNK_GAP) {
+ mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
+ zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
+ return 1;
+ } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+ TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
+ + zhdr->middle_chunks) >=
+ BIG_CHUNK_GAP) {
+ unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
+ zhdr->middle_chunks;
+ mchunk_memmove(zhdr, new_start);
+ zhdr->start_middle = new_start;
+ return 1;
+ }
+
return 0;
}
@@ -313,50 +378,63 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
bud = HEADLESS;
else {
chunks = size_to_chunks(size);
- spin_lock(&pool->lock);
/* First, try to find an unbuddied z3fold page. */
zhdr = NULL;
for_each_unbuddied_list(i, chunks) {
- if (!list_empty(&pool->unbuddied[i])) {
- zhdr = list_first_entry(&pool->unbuddied[i],
+ spin_lock(&pool->lock);
+ zhdr = list_first_entry_or_null(&pool->unbuddied[i],
struct z3fold_header, buddy);
- page = virt_to_page(zhdr);
- if (zhdr->first_chunks == 0) {
- if (zhdr->middle_chunks != 0 &&
- chunks >= zhdr->start_middle)
- bud = LAST;
- else
- bud = FIRST;
- } else if (zhdr->last_chunks == 0)
+ if (!zhdr) {
+ spin_unlock(&pool->lock);
+ continue;
+ }
+ kref_get(&zhdr->refcount);
+ list_del_init(&zhdr->buddy);
+ spin_unlock(&pool->lock);
+
+ page = virt_to_page(zhdr);
+ z3fold_page_lock(zhdr);
+ if (zhdr->first_chunks == 0) {
+ if (zhdr->middle_chunks != 0 &&
+ chunks >= zhdr->start_middle)
bud = LAST;
- else if (zhdr->middle_chunks == 0)
- bud = MIDDLE;
- else {
- pr_err("No free chunks in unbuddied\n");
- WARN_ON(1);
- continue;
- }
- list_del(&zhdr->buddy);
- goto found;
+ else
+ bud = FIRST;
+ } else if (zhdr->last_chunks == 0)
+ bud = LAST;
+ else if (zhdr->middle_chunks == 0)
+ bud = MIDDLE;
+ else {
+ z3fold_page_unlock(zhdr);
+ spin_lock(&pool->lock);
+ if (kref_put(&zhdr->refcount,
+ release_z3fold_page))
+ atomic64_dec(&pool->pages_nr);
+ spin_unlock(&pool->lock);
+ pr_err("No free chunks in unbuddied\n");
+ WARN_ON(1);
+ continue;
}
+ goto found;
}
bud = FIRST;
- spin_unlock(&pool->lock);
}
/* Couldn't find unbuddied z3fold page, create new one */
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- spin_lock(&pool->lock);
- pool->pages_nr++;
+
+ atomic64_inc(&pool->pages_nr);
zhdr = init_z3fold_page(page);
if (bud == HEADLESS) {
set_bit(PAGE_HEADLESS, &page->private);
+ spin_lock(&pool->lock);
goto headless;
}
+ z3fold_page_lock(zhdr);
found:
if (bud == FIRST)
@@ -365,17 +443,15 @@ found:
zhdr->last_chunks = chunks;
else {
zhdr->middle_chunks = chunks;
- zhdr->start_middle = zhdr->first_chunks + 1;
+ zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
}
+ spin_lock(&pool->lock);
if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
zhdr->middle_chunks == 0) {
/* Add to unbuddied list */
freechunks = num_free_chunks(zhdr);
list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
- } else {
- /* Add to buddied list */
- list_add(&zhdr->buddy, &pool->buddied);
}
headless:
@@ -387,6 +463,8 @@ headless:
*handle = encode_handle(zhdr, bud);
spin_unlock(&pool->lock);
+ if (bud != HEADLESS)
+ z3fold_page_unlock(zhdr);
return 0;
}
@@ -408,7 +486,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
struct page *page;
enum buddy bud;
- spin_lock(&pool->lock);
zhdr = handle_to_z3fold_header(handle);
page = virt_to_page(zhdr);
@@ -416,6 +493,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
/* HEADLESS page stored */
bud = HEADLESS;
} else {
+ z3fold_page_lock(zhdr);
bud = handle_to_buddy(handle);
switch (bud) {
@@ -432,38 +510,36 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
default:
pr_err("%s: unknown bud %d\n", __func__, bud);
WARN_ON(1);
- spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
return;
}
}
- if (test_bit(UNDER_RECLAIM, &page->private)) {
- /* z3fold page is under reclaim, reclaim will free */
- spin_unlock(&pool->lock);
- return;
- }
-
- if (bud != HEADLESS) {
- /* Remove from existing buddy list */
- list_del(&zhdr->buddy);
- }
-
- if (bud == HEADLESS ||
- (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
- zhdr->last_chunks == 0)) {
- /* z3fold page is empty, free */
+ if (bud == HEADLESS) {
+ spin_lock(&pool->lock);
list_del(&page->lru);
- clear_bit(PAGE_HEADLESS, &page->private);
- free_z3fold_page(zhdr);
- pool->pages_nr--;
+ spin_unlock(&pool->lock);
+ free_z3fold_page(page);
+ atomic64_dec(&pool->pages_nr);
} else {
- z3fold_compact_page(zhdr);
- /* Add to the unbuddied list */
- freechunks = num_free_chunks(zhdr);
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
+ zhdr->last_chunks != 0) {
+ z3fold_compact_page(zhdr);
+ /* Add to the unbuddied list */
+ spin_lock(&pool->lock);
+ if (!list_empty(&zhdr->buddy))
+ list_del(&zhdr->buddy);
+ freechunks = num_free_chunks(zhdr);
+ list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ spin_unlock(&pool->lock);
+ }
+ z3fold_page_unlock(zhdr);
+ spin_lock(&pool->lock);
+ if (kref_put(&zhdr->refcount, release_z3fold_page))
+ atomic64_dec(&pool->pages_nr);
+ spin_unlock(&pool->lock);
}
- spin_unlock(&pool->lock);
}
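
The UNDER_RECLAIM flag is gone: a page is instead pinned by a reference count, and whichever of the free or reclaim paths drops the last reference is the one that actually releases it. A minimal user-space analogue of that idea (a plain counter here; the kernel uses an atomic kref with release_z3fold_page as the release callback):

    #include <stdio.h>
    #include <stdlib.h>

    struct zpage { int refcount; };

    static void put_zpage(struct zpage *p)
    {
        if (--p->refcount == 0) {      /* last reference: release the page */
            printf("releasing page\n");
            free(p);
        }
    }

    int main(void)
    {
        struct zpage *p = malloc(sizeof(*p));
        if (!p)
            return 1;
        p->refcount = 2;               /* e.g. the pool and a reclaimer both hold it */
        put_zpage(p);                  /* reclaim drops its reference */
        put_zpage(p);                  /* free drops the last one and releases */
        return 0;
    }
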
/**
@@ -510,20 +586,25 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
spin_lock(&pool->lock);
- if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
- retries == 0) {
+ if (!pool->ops || !pool->ops->evict || retries == 0) {
spin_unlock(&pool->lock);
return -EINVAL;
}
for (i = 0; i < retries; i++) {
+ if (list_empty(&pool->lru)) {
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+ }
page = list_last_entry(&pool->lru, struct page, lru);
- list_del(&page->lru);
+ list_del_init(&page->lru);
- /* Protect z3fold page against free */
- set_bit(UNDER_RECLAIM, &page->private);
zhdr = page_address(page);
if (!test_bit(PAGE_HEADLESS, &page->private)) {
- list_del(&zhdr->buddy);
+ if (!list_empty(&zhdr->buddy))
+ list_del_init(&zhdr->buddy);
+ kref_get(&zhdr->refcount);
+ spin_unlock(&pool->lock);
+ z3fold_page_lock(zhdr);
/*
* We need encode the handles before unlocking, since
* we can race with free that will set
@@ -538,13 +619,13 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
middle_handle = encode_handle(zhdr, MIDDLE);
if (zhdr->last_chunks)
last_handle = encode_handle(zhdr, LAST);
+ z3fold_page_unlock(zhdr);
} else {
first_handle = encode_handle(zhdr, HEADLESS);
last_handle = middle_handle = 0;
+ spin_unlock(&pool->lock);
}
- spin_unlock(&pool->lock);
-
/* Issue the eviction callback(s) */
if (middle_handle) {
ret = pool->ops->evict(pool, middle_handle);
@@ -562,36 +643,40 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
goto next;
}
next:
- spin_lock(&pool->lock);
- clear_bit(UNDER_RECLAIM, &page->private);
- if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
- (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
- zhdr->middle_chunks == 0)) {
- /*
- * All buddies are now free, free the z3fold page and
- * return success.
- */
- clear_bit(PAGE_HEADLESS, &page->private);
- free_z3fold_page(zhdr);
- pool->pages_nr--;
- spin_unlock(&pool->lock);
- return 0;
- } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
- if (zhdr->first_chunks != 0 &&
- zhdr->last_chunks != 0 &&
- zhdr->middle_chunks != 0) {
- /* Full, add to buddied list */
- list_add(&zhdr->buddy, &pool->buddied);
+ if (test_bit(PAGE_HEADLESS, &page->private)) {
+ if (ret == 0) {
+ free_z3fold_page(page);
+ return 0;
} else {
+ spin_lock(&pool->lock);
+ }
+ } else {
+ z3fold_page_lock(zhdr);
+ if ((zhdr->first_chunks || zhdr->last_chunks ||
+ zhdr->middle_chunks) &&
+ !(zhdr->first_chunks && zhdr->last_chunks &&
+ zhdr->middle_chunks)) {
z3fold_compact_page(zhdr);
/* add to unbuddied list */
+ spin_lock(&pool->lock);
freechunks = num_free_chunks(zhdr);
list_add(&zhdr->buddy,
&pool->unbuddied[freechunks]);
+ spin_unlock(&pool->lock);
+ }
+ z3fold_page_unlock(zhdr);
+ spin_lock(&pool->lock);
+ if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+ atomic64_dec(&pool->pages_nr);
+ return 0;
}
}
- /* add to beginning of LRU */
+ /*
+ * Add to the beginning of LRU.
+ * Pool lock has to be kept here to ensure the page has
+ * not already been released
+ */
list_add(&page->lru, &pool->lru);
}
spin_unlock(&pool->lock);
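
Both the free and reclaim paths above rely on the list_empty()/list_del_init() idiom so that a header can be taken off a list by either path without a double removal. A stand-alone sketch of why re-initializing the node on delete makes that check work (simplified helpers, not <linux/list.h>):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static int  list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add(struct list_head *n, struct list_head *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);             /* node now reports "not on any list" */
    }

    int main(void)
    {
        struct list_head lru, page;

        INIT_LIST_HEAD(&lru);
        INIT_LIST_HEAD(&page);
        list_add(&page, &lru);
        printf("on a list: %d\n", !list_empty(&page));   /* 1 */
        list_del_init(&page);
        printf("on a list: %d\n", !list_empty(&page));   /* 0: second removal safely skipped */
        return 0;
    }
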
@@ -615,7 +700,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
void *addr;
enum buddy buddy;
- spin_lock(&pool->lock);
zhdr = handle_to_z3fold_header(handle);
addr = zhdr;
page = virt_to_page(zhdr);
@@ -623,6 +707,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
if (test_bit(PAGE_HEADLESS, &page->private))
goto out;
+ z3fold_page_lock(zhdr);
buddy = handle_to_buddy(handle);
switch (buddy) {
case FIRST:
@@ -641,8 +726,9 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
addr = NULL;
break;
}
+
+ z3fold_page_unlock(zhdr);
out:
- spin_unlock(&pool->lock);
return addr;
}
@@ -657,31 +743,28 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
struct page *page;
enum buddy buddy;
- spin_lock(&pool->lock);
zhdr = handle_to_z3fold_header(handle);
page = virt_to_page(zhdr);
- if (test_bit(PAGE_HEADLESS, &page->private)) {
- spin_unlock(&pool->lock);
+ if (test_bit(PAGE_HEADLESS, &page->private))
return;
- }
+ z3fold_page_lock(zhdr);
buddy = handle_to_buddy(handle);
if (buddy == MIDDLE)
clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
- spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
}
/**
* z3fold_get_pool_size() - gets the z3fold pool size in pages
* @pool: pool whose size is being queried
*
- * Returns: size in pages of the given pool. The pool lock need not be
- * taken to access pages_nr.
+ * Returns: size in pages of the given pool.
*/
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
- return pool->pages_nr;
+ return atomic64_read(&pool->pages_nr);
}
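
With pages_nr converted to an atomic64_t the counter is updated without the pool lock, so the pool-size query above no longer needs it either. A user-space sketch of the same pattern using C11 atomics (illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_llong pages_nr;                  /* starts at 0 */

    int main(void)
    {
        atomic_fetch_add(&pages_nr, 1);            /* page allocated */
        atomic_fetch_add(&pages_nr, 1);            /* another page allocated */
        atomic_fetch_sub(&pages_nr, 1);            /* one page released */
        printf("pool size: %lld page(s)\n",
               (long long)atomic_load(&pages_nr));
        return 0;
    }
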
/*****************
@@ -780,8 +863,8 @@ MODULE_ALIAS("zpool-z3fold");
static int __init init_z3fold(void)
{
- /* Make sure the z3fold header will fit in one chunk */
- BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED);
+ /* Make sure the z3fold header is not larger than the page size */
+ BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
zpool_register_driver(&z3fold_zpool_driver);
return 0;
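
The relaxed build-time assertion reflects that the header may now span more than one chunk (ZHDR_CHUNKS); it only has to fit inside the page itself. A user-space analogue of the check with placeholder sizes (not the kernel's):

    #include <stdio.h>

    #define PAGE_SIZE         4096
    #define CHUNK_SIZE        64                    /* hypothetical */
    #define ZHDR_SIZE_ALIGNED (2 * CHUNK_SIZE)      /* header may span >1 chunk */

    _Static_assert(ZHDR_SIZE_ALIGNED <= PAGE_SIZE,
                   "z3fold header must not be larger than the page size");

    int main(void)
    {
        printf("header uses %d of %d bytes\n", ZHDR_SIZE_ALIGNED, PAGE_SIZE);
        return 0;
    }
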
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a1f24989ac23..b7b1fb6c8c21 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -24,7 +24,6 @@
*
* Usage of struct page flags:
* PG_private: identifies the first component page
- * PG_private2: identifies the last component page
* PG_owner_priv_1: identifies the huge component page
*
*/
@@ -268,10 +267,6 @@ struct zs_pool {
#endif
};
-/*
- * A zspage's class index and fullness group
- * are encoded in its (first)page->mapping
- */
#define FULLNESS_BITS 2
#define CLASS_BITS 8
#define ISOLATED_BITS 3
@@ -938,7 +933,6 @@ static void reset_page(struct page *page)
{
__ClearPageMovable(page);
ClearPagePrivate(page);
- ClearPagePrivate2(page);
set_page_private(page, 0);
page_mapcount_reset(page);
ClearPageHugeObject(page);
@@ -1085,7 +1079,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
* 2. each sub-page point to zspage using page->private
*
* we set PG_private to identify the first page (i.e. no other sub-page
- * has this flag set) and PG_private_2 to identify the last page.
+ * has this flag set).
*/
for (i = 0; i < nr_pages; i++) {
page = pages[i];
@@ -1100,8 +1094,6 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
} else {
prev_page->freelist = page;
}
- if (i == nr_pages - 1)
- SetPagePrivate2(page);
prev_page = page;
}
}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 982c52ca6473..918259a55f65 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -424,7 +424,7 @@ our $typeTypedefs = qr{(?x:
our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b};
our $logFunctions = qr{(?x:
- printk(?:_ratelimited|_once|)|
+ printk(?:_ratelimited|_once|_deferred_once|_deferred|)|
(?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|
WARN(?:_RATELIMIT|_ONCE|)|
panic|
@@ -2134,7 +2134,7 @@ sub process {
my $in_header_lines = $file ? 0 : 1;
my $in_commit_log = 0; #Scanning lines before patch
my $has_commit_log = 0; #Encountered lines before patch
- my $commit_log_possible_stack_dump = 0;
+ my $commit_log_possible_stack_dump = 0;
my $commit_log_long_line = 0;
my $commit_log_has_diff = 0;
my $reported_maintainer_file = 0;
@@ -2154,6 +2154,7 @@ sub process {
my $realline = 0;
my $realcnt = 0;
my $here = '';
+ my $context_function; #undef'd unless there's a known function
my $in_comment = 0;
my $comment_edge = 0;
my $first_line = 0;
@@ -2192,7 +2193,8 @@ sub process {
}
#next;
}
- if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
+ if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) {
+ my $context = $4;
$realline=$1-1;
if (defined $2) {
$realcnt=$3+1;
@@ -2201,6 +2203,12 @@ sub process {
}
$in_comment = 0;
+ if ($context =~ /\b(\w+)\s*\(/) {
+ $context_function = $1;
+ } else {
+ undef $context_function;
+ }
+
# Guestimate if this is a continuing comment. Run
# the context looking for a comment "edge". If this
# edge is a close comment then we must be in a comment
@@ -2695,6 +2703,7 @@ sub process {
# Check for FSF mailing addresses.
if ($rawline =~ /\bwrite to the Free/i ||
+ $rawline =~ /\b675\s+Mass\s+Ave/i ||
$rawline =~ /\b59\s+Temple\s+Pl/i ||
$rawline =~ /\b51\s+Franklin\s+St/i) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
@@ -5095,6 +5104,12 @@ sub process {
}
}
+# check for single line unbalanced braces
+ if ($sline =~ /^.\s*\}\s*else\s*$/ ||
+ $sline =~ /^.\s*else\s*\{\s*$/) {
+ CHK("BRACES", "Unbalanced braces around else statement\n" . $herecurr);
+ }
+
# check for unnecessary blank lines around braces
if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) {
if (CHK("BRACES",
@@ -5157,6 +5172,16 @@ sub process {
"break quoted strings at a space character\n" . $hereprev);
}
+#check for an embedded function name in a string when the function is known
+# as part of a diff. This does not work for -f --file checking as it
+#depends on patch context providing the function name
+ if ($line =~ /^\+.*$String/ &&
+ defined($context_function) &&
+ get_quoted_string($line, $rawline) =~ /\b$context_function\b/) {
+ WARN("EMBEDDED_FUNCTION_NAME",
+ "Prefer using \"%s\", __func__ to embedded function names\n" . $herecurr);
+ }
+
# check for spaces before a quoted newline
if ($rawline =~ /^.*\".*\s\\n/) {
if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE",
@@ -5269,6 +5294,12 @@ sub process {
}
}
+# check for logging continuations
+ if ($line =~ /\bprintk\s*\(\s*KERN_CONT\b|\bpr_cont\s*\(/) {
+ WARN("LOGGING_CONTINUATION",
+ "Avoid logging continuation uses where feasible\n" . $herecurr);
+ }
+
# check for mask then right shift without a parentheses
if ($^V && $^V ge 5.10.0 &&
$line =~ /$LvalOrFunc\s*\&\s*($LvalOrFunc)\s*>>/ &&
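
For illustration only (not part of the patch), the kind of added line the new EMBEDDED_FUNCTION_NAME warning targets, together with the preferred __func__ form; the check only fires when the hunk context names the enclosing function, which is why -f/--file mode is excluded above:

    #include <stdio.h>

    static int parse_config(void)                /* hypothetical function */
    {
        printf("parse_config: bad value\n");     /* would trigger the warning */
        printf("%s: bad value\n", __func__);     /* preferred: use __func__ */
        return 0;
    }

    int main(void)
    {
        return parse_config();
    }
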
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index c354807381c1..c9e8a9898ce4 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -424,10 +424,9 @@ out:
return ret;
}
-static int sel_mmap_policy_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
+static int sel_mmap_policy_fault(struct vm_fault *vmf)
{
- struct policy_load_memory *plm = vma->vm_file->private_data;
+ struct policy_load_memory *plm = vmf->vma->vm_file->private_data;
unsigned long offset;
struct page *page;
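
This hunk and the remaining ones apply the same mechanical conversion: a ->fault() handler no longer receives the vm_area_struct as a separate argument and reaches it through vmf->vma instead. A stand-alone analogue of the pattern (hypothetical names, user-space types):

    #include <stdio.h>

    struct vma   { void *vm_private_data; };
    struct fault { struct vma *vma; unsigned long pgoff; };

    /* old style: static int handler(struct vma *vma, struct fault *f); */
    static int handler(struct fault *f)            /* new style: one argument */
    {
        printf("private data %p, pgoff %lu\n",
               f->vma->vm_private_data, f->pgoff);
        return 0;
    }

    int main(void)
    {
        struct vma v = { .vm_private_data = (void *)&v };
        struct fault f = { .vma = &v, .pgoff = 7 };
        return handler(&f);
    }
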
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 9d33c1e85c79..aec9c92250fd 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3245,10 +3245,9 @@ static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
/*
* mmap status record
*/
-static int snd_pcm_mmap_status_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int snd_pcm_mmap_status_fault(struct vm_fault *vmf)
{
- struct snd_pcm_substream *substream = area->vm_private_data;
+ struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
struct snd_pcm_runtime *runtime;
if (substream == NULL)
@@ -3282,10 +3281,9 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file
/*
* mmap control record
*/
-static int snd_pcm_mmap_control_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int snd_pcm_mmap_control_fault(struct vm_fault *vmf)
{
- struct snd_pcm_substream *substream = area->vm_private_data;
+ struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
struct snd_pcm_runtime *runtime;
if (substream == NULL)
@@ -3341,10 +3339,9 @@ snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
/*
* fault callback for mmapping a RAM page
*/
-static int snd_pcm_mmap_data_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int snd_pcm_mmap_data_fault(struct vm_fault *vmf)
{
- struct snd_pcm_substream *substream = area->vm_private_data;
+ struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
struct snd_pcm_runtime *runtime;
unsigned long offset;
struct page * page;
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index cf5dc33f4a6d..cf45bf1f7ee0 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -137,13 +137,12 @@ static void usb_stream_hwdep_vm_open(struct vm_area_struct *area)
snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}
-static int usb_stream_hwdep_vm_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf)
{
unsigned long offset;
struct page *page;
void *vaddr;
- struct us122l *us122l = area->vm_private_data;
+ struct us122l *us122l = vmf->vma->vm_private_data;
struct usb_stream *s;
mutex_lock(&us122l->mutex);
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 0b34dbc8f302..605e1047c01d 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -31,19 +31,18 @@
#include "usbusx2y.h"
#include "usX2Yhwdep.h"
-static int snd_us428ctls_vm_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int snd_us428ctls_vm_fault(struct vm_fault *vmf)
{
unsigned long offset;
struct page * page;
void *vaddr;
snd_printdd("ENTER, start %lXh, pgoff %ld\n",
- area->vm_start,
+ vmf->vma->vm_start,
vmf->pgoff);
offset = vmf->pgoff << PAGE_SHIFT;
- vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset;
+ vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
page = virt_to_page(vaddr);
get_page(page);
vmf->page = page;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 90766a92e7fd..f95164b91152 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -652,14 +652,13 @@ static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area)
}
-static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_area_struct *area,
- struct vm_fault *vmf)
+static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
{
unsigned long offset;
void *vaddr;
offset = vmf->pgoff << PAGE_SHIFT;
- vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->hwdep_pcm_shm + offset;
+ vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
vmf->page = virt_to_page(vaddr);
get_page(vmf->page);
return 0;
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index 6d8b8f22cf55..42c15f906aac 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -34,7 +34,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
{
unsigned long tmp;
- if (!nbits || start >= nbits)
+ if (unlikely(start >= nbits))
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
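
Dropping the !nbits test is safe because both operands are unsigned: when nbits is 0, start >= nbits already holds, so the simplified (and now unlikely()-annotated) guard returns nbits in exactly the same cases. A quick user-space check of that equivalence:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned long nbits = 0; nbits < 4; nbits++)
            for (unsigned long start = 0; start < 6; start++)
                assert((!nbits || start >= nbits) == (start >= nbits));
        puts("guards are equivalent");
        return 0;
    }
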
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 5a840a605a16..e9449c801888 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -398,12 +398,12 @@ static void *uffd_poll_thread(void *arg)
uffd = msg.arg.fork.ufd;
pollfd[0].fd = uffd;
break;
- case UFFD_EVENT_MADVDONTNEED:
- uffd_reg.range.start = msg.arg.madv_dn.start;
- uffd_reg.range.len = msg.arg.madv_dn.end -
- msg.arg.madv_dn.start;
+ case UFFD_EVENT_REMOVE:
+ uffd_reg.range.start = msg.arg.remove.start;
+ uffd_reg.range.len = msg.arg.remove.end -
+ msg.arg.remove.start;
if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
- fprintf(stderr, "madv_dn failure\n"), exit(1);
+ fprintf(stderr, "remove failure\n"), exit(1);
break;
case UFFD_EVENT_REMAP:
area_dst = (char *)(unsigned long)msg.arg.remap.to;
@@ -569,9 +569,9 @@ static int userfaultfd_open(int features)
* part is accessed after mremap. Since hugetlbfs does not support
* mremap, the entire monitored area is accessed in a single pass for
* HUGETLB_TEST.
- * The release of the pages currently generates event only for
- * anonymous memory (UFFD_EVENT_MADVDONTNEED), hence it is not checked
- * for hugetlb and shmem.
+ * The release of the pages currently generates event for shmem and
+ * anonymous memory (UFFD_EVENT_REMOVE), hence it is not checked
+ * for hugetlb.
*/
static int faulting_process(void)
{
@@ -610,7 +610,6 @@ static int faulting_process(void)
}
}
-#ifndef SHMEM_TEST
if (release_pages(area_dst))
return 1;
@@ -618,7 +617,6 @@ static int faulting_process(void)
if (my_bcmp(area_dst + nr * page_size, zeropage, page_size))
fprintf(stderr, "nr %lu is not zero\n", nr), exit(1);
}
-#endif /* SHMEM_TEST */
#endif /* HUGETLB_TEST */
@@ -715,14 +713,14 @@ static int userfaultfd_events_test(void)
pid_t pid;
char c;
- printf("testing events (fork, remap, madv_dn): ");
+ printf("testing events (fork, remap, remove): ");
fflush(stdout);
if (release_pages(area_dst))
return 1;
features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
- UFFD_FEATURE_EVENT_MADVDONTNEED;
+ UFFD_FEATURE_EVENT_REMOVE;
if (userfaultfd_open(features) < 0)
return 1;
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cc4d6e0dd2a2..5b0dd4a9b2cb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2350,9 +2350,9 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
-static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kvm_vcpu_fault(struct vm_fault *vmf)
{
- struct kvm_vcpu *vcpu = vma->vm_file->private_data;
+ struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff == 0)