Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_memhotplug.c19
-rw-r--r--drivers/atm/idt77252.c1
-rw-r--r--drivers/base/devtmpfs.c3
-rw-r--r--drivers/base/firmware_loader/fallback_table.c13
-rw-r--r--drivers/base/memory.c219
-rw-r--r--drivers/base/node.c35
-rw-r--r--drivers/bus/brcmstb_gisb.c4
-rw-r--r--drivers/bus/fsl-mc/dprc.c30
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c15
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h17
-rw-r--r--drivers/bus/ti-sysc.c454
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.c1
-rw-r--r--drivers/crypto/ccp/ccp-ops.c15
-rw-r--r--drivers/crypto/ccp/psp-dev.c19
-rw-r--r--drivers/crypto/stm32/stm32-hash.c2
-rw-r--r--drivers/dax/super.c23
-rw-r--r--drivers/dma-buf/dma-buf.c15
-rw-r--r--drivers/firmware/arm_scmi/clock.c2
-rw-r--r--drivers/firmware/arm_scmi/sensors.c10
-rw-r--r--drivers/firmware/psci/psci_checker.c10
-rw-r--r--drivers/firmware/tegra/bpmp.c4
-rw-r--r--drivers/firmware/ti_sci.c859
-rw-r--r--drivers/firmware/ti_sci.h810
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi_dpm.h32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c3
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c222
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h25
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile16
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c75
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c62
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c41
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c63
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h18
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c15
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c84
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c10
-rw-r--r--drivers/gpu/drm/bochs/bochs.h2
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c14
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c3
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c3
-rw-r--r--drivers/gpu/drm/drm_connector.c2
-rw-r--r--drivers/gpu/drm/drm_drv.c20
-rw-r--r--drivers/gpu/drm/drm_modes.c14
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c12
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0002.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0046.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl006b.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507a.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507b.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507c.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507d.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl9097.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc36f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc37b.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc37e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/event.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0001.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0004.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0005.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/notify.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/unpack.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/debug.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/event.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/notify.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/option.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c136
-rw-r--r--drivers/hv/vmbus_drv.c6
-rw-r--r--drivers/hwmon/scmi-hwmon.c48
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c26
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c35
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c18
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c3
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c3
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c3
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c3
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c3
-rw-r--r--drivers/input/joystick/iforce/iforce.h3
-rw-r--r--drivers/input/keyboard/Kconfig16
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/applespi.c1977
-rw-r--r--drivers/input/keyboard/applespi.h29
-rw-r--r--drivers/input/keyboard/applespi_trace.h93
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c9
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c3
-rw-r--r--drivers/input/mouse/alps.c32
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/input/mouse/trackpoint.h3
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/tablet/gtco.c20
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c3
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c3
-rw-r--r--drivers/md/dm-kcopyd.c34
-rw-r--r--drivers/md/dm-snap.c10
-rw-r--r--drivers/md/dm-zoned-metadata.c24
-rw-r--r--drivers/md/dm-zoned.h28
-rw-r--r--drivers/memory/.gitignore1
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile6
-rw-r--r--drivers/memory/brcmstb_dpfe.c317
-rw-r--r--drivers/memory/emif.c3
-rw-r--r--drivers/memory/jedec_ddr.h172
-rw-r--r--drivers/memory/jedec_ddr_data.c133
-rw-r--r--drivers/memory/of_memory.c3
-rw-r--r--drivers/memory/tegra/tegra124.c44
-rw-r--r--drivers/memory/ti-emif-sram-pm.S2
-rw-r--r--drivers/misc/cxl/api.c13
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c21
-rw-r--r--drivers/misc/vmw_balloon.c18
-rw-r--r--drivers/net/caif/caif_hsi.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c9
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c57
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c23
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c4
-rw-r--r--drivers/net/ethernet/jme.c5
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c5
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c137
-rw-r--r--drivers/net/ethernet/sis/sis900.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c26
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c23
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c12
-rw-r--r--drivers/ntb/Kconfig11
-rw-r--r--drivers/ntb/Makefile3
-rw-r--r--drivers/ntb/core.c (renamed from drivers/ntb/ntb.c)0
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c10
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c6
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c82
-rw-r--r--drivers/ntb/msi.c415
-rw-r--r--drivers/ntb/ntb_transport.c170
-rw-r--r--drivers/ntb/test/Kconfig9
-rw-r--r--drivers/ntb/test/Makefile1
-rw-r--r--drivers/ntb/test/ntb_msi_test.c433
-rw-r--r--drivers/ntb/test/ntb_perf.c14
-rw-r--r--drivers/nvdimm/dax_devs.c2
-rw-r--r--drivers/nvdimm/pfn.h15
-rw-r--r--drivers/nvdimm/pfn_devs.c95
-rw-r--r--drivers/oprofile/oprofilefs.c20
-rw-r--r--drivers/pci/msi.c54
-rw-r--r--drivers/pci/switch/switchtec.c12
-rw-r--r--drivers/reset/Kconfig3
-rw-r--r--drivers/reset/core.c3
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/s390/scsi/zfcp_erp.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c55
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c23
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c31
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c6
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h2
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c13
-rw-r--r--drivers/scsi/sd_zbc.c2
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c3
-rw-r--r--drivers/soc/amlogic/meson-canvas.c14
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c61
-rw-r--r--drivers/soc/fsl/Kconfig10
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/dpaa2-console.c329
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c23
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c148
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h9
-rw-r--r--drivers/soc/fsl/guts.c6
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c20
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c21
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h9
-rw-r--r--drivers/soc/imx/Kconfig9
-rw-r--r--drivers/soc/imx/Makefile1
-rw-r--r--drivers/soc/imx/soc-imx-scu.c144
-rw-r--r--drivers/soc/imx/soc-imx8.c63
-rw-r--r--drivers/soc/qcom/Kconfig12
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/apr.c76
-rw-r--r--drivers/soc/qcom/qcom_aoss.c480
-rw-r--r--drivers/soc/qcom/rpmpd.c134
-rw-r--r--drivers/soc/renesas/Kconfig4
-rw-r--r--drivers/soc/rockchip/pm_domains.c230
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c6
-rw-r--r--drivers/soc/tegra/pmc.c18
-rw-r--r--drivers/soc/ti/Kconfig5
-rw-r--r--drivers/soc/ti/pm33xx.c1
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/sa1100.c46
-rw-r--r--drivers/tty/tty_ldisc.c6
-rw-r--r--drivers/usb/gadget/legacy/inode.c21
-rw-r--r--drivers/virtio/virtio_balloon.c13
-rw-r--r--drivers/xen/Kconfig23
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--drivers/xen/events/events_base.c12
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/tmem.c419
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-selfballoon.c579
-rw-r--r--drivers/xen/xenfs/super.c21
604 files changed, 9907 insertions, 3045 deletions
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index db013dc21c02..e294f44a7850 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -155,16 +155,6 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
return 0;
}
-static unsigned long acpi_meminfo_start_pfn(struct acpi_memory_info *info)
-{
- return PFN_DOWN(info->start_addr);
-}
-
-static unsigned long acpi_meminfo_end_pfn(struct acpi_memory_info *info)
-{
- return PFN_UP(info->start_addr + info->length-1);
-}
-
static int acpi_bind_memblk(struct memory_block *mem, void *arg)
{
return acpi_bind_one(&mem->dev, arg);
@@ -173,9 +163,8 @@ static int acpi_bind_memblk(struct memory_block *mem, void *arg)
static int acpi_bind_memory_blocks(struct acpi_memory_info *info,
struct acpi_device *adev)
{
- return walk_memory_range(acpi_meminfo_start_pfn(info),
- acpi_meminfo_end_pfn(info), adev,
- acpi_bind_memblk);
+ return walk_memory_blocks(info->start_addr, info->length, adev,
+ acpi_bind_memblk);
}
static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
@@ -186,8 +175,8 @@ static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
{
- walk_memory_range(acpi_meminfo_start_pfn(info),
- acpi_meminfo_end_pfn(info), NULL, acpi_unbind_memblk);
+ walk_memory_blocks(info->start_addr, info->length, NULL,
+ acpi_unbind_memblk);
}
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 43a14579e80e..df51680e8931 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1379,7 +1379,6 @@ init_tsq(struct idt77252_dev *card)
printk("%s: can't allocate TSQ.\n", card->name);
return -1;
}
- memset(card->tsq.base, 0, TSQSIZE);
card->tsq.last = card->tsq.base + TSQ_NUM_ENTRIES - 1;
card->tsq.next = card->tsq.last;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 0dbc43068eeb..ba5c80903efe 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -357,8 +357,7 @@ int devtmpfs_mount(const char *mntdir)
if (!thread)
return 0;
- err = ksys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT,
- NULL);
+ err = ksys_mount("devtmpfs", mntdir, "devtmpfs", MS_SILENT, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 776dd69cf5be..ba9d30b28edc 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -16,9 +16,6 @@
* firmware fallback configuration table
*/
-static unsigned int zero;
-static unsigned int one = 1;
-
struct firmware_fallback_config fw_fallback_config = {
.force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
.loading_timeout = 60,
@@ -26,6 +23,7 @@ struct firmware_fallback_config fw_fallback_config = {
};
EXPORT_SYMBOL_GPL(fw_fallback_config);
+#ifdef CONFIG_SYSCTL
struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
@@ -33,8 +31,8 @@ struct ctl_table firmware_config_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "ignore_sysfs_fallback",
@@ -42,9 +40,10 @@ struct ctl_table firmware_config_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
EXPORT_SYMBOL_GPL(firmware_config_table);
+#endif
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f180427e48f4..20c39d1bcef8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -34,11 +34,21 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
static int sections_per_block;
-static inline int base_memory_block_id(int section_nr)
+static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
return section_nr / sections_per_block;
}
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return base_memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -126,9 +136,9 @@ static ssize_t phys_index_show(struct device *dev,
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- unsigned long i, pfn;
- int ret = 1;
struct memory_block *mem = to_memory_block(dev);
+ unsigned long pfn;
+ int ret = 1, i;
if (mem->state != MEM_ONLINE)
goto out;
@@ -578,23 +588,13 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
-/*
- * A reference for the returned object is held and the reference for the
- * hinted object is released.
- */
-struct memory_block *find_memory_block_hinted(struct mem_section *section,
- struct memory_block *hint)
+/* A reference for the returned memory block device is acquired. */
+static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
- int block_id = base_memory_block_id(__section_nr(section));
- struct device *hintdev = hint ? &hint->dev : NULL;
struct device *dev;
- dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
- if (hint)
- put_device(&hint->dev);
- if (!dev)
- return NULL;
- return to_memory_block(dev);
+ dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
+ return dev ? to_memory_block(dev) : NULL;
}
/*
@@ -607,7 +607,9 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
- return find_memory_block_hinted(section, NULL);
+ unsigned long block_id = base_memory_block_id(__section_nr(section));
+
+ return find_memory_block_by_id(block_id);
}
static struct attribute *memory_memblk_attrs[] = {
@@ -652,20 +654,22 @@ int register_memory(struct memory_block *memory)
}
static int init_memory_block(struct memory_block **memory,
- struct mem_section *section, unsigned long state)
+ unsigned long block_id, unsigned long state)
{
struct memory_block *mem;
unsigned long start_pfn;
- int scn_nr;
int ret = 0;
+ mem = find_memory_block_by_id(block_id);
+ if (mem) {
+ put_device(&mem->dev);
+ return -EEXIST;
+ }
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
- scn_nr = __section_nr(section);
- mem->start_section_nr =
- base_memory_block_id(scn_nr) * sections_per_block;
+ mem->start_section_nr = block_id * sections_per_block;
mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -677,97 +681,101 @@ static int init_memory_block(struct memory_block **memory,
return ret;
}
-static int add_memory_block(int base_section_nr)
+static int add_memory_block(unsigned long base_section_nr)
{
+ int ret, section_count = 0;
struct memory_block *mem;
- int i, ret, section_count = 0, section_nr;
+ unsigned long nr;
- for (i = base_section_nr;
- i < base_section_nr + sections_per_block;
- i++) {
- if (!present_section_nr(i))
- continue;
- if (section_count == 0)
- section_nr = i;
- section_count++;
- }
+ for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
+ nr++)
+ if (present_section_nr(nr))
+ section_count++;
if (section_count == 0)
return 0;
- ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+ ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
+ MEM_ONLINE);
if (ret)
return ret;
mem->section_count = section_count;
return 0;
}
+static void unregister_memory(struct memory_block *memory)
+{
+ if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
+ return;
+
+ /* drop the ref. we got via find_memory_block() */
+ put_device(&memory->dev);
+ device_unregister(&memory->dev);
+}
+
/*
- * need an interface for the VM to add new memory regions,
- * but without onlining it.
+ * Create memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * will be initialized as offline.
*/
-int hotplug_memory_register(int nid, struct mem_section *section)
+int create_memory_block_devices(unsigned long start, unsigned long size)
{
- int ret = 0;
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
- mutex_lock(&mem_sysfs_mutex);
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
+ return -EINVAL;
- mem = find_memory_block(section);
- if (mem) {
- mem->section_count++;
- put_device(&mem->dev);
- } else {
- ret = init_memory_block(&mem, section, MEM_OFFLINE);
+ mutex_lock(&mem_sysfs_mutex);
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
- goto out;
- mem->section_count++;
+ break;
+ mem->section_count = sections_per_block;
+ }
+ if (ret) {
+ end_block_id = block_id;
+ for (block_id = start_block_id; block_id != end_block_id;
+ block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ mem->section_count = 0;
+ unregister_memory(mem);
+ }
}
-
-out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-static void
-unregister_memory(struct memory_block *memory)
-{
- BUG_ON(memory->dev.bus != &memory_subsys);
-
- /* drop the ref. we got via find_memory_block() */
- put_device(&memory->dev);
- device_unregister(&memory->dev);
-}
-
-void unregister_memory_section(struct mem_section *section)
+/*
+ * Remove memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * have to be offline.
+ */
+void remove_memory_block_devices(unsigned long start, unsigned long size)
{
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
+ unsigned long block_id;
- if (WARN_ON_ONCE(!present_section(section)))
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
return;
mutex_lock(&mem_sysfs_mutex);
-
- /*
- * Some users of the memory hotplug do not want/need memblock to
- * track all sections. Skip over those.
- */
- mem = find_memory_block(section);
- if (!mem)
- goto out_unlock;
-
- unregister_mem_sect_under_nodes(mem, __section_nr(section));
-
- mem->section_count--;
- if (mem->section_count == 0)
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ mem->section_count = 0;
+ unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
- else
- put_device(&mem->dev);
-
-out_unlock:
+ }
mutex_unlock(&mem_sysfs_mutex);
}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
@@ -804,10 +812,9 @@ static const struct attribute_group *memory_root_attr_groups[] = {
*/
int __init memory_dev_init(void)
{
- unsigned int i;
int ret;
int err;
- unsigned long block_sz;
+ unsigned long block_sz, nr;
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
@@ -821,9 +828,9 @@ int __init memory_dev_init(void)
* during boot and have been initialized
*/
mutex_lock(&mem_sysfs_mutex);
- for (i = 0; i <= __highest_present_section_nr;
- i += sections_per_block) {
- err = add_memory_block(i);
+ for (nr = 0; nr <= __highest_present_section_nr;
+ nr += sections_per_block) {
+ err = add_memory_block(nr);
if (!ret)
ret = err;
}
@@ -834,3 +841,43 @@ out:
printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
return ret;
}
+
+/**
+ * walk_memory_blocks - walk through all present memory blocks overlapped
+ * by the range [start, start + size)
+ *
+ * @start: start address of the memory range
+ * @size: size of the memory range
+ * @arg: argument passed to func
+ * @func: callback for each memory section walked
+ *
+ * This function walks through all present memory blocks overlapped by the
+ * range [start, start + size), calling func on each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func)
+{
+ const unsigned long start_block_id = phys_to_block_id(start);
+ const unsigned long end_block_id = phys_to_block_id(start + size - 1);
+ struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
+
+ if (!size)
+ return 0;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ ret = func(mem, arg);
+ put_device(&mem->dev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
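A minimal caller sketch for the new walk_memory_blocks() interface, mirroring the acpi_memhotplug.c conversion earlier in this diff; the callback and helper names below are illustrative reading aids, not part of the patch:

/* Illustrative only: count the memory block devices overlapping a
 * physical range [start, start + size) using the new helper.
 */
static int count_memory_block(struct memory_block *mem, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
	return 0;
}

static unsigned int nr_blocks_in_range(unsigned long start, unsigned long size)
{
	unsigned int count = 0;

	/* walk_memory_blocks() takes a physical address and size in bytes,
	 * not pfns as the old walk_memory_range() did.
	 */
	walk_memory_blocks(start, size, &count, count_memory_block);
	return count;
}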
diff --git a/drivers/base/node.c b/drivers/base/node.c
index aa878fbcf705..75b7e6f6535b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -753,7 +753,8 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+static int register_mem_sect_under_node(struct memory_block *mem_blk,
+ void *arg)
{
int ret, nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
@@ -802,23 +803,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
return 0;
}
-/* unregister memory section under all nodes that it spans */
-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index)
+/*
+ * Unregister memory block device under all nodes that it spans.
+ * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ */
+void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
- NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
unsigned long pfn, sect_start_pfn, sect_end_pfn;
+ static nodemask_t unlinked_nodes;
- if (!mem_blk) {
- NODEMASK_FREE(unlinked_nodes);
- return -EFAULT;
- }
- if (!unlinked_nodes)
- return -ENOMEM;
- nodes_clear(*unlinked_nodes);
-
- sect_start_pfn = section_nr_to_pfn(phys_index);
- sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+ nodes_clear(unlinked_nodes);
+ sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int nid;
@@ -827,21 +823,20 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (!node_online(nid))
continue;
- if (node_test_and_set(nid, *unlinked_nodes))
+ if (node_test_and_set(nid, unlinked_nodes))
continue;
sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
sysfs_remove_link(&mem_blk->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
}
- NODEMASK_FREE(unlinked_nodes);
- return 0;
}
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
- return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
- register_mem_sect_under_node);
+ return walk_memory_blocks(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
+ register_mem_sect_under_node);
}
#ifdef CONFIG_HUGETLBFS
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 972854ca1d9a..ec1004c858b8 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -399,8 +399,8 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
&gisb_panic_notifier);
}
- dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
- gdev->base, timeout_irq, tea_irq);
+ dev_info(&pdev->dev, "registered irqs: %d, %d\n",
+ timeout_irq, tea_irq);
return 0;
}
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 1c3f62182266..0fe3f52ae0de 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -443,11 +443,31 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_obj_region *cmd_params;
struct dprc_rsp_get_obj_region *rsp_params;
+ u16 major_ver, minor_ver;
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
- cmd_flags, token);
+ err = dprc_get_api_version(mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (err)
+ return err;
+
+ /**
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address. If the older API is in use then the base
+ * address is set to zero to indicate it needs to be obtained elsewhere
+ * (typically the device tree).
+ */
+ if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
+ cmd.header =
+ mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+ cmd_flags, token);
+ else
+ cmd.header =
+ mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
@@ -461,8 +481,12 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
region_desc->size = le32_to_cpu(rsp_params->size);
+ if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
+ region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
+ else
+ region_desc->base_address = 0;
return 0;
}
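The same API-version gate appears twice in the hunk above; as a reading aid it reduces to a single predicate (the helper name below is hypothetical, not part of the patch):

/* Hypothetical helper, equivalent to the inline checks above: the
 * base_address field is only reported by MC firmware API 6.3 or newer.
 */
static bool dprc_api_has_base_address(u16 major_ver, u16 minor_ver)
{
	return major_ver > 6 || (major_ver == 6 && minor_ver >= 3);
}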
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index f0404c6d1ff4..5c9bf2e06552 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -487,10 +487,19 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
"dprc_get_obj_region() failed: %d\n", error);
goto error_cleanup_regions;
}
-
- error = translate_mc_addr(mc_dev, mc_region_type,
+ /*
+ * Older MC only returned region offset and no base address
+ * If base address is in the region_desc use it otherwise
+ * revert to old mechanism
+ */
+ if (region_desc.base_address)
+ regions[i].start = region_desc.base_address +
+ region_desc.base_offset;
+ else
+ error = translate_mc_addr(mc_dev, mc_region_type,
region_desc.base_offset,
&regions[i].start);
+
if (error < 0) {
dev_err(parent_dev,
"Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
@@ -504,6 +513,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].flags = IORESOURCE_IO;
if (region_desc.flags & DPRC_REGION_CACHEABLE)
regions[i].flags |= IORESOURCE_CACHEABLE;
+ if (region_desc.flags & DPRC_REGION_SHAREABLE)
+ regions[i].flags |= IORESOURCE_MEM;
}
mc_dev->regions = regions;
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index ea11b4fe59f7..020fcc04ec8b 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -79,9 +79,11 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command versioning */
#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_2ND_VERSION 2
#define DPRC_CMD_ID_OFFSET 4
#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
/* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
@@ -100,6 +102,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
struct dprc_cmd_open {
@@ -199,9 +202,16 @@ struct dprc_rsp_get_obj_region {
/* response word 0 */
__le64 pad;
/* response word 1 */
- __le64 base_addr;
+ __le64 base_offset;
/* response word 2 */
__le32 size;
+ __le32 pad2;
+ /* response word 3 */
+ __le32 flags;
+ __le32 pad3;
+ /* response word 4 */
+ /* base_addr may be zero if older MC firmware is used */
+ __le64 base_addr;
};
struct dprc_cmd_set_obj_irq {
@@ -334,6 +344,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
/* Region flags */
/* Cacheable - Indicates that region should be mapped as cacheable */
#define DPRC_REGION_CACHEABLE 0x00000001
+#define DPRC_REGION_SHAREABLE 0x00000002
/**
* enum dprc_region_type - Region type
@@ -342,7 +353,8 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
*/
enum dprc_region_type {
DPRC_REGION_TYPE_MC_PORTAL,
- DPRC_REGION_TYPE_QBMAN_PORTAL
+ DPRC_REGION_TYPE_QBMAN_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
};
/**
@@ -360,6 +372,7 @@ struct dprc_region_desc {
u32 size;
u32 flags;
enum dprc_region_type type;
+ u64 base_address;
};
int dprc_get_obj_region(struct fsl_mc_io *mc_io,
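For reference, with the versioning macros added above (DPRC_CMD_ID_OFFSET is 4), the two command IDs work out as follows; a reading aid, not part of the patch:

/* DPRC_CMDID_GET_OBJ_REG    = (0x15E << 4) | 1 = 0x15E1 (base version) */
/* DPRC_CMDID_GET_OBJ_REG_V2 = (0x15E << 4) | 2 = 0x15E2 (v2, adds base_addr) */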
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index b72741668c92..e6deabd8305d 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -71,6 +71,9 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @name: name if available
* @revision: interconnect target module revision
* @needs_resume: runtime resume needed on resume from suspend
+ * @clk_enable_quirk: module specific clock enable quirk
+ * @clk_disable_quirk: module specific clock disable quirk
+ * @reset_done_quirk: module specific reset done quirk
*/
struct sysc {
struct device *dev;
@@ -89,10 +92,14 @@ struct sysc {
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
- bool enabled;
- bool needs_resume;
- bool child_needs_resume;
+ unsigned int enabled:1;
+ unsigned int needs_resume:1;
+ unsigned int child_needs_resume:1;
+ unsigned int disable_on_idle:1;
struct delayed_work idle_work;
+ void (*clk_enable_quirk)(struct sysc *sysc);
+ void (*clk_disable_quirk)(struct sysc *sysc);
+ void (*reset_done_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -100,6 +107,20 @@ static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
+ if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
+ writew_relaxed(value & 0xffff, ddata->module_va + offset);
+
+ /* Only i2c revision has LO and HI register with stride of 4 */
+ if (ddata->offsets[SYSC_REVISION] >= 0 &&
+ offset == ddata->offsets[SYSC_REVISION]) {
+ u16 hi = value >> 16;
+
+ writew_relaxed(hi, ddata->module_va + offset + 4);
+ }
+
+ return;
+ }
+
writel_relaxed(value, ddata->module_va + offset);
}
@@ -109,7 +130,14 @@ static u32 sysc_read(struct sysc *ddata, int offset)
u32 val;
val = readw_relaxed(ddata->module_va + offset);
- val |= (readw_relaxed(ddata->module_va + offset + 4) << 16);
+
+ /* Only i2c revision has LO and HI register with stride of 4 */
+ if (ddata->offsets[SYSC_REVISION] >= 0 &&
+ offset == ddata->offsets[SYSC_REVISION]) {
+ u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
+
+ val |= tmp << 16;
+ }
return val;
}
@@ -132,6 +160,26 @@ static u32 sysc_read_revision(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static u32 sysc_read_sysconfig(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSCONFIG];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static u32 sysc_read_sysstatus(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -422,6 +470,30 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
}
}
+static void sysc_clkdm_deny_idle(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+
+ if (ddata->legacy_mode)
+ return;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->clkdm_deny_idle)
+ pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
+}
+
+static void sysc_clkdm_allow_idle(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+
+ if (ddata->legacy_mode)
+ return;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->clkdm_allow_idle)
+ pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
+}
+
/**
* sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
@@ -431,7 +503,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
static int sysc_init_resets(struct sysc *ddata)
{
ddata->rsts =
- devm_reset_control_array_get_optional_exclusive(ddata->dev);
+ devm_reset_control_get_optional(ddata->dev, "rstctrl");
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
@@ -694,8 +766,11 @@ static int sysc_ioremap(struct sysc *ddata)
ddata->offsets[SYSC_SYSCONFIG],
ddata->offsets[SYSC_SYSSTATUS]);
+ if (size < SZ_1K)
+ size = SZ_1K;
+
if ((size + sizeof(u32)) > ddata->module_size)
- return -EINVAL;
+ size = ddata->module_size;
}
ddata->module_va = devm_ioremap(ddata->dev,
@@ -794,7 +869,9 @@ static void sysc_show_registers(struct sysc *ddata)
}
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+#define SYSC_CLOCACT_ICK 2
+/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
static int sysc_enable_module(struct device *dev)
{
struct sysc *ddata;
@@ -805,23 +882,34 @@ static int sysc_enable_module(struct device *dev)
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
- /*
- * TODO: Need to prevent clockdomain autoidle?
- * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
- */
-
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ /* Set CLOCKACTIVITY, we only use it for ick */
+ if (regbits->clkact_shift >= 0 &&
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
+ ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+ reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
+
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
if (!idlemodes || regbits->sidle_shift < 0)
goto set_midle;
- best_mode = fls(ddata->cfg.sidlemodes) - 1;
- if (best_mode > SYSC_IDLE_MASK) {
- dev_err(dev, "%s: invalid sidlemode\n", __func__);
- return -EINVAL;
+ if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
+ SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
+ best_mode = SYSC_IDLE_NO;
+ } else {
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
}
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
@@ -832,7 +920,7 @@ set_midle:
/* Set MIDLE mode */
idlemodes = ddata->cfg.midlemodes;
if (!idlemodes || regbits->midle_shift < 0)
- return 0;
+ goto set_autoidle;
best_mode = fls(ddata->cfg.midlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
@@ -844,6 +932,14 @@ set_midle:
reg |= best_mode << regbits->midle_shift;
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+set_autoidle:
+ /* The autoidle bit must be enabled separately if available */
+ if (regbits->autoidle_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
+ reg |= 1 << regbits->autoidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ }
+
return 0;
}
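The SIDLE/MIDLE handling above picks the highest supported idle mode with fls(idlemodes) - 1 and masks it into the corresponding SYSCONFIG field. A small standalone sketch of that selection, assuming a made-up shift value and register image:

```c
#include <stdio.h>

#define SYSC_NR_IDLEMODES	4
#define SYSC_IDLE_MASK		(SYSC_NR_IDLEMODES - 1)

static int fls32(unsigned int x)	/* minimal stand-in for the kernel's fls() */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int idlemodes = 0x6;	/* modes 1 (no-idle) and 2 (smart-idle) supported */
	unsigned int sidle_shift = 3;	/* hypothetical field position */
	unsigned int reg = 0;
	unsigned int best_mode = fls32(idlemodes) - 1;

	if (best_mode > SYSC_IDLE_MASK) {
		fprintf(stderr, "invalid sidlemode\n");
		return 1;
	}

	reg &= ~(SYSC_IDLE_MASK << sidle_shift);
	reg |= best_mode << sidle_shift;
	printf("best_mode=%u reg=0x%08x\n", best_mode, reg);

	return 0;
}
```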
@@ -861,6 +957,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
return 0;
}
+/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
static int sysc_disable_module(struct device *dev)
{
struct sysc *ddata;
@@ -872,11 +969,6 @@ static int sysc_disable_module(struct device *dev)
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
- /*
- * TODO: Need to prevent clockdomain autoidle?
- * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
- */
-
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
@@ -901,14 +993,21 @@ set_sidle:
if (!idlemodes || regbits->sidle_shift < 0)
return 0;
- ret = sysc_best_idle_mode(idlemodes, &best_mode);
- if (ret) {
- dev_err(dev, "%s: invalid sidlemode\n", __func__);
- return ret;
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
+ best_mode = SYSC_IDLE_FORCE;
+ } else {
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
+ }
}
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
+ if (regbits->autoidle_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
+ reg |= 1 << regbits->autoidle_shift;
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
return 0;
@@ -932,6 +1031,9 @@ static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
dev_err(dev, "%s: could not idle: %i\n",
__func__, error);
+ if (ddata->disable_on_idle)
+ reset_control_assert(ddata->rsts);
+
return 0;
}
@@ -941,6 +1043,9 @@ static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
struct ti_sysc_platform_data *pdata;
int error;
+ if (ddata->disable_on_idle)
+ reset_control_deassert(ddata->rsts);
+
pdata = dev_get_platdata(ddata->dev);
if (!pdata)
return 0;
@@ -966,14 +1071,16 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
if (!ddata->enabled)
return 0;
+ sysc_clkdm_deny_idle(ddata);
+
if (ddata->legacy_mode) {
error = sysc_runtime_suspend_legacy(dev, ddata);
if (error)
- return error;
+ goto err_allow_idle;
} else {
error = sysc_disable_module(dev);
if (error)
- return error;
+ goto err_allow_idle;
}
sysc_disable_main_clocks(ddata);
@@ -983,6 +1090,12 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
ddata->enabled = false;
+err_allow_idle:
+ sysc_clkdm_allow_idle(ddata);
+
+ if (ddata->disable_on_idle)
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -996,10 +1109,15 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
if (ddata->enabled)
return 0;
+ if (ddata->disable_on_idle)
+ reset_control_deassert(ddata->rsts);
+
+ sysc_clkdm_deny_idle(ddata);
+
if (sysc_opt_clks_needed(ddata)) {
error = sysc_enable_opt_clocks(ddata);
if (error)
- return error;
+ goto err_allow_idle;
}
error = sysc_enable_main_clocks(ddata);
@@ -1018,6 +1136,8 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
ddata->enabled = true;
+ sysc_clkdm_allow_idle(ddata);
+
return 0;
err_main_clocks:
@@ -1025,6 +1145,8 @@ err_main_clocks:
err_opt_clocks:
if (sysc_opt_clks_needed(ddata))
sysc_disable_opt_clocks(ddata);
+err_allow_idle:
+ sysc_clkdm_allow_idle(ddata);
return error;
}
@@ -1106,8 +1228,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
0),
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
0),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
@@ -1119,6 +1243,22 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
SYSC_QUIRK_SWSUP_SIDLE),
+ /* Quirks that need to be set based on detected module */
+ SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
+ SYSC_MODULE_QUIRK_HDQ1W),
+ SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
+ SYSC_MODULE_QUIRK_HDQ1W),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C),
+ SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_I2C),
+ SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT),
+
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
@@ -1132,11 +1272,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
- SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, 0),
- SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, 0),
SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
- SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, 0),
SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0),
SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
@@ -1172,7 +1309,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, 0),
- SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 0),
SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
#endif
};
@@ -1245,6 +1381,121 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/* 1-wire needs module's internal clocks enabled for reset */
+static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
+{
+ int offset = 0x0c; /* HDQ_CTRL_STATUS */
+ u16 val;
+
+ val = sysc_read(ddata, offset);
+ val |= BIT(5);
+ sysc_write(ddata, offset, val);
+}
+
+/* I2C needs extra enable bit toggling for reset */
+static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
+{
+ int offset;
+ u16 val;
+
+ /* I2C_CON, omap2/3 is different from omap4 and later */
+ if ((ddata->revision & 0xffffff00) == 0x001f0000)
+ offset = 0x24;
+ else
+ offset = 0xa4;
+
+ /* I2C_EN */
+ val = sysc_read(ddata, offset);
+ if (enable)
+ val |= BIT(15);
+ else
+ val &= ~BIT(15);
+ sysc_write(ddata, offset, val);
+}
+
+static void sysc_clk_enable_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, true);
+}
+
+static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, false);
+}
+
+/* Watchdog timer needs a disable sequence after reset */
+static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
+{
+ int wps, spr, error;
+ u32 val;
+
+ wps = 0x34;
+ spr = 0x48;
+
+ sysc_write(ddata, spr, 0xaaaa);
+ error = readl_poll_timeout(ddata->module_va + wps, val,
+ !(val & 0x10), 100,
+ MAX_MODULE_SOFTRESET_WAIT);
+ if (error)
+ dev_warn(ddata->dev, "wdt disable spr failed\n");
+
+ sysc_write(ddata, spr, 0x5555);
+ error = readl_poll_timeout(ddata->module_va + wps, val,
+ !(val & 0x10), 100,
+ MAX_MODULE_SOFTRESET_WAIT);
+ if (error)
+ dev_warn(ddata->dev, "wdt disable wps failed\n");
+}
+
+static void sysc_init_module_quirks(struct sysc *ddata)
+{
+ if (ddata->legacy_mode || !ddata->name)
+ return;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
+ ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
+ ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c;
+ ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+ ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+}
+
+static int sysc_clockdomain_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ struct clk *fck = NULL, *ick = NULL;
+ int error;
+
+ if (!pdata || !pdata->init_clockdomain)
+ return 0;
+
+ switch (ddata->nr_clocks) {
+ case 2:
+ ick = ddata->clocks[SYSC_ICK];
+ /* fallthrough */
+ case 1:
+ fck = ddata->clocks[SYSC_FCK];
+ break;
+ case 0:
+ return 0;
+ }
+
+ error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
+ if (!error || error == -ENODEV)
+ return 0;
+
+ return error;
+}
+
/*
* Note that pdata->init_module() typically does a reset first. After
* pdata->init_module() is done, PM runtime can be used for the interconnect
@@ -1255,7 +1506,7 @@ static int sysc_legacy_init(struct sysc *ddata)
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
int error;
- if (!ddata->legacy_mode || !pdata || !pdata->init_module)
+ if (!pdata || !pdata->init_module)
return 0;
error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
@@ -1280,7 +1531,7 @@ static int sysc_legacy_init(struct sysc *ddata)
*/
static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
{
- int error;
+ int error, val;
if (!ddata->rsts)
return 0;
@@ -1291,37 +1542,68 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
return error;
}
- return reset_control_deassert(ddata->rsts);
+ error = reset_control_deassert(ddata->rsts);
+ if (error == -EEXIST)
+ return 0;
+
+ error = readx_poll_timeout(reset_control_status, ddata->rsts, val,
+ val == 0, 100, MAX_MODULE_SOFTRESET_WAIT);
+
+ return error;
}
+/*
+ * Note that the caller must ensure the interconnect target module is enabled
+ * before calling reset. Otherwise reset will not complete.
+ */
static int sysc_reset(struct sysc *ddata)
{
- int offset = ddata->offsets[SYSC_SYSCONFIG];
- int val;
+ int sysc_offset, syss_offset, sysc_val, rstval, quirks, error = 0;
+ u32 sysc_mask, syss_done;
+
+ sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ quirks = ddata->cfg.quirks;
- if (ddata->legacy_mode || offset < 0 ||
+ if (ddata->legacy_mode || sysc_offset < 0 ||
+ ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
- /*
- * Currently only support reset status in sysstatus.
- * Warn and return error in all other cases
- */
- if (!ddata->cfg.syss_mask) {
- dev_err(ddata->dev, "No ti,syss-mask. Reset failed\n");
- return -EINVAL;
- }
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- val = sysc_read(ddata, offset);
- val |= (0x1 << ddata->cap->regbits->srst_shift);
- sysc_write(ddata, offset, val);
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (ddata->clk_disable_quirk)
+ ddata->clk_disable_quirk(ddata);
+
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+
+ if (ddata->clk_enable_quirk)
+ ddata->clk_enable_quirk(ddata);
/* Poll on reset status */
- offset = ddata->offsets[SYSC_SYSSTATUS];
+ if (syss_offset >= 0) {
+ error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
+ (rstval & ddata->cfg.syss_mask) ==
+ syss_done,
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+
+ } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
+ !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ }
+
+ if (ddata->reset_done_quirk)
+ ddata->reset_done_quirk(ddata);
- return readl_poll_timeout(ddata->module_va + offset, val,
- (val & ddata->cfg.syss_mask) == 0x0,
- 100, MAX_MODULE_SOFTRESET_WAIT);
+ return error;
}
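sysc_reset() above sets the SOFTRESET bit and then polls either SYSSTATUS against syss_done (0 when the RESETDONE polarity is inverted, syss_mask otherwise) or the self-clearing bit in SYSCONFIG. A plain-C sketch of that polling logic, assuming a fake status read and a simple loop bound in place of readx_poll_timeout():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int countdown = 3;

static uint32_t fake_sysstatus(void)
{
	/* pretend RESETDONE (bit 0) asserts after a few polls */
	return countdown-- > 0 ? 0x0 : 0x1;
}

static int wait_reset_done(uint32_t syss_mask, bool resetdone_inverted)
{
	uint32_t syss_done = resetdone_inverted ? 0 : syss_mask;
	int tries;

	for (tries = 0; tries < 100; tries++) {
		if ((fake_sysstatus() & syss_mask) == syss_done)
			return 0;
	}

	return -1;	/* timed out */
}

int main(void)
{
	printf("reset %s\n", wait_reset_done(0x1, false) ? "timed out" : "done");
	return 0;
}
```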
/*
@@ -1334,12 +1616,8 @@ static int sysc_init_module(struct sysc *ddata)
{
int error = 0;
bool manage_clocks = true;
- bool reset = true;
-
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
- reset = false;
- error = sysc_rstctrl_reset_deassert(ddata, reset);
+ error = sysc_rstctrl_reset_deassert(ddata, false);
if (error)
return error;
@@ -1347,7 +1625,13 @@ static int sysc_init_module(struct sysc *ddata)
(SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
manage_clocks = false;
+ error = sysc_clockdomain_init(ddata);
+ if (error)
+ return error;
+
if (manage_clocks) {
+ sysc_clkdm_deny_idle(ddata);
+
error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
@@ -1357,23 +1641,43 @@ static int sysc_init_module(struct sysc *ddata)
goto err_opt_clocks;
}
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ error = sysc_rstctrl_reset_deassert(ddata, true);
+ if (error)
+ goto err_main_clocks;
+ }
+
ddata->revision = sysc_read_revision(ddata);
sysc_init_revision_quirks(ddata);
+ sysc_init_module_quirks(ddata);
- error = sysc_legacy_init(ddata);
- if (error)
- goto err_main_clocks;
+ if (ddata->legacy_mode) {
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+ }
+
+ if (!ddata->legacy_mode && manage_clocks) {
+ error = sysc_enable_module(ddata->dev);
+ if (error)
+ goto err_main_clocks;
+ }
error = sysc_reset(ddata);
if (error)
dev_err(ddata->dev, "Reset failed with %d\n", error);
+ if (!ddata->legacy_mode && manage_clocks)
+ sysc_disable_module(ddata->dev);
+
err_main_clocks:
if (manage_clocks)
sysc_disable_main_clocks(ddata);
err_opt_clocks:
- if (manage_clocks)
+ if (manage_clocks) {
sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
return error;
}
@@ -1663,9 +1967,6 @@ static struct dev_pm_domain sysc_child_pm_domain = {
*/
static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
{
- if (!ddata->legacy_mode)
- return;
-
if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
dev_pm_domain_set(child, &sysc_child_pm_domain);
}
@@ -2005,6 +2306,7 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
.type = TI_SYSC_DRA7_MCAN,
.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
.regbits = &sysc_regbits_dra7_mcan,
+ .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
static int sysc_init_pdata(struct sysc *ddata)
@@ -2012,20 +2314,22 @@ static int sysc_init_pdata(struct sysc *ddata)
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
struct ti_sysc_module_data *mdata;
- if (!pdata || !ddata->legacy_mode)
+ if (!pdata)
return 0;
mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
if (!mdata)
return -ENOMEM;
- mdata->name = ddata->legacy_mode;
- mdata->module_pa = ddata->module_pa;
- mdata->module_size = ddata->module_size;
- mdata->offsets = ddata->offsets;
- mdata->nr_offsets = SYSC_MAX_REGS;
- mdata->cap = ddata->cap;
- mdata->cfg = &ddata->cfg;
+ if (ddata->legacy_mode) {
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
+ }
ddata->mdata = mdata;
@@ -2145,7 +2449,7 @@ static int sysc_probe(struct platform_device *pdev)
}
if (!of_get_available_child_count(ddata->dev->of_node))
- reset_control_assert(ddata->rsts);
+ ddata->disable_on_idle = true;
return 0;
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 02a6bed3b062..f10a87e541ed 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -108,7 +108,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
return;
err_out:
- of_node_put(trng);
iounmap(dev->trng_base);
kfree(rng);
dev->trng_base = NULL;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 866b2e05ca77..c69ed4bae2eb 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
unsigned long long *final;
unsigned int dm_offset;
+ unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
int ret;
@@ -660,9 +661,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
}
+ jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.jobid = jobid;
op.sb_key = cmd_q->sb_key; /* Pre-allocated */
op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
op.init = 1;
@@ -813,6 +816,13 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = jobid;
+ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ op.init = 1;
+ op.u.aes.type = aes->type;
op.u.aes.mode = CCP_AES_MODE_GHASH;
op.u.aes.action = CCP_AES_GHASHFINAL;
op.src.type = CCP_MEMTYPE_SYSTEM;
@@ -840,7 +850,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
goto e_tag;
- ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
+ ret = crypto_memneq(tag.address, final_wa.address,
+ AES_BLOCK_SIZE) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}
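The tag check above switches from memcmp() to crypto_memneq() so the comparison cannot terminate early and leak, via timing, how many leading tag bytes matched. A userspace sketch of the constant-time pattern; it is not the kernel's crypto_memneq() implementation:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int tag_differs(const uint8_t *a, const uint8_t *b, size_t len)
{
	uint8_t diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* always walk the whole tag */

	return diff != 0;		/* nonzero means mismatch */
}

int main(void)
{
	uint8_t tag[16] = { 1, 2, 3 }, expected[16] = { 1, 2, 3 };

	printf("%s\n", tag_differs(tag, expected, sizeof(tag)) ?
	       "-EBADMSG" : "tag ok");
	return 0;
}
```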
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index de5a8ca70d3d..6b17d179ef8a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -24,10 +24,6 @@
#include "sp-dev.h"
#include "psp-dev.h"
-#define SEV_VERSION_GREATER_OR_EQUAL(_maj, _min) \
- ((psp_master->api_major) >= _maj && \
- (psp_master->api_minor) >= _min)
-
#define DEVICE_NAME "sev"
#define SEV_FW_FILE "amd/sev.fw"
#define SEV_FW_NAME_SIZE 64
@@ -47,6 +43,15 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during
static bool psp_dead;
static int psp_timeout;
+static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
+{
+ if (psp_master->api_major > maj)
+ return true;
+ if (psp_master->api_major == maj && psp_master->api_minor >= min)
+ return true;
+ return false;
+}
+
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
struct device *dev = sp->dev;
@@ -588,7 +593,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
int ret;
/* SEV GET_ID is available from SEV API v0.16 and up */
- if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+ if (!sev_version_greater_or_equal(0, 16))
return -ENOTSUPP;
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -651,7 +656,7 @@ static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
int ret;
/* SEV GET_ID available from SEV API v0.16 and up */
- if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+ if (!sev_version_greater_or_equal(0, 16))
return -ENOTSUPP;
/* SEV FW expects the buffer it fills with the ID to be
@@ -1053,7 +1058,7 @@ void psp_pci_init(void)
psp_master->sev_state = SEV_STATE_UNINIT;
}
- if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
+ if (sev_version_greater_or_equal(0, 15) &&
sev_update_firmware(psp_master->dev) == 0)
sev_get_api_version();
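The helper above fixes the macro it replaces: with api_major = 1 and api_minor = 10, the old "major >= maj && minor >= min" test reports API 0.16 as unsupported even though 1.10 is newer. A standalone demo of both checks; the psp struct below only mimics the two version fields:

```c
#include <stdbool.h>
#include <stdio.h>

static struct { unsigned char api_major, api_minor; } psp = { 1, 10 };

#define OLD_SEV_VERSION_GREATER_OR_EQUAL(_maj, _min)	\
	((psp.api_major) >= (_maj) && (psp.api_minor) >= (_min))

static bool sev_version_greater_or_equal(unsigned char maj, unsigned char min)
{
	if (psp.api_major > maj)
		return true;
	if (psp.api_major == maj && psp.api_minor >= min)
		return true;
	return false;
}

int main(void)
{
	printf("old macro:  %d (wrong)\n", OLD_SEV_VERSION_GREATER_OR_EQUAL(0, 16));
	printf("new helper: %d\n", sev_version_greater_or_equal(0, 16));
	return 0;
}
```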
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 23061f2bc74b..2b70d8796f25 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -338,7 +338,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
len32 = DIV_ROUND_UP(length, sizeof(u32));
- dev_dbg(hdev->dev, "%s: length: %d, final: %x len32 %i\n",
+ dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
__func__, length, final, len32);
hdev->flags |= HASH_FLAGS_CPU;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 8ab12068eea3..26a654dbc69a 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -5,6 +5,7 @@
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
@@ -469,16 +470,19 @@ static const struct super_operations dax_sops = {
.drop_inode = generic_delete_inode,
};
-static struct dentry *dax_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int dax_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &dax_sops;
+ return 0;
}
static struct file_system_type dax_fs_type = {
- .name = "dax",
- .mount = dax_mount,
- .kill_sb = kill_anon_super,
+ .name = "dax",
+ .init_fs_context = dax_init_fs_context,
+ .kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
@@ -665,10 +669,6 @@ static int dax_fs_init(void)
if (!dax_cache)
return -ENOMEM;
- rc = register_filesystem(&dax_fs_type);
- if (rc)
- goto err_register_fs;
-
dax_mnt = kern_mount(&dax_fs_type);
if (IS_ERR(dax_mnt)) {
rc = PTR_ERR(dax_mnt);
@@ -679,8 +679,6 @@ static int dax_fs_init(void)
return 0;
err_mount:
- unregister_filesystem(&dax_fs_type);
- err_register_fs:
kmem_cache_destroy(dax_cache);
return rc;
@@ -689,7 +687,6 @@ static int dax_fs_init(void)
static void dax_fs_exit(void)
{
kern_unmount(dax_mnt);
- unregister_filesystem(&dax_fs_type);
kmem_cache_destroy(dax_cache);
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index dc4b2c521d79..f45bfb29ef96 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -24,6 +24,7 @@
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
@@ -59,16 +60,20 @@ static const struct dentry_operations dma_buf_dentry_ops = {
static struct vfsmount *dma_buf_mnt;
-static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
- int flags, const char *name, void *data)
+static int dma_buf_fs_init_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
- DMA_BUF_MAGIC);
+ struct pseudo_fs_context *ctx;
+
+ ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dops = &dma_buf_dentry_ops;
+ return 0;
}
static struct file_system_type dma_buf_fs_type = {
.name = "dmabuf",
- .mount = dma_buf_fs_mount,
+ .init_fs_context = dma_buf_fs_init_context,
.kill_sb = kill_anon_super,
};
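The dax and dma-buf conversions above follow the same shape: drop the .mount callback, allocate a pseudo_fs_context with init_pseudo(), and hang the super or dentry operations off it. A hedged template of that shape for a hypothetical "examplefs" (EXAMPLEFS_MAGIC and the example_* names are placeholders; this is a sketch built from the calls shown in the two hunks, not a buildable driver on its own):

```c
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/pseudo_fs.h>

#define EXAMPLEFS_MAGIC	0x51dea5	/* placeholder magic number */

static const struct super_operations example_sops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, EXAMPLEFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &example_sops;	/* or ctx->dops for dentry_operations */
	return 0;
}

static struct file_system_type example_fs_type = {
	.name			= "examplefs",
	.init_fs_context	= examplefs_init_fs_context,
	.kill_sb		= kill_anon_super,
};
```

As in the dax hunk above, a kernel-internal pseudo filesystem like this can be kern_mount()ed directly without a register_filesystem() call when it is never user-mountable.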
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 30fc04e28431..0a194af92438 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -185,6 +185,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
if (rate_discrete)
clk->list.num_rates = tot_rate_cnt;
+ clk->rate_discrete = rate_discrete;
+
err:
scmi_xfer_put(handle, t);
return ret;
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index b53d5cc9c9f6..0e94ab56f679 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -30,10 +30,12 @@ struct scmi_msg_resp_sensor_description {
__le32 id;
__le32 attributes_low;
#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
-#define NUM_TRIP_POINTS(x) (((x) >> 4) & 0xff)
+#define NUM_TRIP_POINTS(x) ((x) & 0xff)
__le32 attributes_high;
#define SENSOR_TYPE(x) ((x) & 0xff)
-#define SENSOR_SCALE(x) (((x) >> 11) & 0x3f)
+#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
+#define SENSOR_SCALE_SIGN BIT(4)
+#define SENSOR_SCALE_EXTEND GENMASK(7, 5)
#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
u8 name[SCMI_MAX_STR_SIZE];
@@ -140,6 +142,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
s = &si->sensors[desc_index + cnt];
s->id = le32_to_cpu(buf->desc[cnt].id);
s->type = SENSOR_TYPE(attrh);
+ s->scale = SENSOR_SCALE(attrh);
+ /* Sign extend to a full s8 */
+ if (s->scale & SENSOR_SCALE_SIGN)
+ s->scale |= SENSOR_SCALE_EXTEND;
strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
}
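SENSOR_SCALE() now extracts a 5-bit two's-complement exponent, and the loop above widens it to a signed 8-bit value by OR-ing in SENSOR_SCALE_EXTEND when the sign bit is set. A standalone check of that widening; the attrh sample value is made up for the demo:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define GENMASK(h, l)		(((1u << ((h) - (l) + 1)) - 1) << (l))

#define SENSOR_SCALE(x)		(((x) >> 11) & 0x1f)
#define SENSOR_SCALE_SIGN	BIT(4)
#define SENSOR_SCALE_EXTEND	GENMASK(7, 5)

int main(void)
{
	uint32_t attrh = 0x1du << 11;	/* 5-bit scale field = 0x1d == -3 */
	int8_t scale = SENSOR_SCALE(attrh);

	if (scale & SENSOR_SCALE_SIGN)
		scale |= SENSOR_SCALE_EXTEND;	/* sign extend to a full s8 */

	printf("scale = %d\n", scale);	/* prints -3 */
	return 0;
}
```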
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 08c85099d4d0..f3659443f8c2 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -359,16 +359,16 @@ static int suspend_test_thread(void *arg)
for (;;) {
/* Needs to be set first to avoid missing a wakeup. */
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
+ if (kthread_should_park())
break;
- }
schedule();
}
pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
cpu, nb_suspend, nb_shallow_sleep, nb_err);
+ kthread_parkme();
+
return nb_err;
}
@@ -433,8 +433,10 @@ static int suspend_tests(void)
/* Stop and destroy all threads, get return status. */
- for (i = 0; i < nb_threads; ++i)
+ for (i = 0; i < nb_threads; ++i) {
+ err += kthread_park(threads[i]);
err += kthread_stop(threads[i]);
+ }
out:
cpuidle_resume_and_unlock();
kfree(threads);
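The checker now parks its worker threads instead of letting them exit, so the parent can first kthread_park() each thread and then kthread_stop() it to collect the per-thread error count. A minimal worker-side sketch of that pattern, mirroring the loop above with generic names (suspend_worker is a placeholder, not the checker's function):

```c
#include <linux/kthread.h>
#include <linux/sched.h>

static int suspend_worker(void *arg)
{
	int nb_err = 0;

	/* ... run the test loop, counting errors in nb_err ... */

	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			break;
		schedule();
	}

	kthread_parkme();	/* wait here until kthread_stop() is called */

	return nb_err;		/* collected by kthread_stop() in the parent */
}
```

On the parent side this pairs with the kthread_park() + kthread_stop() loop shown in suspend_tests() above.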
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 2418abfe1fb6..19c56133234b 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -803,7 +803,9 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
+static const struct dev_pm_ops tegra_bpmp_pm_ops = {
+ .resume_early = tegra_bpmp_resume,
+};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 7696c692ad5a..cdee0b45943d 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -466,9 +466,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
struct ti_sci_xfer *xfer;
int ret;
- /* No need to setup flags since it is expected to respond */
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
- 0x0, sizeof(struct ti_sci_msg_hdr),
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
sizeof(*rev_info));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
@@ -596,9 +596,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
info = handle_to_ti_sci_info(handle);
dev = info->dev;
- /* Response is expected, so need of any flags */
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
- 0, sizeof(*req), sizeof(*resp));
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
@@ -2057,6 +2057,823 @@ static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
ia_id, vint, global_event, vint_status_bit, 0);
}
+/**
+ * ti_sci_cmd_ring_config() - configure RA ring
+ * @handle: Pointer to TI SCI handle.
+ * @valid_params: Bitfield defining validity of ring configuration
+ * parameters
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is
+ * allocated
+ * @index: Ring index
+ * @addr_lo: The ring base address lo 32 bits
+ * @addr_hi: The ring base address hi 32 bits
+ * @count: Number of ring elements
+ * @mode: The mode of the ring
+ * @size: The ring element size.
+ * @order_id: Specifies the ring's bus order ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 nav_id, u16 index,
+ u32 addr_lo, u32 addr_hi, u32 count,
+ u8 mode, u8 size, u8 order_id)
+{
+ struct ti_sci_msg_rm_ring_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
+ req->valid_params = valid_params;
+ req->nav_id = nav_id;
+ req->index = index;
+ req->addr_lo = addr_lo;
+ req->addr_hi = addr_hi;
+ req->count = count;
+ req->mode = mode;
+ req->size = size;
+ req->order_id = order_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_ring_get_config() - get RA ring configuration
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is
+ * allocated
+ * @index: Ring index
+ * @addr_lo: Returns ring's base address lo 32 bits
+ * @addr_hi: Returns ring's base address hi 32 bits
+ * @count: Returns number of ring elements
+ * @mode: Returns mode of the ring
+ * @size: Returns ring element size
+ * @order_id: Returns ring's bus order ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 index, u8 *mode,
+ u32 *addr_lo, u32 *addr_hi,
+ u32 *count, u8 *size, u8 *order_id)
+{
+ struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
+ struct ti_sci_msg_rm_ring_get_cfg_req *req;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev,
+ "RM_RA:Message get config failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->index = index;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ if (mode)
+ *mode = resp->mode;
+ if (addr_lo)
+ *addr_lo = resp->addr_lo;
+ if (addr_hi)
+ *addr_hi = resp->addr_hi;
+ if (count)
+ *count = resp->count;
+ if (size)
+ *size = resp->size;
+ if (order_id)
+ *order_id = resp->order_id;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem which should be used for
+ * pairing
+ * @src_thread: Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+ struct ti_sci_msg_psil_pair *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->src_thread = src_thread;
+ req->dst_thread = dst_thread;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem which should be used for
+ * unpairing
+ * @src_thread: Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+ struct ti_sci_msg_psil_unpair *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->src_thread = src_thread;
+ req->dst_thread = dst_thread;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->tx_pause_on_err = params->tx_pause_on_err;
+ req->tx_filt_einfo = params->tx_filt_einfo;
+ req->tx_filt_pswords = params->tx_filt_pswords;
+ req->tx_atype = params->tx_atype;
+ req->tx_chan_type = params->tx_chan_type;
+ req->tx_supr_tdpkt = params->tx_supr_tdpkt;
+ req->tx_fetch_size = params->tx_fetch_size;
+ req->tx_credit_count = params->tx_credit_count;
+ req->txcq_qnum = params->txcq_qnum;
+ req->tx_priority = params->tx_priority;
+ req->tx_qos = params->tx_qos;
+ req->tx_orderid = params->tx_orderid;
+ req->fdepth = params->fdepth;
+ req->tx_sched_priority = params->tx_sched_priority;
+ req->tx_burst_size = params->tx_burst_size;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->rx_fetch_size = params->rx_fetch_size;
+ req->rxcq_qnum = params->rxcq_qnum;
+ req->rx_priority = params->rx_priority;
+ req->rx_qos = params->rx_qos;
+ req->rx_orderid = params->rx_orderid;
+ req->rx_sched_priority = params->rx_sched_priority;
+ req->flowid_start = params->flowid_start;
+ req->flowid_cnt = params->flowid_cnt;
+ req->rx_pause_on_err = params->rx_pause_on_err;
+ req->rx_atype = params->rx_atype;
+ req->rx_chan_type = params->rx_chan_type;
+ req->rx_ignore_short = params->rx_ignore_short;
+ req->rx_ignore_long = params->rx_ignore_long;
+ req->rx_burst_size = params->rx_burst_size;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_flow_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->flow_index = params->flow_index;
+ req->rx_einfo_present = params->rx_einfo_present;
+ req->rx_psinfo_present = params->rx_psinfo_present;
+ req->rx_error_handling = params->rx_error_handling;
+ req->rx_desc_type = params->rx_desc_type;
+ req->rx_sop_offset = params->rx_sop_offset;
+ req->rx_dest_qnum = params->rx_dest_qnum;
+ req->rx_src_tag_hi = params->rx_src_tag_hi;
+ req->rx_src_tag_lo = params->rx_src_tag_lo;
+ req->rx_dest_tag_hi = params->rx_dest_tag_hi;
+ req->rx_dest_tag_lo = params->rx_dest_tag_lo;
+ req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
+ req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
+ req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
+ req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
+ req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
+ req->rx_fdq1_qnum = params->rx_fdq1_qnum;
+ req->rx_fdq2_qnum = params->rx_fdq2_qnum;
+ req->rx_fdq3_qnum = params->rx_fdq3_qnum;
+ req->rx_ps_location = params->rx_ps_location;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_request() - Command to request a physical processor control
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
+ u8 proc_id)
+{
+ struct ti_sci_msg_req_proc_request *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_release() - Command to release a physical processor control
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
+ u8 proc_id)
+{
+ struct ti_sci_msg_req_proc_release *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_handover() - Command to handover a physical processor
+ * control to a host in the processor's access
+ * control list.
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @host_id: Host ID to get the control of the processor
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
+ u8 proc_id, u8 host_id)
+{
+ struct ti_sci_msg_req_proc_handover *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->host_id = host_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_config() - Command to set the processor boot
+ * configuration flags
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @bootvector: Processor boot vector (boot address)
+ * @config_flags_set: Configuration flags to be set
+ * @config_flags_clear: Configuration flags to be cleared.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
+ u8 proc_id, u64 bootvector,
+ u32 config_flags_set,
+ u32 config_flags_clear)
+{
+ struct ti_sci_msg_req_set_config *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
+ req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
+ TI_SCI_ADDR_HIGH_SHIFT;
+ req->config_flags_set = config_flags_set;
+ req->config_flags_clear = config_flags_clear;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
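set_config above splits the 64-bit boot vector into bootvector_low/bootvector_high using the TI_SCI_ADDR_* mask and shift, and get_status further down rejoins the halves the same way. A userspace sketch of that split and rejoin; the mask and shift values are assumed (low/high 32 bits), since the real TI_SCI_ADDR_* definitions are outside this hunk:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_LOW_MASK	0x00000000ffffffffULL	/* assumed */
#define ADDR_HIGH_MASK	0xffffffff00000000ULL	/* assumed */
#define ADDR_HIGH_SHIFT	32			/* assumed */

int main(void)
{
	uint64_t bootvector = 0x000000089abcdef0ULL;
	uint32_t low = bootvector & ADDR_LOW_MASK;
	uint32_t high = (bootvector & ADDR_HIGH_MASK) >> ADDR_HIGH_SHIFT;
	uint64_t rejoined = (low & ADDR_LOW_MASK) |
			    (((uint64_t)high << ADDR_HIGH_SHIFT) & ADDR_HIGH_MASK);

	printf("low=0x%08" PRIx32 " high=0x%08" PRIx32 " rejoined=0x%016" PRIx64 "\n",
	       low, high, rejoined);
	return 0;
}
```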
+
+/**
+ * ti_sci_cmd_proc_set_control() - Command to set the processor boot
+ * control flags
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @control_flags_set: Control flags to be set
+ * @control_flags_clear: Control flags to be cleared
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
+ u8 proc_id, u32 control_flags_set,
+ u32 control_flags_clear)
+{
+ struct ti_sci_msg_req_set_ctrl *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->control_flags_set = control_flags_set;
+ req->control_flags_clear = control_flags_clear;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @bv: Returns the processor boot vector
+ * @cfg_flags: Returns the processor configuration flags
+ * @ctrl_flags: Returns the processor control flags
+ * @sts_flags: Returns the processor status flags
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
+ u8 proc_id, u64 *bv, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *sts_flags)
+{
+ struct ti_sci_msg_resp_get_status *resp;
+ struct ti_sci_msg_req_get_status *req;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
+ (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
+ TI_SCI_ADDR_HIGH_MASK);
+ *cfg_flags = resp->config_flags;
+ *ctrl_flags = resp->control_flags;
+ *sts_flags = resp->status_flags;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
/*
* ti_sci_setup_ops() - Setup the operations structures
* @info: pointer to TISCI pointer
@@ -2069,6 +2886,10 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_clk_ops *cops = &ops->clk_ops;
struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
+ struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
+ struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
+ struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
+ struct ti_sci_proc_ops *pops = &ops->proc_ops;
core_ops->reboot_device = ti_sci_cmd_core_reboot;
@@ -2108,6 +2929,23 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
iops->set_event_map = ti_sci_cmd_set_event_map;
iops->free_irq = ti_sci_cmd_free_irq;
iops->free_event_map = ti_sci_cmd_free_event_map;
+
+ rops->config = ti_sci_cmd_ring_config;
+ rops->get_config = ti_sci_cmd_ring_get_config;
+
+ psilops->pair = ti_sci_cmd_rm_psil_pair;
+ psilops->unpair = ti_sci_cmd_rm_psil_unpair;
+
+ udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
+ udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
+ udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
+
+ pops->request = ti_sci_cmd_proc_request;
+ pops->release = ti_sci_cmd_proc_release;
+ pops->handover = ti_sci_cmd_proc_handover;
+ pops->set_config = ti_sci_cmd_proc_set_config;
+ pops->set_control = ti_sci_cmd_proc_set_control;
+ pops->get_status = ti_sci_cmd_proc_get_status;
}
/**
@@ -2395,6 +3233,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
struct device *dev, u32 dev_id, char *of_prop)
{
struct ti_sci_resource *res;
+ bool valid_set = false;
u32 resource_subtype;
int i, ret;
@@ -2426,15 +3265,18 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
&res->desc[i].start,
&res->desc[i].num);
if (ret) {
- dev_err(dev, "dev = %d subtype %d not allocated for this host\n",
+ dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
dev_id, resource_subtype);
- return ERR_PTR(ret);
+ res->desc[i].start = 0;
+ res->desc[i].num = 0;
+ continue;
}
dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
dev_id, resource_subtype, res->desc[i].start,
res->desc[i].num);
+ valid_set = true;
res->desc[i].res_map =
devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
sizeof(*res->desc[i].res_map), GFP_KERNEL);
@@ -2443,7 +3285,10 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
}
raw_spin_lock_init(&res->lock);
- return res;
+ if (valid_set)
+ return res;
+
+ return ERR_PTR(-EINVAL);
}
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 414e0ced5409..f0d068c03944 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -42,6 +42,43 @@
#define TI_SCI_MSG_SET_IRQ 0x1000
#define TI_SCI_MSG_FREE_IRQ 0x1001
+/* NAVSS resource management */
+/* Ringacc requests */
+#define TI_SCI_MSG_RM_RING_ALLOCATE 0x1100
+#define TI_SCI_MSG_RM_RING_FREE 0x1101
+#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
+#define TI_SCI_MSG_RM_RING_RESET 0x1103
+#define TI_SCI_MSG_RM_RING_CFG 0x1110
+#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
+
+/* PSI-L requests */
+#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
+#define TI_SCI_MSG_RM_PSIL_UNPAIR 0x1281
+
+#define TI_SCI_MSG_RM_UDMAP_TX_ALLOC 0x1200
+#define TI_SCI_MSG_RM_UDMAP_TX_FREE 0x1201
+#define TI_SCI_MSG_RM_UDMAP_RX_ALLOC 0x1210
+#define TI_SCI_MSG_RM_UDMAP_RX_FREE 0x1211
+#define TI_SCI_MSG_RM_UDMAP_FLOW_CFG 0x1220
+#define TI_SCI_MSG_RM_UDMAP_OPT_FLOW_CFG 0x1221
+
+#define TISCI_MSG_RM_UDMAP_TX_CH_CFG 0x1205
+#define TISCI_MSG_RM_UDMAP_TX_CH_GET_CFG 0x1206
+#define TISCI_MSG_RM_UDMAP_RX_CH_CFG 0x1215
+#define TISCI_MSG_RM_UDMAP_RX_CH_GET_CFG 0x1216
+#define TISCI_MSG_RM_UDMAP_FLOW_CFG 0x1230
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_CFG 0x1231
+#define TISCI_MSG_RM_UDMAP_FLOW_GET_CFG 0x1232
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_GET_CFG 0x1233
+
+/* Processor Control requests */
+#define TI_SCI_MSG_PROC_REQUEST 0xc000
+#define TI_SCI_MSG_PROC_RELEASE 0xc001
+#define TI_SCI_MSG_PROC_HANDOVER 0xc005
+#define TI_SCI_MSG_SET_CONFIG 0xc100
+#define TI_SCI_MSG_SET_CTRL 0xc101
+#define TI_SCI_MSG_GET_STATUS 0xc400
+
/**
* struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
* @type: Type of messages: One of TI_SCI_MSG* values
@@ -604,4 +641,777 @@ struct ti_sci_msg_req_manage_irq {
u8 secondary_host;
} __packed;
+/**
+ * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem ring.
+ * @hdr: Generic Header
+ * @valid_params: Bitfield defining validity of ring configuration parameters.
+ * The ring configuration fields are not valid, and will not be used for
+ * ring configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_lo
+ * 1 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_hi
+ * 2 - Valid bit for @tisci_msg_rm_ring_cfg_req count
+ * 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
+ * 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
+ * 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index to be configured.
+ * @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
+ * RING_BA_LO register
+ * @addr_hi: 16 MSBs of ring base address to be programmed into the ring's
+ * RING_BA_HI register.
+ * @count: Number of ring elements. Must be even if the mode is CREDENTIALS
+ * or QM.
+ * @mode: Specifies the mode in which the ring is to be configured.
+ * @size: Specifies encoded ring element size. To calculate the encoded size use
+ * the formula (log2(size_bytes) - 2), where size_bytes cannot be
+ * greater than 256.
+ * @order_id: Specifies the ring's bus order ID.
+ */
+struct ti_sci_msg_rm_ring_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+} __packed;
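
Since the @size field uses the encoded form described above, a small helper makes the encoding concrete. This is a sketch derived from the documented formula (log2(size_bytes) - 2, with size_bytes no greater than 256), not a helper that exists in the patch.

#include <linux/log2.h>

/* Encode a ring element size in bytes into the @size field value. */
static inline int example_ring_elsize_encode(unsigned int size_bytes)
{
	/* The formula only holds for powers of two between 4 and 256 bytes. */
	if (size_bytes < 4 || size_bytes > 256 || !is_power_of_2(size_bytes))
		return -EINVAL;

	return ilog2(size_bytes) - 2;	/* e.g. 8 bytes -> 1, 256 bytes -> 6 */
}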
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
+ *
+ * Gets the configuration of the non-real-time register fields of a ring. The
+ * host, or a supervisor of the host, who owns the ring must be the requesting
+ * host. The values of the non-real-time registers are returned in
+ * @ti_sci_msg_rm_ring_get_cfg_resp.
+ *
+ * @hdr: Generic Header
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u16 nav_id;
+ u16 index;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
+ *
+ * Response received by host processor after RM has handled
+ * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
+ * non-real-time register values.
+ *
+ * @hdr: Generic Header
+ * @addr_lo: 32 LSBs of the ring base address
+ * @addr_hi: 16 MSBs of the ring base address
+ * @count: Number of ring elements
+ * @mode: Ring mode
+ * @size: Encoded ring element size
+ * @order_id: Ring order ID
+ */
+struct ti_sci_msg_rm_ring_get_cfg_resp {
+ struct ti_sci_msg_hdr hdr;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_pair - Pairs a PSI-L source thread to a destination
+ * thread
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ * used to pair the source and destination threads.
+ * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register programmed with the destination thread if the pairing
+ * is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread ID is less than 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register programmed with the source thread if the pairing
+ * is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_PAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_pair {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 src_thread;
+ u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_unpair - Unpairs a PSI-L source thread from a
+ * destination thread
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ * used to unpair the source and destination threads.
+ * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread ID is less than 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_UNPAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_unpair {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 src_thread;
+ u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_udmap_rx_flow_cfg - UDMAP receive flow configuration
+ * message
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ * allocated
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_einfo_present: UDMAP receive flow extended packet info present.
+ * @rx_psinfo_present: UDMAP receive flow PS words present.
+ * @rx_error_handling: UDMAP receive flow error handling configuration. Valid
+ * values are TI_SCI_RM_UDMAP_RX_FLOW_ERR_DROP/RETRY.
+ * @rx_desc_type: UDMAP receive flow descriptor type. It can be one of
+ * TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST/MONO.
+ * @rx_sop_offset: UDMAP receive flow start of packet offset.
+ * @rx_dest_qnum: UDMAP receive flow destination queue number.
+ * @rx_ps_location: UDMAP receive flow PS words location.
+ * 0 - end of packet descriptor
+ * 1 - Beginning of the data buffer
+ * @rx_src_tag_hi: UDMAP receive flow source tag high byte constant
+ * @rx_src_tag_lo: UDMAP receive flow source tag low byte constant
+ * @rx_dest_tag_hi: UDMAP receive flow destination tag high byte constant
+ * @rx_dest_tag_lo: UDMAP receive flow destination tag low byte constant
+ * @rx_src_tag_hi_sel: UDMAP receive flow source tag high byte selector
+ * @rx_src_tag_lo_sel: UDMAP receive flow source tag low byte selector
+ * @rx_dest_tag_hi_sel: UDMAP receive flow destination tag high byte selector
+ * @rx_dest_tag_lo_sel: UDMAP receive flow destination tag low byte selector
+ * @rx_size_thresh_en: UDMAP receive flow packet size based free buffer queue
+ * enable. If enabled, the ti_sci_rm_udmap_rx_flow_opt_cfg also needs to be
+ * configured and sent.
+ * @rx_fdq0_sz0_qnum: UDMAP receive flow free descriptor queue 0.
+ * @rx_fdq1_qnum: UDMAP receive flow free descriptor queue 1.
+ * @rx_fdq2_qnum: UDMAP receive flow free descriptor queue 2.
+ * @rx_fdq3_qnum: UDMAP receive flow free descriptor queue 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct ti_sci_msg_udmap_rx_flow_cfg {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 flow_index;
+ u32 rx_ch_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_ps_location;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u8 rx_size_thresh_en;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+} __packed;
+
+/**
+ * struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg - parameters for UDMAP receive
+ * flow optional configuration
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ * allocated
+ * @flow_index: UDMAP receive flow index for optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_size_thresh0: UDMAP receive flow packet size threshold 0.
+ * @rx_size_thresh1: UDMAP receive flow packet size threshold 1.
+ * @rx_size_thresh2: UDMAP receive flow packet size threshold 2.
+ * @rx_fdq0_sz1_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 1.
+ * @rx_fdq0_sz2_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 2.
+ * @rx_fdq0_sz3_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 flow_index;
+ u32 rx_ch_index;
+ u16 rx_size_thresh0;
+ u16 rx_size_thresh1;
+ u16 rx_size_thresh2;
+ u16 rx_fdq0_sz1_qnum;
+ u16 rx_fdq0_sz2_qnum;
+ u16 rx_fdq0_sz3_qnum;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_tx_ch_cfg_req - Configure a Navigator Subsystem UDMAP transmit channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * transmit channel. The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of tx channel configuration
+ * parameters. The tx channel configuration fields are not valid, and will not
+ * be used for ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_pause_on_err
+ * 1 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_atype
+ * 2 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_chan_type
+ * 3 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_fetch_size
+ * 4 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::txcq_qnum
+ * 5 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_priority
+ * 6 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_qos
+ * 7 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_orderid
+ * 8 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_sched_priority
+ * 9 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_einfo
+ * 10 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_pswords
+ * 11 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_supr_tdpkt
+ * 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
+ * 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
+ * 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
+ *
+ * @index: UDMAP transmit channel index.
+ *
+ * @tx_pause_on_err: UDMAP transmit channel pause on error configuration to
+ * be programmed into the tx_pause_on_err field of the channel's TCHAN_TCFG
+ * register.
+ *
+ * @tx_filt_einfo: UDMAP transmit channel extended packet information passing
+ * configuration to be programmed into the tx_filt_einfo field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_filt_pswords: UDMAP transmit channel protocol specific word passing
+ * configuration to be programmed into the tx_filt_pswords field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_atype: UDMAP transmit channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the tx_atype field of
+ * the channel's TCHAN_TCFG register.
+ *
+ * @tx_chan_type: UDMAP transmit channel functional channel type and work
+ * passing mechanism configuration to be programmed into the tx_chan_type
+ * field of the channel's TCHAN_TCFG register.
+ *
+ * @tx_supr_tdpkt: UDMAP transmit channel teardown packet generation suppression
+ * configuration to be programmed into the tx_supr_tdpkt field of the channel's
+ * TCHAN_TCFG register.
+ *
+ * @tx_fetch_size: UDMAP transmit channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the tx_fetch_size field of the
+ * channel's TCHAN_TCFG register. The user must make sure to set the maximum
+ * word count that can pass through the channel for any allowed descriptor type.
+ *
+ * @tx_credit_count: UDMAP transmit channel transfer request credit count
+ * configuration to be programmed into the count field of the TCHAN_TCREDIT
+ * register. Specifies how many credits for complete TRs are available.
+ *
+ * @txcq_qnum: UDMAP transmit channel completion queue configuration to be
+ * programmed into the txcq_qnum field of the TCHAN_TCQ register. The specified
+ * completion queue must be assigned to the host, or a subordinate of the host,
+ * requesting configuration of the transmit channel.
+ *
+ * @tx_priority: UDMAP transmit channel transmit priority value to be programmed
+ * into the priority field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_qos: UDMAP transmit channel transmit qos value to be programmed into the
+ * qos field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_orderid: UDMAP transmit channel bus order id value to be programmed into
+ * the orderid field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @fdepth: UDMAP transmit channel FIFO depth configuration to be programmed
+ * into the fdepth field of the TCHAN_TFIFO_DEPTH register. Sets the number of
+ * Tx FIFO bytes which are allowed to be stored for the channel. Check the UDMAP
+ * section of the TRM for restrictions regarding this parameter.
+ *
+ * @tx_sched_priority: UDMAP transmit channel tx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * TCHAN_TST_SCHED register.
+ *
+ * @tx_burst_size: UDMAP transmit channel burst size configuration to be
+ * programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ */
+struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u8 tx_pause_on_err;
+ u8 tx_filt_einfo;
+ u8 tx_filt_pswords;
+ u8 tx_atype;
+ u8 tx_chan_type;
+ u8 tx_supr_tdpkt;
+ u16 tx_fetch_size;
+ u8 tx_credit_count;
+ u16 txcq_qnum;
+ u8 tx_priority;
+ u8 tx_qos;
+ u8 tx_orderid;
+ u16 fdepth;
+ u8 tx_sched_priority;
+ u8 tx_burst_size;
+} __packed;
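
Because system firmware only consumes fields whose valid bit is set, a caller can update a subset of the channel configuration. The sketch below fills a request that programs only the channel type and the completion queue; the bit positions come from the valid_params table above, while the tx_chan_type value is a hypothetical example, not taken from the patch.

#include <linux/bits.h>
#include <linux/string.h>

static void example_fill_tx_cfg(struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req,
				u16 nav_id, u16 chan_index, u16 tx_cq)
{
	memset(req, 0, sizeof(*req));

	/* Bit 2: tx_chan_type, bit 4: txcq_qnum (see valid_params above). */
	req->valid_params = BIT(2) | BIT(4);
	req->nav_id = nav_id;
	req->index = chan_index;
	req->tx_chan_type = 2;		/* hypothetical channel type value */
	req->txcq_qnum = tx_cq;
	/* All other fields stay zero and are ignored by firmware. */
}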
+
+/**
+ * struct ti_sci_msg_rm_udmap_rx_ch_cfg_req - Configure a Navigator Subsystem UDMAP receive channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * receive channel. The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of rx channel configuration
+ * parameters.
+ * The rx channel configuration fields are not valid, and will not be used for
+ * ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_pause_on_err
+ * 1 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_atype
+ * 2 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_chan_type
+ * 3 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_fetch_size
+ * 4 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rxcq_qnum
+ * 5 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_priority
+ * 6 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_qos
+ * 7 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_orderid
+ * 8 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_sched_priority
+ * 9 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_start
+ * 10 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_cnt
+ * 11 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_short
+ * 12 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_long
+ * 14 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where rx channel is located
+ *
+ * @index: UDMAP receive channel index.
+ *
+ * @rx_fetch_size: UDMAP receive channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the rx_fetch_size field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rxcq_qnum: UDMAP receive channel completion queue configuration to be
+ * programmed into the rxcq_qnum field of the RCHAN_RCQ register.
+ * The specified completion queue must be assigned to the host, or a subordinate
+ * of the host, requesting configuration of the receive channel.
+ *
+ * @rx_priority: UDMAP receive channel receive priority value to be programmed
+ * into the priority field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_qos: UDMAP receive channel receive qos value to be programmed into the
+ * qos field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_orderid: UDMAP receive channel bus order id value to be programmed into
+ * the orderid field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_sched_priority: UDMAP receive channel rx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * RCHAN_RST_SCHED register.
+ *
+ * @flowid_start: UDMAP receive channel additional flows starting index
+ * configuration to program into the flow_start field of the RCHAN_RFLOW_RNG
+ * register. Specifies the starting index for flow IDs the receive channel is to
+ * make use of beyond the default flow. flowid_start and @ref flowid_cnt must be
+ * set as valid and configured together. The starting flow ID set by
+ * @ref flowid_start must be a flow index within the Navigator Subsystem's subset
+ * of flows beyond the default flows statically mapped to receive channels.
+ * The additional flows must be assigned to the host, or a subordinate of the
+ * host, requesting configuration of the receive channel.
+ *
+ * @flowid_cnt: UDMAP receive channel additional flows count configuration to
+ * program into the flowid_cnt field of the RCHAN_RFLOW_RNG register.
+ * This field specifies how many flow IDs are in the additional contiguous range
+ * of legal flow IDs for the channel. @ref flowid_start and flowid_cnt must be
+ * set as valid and configured together. Disabling the valid_params field bit
+ * for flowid_cnt indicates no flow IDs other than the default are to be
+ * allocated and used by the receive channel. @ref flowid_start plus flowid_cnt
+ * cannot be greater than the number of receive flows in the receive channel's
+ * Navigator Subsystem. The additional flows must be assigned to the host, or a
+ * subordinate of the host, requesting configuration of the receive channel.
+ *
+ * @rx_pause_on_err: UDMAP receive channel pause on error configuration to be
+ * programmed into the rx_pause_on_err field of the channel's RCHAN_RCFG
+ * register.
+ *
+ * @rx_atype: UDMAP receive channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the rx_atype field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_chan_type: UDMAP receive channel functional channel type and work passing
+ * mechanism configuration to be programmed into the rx_chan_type field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_ignore_short: UDMAP receive channel short packet treatment configuration
+ * to be programmed into the rx_ignore_short field of the RCHAN_RCFG register.
+ *
+ * @rx_ignore_long: UDMAP receive channel long packet treatment configuration to
+ * be programmed into the rx_ignore_long field of the RCHAN_RCFG register.
+ *
+ * @rx_burst_size: UDMAP receive channel burst size configuration to be
+ * programmed into the rx_burst_size field of the RCHAN_RCFG register.
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u16 rx_fetch_size;
+ u16 rxcq_qnum;
+ u8 rx_priority;
+ u8 rx_qos;
+ u8 rx_orderid;
+ u8 rx_sched_priority;
+ u16 flowid_start;
+ u16 flowid_cnt;
+ u8 rx_pause_on_err;
+ u8 rx_atype;
+ u8 rx_chan_type;
+ u8 rx_ignore_short;
+ u8 rx_ignore_long;
+ u8 rx_burst_size;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_flow_cfg_req - Configure a Navigator Subsystem UDMAP receive flow
+ *
+ * Configures a Navigator Subsystem UDMAP receive flow's registers.
+ * Configuration does not include the flow registers which handle size-based
+ * free descriptor queue routing.
+ *
+ * The flow index must be assigned to the host defined in the TISCI header via
+ * the RM board configuration resource assignment range list.
+ *
+ * @hdr: Standard TISCI header
+ *
+ * @valid_params:
+ * Bitfield defining validity of rx flow configuration parameters. The
+ * rx flow configuration fields are not valid, and will not be used for flow
+ * configuration, if their corresponding valid bit is zero. Valid bit usage:
+ * 0 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_einfo_present
+ * 1 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_psinfo_present
+ * 2 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_error_handling
+ * 3 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_desc_type
+ * 4 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_sop_offset
+ * 5 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_qnum
+ * 6 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi
+ * 7 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo
+ * 8 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi
+ * 9 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo
+ * 10 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi_sel
+ * 11 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo_sel
+ * 12 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi_sel
+ * 13 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo_sel
+ * 14 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq0_sz0_qnum
+ * 15 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq1_sz0_qnum
+ * 16 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq2_sz0_qnum
+ * 17 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq3_sz0_qnum
+ * 18 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_ps_location
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem from which the receive flow is
+ * allocated
+ *
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ *
+ * @rx_einfo_present:
+ * UDMAP receive flow extended packet info present configuration to be
+ * programmed into the rx_einfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_psinfo_present:
+ * UDMAP receive flow PS words present configuration to be programmed into the
+ * rx_psinfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_error_handling:
+ * UDMAP receive flow error handling configuration to be programmed into the
+ * rx_error_handling field of the flow's RFLOW_RFA register.
+ *
+ * @rx_desc_type:
+ * UDMAP receive flow descriptor type configuration to be programmed into the
+ * rx_desc_type field of the flow's RFLOW_RFA register.
+ *
+ * @rx_sop_offset:
+ * UDMAP receive flow start of packet offset configuration to be programmed
+ * into the rx_sop_offset field of the RFLOW_RFA register. See the UDMAP
+ * section of the TRM for more information on this setting. Valid values for
+ * this field are 0-255 bytes.
+ *
+ * @rx_dest_qnum:
+ * UDMAP receive flow destination queue configuration to be programmed into the
+ * rx_dest_qnum field of the flow's RFLOW_RFA register. The specified
+ * destination queue must be valid within the Navigator Subsystem and must be
+ * owned by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_src_tag_hi:
+ * UDMAP receive flow source tag high byte constant configuration to be
+ * programmed into the rx_src_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo:
+ * UDMAP receive flow source tag low byte constant configuration to be
+ * programmed into the rx_src_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi:
+ * UDMAP receive flow destination tag high byte constant configuration to be
+ * programmed into the rx_dest_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo:
+ * UDMAP receive flow destination tag low byte constant configuration to be
+ * programmed into the rx_dest_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_hi_sel:
+ * UDMAP receive flow source tag high byte selector configuration to be
+ * programmed into the rx_src_tag_hi_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo_sel:
+ * UDMAP receive flow source tag low byte selector configuration to be
+ * programmed into the rx_src_tag_lo_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi_sel:
+ * UDMAP receive flow destination tag high byte selector configuration to be
+ * programmed into the rx_dest_tag_hi_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo_sel:
+ * UDMAP receive flow destination tag low byte selector configuration to be
+ * programmed into the rx_dest_tag_lo_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_fdq0_sz0_qnum:
+ * UDMAP receive flow free descriptor queue 0 configuration to be programmed
+ * into the rx_fdq0_sz0_qnum field of the flow's RFLOW_RFD register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq1_qnum:
+ * UDMAP receive flow free descriptor queue 1 configuration to be programmed
+ * into the rx_fdq1_qnum field of the flow's RFLOW_RFD register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq2_qnum:
+ * UDMAP receive flow free descriptor queue 2 configuration to be programmed
+ * into the rx_fdq2_qnum field of the flow's RFLOW_RFE register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq3_qnum:
+ * UDMAP receive flow free descriptor queue 3 configuration to be programmed
+ * into the rx_fdq3_qnum field of the flow's RFLOW_RFE register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_ps_location:
+ * UDMAP receive flow PS words location configuration to be programmed into the
+ * rx_ps_location field of the flow's RFLOW_RFA register.
+ */
+struct ti_sci_msg_rm_udmap_flow_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 flow_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+ u8 rx_ps_location;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_request - Request a processor
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being requested
+ *
+ * Request type is TI_SCI_MSG_PROC_REQUEST, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_request {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_release - Release a processor
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being released
+ *
+ * Request type is TI_SCI_MSG_PROC_RELEASE, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_release {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_handover - Handover a processor to a host
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being handed over
+ * @host_id: Host ID the control needs to be transferred to
+ *
+ * Request type is TI_SCI_MSG_PROC_HANDOVER, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_handover {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u8 host_id;
+} __packed;
+
+/* Boot Vector masks */
+#define TI_SCI_ADDR_LOW_MASK GENMASK_ULL(31, 0)
+#define TI_SCI_ADDR_HIGH_MASK GENMASK_ULL(63, 32)
+#define TI_SCI_ADDR_HIGH_SHIFT 32
+
+/**
+ * struct ti_sci_msg_req_set_config - Set Processor boot configuration
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being configured
+ * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags_set: Optional Processor specific Config Flags to set.
+ * Setting a bit here implies the corresponding mode
+ * will be set
+ * @config_flags_clear: Optional Processor specific Config Flags to clear.
+ * Setting a bit here implies the corresponding mode
+ * will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CONFIG, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_config {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 bootvector_low;
+ u32 bootvector_high;
+ u32 config_flags_set;
+ u32 config_flags_clear;
+} __packed;
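
The boot vector masks above split a 64-bit address across @bootvector_low and @bootvector_high. A minimal sketch of that split, illustrative only:

static void example_split_bootvector(u64 boot_vector,
				     struct ti_sci_msg_req_set_config *req)
{
	req->bootvector_low = boot_vector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (boot_vector & TI_SCI_ADDR_HIGH_MASK) >>
			       TI_SCI_ADDR_HIGH_SHIFT;
}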
+
+/**
+ * struct ti_sci_msg_req_set_ctrl - Set Processor boot control flags
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being configured
+ * @control_flags_set: Optional Processor specific Control Flags to set.
+ * Setting a bit here implies the corresponding mode
+ * will be set
+ * @control_flags_clear: Optional Processor specific Control Flags to clear.
+ * Setting a bit here implies the corresponding mode
+ * will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CTRL, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_ctrl {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 control_flags_set;
+ u32 control_flags_clear;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_status - Processor boot status request
+ * @hdr: Generic Header
+ * @processor_id: ID of processor whose status is being requested
+ *
+ * Request type is TI_SCI_MSG_GET_STATUS, response is an appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_status {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_status - Processor boot status response
+ * @hdr: Generic Header
+ * @processor_id: ID of processor whose status is returned
+ * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags: Optional Processor specific Config Flags set currently
+ * @control_flags: Optional Processor specific Control Flags set currently
+ * @status_flags: Optional Processor specific Status Flags set currently
+ *
+ * Response structure to a TI_SCI_MSG_GET_STATUS request.
+ */
+struct ti_sci_msg_resp_get_status {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 bootvector_low;
+ u32 bootvector_high;
+ u32 config_flags;
+ u32 control_flags;
+ u32 status_flags;
+} __packed;
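
Taken together, the processor-control messages support a request / configure / hand-over flow. The sketch below strings the proc_ops installed in ti_sci_setup_ops() into that sequence; the op prototypes are assumptions modeled on the request structures above, not a definitive API.

static int example_boot_remote_core(const struct ti_sci_handle *handle,
				    u8 proc_id, u8 host_id, u64 boot_vector)
{
	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
	int ret;

	ret = pops->request(handle, proc_id);	/* TI_SCI_MSG_PROC_REQUEST */
	if (ret)
		return ret;

	/* TI_SCI_MSG_SET_CONFIG: program the boot vector, no flag changes. */
	ret = pops->set_config(handle, proc_id, boot_vector, 0, 0);
	if (ret)
		goto release;

	/* TI_SCI_MSG_PROC_HANDOVER: give control to the new host. */
	ret = pops->handover(handle, proc_id, host_id);
	if (!ret)
		return 0;

release:
	pops->release(handle, proc_id);		/* TI_SCI_MSG_PROC_RELEASE */
	return ret;
}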
+
#endif /* __TI_SCI_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f88d8141447c..8199d201b43a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -164,6 +164,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
+extern int amdgpu_noretry;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 20ce158490db..6d54decef7f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -106,10 +106,10 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank, use_ring;
- unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
+ unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
pm_pg_lock = use_bank = use_ring = false;
- instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
+ instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
if (size & 0x3 || *pos & 0x3 ||
((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
@@ -135,6 +135,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
me = (*pos & GENMASK_ULL(33, 24)) >> 24;
pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
+ vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
use_ring = 1;
} else {
@@ -152,7 +153,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
sh_bank, instance_bank);
} else if (use_ring) {
mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
+ amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
}
if (pm_pg_lock)
@@ -185,7 +186,7 @@ end:
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
} else if (use_ring) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7401bc95c15b..5a7f893cf724 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2537,6 +2537,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
mutex_init(&adev->virt.dpm_mutex);
+ mutex_init(&adev->psp.mutex);
r = amdgpu_device_check_arguments(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index e049ae6a76fb..1481899f86c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -123,7 +123,7 @@ static int hw_id_map[MAX_HWIP] = {
[UVD_HWIP] = UVD_HWID,
[VCE_HWIP] = VCE_HWID,
[DF_HWIP] = DF_HWID,
- [DCE_HWIP] = DCEAZ_HWID,
+ [DCE_HWIP] = DMU_HWID,
[OSSSYS_HWIP] = OSSSYS_HWID,
[SMUIO_HWIP] = SMUIO_HWID,
[PWR_HWIP] = PWR_HWID,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1b0613c7cf95..f2e8b4238efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -140,8 +140,9 @@ uint amdgpu_smu_memory_pool_size = 0;
uint amdgpu_dc_feature_mask = 0;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
-int amdgpu_discovery = 0;
+int amdgpu_discovery = -1;
int amdgpu_mes = 0;
+int amdgpu_noretry;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -593,6 +594,7 @@ module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/**
* DOC: discovery (int)
* Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
+ * (-1 = auto (default), 0 = disabled, 1 = enabled)
*/
MODULE_PARM_DESC(discovery,
"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
@@ -607,6 +609,10 @@ MODULE_PARM_DESC(mes,
"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
+MODULE_PARM_DESC(noretry,
+ "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
+module_param_named(noretry, amdgpu_noretry, int, 0644);
+
#ifdef CONFIG_HSA_AMD
/**
* DOC: sched_policy (int)
@@ -683,17 +689,6 @@ MODULE_PARM_DESC(ignore_crat,
"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
/**
- * DOC: noretry (int)
- * This parameter sets sh_mem_config.retry_disable. Default value, 0, enables retry.
- * Setting 1 disables retry.
- * Retry is needed for recoverable page faults.
- */
-int noretry;
-module_param(noretry, int, 0644);
-MODULE_PARM_DESC(noretry,
- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
-
-/**
* DOC: halt_if_hws_hang (int)
* Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
* Setting 1 enables halt on hang.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f96407ba9770..1199b5828b90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -195,7 +195,7 @@ struct amdgpu_gfx_funcs {
uint32_t wave, uint32_t start, uint32_t size,
uint32_t *dst);
void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
- u32 queue);
+ u32 queue, u32 vmid);
};
struct amdgpu_ngg_buf {
@@ -327,7 +327,7 @@ struct amdgpu_gfx {
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
+#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 193d53720d9b..8b7efd0a7028 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2077,11 +2077,6 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the sclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk, &size);
@@ -2112,11 +2107,6 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the sclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk, &size);
@@ -2996,13 +2986,10 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
}
if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
- mutex_lock(&(smu->mutex));
smu_handle_task(&adev->smu,
smu_dpm->dpm_level,
AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
- mutex_unlock(&(smu->mutex));
} else {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e69ad6e089c5..c027e5e7713e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -130,6 +130,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
int index;
int timeout = 2000;
+ mutex_lock(&psp->mutex);
+
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -139,6 +141,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
fence_mc_addr, index);
if (ret) {
atomic_dec(&psp->fence_value);
+ mutex_unlock(&psp->mutex);
return ret;
}
@@ -161,8 +164,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
ucode->ucode_id);
DRM_WARN("psp command failed and response status is (%d)\n",
psp->cmd_buf_mem->resp.status);
- if (!timeout)
+ if (!timeout) {
+ mutex_unlock(&psp->mutex);
return -EINVAL;
+ }
}
/* get xGMI session id from response buffer */
@@ -172,6 +177,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
}
+ mutex_unlock(&psp->mutex);
return ret;
}
@@ -763,6 +769,15 @@ static int psp_hw_start(struct psp_context *psp)
int ret;
if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
+ if (psp->kdb_bin_size &&
+ (psp->funcs->bootloader_load_kdb != NULL)) {
+ ret = psp_bootloader_load_kdb(psp);
+ if (ret) {
+ DRM_ERROR("PSP load kdb failed!\n");
+ return ret;
+ }
+ }
+
ret = psp_bootloader_load_sysdrv(psp);
if (ret) {
DRM_ERROR("PSP load sysdrv failed!\n");
@@ -1188,10 +1203,16 @@ failed:
int psp_gpu_reset(struct amdgpu_device *adev)
{
+ int ret;
+
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
- return psp_mode1_reset(&adev->psp);
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_mode1_reset(&adev->psp);
+ mutex_unlock(&adev->psp.mutex);
+
+ return ret;
}
int psp_rlc_autoload_start(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 6039acc84346..e0fc2a790e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -42,6 +42,12 @@ struct psp_context;
struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
+enum psp_bootloader_cmd {
+ PSP_BL__LOAD_SYSDRV = 0x10000,
+ PSP_BL__LOAD_SOSDRV = 0x20000,
+ PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+};
+
enum psp_ring_type
{
PSP_RING_TYPE__INVALID = 0,
@@ -73,6 +79,7 @@ enum psp_reg_prog_id {
struct psp_funcs
{
int (*init_microcode)(struct psp_context *psp);
+ int (*bootloader_load_kdb)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
@@ -156,9 +163,11 @@ struct psp_context
uint32_t sys_bin_size;
uint32_t sos_bin_size;
uint32_t toc_bin_size;
+ uint32_t kdb_bin_size;
uint8_t *sys_start_addr;
uint8_t *sos_start_addr;
uint8_t *toc_start_addr;
+ uint8_t *kdb_start_addr;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -201,6 +210,7 @@ struct psp_context
uint8_t *ta_ras_start_addr;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
+ struct mutex mutex;
};
struct amdgpu_psp_funcs {
@@ -219,6 +229,8 @@ struct amdgpu_psp_funcs {
(psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
+#define psp_bootloader_load_kdb(psp) \
+ ((psp)->funcs->bootloader_load_kdb ? (psp)->funcs->bootloader_load_kdb((psp)) : 0)
#define psp_bootloader_load_sysdrv(psp) \
((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c05644b9b96..e51b48ac48eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -391,6 +391,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
src->mem);
src_node_size = (src_mm->size << PAGE_SHIFT);
+ src_page_offset = 0;
} else {
src_node_start += cur_size;
src_page_offset = src_node_start & (PAGE_SIZE - 1);
@@ -400,6 +401,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
dst->mem);
dst_node_size = (dst_mm->size << PAGE_SHIFT);
+ dst_page_offset = 0;
} else {
dst_node_start += cur_size;
dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
@@ -487,6 +489,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
+ pr_err("Failed to find GTT space for blit from VRAM\n");
return r;
}
@@ -545,6 +548,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
+ pr_err("Failed to find GTT space for blit to VRAM\n");
return r;
}
@@ -565,6 +569,30 @@ out_cleanup:
}
/**
+ * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_mem_visible(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_mm_node *nodes = mem->mm_node;
+
+ if (mem->mem_type == TTM_PL_SYSTEM ||
+ mem->mem_type == TTM_PL_TT)
+ return true;
+ if (mem->mem_type != TTM_PL_VRAM)
+ return false;
+
+ /* ttm_mem_reg_ioremap only supports contiguous memory */
+ if (nodes->size != mem->num_pages)
+ return false;
+
+ return ((nodes->start + nodes->size) << PAGE_SHIFT)
+ <= adev->gmc.visible_vram_size;
+}
+
+/**
* amdgpu_bo_move - Move a buffer object to a new memory location
*
* Called by ttm_bo_handle_move_mem()
@@ -608,8 +636,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return 0;
}
- if (!adev->mman.buffer_funcs_enabled)
+ if (!adev->mman.buffer_funcs_enabled) {
+ r = -ENODEV;
goto memcpy;
+ }
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
@@ -624,10 +654,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, ctx, new_mem);
- if (r) {
+ /* Check that all memory is CPU accessible */
+ if (!amdgpu_mem_visible(adev, old_mem) ||
+ !amdgpu_mem_visible(adev, new_mem)) {
+ pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
+
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
+ if (r)
+ return r;
}
if (bo->type == ttm_bo_type_device &&
@@ -2059,9 +2095,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
while (num_pages) {
- uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+ uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- num_loops += DIV_ROUND_UP(byte_count, max_bytes);
+ num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_pages -= mm_node->size;
++mm_node;
}
@@ -2087,12 +2123,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
mm_node = bo->tbo.mem.mm_node;
while (num_pages) {
- uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+ uint64_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) {
- uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+ uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
+ max_bytes);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
dst_addr, cur_size_in_bytes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index c352a519ddd4..bfaa0eac3213 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -262,6 +262,12 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes));
DRM_DEBUG("toc_size_bytes: %u\n",
le32_to_cpu(psp_hdr_v1_1->toc_size_bytes));
+ DRM_DEBUG("kdb_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->kdb_header_version));
+ DRM_DEBUG("kdb_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes));
+ DRM_DEBUG("kdb_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes));
}
} else {
DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index f46944453c6e..c1fb6dc86440 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -85,6 +85,9 @@ struct psp_firmware_header_v1_1 {
uint32_t toc_header_version;
uint32_t toc_offset_bytes;
uint32_t toc_size_bytes;
+ uint32_t kdb_header_version;
+ uint32_t kdb_offset_bytes;
+ uint32_t kdb_size_bytes;
};
/* version_major=1, version_minor=0 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 07a7e3820b7b..59dd204498c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -390,7 +390,8 @@ static uint32_t parse_clk(char *buf, bool min)
if (!ptr)
break;
ptr+=2;
- clk = simple_strtoul(ptr, NULL, 10);
+ if (kstrtou32(ptr, 10, &clk))
+ return 0;
} while (!min);
return clk * 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ee41d5592c51..1675d5837c3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -109,6 +109,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
/* Pending on emulation bring up */
};
+#define DEFAULT_SH_MEM_CONFIG \
+ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
+ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+ (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
+
+
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
@@ -995,6 +1002,12 @@ static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
+static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q, u32 vm)
+ {
+ nv_grbm_select(adev, me, pipe, q, vm);
+ }
+
static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
@@ -1002,6 +1015,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
.read_wave_data = &gfx_v10_0_read_wave_data,
.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1408,7 +1422,6 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
- uint32_t sh_mem_config;
uint32_t sh_mem_bases;
/*
@@ -1419,15 +1432,11 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
*/
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
- sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
-
mutex_lock(&adev->srbm_mutex);
for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
nv_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
}
nv_grbm_select(adev, 0, 0, 0, 0);
@@ -1520,17 +1529,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
nv_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- if (i == 0) {
- tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
- SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_MODE, 0);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
- } else {
- tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
- SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_MODE, 0);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
+ if (i != 0) {
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 789e900905e9..7f0a63628c43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3043,7 +3043,7 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q)
+ u32 me, u32 pipe, u32 q, u32 vm)
{
DRM_INFO("Not implemented\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 341b5024e598..0db9f488da7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4169,9 +4169,9 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q)
+ u32 me, u32 pipe, u32 q, u32 vm)
{
- cik_srbm_select(adev, me, pipe, q, 0);
+ cik_srbm_select(adev, me, pipe, q, vm);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 032e76dbc51f..5f401b41ef7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3436,9 +3436,9 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
}
static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q)
+ u32 me, u32 pipe, u32 q, u32 vm)
{
- vi_srbm_select(adev, me, pipe, q, 0);
+ vi_srbm_select(adev, me, pipe, q, vm);
}
static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5ba332376710..f4c4eea62526 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1313,9 +1313,9 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q)
+ u32 me, u32 pipe, u32 q, u32 vm)
{
- soc15_grbm_select(adev, me, pipe, q, 0);
+ soc15_grbm_select(adev, me, pipe, q, vm);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
@@ -1942,11 +1942,15 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
+ !!amdgpu_noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
+ !!amdgpu_noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 9f0f189fc111..15986748f59f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -236,7 +236,8 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b7de60a15623..d605b4963f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -215,7 +215,8 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
adev->vm_manager.block_size - 9);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 05d1d448c8f5..dc5ce03034d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -265,7 +265,8 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 37a1a318ae63..0f9549f19ade 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -205,7 +205,8 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
adev->vm_manager.block_size - 9);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
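Note: the four hub hunks above and the gfx_v9_0 SH_MEM_CONFIG hunk further up all key the same behavior off the new global amdgpu_noretry module parameter. A minimal sketch of the intended mapping, assuming the REG_SET_FIELD macro and field names already used in these hunks (the register prefix is VM_/GCVM_/MMVM_ depending on the hub); the helper name is illustrative only and not part of the patch:

/* Illustrative sketch only -- not part of the patch. */
static void example_apply_noretry(uint32_t *vm_cntl, uint32_t *sh_mem_config)
{
	/* Hub side: keep retry faults enabled unless the user passed
	 * amdgpu.noretry=1, in which case a no-retry XNACK is sent on
	 * fault to suppress VM fault storms.
	 */
	*vm_cntl = REG_SET_FIELD(*vm_cntl, VM_CONTEXT1_CNTL,
				 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				 !amdgpu_noretry);

	/* Shader side: the field has the opposite polarity, so retry is
	 * disabled when amdgpu_noretry is set.
	 */
	*sh_mem_config = REG_SET_FIELD(*sh_mem_config, SH_MEM_CONFIG,
				       RETRY_DISABLE, !!amdgpu_noretry);
}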
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ad430cbcd72f..662612f89c70 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -392,8 +392,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-# warning "Enable CONFIG_DRM_AMD_DC for display support on navi."
#endif
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 61744e2d16fb..41b72588adcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -103,6 +103,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
}
break;
default:
@@ -177,6 +180,48 @@ out:
return err;
}
+static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ /* Check tOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ return 0;
+ }
+
+ /* Wait for the bootloader to signify that it is ready, i.e. bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP KDB binary to memory */
+ memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
+
+ /* Provide the KDB image to the bootloader */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* Wait for the bootloader to signify that it is ready, i.e. bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
{
int ret;
@@ -190,7 +235,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
if (sol_reg) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
}
@@ -208,7 +253,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = 1 << 16;
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
@@ -249,7 +294,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = 2 << 16;
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
@@ -822,6 +867,7 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
+ .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
.ring_init = psp_v11_0_ring_init,
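Note: the new psp_v11_0_bootloader_load_kdb() above follows the same C2PMSG handshake as the existing sysdrv/sOS loaders; only the command code differs, and the symbolic PSP_BL__LOAD_* values replace the old 1 << 16 / 2 << 16 magic numbers. A minimal sketch of the shared pattern, assuming psp_wait_for() and the register helpers used in these hunks; the wrapper name and its 'cmd' parameter are illustrative and not part of the patch:

/* Illustrative sketch only -- not part of the patch. */
static int example_psp_stage_fw(struct psp_context *psp,
				const uint8_t *bin, uint32_t size,
				uint32_t cmd)
{
	int ret;

	/* Bootloader signals readiness via bit 31 of C2PMSG_35. */
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
			   0x80000000, 0x80000000, false);
	if (ret)
		return ret;

	/* Stage the image in the 1 MB PSP private buffer. */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, bin, size);

	/* Hand the buffer address (in 1 MB units) and the command to the bootloader. */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
		     (uint32_t)(psp->fw_pri_mc_addr >> 20));
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, cmd);

	/* Wait for the bootloader to consume the image. */
	return psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
			    0x80000000, 0x80000000, false);
}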
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 2ea772692037..019c47feee42 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -155,7 +155,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = 1 << 16;
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
@@ -218,7 +218,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = 2 << 16;
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 87152d8ef0df..23265414d448 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -649,8 +649,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
@@ -671,8 +669,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
@@ -717,9 +713,15 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
return;
/* Set the 2 events that we wish to watch, defined above */
- /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+ /* Reg 40 is # received msgs */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
- perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+ /* Pre-VG20, Reg 104 is # of posted requests sent. On VG20 it's 108 */
+ if (adev->asic_type == CHIP_VEGA20)
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
+ EVENT1_SEL, 108);
+ else
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
+ EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d40ed1a828dd..6575ddcfcf00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -59,7 +59,6 @@
#include "vid.h"
#include "vi.h"
-#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
deleted file mode 100644
index c43e03fddfba..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __VI_DPM_H__
-#define __VI_DPM_H__
-
-extern const struct amd_ip_funcs cz_dpm_ip_funcs;
-int cz_smu_init(struct amdgpu_device *adev);
-int cz_smu_start(struct amdgpu_device *adev);
-int cz_smu_fini(struct amdgpu_device *adev);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 584748c23f14..e6a4288bfaa6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1157,12 +1157,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
- /*
- * Eviction state logic: mark all queues as evicted, even ones
- * not currently active. Restoring inactive queues later only
- * updates the is_evicted flag but is a no-op otherwise.
- */
- q->properties.is_evicted = !!qpd->evicted;
+
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
@@ -1173,9 +1168,16 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
+
+ dqm_lock(dqm);
+ /*
+ * Eviction state logic: mark all queues as evicted, even ones
+ * not currently active. Restoring inactive queues later only
+ * updates the is_evicted flag but is a no-op otherwise.
+ */
+ q->properties.is_evicted = !!qpd->evicted;
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- dqm_lock(dqm);
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index e9fe39382371..95a82ac455f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -61,7 +61,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (noretry &&
+ if (amdgpu_noretry &&
!dqm->dev->device_info->needs_iommu_device)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 08a0feb9d0a0..3933fb6a371e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -157,7 +157,7 @@ extern int ignore_crat;
/*
* Set sh_mem_config.retry_disable on Vega10
*/
-extern int noretry;
+extern int amdgpu_noretry;
/*
* Halt if HWS hang is detected
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index da0958625861..7e6c3ee82f5b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -150,6 +150,9 @@ void pqm_uninit(struct process_queue_manager *pqm)
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+ if (pqn->q && pqn->q->gws)
+ amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
+ pqn->q->gws);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
kfree(pqn);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 7073cfcf04e8..f954bf61af28 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -5,6 +5,7 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
+ select SND_HDA_COMPONENT if SND_HDA_CORE
select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0242d693f4f6..4a29f72334d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -56,6 +56,7 @@
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
+#include <linux/component.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
@@ -65,6 +66,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_audio_component.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -508,6 +510,139 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
+static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+ int ret = 0;
+
+ *enabled = false;
+
+ mutex_lock(&adev->dm.audio_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->audio_inst != port)
+ continue;
+
+ *enabled = true;
+ ret = drm_eld_size(connector->eld);
+ memcpy(buf, connector->eld, min(max_bytes, ret));
+
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ mutex_unlock(&adev->dm.audio_lock);
+
+ DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
+ .get_eld = amdgpu_dm_audio_component_get_eld,
+};
+
+static int amdgpu_dm_audio_component_bind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = &amdgpu_dm_audio_component_ops;
+ acomp->dev = kdev;
+ adev->dm.audio_component = acomp;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_component_unbind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ adev->dm.audio_component = NULL;
+}
+
+static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
+ .bind = amdgpu_dm_audio_component_bind,
+ .unbind = amdgpu_dm_audio_component_unbind,
+};
+
+static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
+{
+ int i, ret;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].id =
+ adev->dm.dc->res_pool->audios[i]->inst;
+ adev->mode_info.audio.pin[i].offset = 0;
+ }
+
+ ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ if (ret < 0)
+ return ret;
+
+ adev->dm.audio_registered = true;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_audio)
+ return;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+
+ if (adev->dm.audio_registered) {
+ component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ adev->dm.audio_registered = false;
+ }
+
+ /* TODO: Disable audio? */
+
+ adev->mode_info.audio.enabled = false;
+}
+
+void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+{
+ struct drm_audio_component *acomp = adev->dm.audio_component;
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
+
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ pin, -1);
+ }
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
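Note: once amdgpu_dm_audio_component_bind() above has handed the ops to the drm_audio_component, the paired HDA driver can pull an ELD through the get_eld() callback defined in this hunk. A hedged consumer-side sketch, using only the drm_audio_component fields already referenced here; the function name and buffer size are assumptions for illustration:

/* Illustrative sketch only -- not part of the patch. */
static int example_pull_eld(struct drm_audio_component *acomp, int port)
{
	unsigned char eld[128];		/* ELD buffer; size is an assumption */
	bool enabled = false;
	int size;

	if (!acomp->ops || !acomp->ops->get_eld)
		return -ENODEV;

	/* Routes to amdgpu_dm_audio_component_get_eld(): the port is matched
	 * against aconnector->audio_inst under dm.audio_lock.
	 */
	size = acomp->ops->get_eld(acomp->dev, port, -1, &enabled,
				   eld, sizeof(eld));

	return enabled ? size : -ENODEV;
}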
@@ -518,6 +653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
memset(&init_data, 0, sizeof(init_data));
mutex_init(&adev->dm.dc_lock);
+ mutex_init(&adev->dm.audio_lock);
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -621,6 +757,8 @@ error:
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
+ amdgpu_dm_audio_fini(adev);
+
amdgpu_dm_destroy_drm_device(&adev->dm);
/* DC Destroy TODO: Replace destroy DAL */
@@ -641,6 +779,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.freesync_module = NULL;
}
+ mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
return;
@@ -1888,6 +2027,10 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_dm_audio_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -4834,6 +4977,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.stereo_allowed = false;
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ aconnector->audio_inst = -1;
mutex_init(&aconnector->hpd_lock);
/*
@@ -5728,6 +5872,81 @@ cleanup:
kfree(bundle);
}
+static void amdgpu_dm_commit_audio(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *new_dm_crtc_state;
+ const struct dc_stream_status *status;
+ int i, inst;
+
+ /* Notify audio device removals. */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ if (old_con_state->crtc != new_con_state->crtc) {
+ /* CRTC changes require notification. */
+ goto notify;
+ }
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ notify:
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = aconnector->audio_inst;
+ aconnector->audio_inst = -1;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+
+ /* Notify audio device additions. */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (!new_dm_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(new_dm_crtc_state->stream);
+ if (!status)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = status->audio_inst;
+ aconnector->audio_inst = inst;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+}
+
/*
* Enable interrupts on CRTCs that are newly active, undergone
* a modeset, or have active planes again.
@@ -6106,6 +6325,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* Enable interrupts for CRTCs going from 0 to n active planes. */
amdgpu_dm_enable_crtc_interrupts(dev, state, false);
+ /* Update audio instances for each connector. */
+ amdgpu_dm_commit_audio(dev, state);
+
/*
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index baca5dc22b92..b89cbbfcc0e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -144,6 +144,28 @@ struct amdgpu_display_manager {
struct mutex dc_lock;
/**
+ * @audio_lock:
+ *
+ * Guards access to audio instance changes.
+ */
+ struct mutex audio_lock;
+
+ /**
+ * @audio_component:
+ *
+ * Used to notify the sound driver of ELD changes.
+ */
+ struct drm_audio_component *audio_component;
+
+ /**
+ * @audio_registered:
+ *
+ * True if the audio component has been registered
+ * successfully, false otherwise.
+ */
+ bool audio_registered;
+
+ /**
* @irq_handler_list_low_tab:
*
* Low priority IRQ handler table.
@@ -254,6 +276,9 @@ struct amdgpu_dm_connector {
int max_vfreq ;
int pixel_clock_mhz;
+ /* Audio instance - protected by audio_lock. */
+ int audio_inst;
+
struct mutex hpd_lock;
bool fake_enable;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index eac09bfe3be2..592fa499c9f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -308,7 +308,8 @@ static void pp_to_dc_clock_levels_with_voltage(
DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
for (i = 0; i < clk_level_info->num_levels; i++) {
- DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
+ pp_clks->data[i].voltage_in_mv);
clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
}
@@ -910,11 +911,11 @@ void dm_pp_get_funcs(
/* todo: set_pme_wa_enable causes the 4k@60Hz display to not light up */
funcs->nv_funcs.set_pme_wa_enable = NULL;
/* todo: debug warning message */
- funcs->nv_funcs.set_hard_min_uclk_by_freq = NULL;
+ funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
/* todo: compare data with the Windows driver */
- funcs->nv_funcs.get_maximum_sustainable_clocks = NULL;
+ funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
/* todo: compare data with the Windows driver */
- funcs->nv_funcs.get_uclk_dpm_states = NULL;
+ funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
break;
#endif
default:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 173fcfb5abe6..51a78283a86d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -175,32 +175,22 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
if (res_pool != NULL) {
struct dc_firmware_info fw_info = { { 0 } };
- if (dc->ctx->dc_bios->funcs->get_firmware_info(
- dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- // On FPGA these dividers are currently not configured by GDB
- res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
- } else if (res_pool->dccg && res_pool->hubbub) {
- // If DCCG reference frequency cannot be determined (usually means not set to xtalin) then this is a critical error
- // as this value must be known for DCHUB programming
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
-
- // Similarly, if DCHUB reference frequency cannot be determined, then it is also a critical error
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- // Not all ASICs have DCCG sw component
- res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
- }
- } else
- ASSERT_CRITICAL(false);
+ if (dc->ctx->dc_bios->funcs->get_firmware_info(dc->ctx->dc_bios,
+ &fw_info) == BP_RESULT_OK) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ fw_info.pll_info.crystal_frequency;
+ /* Initialize with firmware data first; not all
+ * ASICs have a DCCG SW component. FPGA or
+ * simulation needs initialization of
+ * dccg_ref_clock_inKhz and dchub_ref_clock_inKhz
+ * with xtalin_clock_inKhz.
+ */
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ } else
+ ASSERT_CRITICAL(false);
}
return res_pool;
@@ -2011,6 +2001,9 @@ enum dc_status resource_map_pool_resources(
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ context->stream_status[i].audio_inst =
+ pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
+
return DC_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e253a5c591f6..0fa1c26bc20d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -42,6 +42,7 @@ struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;
int plane_count;
+ int audio_inst;
struct timing_sync_info timing_sync_info;
struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 1b68de27ba74..e9721a906592 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -10,7 +10,13 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DCN20 += dcn20_dsc.o
endif
-CFLAGS_dcn20_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align := -mstack-alignment=16
+endif
+
+CFLAGS_dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6925d25d2457..0b84a322b8a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -523,6 +523,7 @@ static void dcn20_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
struct dc_state *context = dc->current_state;
+ struct dc_firmware_info fw_info = { { 0 } };
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -546,6 +547,30 @@ static void dcn20_init_hw(struct dc *dc)
} else {
if (!dcb->funcs->is_accelerated_mode(dcb)) {
bios_golden_init(dc);
+ if (dc->ctx->dc_bios->funcs->get_firmware_info(
+ dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
+ res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (res_pool->dccg && res_pool->hubbub) {
+
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
+ }
+ } else
+ ASSERT_CRITICAL(false);
disable_vga(dc->hwseq);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 4e52df82c993..d200bc3cec71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2415,7 +2415,7 @@ struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
ASSERT(0);
if (!idle_pipe)
- return false;
+ return NULL;
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
@@ -2576,6 +2576,9 @@ static void cap_soc_clocks(
&& max_clocks.uClockInKhz != 0)
bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
+ // HACK: Force every uclk to max for now to "disable" uclk switching.
+ bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
+
if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
&& max_clocks.fabricClockInKhz != 0)
bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
@@ -2783,6 +2786,8 @@ static bool init_soc_bounding_box(struct dc *dc,
le32_to_cpu(bb->vmm_page_size_bytes);
dcn2_0_soc.dram_clock_change_latency_us =
fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
+ // HACK!! Lower uclock latency switch time so we don't switch
+ dcn2_0_soc.dram_clock_change_latency_us = 10;
dcn2_0_soc.writeback_dram_clock_change_latency_us =
fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
dcn2_0_soc.return_bus_width_bytes =
@@ -2824,6 +2829,7 @@ static bool init_soc_bounding_box(struct dc *dc,
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
unsigned int num_states = 0;
+ int i;
enum pp_smu_status status;
bool clock_limits_available = false;
bool uclk_states_available = false;
@@ -2845,6 +2851,10 @@ static bool init_soc_bounding_box(struct dc *dc,
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
+ // HACK: Use the max uclk_states value for all elements.
+ for (i = 0; i < num_states; i++)
+ uclk_states[i] = uclk_states[num_states - 1];
+
if (clock_limits_available && uclk_states_available && num_states)
update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states);
else if (clock_limits_available)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index c5d5b94e2604..e019cd9447e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,10 +1,18 @@
#
# Makefile for the 'dsc' sub-component of DAL.
-CFLAGS_rc_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_rc_calc_dpi.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_codec_main_amd.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_dc_dsc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align := -mstack-alignment=16
+endif
+
+dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+
+CFLAGS_rc_calc.o := $(dsc_ccflags)
+CFLAGS_rc_calc_dpi.o := $(dsc_ccflags)
+CFLAGS_codec_main_amd.o := $(dsc_ccflags)
+CFLAGS_dc_dsc.o := $(dsc_ccflags)
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index d352b8d76365..a0a7211438f2 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -26,7 +26,7 @@
#include <drm/amd_asic_type.h>
-#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
+#define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */
/*
* Chip flags
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 3093917adc2d..f1565c448de5 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -69,6 +69,9 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min <= 0 && max <= 0)
return -EINVAL;
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0)
return clk_id;
@@ -102,6 +105,9 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min <= 0 && max <= 0)
return -EINVAL;
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0)
return clk_id;
@@ -135,23 +141,8 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max)
return -EINVAL;
- switch (clk_type) {
- case SMU_UCLK:
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- pr_warn("uclk dpm is not enabled\n");
- return 0;
- }
- break;
- case SMU_GFXCLK:
- case SMU_SCLK:
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
- pr_warn("gfxclk dpm is not enabled\n");
- return 0;
- }
- break;
- default:
- break;
- }
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
@@ -194,6 +185,9 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
if (!value)
return -EINVAL;
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0)
return clk_id;
@@ -222,6 +216,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_feature_is_enabled(smu, feature_id)) {
+ pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id);
+ return false;
+ }
+
+ return true;
+}
+
+
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -300,7 +323,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
return ret;
}
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -327,7 +350,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_id);
+ table_id | ((argument & 0xFFFF) << 16));
if (ret)
return ret;
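Note: with the hunk above, callers such as navi10_ppt.c and vega20_ppt.c further down no longer OR a workload index into the table id themselves; smu_update_table() now packs the extra argument into the upper half of the message parameter. A small sketch of the encoding; the helper name is an assumption for illustration, not part of the patch:

/* Illustrative sketch only -- not part of the patch. */
static inline uint32_t example_table_msg_param(int table_id, int argument)
{
	/* Low 16 bits: exported table index (SMU_TABLE_*).
	 * High 16 bits: optional caller argument, e.g. a
	 * WORKLOAD_PPLIB_*_BIT index for the activity monitor
	 * coefficient table, or 0 when unused.
	 */
	return (uint32_t)table_id | (((uint32_t)argument & 0xFFFF) << 16);
}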
@@ -1372,10 +1395,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = smu_unforce_dpm_levels(smu);
break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1385,8 +1408,9 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -1441,17 +1465,16 @@ int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ enum amd_dpm_forced_level level;
if (!smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
- if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
- smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
- }
+ level = smu_dpm_ctx->dpm_level;
mutex_unlock(&(smu->mutex));
- return smu_dpm_ctx->dpm_level;
+ return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 1cd5a8b5cdc1..b760f95e7fa7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1067,8 +1067,6 @@ static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
"Failed to allocate hwmgr->pptable!", return -ENOMEM);
- memset(hwmgr->pptable, 0x00, sizeof(struct phm_ppt_v1_information));
-
powerplay_table = get_powerplay_table(hwmgr);
PP_ASSERT_WITH_CODE((NULL != powerplay_table),
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index c97324ef7db2..1af992fb0bde 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -937,7 +937,7 @@ extern int smu_feature_is_supported(struct smu_context *smu,
extern int smu_feature_set_supported(struct smu_context *smu,
enum smu_feature_mask mask, bool enable);
-int smu_update_table(struct smu_context *smu, uint32_t table_index,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu);
bool is_support_sw_smu(struct amdgpu_device *adev);
@@ -973,5 +973,6 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 195c4ae67058..755d51f9c6a9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x12
+#define SMU11_DRIVER_IF_VERSION 0x13
#define PPTABLE_V20_SMU_VERSION 3
@@ -615,6 +615,7 @@ typedef struct {
uint16_t UclkAverageLpfTau;
uint16_t GfxActivityLpfTau;
uint16_t UclkActivityLpfTau;
+ uint16_t SocketPowerLpfTau;
uint32_t MmHubPadding[8];
@@ -665,7 +666,8 @@ typedef struct {
uint32_t ThrottlerStatus ;
uint8_t LinkDpmLevel;
- uint8_t Padding[3];
+ uint16_t AverageSocketPower;
+ uint8_t Padding;
uint32_t MmHubPadding[7];
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 880fe0930d9e..2dae0ae0829e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -331,7 +331,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_ACDC_BIT);
+ | FEATURE_MASK(FEATURE_ACDC_BIT)
+ | FEATURE_MASK(FEATURE_GFX_SS_BIT)
+ | FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
@@ -339,8 +342,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_SS_BIT)
- | FEATURE_MASK(FEATURE_GFXOFF_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
/* TODO: remove it once fw fixes the bug */
*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
}
@@ -465,9 +467,6 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- *(uint64_t *)smc_pptable->FeaturesToRun |= FEATURE_MASK(FEATURE_GFX_SS_BIT)
- | FEATURE_MASK(FEATURE_GFXOFF_BIT);
-
/* TODO: remove it once SMU fw fixes it */
smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
}
@@ -614,7 +613,7 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
memset(&metrics, 0, sizeof(metrics));
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics, false);
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
if (ret)
return ret;
@@ -709,7 +708,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
static int navi10_populate_umd_state_clk(struct smu_context *smu)
{
int ret = 0;
- uint32_t min_sclk_freq = 0;
+ uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
if (ret)
@@ -717,6 +716,12 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
smu->pstate_sclk = min_sclk_freq * 100;
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ if (ret)
+ return ret;
+
+ smu->pstate_mclk = min_mclk_freq * 100;
+
return ret;
}
@@ -827,27 +832,20 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
}
-static int navi10_unforce_dpm_levels(struct smu_context *smu) {
-
+static int navi10_unforce_dpm_levels(struct smu_context *smu)
+{
int ret = 0, i = 0;
uint32_t min_freq, max_freq;
enum smu_clk_type clk_type;
- struct clk_feature_map {
- enum smu_clk_type clk_type;
- uint32_t feature;
- } clk_feature_map[] = {
- {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
- {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
- {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
};
- for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
- if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
- continue;
-
- clk_type = clk_feature_map[i].clk_type;
-
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
if (ret)
return ret;
@@ -868,7 +866,7 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics,
false);
if (ret)
return ret;
@@ -890,7 +888,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
msleep(1);
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
return ret;
@@ -931,7 +929,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
memset(&metrics, 0, sizeof(metrics));
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
return ret;
@@ -997,7 +995,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
result = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
if (result) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1070,7 +1068,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1114,7 +1112,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
pr_err("[%s] Failed to set activity monitor!", __func__);
@@ -1157,14 +1155,14 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
if (ret)
return ret;
- *sclk_mask = level_count - 1;
+ *mclk_mask = level_count - 1;
}
if(soc_mask) {
ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
if (ret)
return ret;
- *sclk_mask = level_count - 1;
+ *soc_mask = level_count - 1;
}
}
@@ -1280,7 +1278,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu,
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics, false);
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index a87b86ae2cc5..95c7c4dae523 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -261,14 +261,20 @@ static int smu_v11_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
-
+ /*
+ * 1. An if_version mismatch is not critical, as our fw is designed
+ * to be backward compatible.
+ * 2. New fw usually brings some optimizations, but those are visible
+ * only with the paired driver.
+ * Considering the above, we just leave the user a warning message
+ * instead of halting driver loading.
+ */
if (if_version != smu->smc_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
- pr_err("SMU driver if version not matched\n");
- ret = -EINVAL;
+ pr_warn("SMU driver if version not matched\n");
}
return ret;
@@ -703,7 +709,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
- ret = smu_update_table(smu, SMU_TABLE_PPTABLE,
+ ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
table_context->driver_pptable, true);
return ret;
@@ -722,7 +728,7 @@ static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
if (!table->cpu_addr)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, table->cpu_addr,
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
true);
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 6c81cb91ebae..15590fd86ef4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2705,8 +2705,6 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 9e0dd56fe7c5..732005c03a82 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -2634,8 +2634,6 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index ba3394303b9c..f19bac7ef7ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -3117,8 +3117,6 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (!result)
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index a76a22a18eb4..bb9bb09cfc7a 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -319,7 +319,7 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- if (smu_table->metrics_table)
+ if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
@@ -441,7 +441,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
{
ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
struct smu_table_context *table_context = &smu->smu_table;
- int ret;
if (!table_context->power_play_table)
return -EINVAL;
@@ -455,9 +454,7 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
- ret = vega20_setup_od8_information(smu);
-
- return ret;
+ return 0;
}
static int vega20_append_powerplay_table(struct smu_context *smu)
@@ -992,7 +989,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
break;
case SMU_SOCCLK:
- ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now);
+ ret = smu_get_current_clk_freq(smu, SMU_SOCCLK, &now);
if (ret) {
pr_err("Attempt to get current socclk Failed!");
return ret;
@@ -1013,7 +1010,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
break;
case SMU_FCLK:
- ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now);
+ ret = smu_get_current_clk_freq(smu, SMU_FCLK, &now);
if (ret) {
pr_err("Attempt to get current fclk Failed!");
return ret;
@@ -1028,7 +1025,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
break;
case SMU_DCEFCLK:
- ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now);
+ ret = smu_get_current_clk_freq(smu, SMU_DCEFCLK, &now);
if (ret) {
pr_err("Attempt to get current dcefclk Failed!");
return ret;
@@ -1502,11 +1499,17 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu)
od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL);
- if (od8_settings)
+ if (!od8_settings)
return -ENOMEM;
smu->od_settings = (void *)od8_settings;
+ ret = vega20_setup_od8_information(smu);
+ if (ret) {
+ pr_err("Retrieve board OD limits failed!\n");
+ return ret;
+ }
+
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
od8_settings->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
@@ -1677,7 +1680,7 @@ static int vega20_get_metrics_table(struct smu_context *smu,
int ret = 0;
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
@@ -1706,7 +1709,7 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
if (!table_context->overdrive_table)
return -ENOMEM;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export over drive table!\n");
@@ -1718,7 +1721,7 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
return ret;
}
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import over drive table!\n");
@@ -1802,7 +1805,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
result = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
if (result) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1888,7 +1891,7 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1943,7 +1946,7 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
}
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
pr_err("[%s] Failed to set activity monitor!", __func__);
@@ -2492,7 +2495,7 @@ static int vega20_update_od8_settings(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
int ret;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export over drive table!\n");
@@ -2503,7 +2506,7 @@ static int vega20_update_od8_settings(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import over drive table!\n");
@@ -2767,7 +2770,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export over drive table!\n");
return ret;
@@ -2776,7 +2779,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_COMMIT_DPM_TABLE:
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import over drive table!\n");
return ret;
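
The vega20 hunks above all make the same interface change: the per-call selector (a workload index for the activity-monitor table, 0 for the others) is no longer OR-ed into the table id as `SMU_TABLE_... | workload_type << 16` but passed as a separate argument. A minimal sketch of the new calling convention, with the prototype inferred from the updated call sites (the parameter types are an assumption, not a quote of the driver header):

/* prototype implied by the updated callers above (types assumed) */
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
		     int argument, void *table_data, bool drv2smu);

/* before: every caller packed the selector into the table id */
ret = smu_update_table(smu,
		       SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
		       (void *)(&activity_monitor), false);

/* after: the selector is a separate argument; combining the two values,
 * if the firmware still wants them packed, is the callee's job
 */
ret = smu_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
		       (void *)(&activity_monitor), false);
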
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 3f222f464eb2..f4400788ab94 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -454,24 +454,6 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
}
-static int
-komeda_crtc_atomic_get_property(struct drm_crtc *crtc,
- const struct drm_crtc_state *state,
- struct drm_property *property, uint64_t *val)
-{
- struct komeda_crtc *kcrtc = to_kcrtc(crtc);
- struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
-
- if (property == kcrtc->clock_ratio_property) {
- *val = kcrtc_st->clock_ratio;
- } else {
- DRM_DEBUG_DRIVER("Unknown property %s\n", property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct drm_crtc_funcs komeda_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
@@ -482,7 +464,6 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = {
.atomic_destroy_state = komeda_crtc_atomic_destroy_state,
.enable_vblank = komeda_crtc_vblank_enable,
.disable_vblank = komeda_crtc_vblank_disable,
- .atomic_get_property = komeda_crtc_atomic_get_property,
};
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
@@ -518,42 +499,6 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
return 0;
}
-static int komeda_crtc_create_clock_ratio_property(struct komeda_crtc *kcrtc)
-{
- struct drm_crtc *crtc = &kcrtc->base;
- struct drm_property *prop;
-
- prop = drm_property_create_range(crtc->dev, DRM_MODE_PROP_ATOMIC,
- "CLOCK_RATIO", 0, U64_MAX);
- if (!prop)
- return -ENOMEM;
-
- drm_object_attach_property(&crtc->base, prop, 0);
- kcrtc->clock_ratio_property = prop;
-
- return 0;
-}
-
-static int komeda_crtc_create_slave_planes_property(struct komeda_crtc *kcrtc)
-{
- struct drm_crtc *crtc = &kcrtc->base;
- struct drm_property *prop;
-
- if (kcrtc->slave_planes == 0)
- return 0;
-
- prop = drm_property_create_range(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
- "slave_planes", 0, U32_MAX);
- if (!prop)
- return -ENOMEM;
-
- drm_object_attach_property(&crtc->base, prop, kcrtc->slave_planes);
-
- kcrtc->slave_planes_property = prop;
-
- return 0;
-}
-
static struct drm_plane *
get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc)
{
@@ -590,14 +535,6 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
crtc->port = kcrtc->master->of_output_port;
- err = komeda_crtc_create_clock_ratio_property(kcrtc);
- if (err)
- return err;
-
- err = komeda_crtc_create_slave_planes_property(kcrtc);
- if (err)
- return err;
-
return err;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 219fa3f0c336..8c89fc245b83 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -33,11 +33,6 @@ struct komeda_plane {
* Layers with same capabilities.
*/
struct komeda_layer *layer;
-
- /** @prop_img_enhancement: for on/off image enhancement */
- struct drm_property *prop_img_enhancement;
- /** @prop_layer_split: for on/off layer_split */
- struct drm_property *prop_layer_split;
};
/**
@@ -52,11 +47,8 @@ struct komeda_plane_state {
/** @zlist_node: zorder list node */
struct list_head zlist_node;
- /* @img_enhancement: on/off image enhancement
- * @layer_split: on/off layer_split
- */
- u8 img_enhancement : 1,
- layer_split : 1;
+ /** @layer_split: on/off layer_split */
+ u8 layer_split : 1;
};
/**
@@ -94,12 +86,6 @@ struct komeda_crtc {
/** @disable_done: this flip_done is for tracing the disable */
struct completion *disable_done;
-
- /** @clock_ratio_property: property for ratio of (aclk << 32)/pxlclk */
- struct drm_property *clock_ratio_property;
-
- /** @slave_planes_property: property for slaves of the planes */
- struct drm_property *slave_planes_property;
};
/**
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index fc1b8613385e..a90bcbb3cb23 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -537,7 +537,8 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
-void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow,
+void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
+ struct komeda_data_flow_cfg *dflow,
struct drm_framebuffer *fb);
#endif /* _KOMEDA_PIPELINE_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 2b415ef2b7d3..950235af1e79 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -784,9 +784,11 @@ komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
return 0;
}
-void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow,
+void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
+ struct komeda_data_flow_cfg *dflow,
struct drm_framebuffer *fb)
{
+ struct komeda_scaler *scaler = layer->base.pipeline->scalers[0];
u32 w = dflow->in_w;
u32 h = dflow->in_h;
@@ -803,6 +805,17 @@ void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow,
dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
dflow->is_yuv = fb->format->is_yuv;
+
+ /* try to enable image enhancer if data flow is a 2x+ upscaling */
+ dflow->en_img_enhancement = dflow->out_w >= 2 * w ||
+ dflow->out_h >= 2 * h;
+
+ /* try to enable split if scaling exceeds the scaler's acceptable
+ * input/output range.
+ */
+ if (dflow->en_scaling && scaler)
+ dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
+ !in_range(&scaler->hsize, dflow->out_w);
}
static bool merger_is_available(struct komeda_pipeline *pipe,
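
With the userspace properties gone, the driver now decides image enhancement and layer split by itself: enhancement for a 2x-or-more upscale, split when the scaler's horizontal size range cannot cover the input or output width. A standalone sketch of that decision, where the range type and in_range() helper are simplified stand-ins for the komeda ones and rotation handling is omitted:

#include <stdbool.h>

/* simplified stand-ins for the komeda pipeline types */
struct range { unsigned int start, end; };

static bool in_range(const struct range *r, unsigned int v)
{
	return v >= r->start && v <= r->end;
}

struct flow_cfg {
	unsigned int in_w, in_h, out_w, out_h;
	bool en_scaling, en_img_enhancement, en_split;
};

static void complete_flow_cfg(struct flow_cfg *f, const struct range *scaler_hsize)
{
	f->en_scaling = (f->in_w != f->out_w) || (f->in_h != f->out_h);

	/* image enhancer only for a 2x-or-more upscale */
	f->en_img_enhancement = f->out_w >= 2 * f->in_w ||
				f->out_h >= 2 * f->in_h;

	/* split when one scaler cannot cover the required width range */
	f->en_split = f->en_scaling && scaler_hsize &&
		      (!in_range(scaler_hsize, f->in_w) ||
		       !in_range(scaler_hsize, f->out_w));
}
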
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 04b122f28fb6..c095af154216 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -18,7 +18,6 @@ komeda_plane_init_data_flow(struct drm_plane_state *st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_plane *kplane = to_kplane(st->plane);
- struct komeda_plane_state *kplane_st = to_kplane_st(st);
struct drm_framebuffer *fb = st->fb;
const struct komeda_format_caps *caps = to_kfb(fb)->format_caps;
struct komeda_pipeline *pipe = kplane->layer->base.pipeline;
@@ -57,10 +56,7 @@ komeda_plane_init_data_flow(struct drm_plane_state *st,
return -EINVAL;
}
- dflow->en_img_enhancement = !!kplane_st->img_enhancement;
- dflow->en_split = !!kplane_st->layer_split;
-
- komeda_complete_data_flow_cfg(dflow, fb);
+ komeda_complete_data_flow_cfg(kplane->layer, dflow, fb);
return 0;
}
@@ -175,8 +171,6 @@ komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
old = to_kplane_st(plane->state);
- new->img_enhancement = old->img_enhancement;
-
return &new->base;
}
@@ -188,44 +182,6 @@ komeda_plane_atomic_destroy_state(struct drm_plane *plane,
kfree(to_kplane_st(state));
}
-static int
-komeda_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct komeda_plane *kplane = to_kplane(plane);
- struct komeda_plane_state *st = to_kplane_st(state);
-
- if (property == kplane->prop_img_enhancement)
- *val = st->img_enhancement;
- else if (property == kplane->prop_layer_split)
- *val = st->layer_split;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int
-komeda_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct komeda_plane *kplane = to_kplane(plane);
- struct komeda_plane_state *st = to_kplane_st(state);
-
- if (property == kplane->prop_img_enhancement)
- st->img_enhancement = !!val;
- else if (property == kplane->prop_layer_split)
- st->layer_split = !!val;
- else
- return -EINVAL;
-
- return 0;
-}
-
static bool
komeda_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
@@ -245,43 +201,9 @@ static const struct drm_plane_funcs komeda_plane_funcs = {
.reset = komeda_plane_reset,
.atomic_duplicate_state = komeda_plane_atomic_duplicate_state,
.atomic_destroy_state = komeda_plane_atomic_destroy_state,
- .atomic_get_property = komeda_plane_atomic_get_property,
- .atomic_set_property = komeda_plane_atomic_set_property,
.format_mod_supported = komeda_plane_format_mod_supported,
};
-static int
-komeda_plane_create_layer_properties(struct komeda_plane *kplane,
- struct komeda_layer *layer)
-{
- struct drm_device *drm = kplane->base.dev;
- struct drm_plane *plane = &kplane->base;
- struct drm_property *prop = NULL;
-
- /* property: layer image_enhancement */
- if (layer->base.supported_outputs & KOMEDA_PIPELINE_SCALERS) {
- prop = drm_property_create_bool(drm, DRM_MODE_PROP_ATOMIC,
- "img_enhancement");
- if (!prop)
- return -ENOMEM;
-
- drm_object_attach_property(&plane->base, prop, 0);
- kplane->prop_img_enhancement = prop;
- }
-
- /* property: layer split */
- if (layer->right) {
- prop = drm_property_create_bool(drm, DRM_MODE_PROP_ATOMIC,
- "layer_split");
- if (!prop)
- return -ENOMEM;
- kplane->prop_layer_split = prop;
- drm_object_attach_property(&plane->base, prop, 0);
- }
-
- return 0;
-}
-
/* for komeda, a pipeline can be shared between crtcs */
static u32 get_possible_crtcs(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe)
@@ -375,10 +297,6 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
if (err)
goto cleanup;
- err = komeda_plane_create_layer_properties(kplane, layer);
- if (err)
- goto cleanup;
-
err = drm_plane_create_color_properties(plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index bb8a61f6e9a4..617e1f7b8472 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -13,7 +13,6 @@ komeda_wb_init_data_flow(struct komeda_layer *wb_layer,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
- struct komeda_scaler *scaler = wb_layer->base.pipeline->scalers[0];
struct drm_framebuffer *fb = conn_st->writeback_job->fb;
memset(dflow, 0, sizeof(*dflow));
@@ -28,14 +27,7 @@ komeda_wb_init_data_flow(struct komeda_layer *wb_layer,
dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
dflow->rot = DRM_MODE_ROTATE_0;
- komeda_complete_data_flow_cfg(dflow, fb);
-
- /* if scaling exceed the acceptable scaler input/output range, try to
- * enable split.
- */
- if (dflow->en_scaling && scaler)
- dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
- !in_range(&scaler->hsize, dflow->out_w);
+ komeda_complete_data_flow_cfg(wb_layer, dflow, fb);
return 0;
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index cc35d492142c..2a65434500ee 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -86,7 +86,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format);
void bochs_hw_setbase(struct bochs_device *bochs,
- int x, int y, u64 addr);
+ int x, int y, int stride, u64 addr);
int bochs_hw_load_edid(struct bochs_device *bochs);
/* bochs_mm.c */
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 791ab2f79947..ebfea8744fe6 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -255,16 +255,22 @@ void bochs_hw_setformat(struct bochs_device *bochs,
}
void bochs_hw_setbase(struct bochs_device *bochs,
- int x, int y, u64 addr)
+ int x, int y, int stride, u64 addr)
{
- unsigned long offset = (unsigned long)addr +
+ unsigned long offset;
+ unsigned int vx, vy, vwidth;
+
+ bochs->stride = stride;
+ offset = (unsigned long)addr +
y * bochs->stride +
x * (bochs->bpp / 8);
- int vy = offset / bochs->stride;
- int vx = (offset % bochs->stride) * 8 / bochs->bpp;
+ vy = offset / bochs->stride;
+ vx = (offset % bochs->stride) * 8 / bochs->bpp;
+ vwidth = stride * 8 / bochs->bpp;
DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
x, y, addr, offset, vx, vy);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
}
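
The base setup now takes the framebuffer pitch and programs the VBE virtual-width register from it. A worked example of the arithmetic, with values assumed purely for illustration:

/* Assume a 1920x1080 XRGB8888 framebuffer: stride = 7680 bytes, bpp = 32,
 * scanned out at x = y = 0 from addr = 0.
 *
 *   offset = 0 + 0 * 7680 + 0 * (32 / 8)  = 0
 *   vy     = offset / stride              = 0
 *   vx     = (offset % stride) * 8 / bpp  = 0
 *   vwidth = stride * 8 / bpp             = 7680 * 8 / 32 = 1920
 *
 * VBE_DISPI_INDEX_VIRT_WIDTH is therefore the pitch expressed in pixels; a
 * pitch wider than the visible width (say 8192 bytes) would give
 * vwidth = 2048 and let the x/y offsets pan within the larger surface.
 */
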
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 5904eddc83a5..bc19dbd531ef 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -36,7 +36,8 @@ static void bochs_plane_update(struct bochs_device *bochs,
bochs_hw_setbase(bochs,
state->crtc_x,
state->crtc_y,
- gbo->bo.offset);
+ state->fb->pitches[0],
+ state->fb->offsets[0] + gbo->bo.offset);
bochs_hw_setformat(bochs, state->fb->format);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index e95fceac8f8b..56d36779d213 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -180,7 +180,8 @@ again:
create_mode:
mode = drm_mode_create_from_cmdline_mode(connector->dev, cmdline_mode);
- list_add(&mode->head, &connector->modes);
+ if (mode)
+ list_add(&mode->head, &connector->modes);
return mode;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 3afed5677946..b3f2cf7eae9c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -141,7 +141,7 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
connector->name,
- mode->name ? mode->name : "",
+ mode->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fe0ce86c280f..9d00947ca447 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>
@@ -535,28 +536,15 @@ EXPORT_SYMBOL(drm_dev_unplug);
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;
-static const struct dentry_operations drm_fs_dops = {
- .d_dname = simple_dname,
-};
-
-static const struct super_operations drm_fs_sops = {
- .statfs = simple_statfs,
-};
-
-static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int drm_fs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type,
- "drm:",
- &drm_fs_sops,
- &drm_fs_dops,
- 0x010203ff);
+ return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}
static struct file_system_type drm_fs_type = {
.name = "drm",
.owner = THIS_MODULE,
- .mount = drm_fs_mount,
+ .init_fs_context = drm_fs_init_fs_context,
.kill_sb = kill_anon_super,
};
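
init_pseudo() (from <linux/pseudo_fs.h>) builds the pseudo-filesystem context and supplies the defaults the old mount_pseudo() call spelled out by hand; it returns NULL only on allocation failure, which is why the helper maps that to -ENOMEM. The same conversion pattern applied to a hypothetical pseudo filesystem would look like this sketch:

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/module.h>
#include <linux/pseudo_fs.h>

#define EXAMPLEFS_MAGIC 0x4578616d	/* hypothetical magic number */

/* hypothetical filesystem following the same conversion pattern */
static int examplefs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, EXAMPLEFS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type examplefs_type = {
	.name		 = "examplefs",
	.owner		 = THIS_MODULE,
	.init_fs_context = examplefs_init_fs_context,
	.kill_sb	 = kill_anon_super,
};
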
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 57e6408288c8..74a5739df506 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -158,6 +158,9 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int interlace;
u64 tmp;
+ if (!hdisplay || !vdisplay)
+ return NULL;
+
/* allocate the drm_display_mode structure. If failure, we will
* return directly
*/
@@ -392,6 +395,9 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
int hsync, hfront_porch, vodd_front_porch_lines;
unsigned int tmp1, tmp2;
+ if (!hdisplay || !vdisplay)
+ return NULL;
+
drm_mode = drm_mode_create(dev);
if (!drm_mode)
return NULL;
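
drm_cvt_mode() and drm_gtf_mode_complex() can now return NULL for a zero-sized request as well as on allocation failure, so callers must treat NULL as "no mode". A hedged sketch of the caller side, with the connector implementation and the 1920x1080@60 request being hypothetical example values:

/* sketch of a hypothetical connector ->get_modes() caller */
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_cvt_mode(connector->dev, 1920, 1080, 60, false, false, false);
	if (!mode)
		return 0;	/* zero-sized request or allocation failure: nothing to add */

	drm_mode_probed_add(connector, mode);
	return 1;
}
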
@@ -1448,7 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
}
static int drm_mode_parse_cmdline_extra(const char *str, int length,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
int i;
@@ -1493,7 +1499,7 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
bool extras,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
const char *str_start = str;
@@ -1555,7 +1561,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
}
static int drm_mode_parse_cmdline_options(char *str, size_t len,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
unsigned int rotation = 0;
@@ -1689,7 +1695,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
* True if a valid modeline has been parsed, false otherwise.
*/
bool drm_mode_parse_command_line_for_connector(const char *mode_option,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
const char *name;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index d8a0bcd02f34..ffd95bfeaa94 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -90,6 +90,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+ .width = 720,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.width = 800,
.height = 1280,
@@ -123,6 +129,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_micropc,
+ }, { /* GPD MicroPC (later BIOS versions with proper DMI strings) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /*
* GPD Pocket, note that the DMI data is less generic than
* it seems, devices with a board-vendor of "AMI Corporation"
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3d8162d28730..a700c5c3d167 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -274,8 +274,6 @@
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
-static int zero;
-static int one = 1;
static u32 i915_perf_stream_paranoid = true;
/* The maximum exponent the hardware accepts is 63 (essentially it selects one
@@ -3366,8 +3364,8 @@ static struct ctl_table oa_table[] = {
.maxlen = sizeof(i915_perf_stream_paranoid),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "oa_max_sample_rate",
@@ -3375,7 +3373,7 @@ static struct ctl_table oa_table[] = {
.maxlen = sizeof(i915_oa_max_sample_rate),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = SYSCTL_ZERO,
.extra2 = &oa_sample_rate_hard_limit,
},
{}
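
SYSCTL_ZERO and SYSCTL_ONE are shared, kernel-wide sentinels for proc_dointvec_minmax(), so drivers no longer need private static ints just to express a 0..1 clamp. A minimal sketch of such an entry, where the tunable name and variable are hypothetical:

#include <linux/sysctl.h>

static int example_paranoid = 1;	/* hypothetical 0/1 tunable */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_paranoid",
		.data		= &example_paranoid,
		.maxlen		= sizeof(example_paranoid),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		/* shared sentinels replace per-driver static zero/one ints */
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};
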
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index b0f53f4f71bf..7a62fa04272d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
ccflags-y += -I $(srctree)/$(src)/include
ccflags-y += -I $(srctree)/$(src)/include/nvkm
ccflags-y += -I $(srctree)/$(src)/nvkm
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Kbuild b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
index 65a3990b4e16..975c4e226936 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nouveau-y += dispnv04/arb.o
nouveau-y += dispnv04/crtc.o
nouveau-y += dispnv04/cursor.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index ebf860bd59af..16e09f6b9113 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index c6ed20a09f4a..6ccfc09bcf0f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV04_DISPLAY_H__
#define __NV04_DISPLAY_H__
#include <subdev/bios.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 475c630308d1..e0c435eae664 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nouveau-y += dispnv50/disp.o
nouveau-y += dispnv50/lut.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 7ba373f493b2..8497768f1b41 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -322,8 +322,13 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- /* Force use of scaler for non-EDID modes. */
- if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
+ /* Don't force scaler for EDID modes with
+ * same size as the native one (e.g. different
+ * refresh rate)
+ */
+ if (adjusted_mode->hdisplay == native_mode->hdisplay &&
+ adjusted_mode->vdisplay == native_mode->vdisplay &&
+ adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
break;
mode = native_mode;
asyc->scaler.full = true;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 48a6485ec4e0..929d93b1677e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -169,14 +169,34 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
*/
switch (mode) {
case DRM_MODE_SCALE_CENTER:
- asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
- asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
- /* fall-through */
+ /* NOTE: This will cause scaling when the input is
+ * larger than the output.
+ */
+ asyh->view.oW = min(asyh->view.iW, asyh->view.oW);
+ asyh->view.oH = min(asyh->view.iH, asyh->view.oH);
+ break;
case DRM_MODE_SCALE_ASPECT:
- if (asyh->view.oH < asyh->view.oW) {
+ /* Determine whether the scaling should be on width or on
+ * height. This is done by comparing the aspect ratios of the
+ * sizes. If the output AR is larger than input AR, that means
+ * we want to change the width (letterboxed on the
+ * left/right), otherwise on the height (letterboxed on the
+ * top/bottom).
+ *
+ * E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR
+ * screen will have letterboxes on the left/right. However a
+ * 16:9 (1.777) AR image on that same screen will have
+ * letterboxes on the top/bottom.
+ *
+ * inputAR = iW / iH; outputAR = oW / oH
+ * outputAR > inputAR is equivalent to oW * iH > iW * oH
+ */
+ if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) {
+ /* Recompute output width, i.e. left/right letterbox */
u32 r = (asyh->view.iW << 19) / asyh->view.iH;
asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
} else {
+ /* Recompute output height, i.e. top/bottom letterbox */
u32 r = (asyh->view.iH << 19) / asyh->view.iW;
asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
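
The comparison avoids division entirely: oW/oH > iW/iH is rewritten as oW*iH > iW*oH, and the chosen ratio is then applied in fixed point with a 19-bit fractional part, the "+ r/2" term rounding to nearest. A worked example with assumed sizes:

/* Assume a 1024x768 (4:3) image on a 1920x1200 (16:10) head:
 *
 *   oW * iH = 1920 * 768  = 1474560
 *   iW * oH = 1024 * 1200 = 1228800
 *
 * so the output aspect ratio is wider and the output width is recomputed
 * (left/right letterbox):
 *
 *   r  = (1024 << 19) / 768       = 699050   (iW/iH with a 19-bit fraction)
 *   oW = (1200 * r + r / 2) >> 19 = 1600
 *
 * giving a centred 1600x1200 viewport, i.e. the original 4:3 ratio.
 */
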
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
index 1a8b45b4631f..65d432a5bd6c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL0002_H__
#define __NVIF_CL0002_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
index c0d5eba4f8fc..d490d401870a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL0046_H__
#define __NVIF_CL0046_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
index d0e8f35d9e92..c960c449e430 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL006B_H__
#define __NVIF_CL006B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 4cbed0329367..cd9a2e687bb6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
index 989690fe3cd8..9df289c7a84f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL506E_H__
#define __NVIF_CL506E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
index 5137b6879abd..327c96a994bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL506F_H__
#define __NVIF_CL506F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index bced81987269..38bf4f38e869 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL5070_H__
#define __NVIF_CL5070_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
index 36e537218596..3b2a9809b8ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL507A_H__
#define __NVIF_CL507A_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
index 3e643b752bfc..0f3d05581ea5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL507B_H__
#define __NVIF_CL507B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
index fd9e336d0a24..7da8813f4f5c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL507C_H__
#define __NVIF_CL507C_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
index e994c6894e3e..4a56e42d8bc9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL507D_H__
#define __NVIF_CL507D_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
index 8082d2fde248..633936cb6313 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL507E_H__
#define __NVIF_CL507E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 1a875090b251..1b6496d31580 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL826E_H__
#define __NVIF_CL826E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index e4e50cfe88f1..148602264a76 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL826F_H__
#define __NVIF_CL826F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index ab0fa8adb756..3823d6891b55 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL906F_H__
#define __NVIF_CL906F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
index e4c8de6d00b7..599d858afa36 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CL9097_H__
#define __NVIF_CL9097_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 81401eb970ea..cfa18f1fbf83 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CLA06F_H__
#define __NVIF_CLA06F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 7d556a1c92fa..f704ae600e94 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CLASS_H__
#define __NVIF_CLASS_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc36f.h b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
index 6b14d7e3f6bb..f66885891238 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CLC36F_H__
#define __NVIF_CLC36F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc37b.h b/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
index 89b18189d43b..970a5ac4cb95 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CLC37B_H__
#define __NVIF_CLC37B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc37e.h b/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
index 899db9e915ef..7ea23695e7e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CLC37E_H__
#define __NVIF_CLC37E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index f5df8b30c599..e63c6c965b54 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CLIENT_H__
#define __NVIF_CLIENT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index ef839bd1d37e..25d969dcf67d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_DEVICE_H__
#define __NVIF_DEVICE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 93bccd45a042..8e85b936eaa0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_DRIVER_H__
#define __NVIF_DRIVER_H__
#include <nvif/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/event.h b/drivers/gpu/drm/nouveau/include/nvif/event.h
index ec5c924f576a..a6b1ee4f10ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/event.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_EVENT_H__
#define __NVIF_EVENT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index 30ecd31db5df..f7b8f8f48760 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0000_H__
#define __NVIF_IF0000_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0001.h b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
index ca9215262215..4ced50e98ced 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0001.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0001_H__
#define __NVIF_IF0001_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
index d9235c011196..df2915d6a61e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0002.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0002_H__
#define __NVIF_IF0002_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
index ae30b8261b88..78467da07c37 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0003.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0003_H__
#define __NVIF_IF0003_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0004.h b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
index b35547c8ea36..d324c73c27fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0004.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0004_H__
#define __NVIF_IF0004_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0005.h b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
index 8ed0ae101715..fb9305b3b32c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0005.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0005_H__
#define __NVIF_IF0005_H__
#define NV10_NVSW_NTFY_UEVENT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index b93d586a2304..886c63fe753f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 4ed169230657..6863732eb286 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_NOTIFY_H__
#define __NVIF_NOTIFY_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8407651f6ac6..604fabc0e689 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_OBJECT_H__
#define __NVIF_OBJECT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index fd09b2842972..429d0106c123 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_OS_H__
#define __NOUVEAU_OS_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
index 7f0d9f6cc1e7..0584b938e8f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/unpack.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_UNPACK_H__
#define __NVIF_UNPACK_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 757fac823a10..5d7017fe5039 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CLIENT_H__
#define __NVKM_CLIENT_H__
#define nvkm_client(p) container_of((p), struct nvkm_client, object)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index 966d1822dd80..b4a9c7d991ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEBUG_H__
#define __NVKM_DEBUG_H__
#define NV_DBG_FATAL 0
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 642492344196..6d55cd0476aa 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 8a2be5b635e2..c6b401a6ea23 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_ENGINE_H__
#define __NVKM_ENGINE_H__
#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index 38acbde2de4f..ce98efd4b209 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_ENUM_H__
#define __NVKM_ENUM_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
index d3c45e90a1c1..a7a413f07a78 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_EVENT_H__
#define __NVKM_EVENT_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index 54da9c6bc8d5..383370c32428 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index 10eeaeebc242..0f515ec28fa9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
#include <core/memory.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index e2d39192fa26..71ed147ad077 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_IOCTL_H__
#define __NVKM_IOCTL_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index f34c80310861..b23bf6109f2d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MEMORY_H__
#define __NVKM_MEMORY_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index b0726c39429e..4ecfbde88537 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MM_H__
#define __NVKM_MM_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
index 4eb82bc563f3..3d358a66db3a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NOTIFY_H__
#define __NVKM_NOTIFY_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 270f893cc154..7efcd5d2f2ff 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_OBJECT_H__
#define __NVKM_OBJECT_H__
#include <core/oclass.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
index d950d5ee188b..0e70a9afba33 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_OPROXY_H__
#define __NVKM_OPROXY_H__
#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index a34a79bacbd0..6882eb7c7e26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_OPTION_H__
#define __NVKM_OPTION_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 445602d1e8d3..029a416197db 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
index 4c7f647d2dc9..b4b5df3e1610 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVICE_PCI_H__
#define __NVKM_DEVICE_PCI_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index d5d789663aca..bc2d1dcccb4e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_RAMHT_H__
#define __NVKM_RAMHT_H__
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 85a0777c2ce4..1218f28c14ba 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SUBDEV_H__
#define __NVKM_SUBDEV_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 5c102d0206a7..924009dd2bb0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVICE_TEGRA_H__
#define __NVKM_DEVICE_TEGRA_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index 40613983fccb..f938f024db81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BSP_H__
#define __NVKM_BSP_H__
#include <engine/xtensa.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 5f3650692e4d..86f420f4630b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CE_H__
#define __NVKM_CE_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 72b9da2de7c2..66c5c5e27520 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CIPHER_H__
#define __NVKM_CIPHER_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 3026b22d44fb..5a96c942d912 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_H__
#define __NVKM_DISP_H__
#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index f0c1b2c8c78c..2e12cdb6bb93 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DMA_H__
#define __NVKM_DMA_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 6427747b6f77..23b582d696c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FALCON_H__
#define __NVKM_FALCON_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index b7fc04dd1628..b335f3a1e66d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 1e924c7f7ba7..2cde36f3c064 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GR_H__
#define __NVKM_GR_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 4ef3d4c5e358..8585a31f5943 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MPEG_H__
#define __NVKM_MPEG_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
index 985fc9490643..08fbe7b3cb4b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSENC_H__
#define __NVKM_MSENC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index e03f33472486..83bb2fcb2cbf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSPDEC_H__
#define __NVKM_MSPDEC_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index 760bf17ea63d..69e09fd96e0c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSPPP_H__
#define __NVKM_MSPPP_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 281866d2501d..9e11cefc9649 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSVLD_H__
#define __NVKM_MSVLD_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index b72a4844c5f7..7c7d7f0abfcc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVDEC_H__
#define __NVKM_NVDEC_H__
#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index cdd68a8bab8b..21624046d0a1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVENC_H__
#define __NVKM_NVENC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 6cce8502f9df..4d754e7650d9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PM_H__
#define __NVKM_PM_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index b206b918c43e..f14e98a8a0ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SEC_H__
#define __NVKM_SEC_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index c93ad332461a..33078f86c779 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index 83a17c4e11e7..2e91769e3ee2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SW_H__
#define __NVKM_SW_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
index 9b7d4877cf41..35555c559eab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_VIC_H__
#define __NVKM_VIC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 53bf8aed48fb..8984415b2a3d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_VP_H__
#define __NVKM_VP_H__
#include <engine/xtensa.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 13c00ce6d556..fbf27b2293a9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_XTENSA_H__
#define __NVKM_XTENSA_H__
#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index da14486317ca..14b09f7e46a5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BAR_H__
#define __NVKM_BAR_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index 979e9a144e7b..f2860f8e0c2e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BIOS_H__
#define __NVKM_BIOS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index 425ccc47e3b7..9227ed640132 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_M0203_H__
#define __NVBIOS_M0203_H__
struct nvbios_M0203T {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
index b4e14e45a0e8..7ec1dabc5fe4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_M0205_H__
#define __NVBIOS_M0205_H__
struct nvbios_M0205T {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
index c09376894d12..49a7bb0f3c50 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_M0209_H__
#define __NVBIOS_M0209_H__
u32 nvbios_M0209Te(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
index 901d94ef11b8..caad7256d9e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_P0260_H__
#define __NVBIOS_P0260_H__
u32 nvbios_P0260Te(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
index d068586f3263..ebfe45fce965 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_BIT_H__
#define __NVBIOS_BIT_H__
struct bit_entry {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 9a3f9483ee75..263408a535ae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_BMP_H__
#define __NVBIOS_BMP_H__
static inline u16
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
index a1c48c6b223b..489fd3554a17 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_BOOST_H__
#define __NVBIOS_BOOST_H__
u32 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index 8463b421d345..f5f59261ea81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_CONN_H__
#define __NVBIOS_CONN_H__
enum dcb_connector_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
index 49343d276e11..6a287a016580 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_CSTEP_H__
#define __NVBIOS_CSTEP_H__
u32 nvbios_cstepTe(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
index 63ddc6ed897a..a27a0f3fe7aa 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_DCB_H__
#define __NVBIOS_DCB_H__
enum dcb_output_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index 423d92de0aae..ef44205a91f6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_DISP_H__
#define __NVBIOS_DISP_H__
u16 nvbios_disp_table(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
index 512e25a41803..1df5e1618455 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index f93e4f951f2f..f29f2d8da142 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_EXTDEV_H__
#define __NVBIOS_EXTDEV_H__
enum nvbios_extdev_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index 09c1d3b9d009..8b3fb1f5d3ab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_FAN_H__
#define __NVBIOS_FAN_H__
#include <subdev/bios/therm.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index b71a3555c64e..7c4f00366e71 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
enum dcb_gpio_func_name {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
index ae1f7483dd28..e84a0eb6df26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_I2C_H__
#define __NVBIOS_I2C_H__
enum dcb_i2c_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index e220a1ac1387..4c108fd2c805 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
struct pwr_rail_resistor_t {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
index 893288b060de..e13dc059a9ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_IMAGE_H__
#define __NVBIOS_IMAGE_H__
struct nvbios_image {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 744b1868e789..10df0215475e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_INIT_H__
#define __NVBIOS_INIT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
index 327bf9c4b703..7204c6f4f247 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_MXM_H__
#define __NVBIOS_MXM_H__
u16 mxm_table(struct nvkm_bios *, u8 *ver, u8 *hdr);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
index ee5419b7b45b..f10f176a3323 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_NPDE_H__
#define __NVBIOS_NPDE_H__
struct nvbios_npdeT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
index 1dffe8d6cc81..bb7bf67d1d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_PCIR_H__
#define __NVBIOS_PCIR_H__
struct nvbios_pcirT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index 0ee84ea6d737..1b67c0958721 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
u32 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
index ab964e085f02..b2c2d0959f6f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_PLL_H__
#define __NVBIOS_PLL_H__
/*XXX: kill me */
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
index fb41ecab8f8c..7177d39371cf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_PMU_H__
#define __NVBIOS_PMU_H__
struct nvbios_pmuT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
index ff12d810dce3..95306be163cc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_POWER_BUDGET_H__
#define __NVBIOS_POWER_BUDGET_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 2b87a38adb7a..153edf898b5d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_RAMCFG_H__
#define __NVBIOS_RAMCFG_H__
struct nvbios_ramcfg {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 471eef434b51..7f054042f9d7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_RAMMAP_H__
#define __NVBIOS_RAMMAP_H__
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
index 46a3b15e10ec..0fb8a3480871 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_THERM_H__
#define __NVBIOS_THERM_H__
struct nvbios_therm_threshold {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
index 40ceabf37827..c1f77773aace 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_TIMING_H__
#define __NVBIOS_TIMING_H__
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 67419bad584c..13103b9b5b96 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index 6b36d5ecb8f9..0c9be1b2ebbf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_VOLT_H__
#define __NVBIOS_VOLT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
index 36f3028d58ef..df94e26f873a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_VPSTATE_H__
#define __NVBIOS_VPSTATE_H__
struct nvbios_vpstate_header {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
index d1bb5d044585..11b4c4d27e5f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVBIOS_XPIO_H__
#define __NVBIOS_XPIO_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index 7695f7f77a06..ae9ad6c034fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BUS_H__
#define __NVKM_BUS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index 15db75ef0189..bf937e7dfd77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CLK_H__
#define __NVKM_CLK_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 8ba982c2fdfb..1a39e52e09e3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVINIT_H__
#define __NVKM_DEVINIT_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 27298f8b7ead..239ad222b95a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FB_H__
#define __NVKM_FB_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index 092193b7f98e..00111c34311e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FUSE_H__
#define __NVKM_FUSE_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index ee54899076e3..eaacf8d80527 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GPIO_H__
#define __NVKM_GPIO_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 7957eafa5f0e..81b977319640 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_H__
#define __NVKM_I2C_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 919653c1d101..db791411eaa8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_IBUS_H__
#define __NVKM_IBUS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index be9475cd94fd..f483dcd7cd1c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_ICCSENSE_H__
#define __NVKM_ICCSENSE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 36ed520ed2d0..c74ab7c31d05 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_INSTMEM_H__
#define __NVKM_INSTMEM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 9db5f8293198..644d527c3b96 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_LTC_H__
#define __NVKM_LTC_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index e38f4958dea2..6641fe4c252c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MC_H__
#define __NVKM_MC_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 28ade86f74c5..54cdcb017518 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index 0fd6d6f8eada..78df1e9def05 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MXM_H__
#define __NVKM_MXM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 23803cc859fd..4803a4fad4a2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PCI_H__
#define __NVKM_PCI_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 4bc9384046c6..24fbcccd93eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 9398d9f09339..62c34f98c930 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_THERM_H__
#define __NVKM_THERM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 3693ebf371b6..a8c21c6c800b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_TIMER_H__
#define __NVKM_TIMER_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 2904e67d79d2..7be0e7e7bd77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_TOP_H__
#define __NVKM_TOP_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index 312933ad7c2b..15ee5c321574 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 6a765682fbfa..45053a280930 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_VOLT_H__
#define __NVKM_VOLT_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 36fde1ff3ad5..195546719bfe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_ABI16_H__
#define __NOUVEAU_ABI16_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index ffb195850314..fe3a10255c36 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index b86294fc99e8..1e6e8a8c0455 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_ACPI_H__
#define __NOUVEAU_ACPI_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 846f4bdec0de..383ac36d5869 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 93814d1d31e4..9307357e1361 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4116ee62adaf..8f15281faa79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -252,7 +252,7 @@ nouveau_conn_reset(struct drm_connector *connector)
return;
if (connector->state)
- __drm_atomic_helper_connector_destroy_state(connector->state);
+ nouveau_conn_atomic_destroy_state(connector, connector->state);
__drm_atomic_helper_connector_reset(connector, &asyc->state);
asyc->dither.mode = DITHERING_MODE_AUTO;
asyc->dither.depth = DITHERING_DEPTH_AUTO;
@@ -978,11 +978,13 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
struct drm_display_info *info = NULL;
- const unsigned duallink_scale =
+ unsigned duallink_scale =
nouveau_duallink && nv_encoder->dcb->duallink_possible ? 2 : 1;
- if (drm_detect_hdmi_monitor(nv_connector->edid))
+ if (drm_detect_hdmi_monitor(nv_connector->edid)) {
info = &nv_connector->base.display_info;
+ duallink_scale = 1;
+ }
if (info) {
if (nouveau_hdmimhz > 0)
@@ -1003,6 +1005,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
return 225000;
}
+
if (dcb->location != DCB_LOC_ON_CHIP ||
drm->client.device.info.chipset >= 0x46)
return 165000 * duallink_scale;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 1d01a82d4b6f..9420a6aca138 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_DEBUGFS_H__
#define __NOUVEAU_DEBUGFS_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 311e175f0513..9185f01e2d9b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 42c026010938..1333220787a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -379,9 +379,10 @@ nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
ret = nouveau_dmem_chunk_alloc(drm);
if (ret) {
if (c)
- break;
+ return 0;
return ret;
}
+ mutex_lock(&drm->dmem->mutex);
continue;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 35ff0ca01a3b..aae035816383 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_DRV_H__
#define __NOUVEAU_DRV_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index ad27caeca0fd..c9e24baaaa4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index fe39998f65cc..03371204a47c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_GEM_H__
#define __NOUVEAU_GEM_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 08a1ab6b150d..6af2d299c3f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -428,6 +428,8 @@ nouveau_temp_read(struct device *dev, u32 attr, int channel, long *val)
switch (attr) {
case hwmon_temp_input:
+ if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
ret = nvkm_therm_temp_get(therm);
*val = ret < 0 ? ret : (ret * 1000);
break;
@@ -474,6 +476,8 @@ nouveau_fan_read(struct device *dev, u32 attr, int channel, long *val)
switch (attr) {
case hwmon_fan_input:
+ if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
*val = nvkm_therm_fan_sense(therm);
break;
default:
@@ -496,6 +500,8 @@ nouveau_in_read(struct device *dev, u32 attr, int channel, long *val)
switch (attr) {
case hwmon_in_input:
+ if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
ret = nvkm_volt_get(volt);
*val = ret < 0 ? ret : (ret / 1000);
break;
@@ -527,6 +533,8 @@ nouveau_pwm_read(struct device *dev, u32 attr, int channel, long *val)
*val = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE);
break;
case hwmon_pwm_input:
+ if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
*val = therm->fan_get(therm);
break;
default:
@@ -548,6 +556,8 @@ nouveau_power_read(struct device *dev, u32 attr, int channel, long *val)
switch (attr) {
case hwmon_power_input:
+ if (drm_dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
*val = nvkm_iccsense_read_all(iccsense);
break;
case hwmon_power_max:
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index 380ede26806c..d17505530e3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_IOCTL_H__
#define __NOUVEAU_IOCTL_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index b5b5fe40779d..cff7389f6ed3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#define NV04_PFB_BOOT_0 0x00100000
# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 8ebdc74cc0ad..feaac908efed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include <linux/pagemap.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 89929ad8c7cd..085280754b3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_TTM_H__
#define __NOUVEAU_TTM_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
index c68f1c65af3b..dc90d4a9d0d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.h
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_USIF_H__
#define __NOUVEAU_USIF_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 8f1ce4833230..8f4b12a8092c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
index 6a3000c88142..951a83f984dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index 7616c66803f8..300cf3fdbb46 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV10_FENCE_H_
#define __NV10_FENCE_H_
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 7eebd7d18b6d..50d583d63807 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvif-y := nvif/object.o
nvif-y += nvif/client.o
nvif-y += nvif/device.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index a8ec75cf02dc..b53de9ba8c73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
include $(src)/nvkm/core/Kbuild
include $(src)/nvkm/falcon/Kbuild
include $(src)/nvkm/subdev/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index 01de22144259..2b471ab585b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y := nvkm/core/client.o
nvkm-y += nvkm/core/engine.o
nvkm-y += nvkm/core/enum.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 5a43bcfb3622..c6dfed18f35b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/falcon.o
nvkm-y += nvkm/engine/xtensa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild
index ad1bcfa6fc6c..b596b990519c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/bsp/g84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 157a70721629..ba88613e1e46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/ce/gt215.o
nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index da130f5058e5..96d934f81600 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index 0b92eb32598d..d3fbd4ab5e31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 0e3d08f11b0b..b0c8342db15f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CE_PRIV_H__
#define __NVKM_CE_PRIV_H__
#include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild
index 95708b59496c..ffad341f42bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/cipher/g84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
index 206163da52e1..293c57678dab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/device/acpi.o
nvkm-y += nvkm/engine/device/base.o
nvkm-y += nvkm/engine/device/ctrl.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 6a62021e9861..1d3c5cf7c3b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVICE_ACPI_H__
#define __NVKM_DEVICE_ACPI_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 10d91e8bbb94..c3c7159f3411 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1316,7 +1316,7 @@ nvaf_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -2575,6 +2575,41 @@ nv167_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv168_chipset = {
+ .name = "TU116",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3052,6 +3087,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
+ case 0x168: device->chip = &nv168_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
index ebcc5c52fbd1..9f6d7f23af8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVICE_CTRL_H__
#define __NVKM_DEVICE_CTRL_H__
#define nvkm_control(p) container_of((p), struct nvkm_control, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 2a53e37dfa7a..d8be2f77ac66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVICE_PRIV_H__
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index dbfda73cfea6..0d584d0da59c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/disp/base.o
nvkm-y += nvkm/engine/disp/nv04.o
nvkm-y += nvkm/engine/disp/nv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index adc9d76d09cc..e55054b7329f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_DISP_CHAN_H__
#define __NV50_DISP_CHAN_H__
#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index 090e869ae612..dcbe60a4b911 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_CONN_H__
#define __NVKM_DISP_CONN_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 495f665a0ee6..428b3f488f03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_DP_H__
#define __NVKM_DISP_DP_H__
#define nvkm_dp(p) container_of((p), struct nvkm_dp, outp)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
index 10f2aa9f29a4..7147dc6d9018 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "hdmi.h"
void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
index 45094c6e1425..fb1c3e3c5d4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_HDMI_H__
#define __NVKM_DISP_HDMI_H__
#include "ior.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
index 7d55faf52fcb..7dde6237441d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_HEAD_H__
#define __NVKM_DISP_HEAD_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 1681ddccd298..009d3a8b7a50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_IOR_H__
#define __NVKM_DISP_IOR_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index e5d00f478bb1..a677161c7f3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_DISP_H__
#define __NV50_DISP_H__
#define nv50_disp(p) container_of((p), struct nv50_disp, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 6c8aa5cfed9d..721b068b87ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_OUTP_H__
#define __NVKM_DISP_OUTP_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index ef66c5f38ad5..f815a5342880 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_PRIV_H__
#define __NVKM_DISP_PRIV_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index aee9822a7a87..a1f942793f98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_DISP_ROOT_H__
#define __NV50_DISP_ROOT_H__
#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index d57b73ada89e..4d5f3791ea7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -72,6 +72,7 @@ tu102_sor = {
.clock = gf119_sor_clock,
.hdmi = {
.ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
},
.dp = {
.lanes = { 0, 1, 2, 3 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
index 3e2680cbe370..a0e551b92c8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/dma/base.o
nvkm-y += nvkm/engine/dma/nv04.o
nvkm-y += nvkm/engine/dma/nv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
index 4307cbecd5c5..0c9d9640a59d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DMA_PRIV_H__
#define __NVKM_DMA_PRIV_H__
#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
index 9fe01fd75474..9c72ee214be7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DMA_USER_H__
#define __NVKM_DMA_USER_H__
#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 1f0eddacc9b7..90e9a0972a44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/fifo/base.o
nvkm-y += nvkm/engine/fifo/nv04.o
nvkm-y += nvkm/engine/fifo/nv10.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 2c7c5afc1ea5..177e10562600 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIFO_CHAN_H__
#define __NVKM_FIFO_CHAN_H__
#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index b653664e081b..7c125a15f963 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __GF100_FIFO_CHAN_H__
#define __GF100_FIFO_CHAN_H__
#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index f8557cdfbd81..22698661aa85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __GK104_FIFO_CHAN_H__
#define __GK104_FIFO_CHAN_H__
#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
index 15b06bdf5067..60ca79465aff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV04_FIFO_CHAN_H__
#define __NV04_FIFO_CHAN_H__
#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index 2e3c4005b874..5735ff72a9d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_FIFO_CHAN_H__
#define __NV50_FIFO_CHAN_H__
#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index 68f97ba03df6..b8642490eb2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __GF100_FIFO_H__
#define __GF100_FIFO_H__
#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index d4e565658f46..c33f4593cbc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __GK104_FIFO_H__
#define __GK104_FIFO_H__
#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index 1d70542553cc..e5ecceee77ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index a3994e8db462..87d30b6bd2ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__
#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index d5acbba293f4..c66f5370b21f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIFO_PRIV_H__
#define __NVKM_FIFO_PRIV_H__
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
index 49892a5e7201..4445a12b9a26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV04_FIFO_REGS_H__
#define __NV04_FIFO_REGS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 50bd9830694f..73724a8cb861 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/gr/base.o
nvkm-y += nvkm/engine/gr/nv04.o
nvkm-y += nvkm/engine/gr/nv10.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 33e932bd73b1..478b4723d0f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GRCTX_NVC0_H__
#define __NVKM_GRCTX_NVC0_H__
#include "gf100.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 4d67d90261b8..7917567ade3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GRCTX_H__
#define __NVKM_GRCTX_H__
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 0323acb739c8..54e14b4d31b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 1bb265917915..67524e615d81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index cf8343a693ba..60c8b7e89913 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index f4bfa109ed27..c99d1566554c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 59a3e1b2927f..753aa66729bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 8daa0516704a..db8b294cee39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index cbf2351f8da8..56162f6a6a94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 70830036ffee..9b9f0d93f915 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 7f2fd84d0c3a..fa11857b9d31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 560063789de8..1d741b30a04a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index 71e85784b615..c24f35ad56a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index d85eac6d1c61..649a442b4390 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
index f87693809c9f..6ac155b9663b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GRAPH_OS_H__
#define __NVKM_GRAPH_OS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index d5a376c4dd0b..4327baea02af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV10_GR_H__
#define __NV10_GR_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 111c8bb4497b..d837630a3625 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index 979dc5f7b32e..e57407a8a7c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV20_GR_H__
#define __NV20_GR_H__
#define nv20_gr(p) container_of((p), struct nv20_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index e59a28a26d65..32d29d3faee0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index e113b2d4c811..f941062c66f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 4aac2c224874..785ec956df0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 301556503e93..bd610d75c677 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 5d6926611a5b..89db7f523037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index 731400937edd..e6128791b2d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV40_GR_H__
#define __NV40_GR_H__
#define nv40_gr(p) container_of((p), struct nv40_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 5b9d99bee207..465f4da0ddfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_GR_H__
#define __NV50_GR_H__
#define nv50_gr(p) container_of((p), struct nv50_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index d4d5601c51e7..3b30f24032cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GR_PRIV_H__
#define __NVKM_GR_PRIV_H__
#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
index dc4f936675ac..fd1b7d35c62b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GR_REGS_H__
#define __NVKM_GR_REGS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild
index 651270137268..8d2d9eae5604 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/mpeg/nv31.o
nvkm-y += nvkm/engine/mpeg/nv40.o
nvkm-y += nvkm/engine/mpeg/nv44.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index b31fad8bdaad..b3e131538858 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV31_MPEG_H__
#define __NV31_MPEG_H__
#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
index 26f9d14151e2..667a2d05dd89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MPEG_PRIV_H__
#define __NVKM_MPEG_PRIV_H__
#include <engine/mpeg.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
index b808d9e9c964..4bf033ff4d41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
#nvkm-y += nvkm/engine/msenc/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
index df50010e5f2c..0cd957d72593 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/mspdec/base.o
nvkm-y += nvkm/engine/mspdec/g98.o
nvkm-y += nvkm/engine/mspdec/gt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
index db305072a82f..86445a2600d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSPDEC_PRIV_H__
#define __NVKM_MSPDEC_PRIV_H__
#include <engine/mspdec.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
index 322e3470b2f1..788ce7255b61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/msppp/base.o
nvkm-y += nvkm/engine/msppp/g98.o
nvkm-y += nvkm/engine/msppp/gt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
index 7708e52c9043..f20b10915db2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSPPP_PRIV_H__
#define __NVKM_MSPPP_PRIV_H__
#include <engine/msppp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
index beddd82f5755..d68b4431cd80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/msvld/base.o
nvkm-y += nvkm/engine/msvld/g98.o
nvkm-y += nvkm/engine/msvld/gt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
index 66c36049abca..5cd1e83badbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MSVLD_PRIV_H__
#define __NVKM_MSVLD_PRIV_H__
#include <engine/msvld.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 29d7ddb56f0e..cdf631822282 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1,3 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/nvdec/base.o
nvkm-y += nvkm/engine/nvdec/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 6c300739f621..57bfa3aa1835 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVDEC_PRIV_H__
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 85725b11200b..f316de8d45a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
#nvkm-y += nvkm/engine/nvenc/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
index ceb7302e292f..2cc8a5f6fe0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/pm/base.o
nvkm-y += nvkm/engine/pm/nv40.o
nvkm-y += nvkm/engine/pm/nv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index c74fd4557d41..461bb219b1c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PM_NVC0_H__
#define __NVKM_PM_NVC0_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 3f37b713936c..8ed19320fda1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PM_NV40_H__
#define __NVKM_PM_NV40_H__
#define nv40_pm(p) container_of((p), struct nv40_pm, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 9fad3611a843..cd6f8f79b235 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PM_PRIV_H__
#define __NVKM_PM_PRIV_H__
#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild
index f72ee558f8e8..b6e02ceb1c5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/sec/g98.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 6278a0c5fe83..fe90f2e05853 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 9a2f4f669291..97c4696171f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/sec2/base.o
nvkm-y += nvkm/engine/sec2/gp102.o
nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index ab0165e2d1a3..b331b00517e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SEC2_PRIV_H__
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
index 91cf08084bc1..94fe25964f51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/sw/base.o
nvkm-y += nvkm/engine/sw/nv04.o
nvkm-y += nvkm/engine/sw/nv10.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index d42862fc43fd..32de53427aa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SW_CHAN_H__
#define __NVKM_SW_CHAN_H__
#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index 459afd30a484..6d364d7b406a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SW_NV50_H__
#define __NVKM_SW_NV50_H__
#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
index d7034950ba87..d2f846499b92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVSW_H__
#define __NVKM_NVSW_H__
#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index 4aca1791abc3..6d18fc6180f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SW_PRIV_H__
#define __NVKM_SW_PRIV_H__
#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
index 9281c82ea99c..70164f482cc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
#nvkm-y += nvkm/engine/vic/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild
index 456e43fd1f6c..d48ea0fd1d27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/vp/g84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index 8afbf0f9bc86..b5665ada850a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/falcon/base.o
nvkm-y += nvkm/falcon/v1.o
nvkm-y += nvkm/falcon/msgqueue.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index d515ad994199..900fe1d37b4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FALCON_PRIV_H__
#define __NVKM_FALCON_PRIV_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 9def926f24d4..6d978feebbd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -182,6 +182,7 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
+ struct nvkm_device *device = falcon->owner->device;
u32 inst_loc;
u32 fbif;
@@ -233,6 +234,41 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
+
+ /* Not sure if this is a WAR for a HW issue, or some additional
+ * programming sequence that's needed to properly complete the
+ * context switch we trigger above.
+ *
+ * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
+ * particularly when resuming from suspend.
+ *
+ * Also removes the need for an odd workaround where we needed
+ * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
+ * the SEC2 RTOS would begin executing.
+ */
+ switch (falcon->owner->index) {
+ case NVKM_SUBDEV_GSP:
+ case NVKM_ENGINE_SEC2:
+ nvkm_msec(device, 10,
+ u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((irqstat & 0x00000008) &&
+ (flcn0dc & 0x00007000) == 0x00005000)
+ break;
+ );
+
+ nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
+ nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
+
+ nvkm_msec(device, 10,
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((flcn0dc & 0x00007000) == 0x00000000)
+ break;
+ );
+ break;
+ default:
+ break;
+ }
}
static void
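[Editor's illustration -- not part of the patch above. The new bind_context code polls falcon registers with nouveau's nvkm_msec() helper, which keeps re-evaluating its body (a register read plus a condition with "break") until the condition is met or the millisecond budget runs out. The stand-alone C sketch below shows the same poll-with-timeout idiom using only standard C; read_reg(), poll_status() and the status mask are hypothetical stand-ins, not nouveau APIs.]

/*
 * Minimal sketch of the poll-with-timeout pattern used by the hunk above.
 * read_reg() fakes a hardware status register so the example is runnable.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t read_reg(void)
{
	/* Stand-in for a falcon register read such as nvkm_falcon_rd32(). */
	static uint32_t fake;
	return fake += 0x1000;
}

static bool poll_status(uint32_t mask, uint32_t want, long timeout_ms)
{
	struct timespec start, now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_reg() & mask) == want)
			return true;	/* condition met: like "break" inside nvkm_msec() */
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
			     (now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms > timeout_ms)
			return false;	/* budget expired: like nvkm_msec() timing out */
	}
}

int main(void)
{
	printf("handshake %s\n",
	       poll_status(0x7000, 0x5000, 10) ? "ok" : "timed out");
	return 0;
}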
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index d8c287173f4c..4e136f3d7c28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
include $(src)/nvkm/subdev/bar/Kbuild
include $(src)/nvkm/subdev/bios/Kbuild
include $(src)/nvkm/subdev/bus/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 8210bf9c52a5..8faee3317a74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/bar/base.o
nvkm-y += nvkm/subdev/bar/nv50.o
nvkm-y += nvkm/subdev/bar/g84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 4f2b66e8d795..4ae4c7145712 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __GF100_BAR_H__
#define __GF100_BAR_H__
#define gf100_bar(p) container_of((p), struct gf100_bar, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index 2fe833f6d9f7..e4193deb2e51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_BAR_H__
#define __NV50_BAR_H__
#define nv50_bar(p) container_of((p), struct nv50_bar, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 01ba5b26666e..869ad184f923 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BAR_PRIV_H__
#define __NVKM_BAR_PRIV_H__
#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index bb4759cc38a6..5a970fb8f7ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/bios/base.o
nvkm-y += nvkm/subdev/bios/bit.o
nvkm-y += nvkm/subdev/bios/boost.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 33435ca16311..fac1bff1311b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BIOS_PRIV_H__
#define __NVKM_BIOS_PRIV_H__
#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
index 409137fbdddf..01d737989d00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/bus/base.o
nvkm-y += nvkm/subdev/bus/hwsq.o
nvkm-y += nvkm/subdev/bus/nv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 17ac1812a928..217a0a4a3bc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BUS_HWSQ_H__
#define __NVKM_BUS_HWSQ_H__
#include <subdev/bus.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
index ef01e569352d..76f7ba1c6494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_BUS_PRIV_H__
#define __NVKM_BUS_PRIV_H__
#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 0a8a7072bcbc..dcecd499d8df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/clk/base.o
nvkm-y += nvkm/subdev/clk/nv04.o
nvkm-y += nvkm/subdev/clk/nv40.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index 1ea886a4301f..34754efbfb1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CLK_NVA3_H__
#define __NVKM_CLK_NVA3_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index f134d979d884..7c7713238ec4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_CLK_H__
#define __NV50_CLK_H__
#define nv50_clk(p) container_of((p), struct nv50_clk, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
index 9a39f1fd2976..631907564e71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PLL_H__
#define __NVKM_PLL_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
index b656177923fb..81dfb37480ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CLK_PRIV_H__
#define __NVKM_CLK_PRIV_H__
#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
index d0715fe84328..e4b362d3449b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_CLK_SEQ_H__
#define __NVKM_CLK_SEQ_H__
#include <subdev/bus/hwsq.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index f054c44acab2..b3429371ed82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/devinit/base.o
nvkm-y += nvkm/subdev/devinit/nv04.o
nvkm-y += nvkm/subdev/devinit/nv05.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index b18e49847eee..15b029ddf6df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV04_DEVINIT_H__
#define __NV04_DEVINIT_H__
#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 72d130bb7f7c..e8d37a6145a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV50_DEVINIT_H__
#define __NV50_DEVINIT_H__
#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 5b3097a586dd..94723352137a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DEVINIT_PRIV_H__
#define __NVKM_DEVINIT_PRIV_H__
#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index c9bcf3744e5c..53b9d638f2c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/fault/base.o
nvkm-y += nvkm/subdev/fault/user.o
nvkm-y += nvkm/subdev/fault/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 88b1668929ba..43a42159a3d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/fb/base.o
nvkm-y += nvkm/subdev/fb/nv04.o
nvkm-y += nvkm/subdev/fb/nv10.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index ab261310753a..2ed7cdaab37c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_RAM_NVC0_H__
#define __NVKM_RAM_NVC0_H__
#define gf100_fb(p) container_of((p), struct gf100_fb, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index dacc696387b6..5e2b0c9539ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FB_NV50_H__
#define __NVKM_FB_NV50_H__
#define nv50_fb(p) container_of((p), struct nv50_fb, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 1e4ad61c19e1..c4e9f55af283 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FB_PRIV_H__
#define __NVKM_FB_PRIV_H__
#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index 330132e95b6f..d723a9b4e3c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FB_RAM_PRIV_H__
#define __NVKM_FB_RAM_PRIV_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index a65fa5586af8..247c0f8a723b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FBRAM_FUC_H__
#define __NVKM_FBRAM_FUC_H__
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
index 11f6bb2936b9..a87de0871dfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NV40_FB_RAM_H__
#define __NV40_FB_RAM_H__
#define nv40_ram(p) container_of((p), struct nv40_ram, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
index d8f5053e8e2a..aba5b73781d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FBRAM_SEQ_H__
#define __NVKM_FBRAM_SEQ_H__
#include <subdev/bus/hwsq.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
index ad26fcbe9e06..8098cd77dfdd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FB_REGS_04_H__
#define __NVKM_FB_REGS_04_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild
index 9626715768c8..8e7cd9d27f23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/fuse/base.o
nvkm-y += nvkm/subdev/fuse/nv50.o
nvkm-y += nvkm/subdev/fuse/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 3a5595a9e457..2edc612408dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FUSE_PRIV_H__
#define __NVKM_FUSE_PRIV_H__
#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index 0169fc30a2f9..b2ad5922a1c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/gpio/base.o
nvkm-y += nvkm/subdev/gpio/nv10.o
nvkm-y += nvkm/subdev/gpio/nv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 9759f13447bf..59e39affe2a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_GPIO_PRIV_H__
#define __NVKM_GPIO_PRIV_H__
#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index fa566ea6cb95..e7c4f068936e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 69f341e11d70..723d0284caef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/i2c/base.o
nvkm-y += nvkm/subdev/i2c/nv04.o
nvkm-y += nvkm/subdev/i2c/nv4e.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 08f6b2ee64ab..30b48896965e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_AUX_H__
#define __NVKM_I2C_AUX_H__
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index ecacb22834d7..719345074711 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -185,6 +185,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
}
static int
+nvkm_i2c_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_pad *pad;
+
+ /*
+ * We init our i2c busses as early as possible, since they may be
+ * needed by the vbios init scripts on some cards
+ */
+ list_for_each_entry(pad, &i2c->pad, head)
+ nvkm_i2c_pad_init(pad);
+ list_for_each_entry(bus, &i2c->bus, head)
+ nvkm_i2c_bus_init(bus);
+
+ return 0;
+}
+
+static int
nvkm_i2c_init(struct nvkm_subdev *subdev)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
@@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_i2c = {
.dtor = nvkm_i2c_dtor,
+ .preinit = nvkm_i2c_preinit,
.init = nvkm_i2c_init,
.fini = nvkm_i2c_fini,
.intr = nvkm_i2c_intr,
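[Editor's illustration -- not from the patch. The hunk above wires a new .preinit callback into the subdev's const function table so pads and busses are brought up before the VBIOS init scripts need them. The small self-contained C sketch below mirrors that optional-hook ops-table pattern; demo_funcs, demo_run() and friends are hypothetical names, not nouveau identifiers.]

/*
 * Minimal sketch of a const ops table with an optional "preinit" hook,
 * called by the core before the normal init step when it is non-NULL.
 */
#include <stdio.h>

struct demo_funcs {
	int (*preinit)(void *priv);	/* optional, may be NULL */
	int (*init)(void *priv);
};

static int demo_preinit(void *priv)
{
	printf("preinit: bring up early resources\n");
	return 0;
}

static int demo_init(void *priv)
{
	printf("init: normal initialisation\n");
	return 0;
}

static const struct demo_funcs demo = {
	.preinit = demo_preinit,	/* newly wired-up hook, as in the hunk above */
	.init = demo_init,
};

static int demo_run(const struct demo_funcs *f, void *priv)
{
	int ret;

	if (f->preinit) {
		ret = f->preinit(priv);
		if (ret)
			return ret;
	}
	return f->init(priv);
}

int main(void)
{
	return demo_run(&demo, NULL);
}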
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index 465464bba58b..4c236ab34929 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_BUS_H__
#define __NVKM_I2C_BUS_H__
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 33f0c809e583..461016814f4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
#include <subdev/i2c.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index f476a69b6cb7..bd86bc298ebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_PRIV_H__
#define __NVKM_I2C_PRIV_H__
#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index 557530355064..127efb51f67d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/ibus/gf100.o
nvkm-y += nvkm/subdev/ibus/gf117.o
nvkm-y += nvkm/subdev/ibus/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
index 504a6d37ec50..302d69e384d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_IBUS_PRIV_H__
#define __NVKM_IBUS_PRIV_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
index 52eb0746c750..6634bcdc5eeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
@@ -1,3 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/iccsense/base.o
nvkm-y += nvkm/subdev/iccsense/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index bd599b8252ca..cc09c6c504af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_ICCSENSE_PRIV_H__
#define __NVKM_ICCSENSE_PRIV_H__
#define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index e0031b5c06b1..06cbe19ce376 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/instmem/base.o
nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index b9e4751b9921..f5da8fcbdde3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_INSTMEM_PRIV_H__
#define __NVKM_INSTMEM_PRIV_H__
#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 61f655c2de0c..2b6d36ea7067 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/ltc/base.o
nvkm-y += nvkm/subdev/ltc/gf100.o
nvkm-y += nvkm/subdev/ltc/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 9dcde43c0f3c..2fcf18e46ce3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_LTC_PRIV_H__
#define __NVKM_LTC_PRIV_H__
#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 15da199d2fca..2585ef07532a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/mc/base.o
nvkm-y += nvkm/subdev/mc/nv04.o
nvkm-y += nvkm/subdev/mc/nv11.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index eb91a4cf452b..4aab753a6040 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MC_PRIV_H__
#define __NVKM_MC_PRIV_H__
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 697dc22c937c..a602b0cb5b31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/mmu/base.o
nvkm-y += nvkm/subdev/mmu/nv04.o
nvkm-y += nvkm/subdev/mmu/nv41.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 2ad1102a4e31..07f2fcd18f3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_PRIV_H__
#define __NVKM_MMU_PRIV_H__
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild
index 7a549386e675..5124a0c41367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/mxm/base.o
nvkm-y += nvkm/subdev/mxm/mxms.o
nvkm-y += nvkm/subdev/mxm/nv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 011a67fe4a8b..d9676b282ac0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVMXM_MXMS_H__
#define __NVMXM_MXMS_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
index 6767c2279e7c..fc8f69e6fc64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MXM_PRIV_H__
#define __NVKM_MXM_PRIV_H__
#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 6fbd008d6f10..174bdf995271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/pci/agp.o
nvkm-y += nvkm/subdev/pci/base.o
nvkm-y += nvkm/subdev/pci/pcie.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
index edb7f00f0de5..ad4d3621d02b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#include "priv.h"
#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
#ifndef __NVKM_PCI_AGP_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index c17f6063c9ea..7009aad86b6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PCI_PRIV_H__
#define __NVKM_PCI_PRIV_H__
#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 132ae3341d55..e37b6e45eaa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/pmu/base.o
nvkm-y += nvkm/subdev/pmu/memx.o
nvkm-y += nvkm/subdev/pmu/gt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 1dbe593e5960..4cf888f2bd03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index e1e981966c2d..e80eff18e5d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index e0222cb832fb..275ec71bc0c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index defddf5957ee..4b071e9bec7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
index 30d9480b9be5..0d5abf27ee52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PWR_OS_H__
#define __NVKM_PWR_OS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index 7b052879af72..22eaebefced3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index e9c6f9725afe..26d73f9cd6d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_PMU_PRIV_H__
#define __NVKM_PMU_PRIV_H__
#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index 51b33799cfdb..f3dee2693c79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/secboot/base.o
nvkm-y += nvkm/subdev/secboot/hs_ucode.o
nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 77c13b096a67..a84a999445bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -164,41 +164,12 @@ acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
struct nvkm_sec2 *sec = device->sec2;
/* on SEC arguments are always at the beginning of EMEM */
const u32 addr_args = 0x01000000;
- u32 reg;
int ret;
ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
if (ret)
return ret;
- /*
- * There is a bug where the LS firmware sometimes require to be started
- * twice (this happens only on SEC). Detect and workaround that
- * condition.
- *
- * Once started, the falcon will end up in STOPPED condition (bit 5)
- * if successful, or in HALT condition (bit 4) if not.
- */
- nvkm_msec(device, 1,
- if ((reg = nvkm_falcon_rd32(sb->boot_falcon, 0x100) & 0x30) != 0)
- break;
- );
- if (reg & BIT(4)) {
- nvkm_debug(subdev, "applying workaround for start bug...\n");
- nvkm_falcon_start(sb->boot_falcon);
- nvkm_msec(subdev->device, 1,
- if ((reg = nvkm_rd32(subdev->device,
- sb->boot_falcon->addr + 0x100)
- & 0x30) != 0)
- break;
- );
- if (reg & BIT(4)) {
- nvkm_error(subdev, "%s failed to start\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
- return -EINVAL;
- }
- }
-
nvkm_debug(&sb->subdev, "%s started\n",
nvkm_secboot_falcon_name[acr->boot_falcon]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 0cc1439d863b..9aa76a2befa8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/therm/base.o
nvkm-y += nvkm/subdev/therm/fan.o
nvkm-y += nvkm/subdev/therm/fannil.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
index a4aa8e621eb2..f710da4427cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/timer/base.o
nvkm-y += nvkm/subdev/timer/nv04.o
nvkm-y += nvkm/subdev/timer/nv40.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 3b8878486faa..89e97294b182 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_TIMER_PRIV_H__
#define __NVKM_TIMER_PRIV_H__
#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
index 23d07f5f44d9..34a740bc6e4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#define NV04_PTIMER_INTR_0 0x009100
#define NV04_PTIMER_INTR_EN_0 0x009140
#define NV04_PTIMER_NUMERATOR 0x009200
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
index e0b27242eeea..438d9d78ab52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
@@ -1,3 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/top/base.o
nvkm-y += nvkm/subdev/top/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
index 4f49b0acaa0e..a16baa2941cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_TOP_PRIV_H__
#define __NVKM_TOP_PRIV_H__
#define nvkm_top(p) container_of((p), struct nvkm_top, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index e80bc64b638a..523a7cd155a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 1a8ad560321b..75f13a34671f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_VOLT_PRIV_H__
#define __NVKM_VOLT_PRIV_H__
#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index bef4edde6f9f..14c96edb13df 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -15,13 +15,14 @@
#include "drm_selftest.h"
#include "test-drm_modeset_common.h"
+static const struct drm_connector no_connector = {};
+
static int drm_cmdline_test_res(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -42,11 +43,10 @@ static int drm_cmdline_test_res(void *ignored)
static int drm_cmdline_test_res_missing_x(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("x480",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -54,11 +54,10 @@ static int drm_cmdline_test_res_missing_x(void *ignored)
static int drm_cmdline_test_res_missing_y(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("1024x",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -66,11 +65,10 @@ static int drm_cmdline_test_res_missing_y(void *ignored)
static int drm_cmdline_test_res_bad_y(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("1024xtest",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -78,11 +76,10 @@ static int drm_cmdline_test_res_bad_y(void *ignored)
static int drm_cmdline_test_res_missing_y_bpp(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("1024x-24",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -90,11 +87,10 @@ static int drm_cmdline_test_res_missing_y_bpp(void *ignored)
static int drm_cmdline_test_res_vesa(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480M",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -115,11 +111,10 @@ static int drm_cmdline_test_res_vesa(void *ignored)
static int drm_cmdline_test_res_vesa_rblank(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480MR",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -140,11 +135,10 @@ static int drm_cmdline_test_res_vesa_rblank(void *ignored)
static int drm_cmdline_test_res_rblank(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480R",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -165,11 +159,10 @@ static int drm_cmdline_test_res_rblank(void *ignored)
static int drm_cmdline_test_res_bpp(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -191,11 +184,10 @@ static int drm_cmdline_test_res_bpp(void *ignored)
static int drm_cmdline_test_res_bad_bpp(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480-test",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -203,11 +195,10 @@ static int drm_cmdline_test_res_bad_bpp(void *ignored)
static int drm_cmdline_test_res_refresh(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480@60",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -229,11 +220,10 @@ static int drm_cmdline_test_res_refresh(void *ignored)
static int drm_cmdline_test_res_bad_refresh(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480@refresh",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -241,11 +231,10 @@ static int drm_cmdline_test_res_bad_refresh(void *ignored)
static int drm_cmdline_test_res_bpp_refresh(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -268,11 +257,10 @@ static int drm_cmdline_test_res_bpp_refresh(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_interlaced(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60i",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -295,11 +283,10 @@ static int drm_cmdline_test_res_bpp_refresh_interlaced(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_margins(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60m",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -322,11 +309,10 @@ static int drm_cmdline_test_res_bpp_refresh_margins(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_force_off(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60d",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -349,11 +335,10 @@ static int drm_cmdline_test_res_bpp_refresh_force_off(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_force_on_off(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480-24@60de",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -361,11 +346,10 @@ static int drm_cmdline_test_res_bpp_refresh_force_on_off(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_force_on(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60e",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -388,11 +372,10 @@ static int drm_cmdline_test_res_bpp_refresh_force_on(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_force_on_analog(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60D",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -415,10 +398,11 @@ static int drm_cmdline_test_res_bpp_refresh_force_on_analog(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_force_on_digital(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
+ static const struct drm_connector connector = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+ };
- connector.connector_type = DRM_MODE_CONNECTOR_DVII;
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60D",
&connector,
&mode));
@@ -443,11 +427,10 @@ static int drm_cmdline_test_res_bpp_refresh_force_on_digital(void *ignored)
static int drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60ime",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -470,11 +453,10 @@ static int drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on(void *ig
static int drm_cmdline_test_res_margins_force_on(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480me",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -495,11 +477,10 @@ static int drm_cmdline_test_res_margins_force_on(void *ignored)
static int drm_cmdline_test_res_vesa_margins(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480Mm",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -520,11 +501,10 @@ static int drm_cmdline_test_res_vesa_margins(void *ignored)
static int drm_cmdline_test_res_invalid_mode(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480f",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -532,11 +512,10 @@ static int drm_cmdline_test_res_invalid_mode(void *ignored)
static int drm_cmdline_test_res_bpp_wrong_place_mode(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480e-24",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -544,11 +523,10 @@ static int drm_cmdline_test_res_bpp_wrong_place_mode(void *ignored)
static int drm_cmdline_test_name(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(strcmp(mode.name, "NTSC"));
FAIL_ON(mode.refresh_specified);
@@ -559,11 +537,10 @@ static int drm_cmdline_test_name(void *ignored)
static int drm_cmdline_test_name_bpp(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC-24",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(strcmp(mode.name, "NTSC"));
@@ -577,11 +554,10 @@ static int drm_cmdline_test_name_bpp(void *ignored)
static int drm_cmdline_test_name_bpp_refresh(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC-24@60",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -589,11 +565,10 @@ static int drm_cmdline_test_name_bpp_refresh(void *ignored)
static int drm_cmdline_test_name_refresh(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -601,11 +576,10 @@ static int drm_cmdline_test_name_refresh(void *ignored)
static int drm_cmdline_test_name_refresh_wrong_mode(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60m",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -613,11 +587,10 @@ static int drm_cmdline_test_name_refresh_wrong_mode(void *ignored)
static int drm_cmdline_test_name_refresh_invalid_mode(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60f",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -625,11 +598,10 @@ static int drm_cmdline_test_name_refresh_invalid_mode(void *ignored)
static int drm_cmdline_test_name_option(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC,rotate=180",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(strcmp(mode.name, "NTSC"));
@@ -640,11 +612,10 @@ static int drm_cmdline_test_name_option(void *ignored)
static int drm_cmdline_test_name_bpp_option(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC-24,rotate=180",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(strcmp(mode.name, "NTSC"));
@@ -657,11 +628,10 @@ static int drm_cmdline_test_name_bpp_option(void *ignored)
static int drm_cmdline_test_rotate_0(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=0",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -683,11 +653,10 @@ static int drm_cmdline_test_rotate_0(void *ignored)
static int drm_cmdline_test_rotate_90(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=90",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -709,11 +678,10 @@ static int drm_cmdline_test_rotate_90(void *ignored)
static int drm_cmdline_test_rotate_180(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=180",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -735,11 +703,10 @@ static int drm_cmdline_test_rotate_180(void *ignored)
static int drm_cmdline_test_rotate_270(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=270",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -761,11 +728,10 @@ static int drm_cmdline_test_rotate_270(void *ignored)
static int drm_cmdline_test_rotate_invalid_val(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=42",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -773,11 +739,10 @@ static int drm_cmdline_test_rotate_invalid_val(void *ignored)
static int drm_cmdline_test_rotate_truncated(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=",
- &connector,
+ &no_connector,
&mode));
return 0;
@@ -785,11 +750,10 @@ static int drm_cmdline_test_rotate_truncated(void *ignored)
static int drm_cmdline_test_hmirror(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,reflect_x",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -811,11 +775,10 @@ static int drm_cmdline_test_hmirror(void *ignored)
static int drm_cmdline_test_vmirror(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,reflect_y",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -837,11 +800,10 @@ static int drm_cmdline_test_vmirror(void *ignored)
static int drm_cmdline_test_margin_options(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -866,11 +828,10 @@ static int drm_cmdline_test_margin_options(void *ignored)
static int drm_cmdline_test_multiple_options(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=270,reflect_x",
- &connector,
+ &no_connector,
&mode));
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
@@ -892,11 +853,10 @@ static int drm_cmdline_test_multiple_options(void *ignored)
static int drm_cmdline_test_invalid_option(void *ignored)
{
- struct drm_connector connector = { };
struct drm_cmdline_mode mode = { };
FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,test=42",
- &connector,
+ &no_connector,
&mode));
return 0;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 894da5abdc55..ebd35fc35290 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1197,8 +1197,6 @@ static struct kmsg_dumper hv_kmsg_dumper = {
};
static struct ctl_table_header *hv_ctl_table_hdr;
-static int zero;
-static int one = 1;
/*
* sysctl option to allow the user to control whether kmsg data should be
@@ -1211,8 +1209,8 @@ static struct ctl_table hv_ctl_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
},
{}
};
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index a80183a488c5..0c93fc5ca762 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -18,6 +18,50 @@ struct scmi_sensors {
const struct scmi_sensor_info **info[hwmon_max];
};
+static inline u64 __pow10(u8 x)
+{
+ u64 r = 1;
+
+ while (x--)
+ r *= 10;
+
+ return r;
+}
+
+static int scmi_hwmon_scale(const struct scmi_sensor_info *sensor, u64 *value)
+{
+ s8 scale = sensor->scale;
+ u64 f;
+
+ switch (sensor->type) {
+ case TEMPERATURE_C:
+ case VOLTAGE:
+ case CURRENT:
+ scale += 3;
+ break;
+ case POWER:
+ case ENERGY:
+ scale += 6;
+ break;
+ default:
+ break;
+ }
+
+ if (scale == 0)
+ return 0;
+
+ if (abs(scale) > 19)
+ return -E2BIG;
+
+ f = __pow10(abs(scale));
+ if (scale > 0)
+ *value *= f;
+ else
+ *value = div64_u64(*value, f);
+
+ return 0;
+}
+
static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -29,6 +73,10 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
sensor = *(scmi_sensors->info[type] + channel);
ret = h->sensor_ops->reading_get(h, sensor->id, false, &value);
+ if (ret)
+ return ret;
+
+ ret = scmi_hwmon_scale(sensor, &value);
if (!ret)
*val = value;
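Editor's note: a minimal user-space sketch of the unit adjustment performed by scmi_hwmon_scale() above, worked through for one hypothetical VOLTAGE sensor. The sensor exponent, raw reading and pow10_u64() helper below are illustrative only; the driver itself uses __pow10() and div64_u64() exactly as shown in the hunk.

#include <stdio.h>
#include <stdint.h>

/* same power-of-ten helper as __pow10() in the patch, with user-space types */
static uint64_t pow10_u64(unsigned int x)
{
	uint64_t r = 1;

	while (x--)
		r *= 10;
	return r;
}

int main(void)
{
	int scale = -3;       /* hypothetical VOLTAGE sensor: value = raw * 10^-3 V */
	uint64_t raw = 12345; /* i.e. 12.345 V */

	scale += 3;           /* hwmon expects millivolts (10^-3 V) */
	if (scale > 0)
		raw *= pow10_u64(scale);
	else if (scale < 0)
		raw /= pow10_u64(-scale);

	/* prints "12345 mV": this raw value was already in hwmon units */
	printf("%llu mV\n", (unsigned long long)raw);
	return 0;
}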
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index ceb42d948412..41a569558a15 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
@@ -506,7 +507,7 @@ bail:
* after device init. The direct add_cntr_files() call handles adding
* them from the init code, when the fs is already mounted.
*/
-static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
+static int qibfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct qib_devdata *dd;
unsigned long index;
@@ -534,17 +535,24 @@ bail:
return ret;
}
-static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int qibfs_get_tree(struct fs_context *fc)
{
- struct dentry *ret;
-
- ret = mount_single(fs_type, flags, data, qibfs_fill_super);
- if (!IS_ERR(ret))
- qib_super = ret->d_sb;
+ int ret = get_tree_single(fc, qibfs_fill_super);
+ if (ret == 0)
+ qib_super = fc->root->d_sb;
return ret;
}
+static const struct fs_context_operations qibfs_context_ops = {
+ .get_tree = qibfs_get_tree,
+};
+
+static int qibfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &qibfs_context_ops;
+ return 0;
+}
+
static void qibfs_kill_super(struct super_block *s)
{
kill_litter_super(s);
@@ -583,7 +591,7 @@ int qibfs_remove(struct qib_devdata *dd)
static struct file_system_type qibfs_fs_type = {
.owner = THIS_MODULE,
.name = "ipathfs",
- .mount = qibfs_mount,
+ .init_fs_context = qibfs_init_fs_context,
.kill_sb = qibfs_kill_super,
};
MODULE_ALIAS_FS("ipathfs");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index c7a3d75fb308..2e72fc5af157 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -611,6 +611,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ struct ib_device *ib_dev;
u32 max_fr_sectors;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -641,16 +642,19 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
}
ib_conn = &iser_conn->ib_conn;
+ ib_dev = ib_conn->device->ib_device;
if (ib_conn->pi_support) {
- u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
+ u32 sig_caps = ib_dev->attrs.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
- if (iscsi_host_add(shost,
- ib_conn->device->ib_device->dev.parent)) {
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ shost->virt_boundary_mask = ~MASK_4K;
+
+ if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
goto free_host;
}
@@ -956,30 +960,6 @@ static umode_t iser_attr_is_visible(int param_type, int param)
return 0;
}
-static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
-{
- struct iscsi_session *session;
- struct iser_conn *iser_conn;
- struct ib_device *ib_dev;
-
- mutex_lock(&unbind_iser_conn_mutex);
-
- session = starget_to_session(scsi_target(sdev))->dd_data;
- iser_conn = session->leadconn->dd_data;
- if (!iser_conn) {
- mutex_unlock(&unbind_iser_conn_mutex);
- return -ENOTCONN;
- }
- ib_dev = iser_conn->ib_conn.device->ib_device;
-
- if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
-
- mutex_unlock(&unbind_iser_conn_mutex);
-
- return 0;
-}
-
static struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over iSER",
@@ -992,7 +972,6 @@ static struct scsi_host_template iscsi_iser_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
- .slave_alloc = iscsi_iser_slave_alloc,
.proc_name = "iscsi_iser",
.this_id = -1,
.track_queue_depth = 1,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c7bd96edce80..b5960351bec0 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3046,20 +3046,6 @@ static int srp_target_alloc(struct scsi_target *starget)
return 0;
}
-static int srp_slave_alloc(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost = sdev->host;
- struct srp_target_port *target = host_to_target(shost);
- struct srp_device *srp_dev = target->srp_host->srp_dev;
- struct ib_device *ibdev = srp_dev->dev;
-
- if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- blk_queue_virt_boundary(sdev->request_queue,
- ~srp_dev->mr_page_mask);
-
- return 0;
-}
-
static int srp_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -3262,7 +3248,6 @@ static struct scsi_host_template srp_template = {
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
.target_alloc = srp_target_alloc,
- .slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
@@ -3806,6 +3791,9 @@ static ssize_t srp_create_target(struct device *dev,
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
+ if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
+
target = host_to_target(target_host);
target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 4cadebd8b9c4..95c0348843e6 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -6,9 +6,6 @@
* USB/RS232 I-Force joysticks and wheels.
*/
-/*
- */
-
#include "iforce.h"
/*
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 9a5f90da06ec..b2a68bc9f0b4 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -6,9 +6,6 @@
* USB/RS232 I-Force joysticks and wheels.
*/
-/*
- */
-
#include <asm/unaligned.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index b313e38b2c3a..763642c8cee9 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -6,9 +6,6 @@
* USB/RS232 I-Force joysticks and wheels.
*/
-/*
- */
-
#include <asm/unaligned.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index bbe31e0b759f..f95a81b9fac7 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -6,9 +6,6 @@
* USB/RS232 I-Force joysticks and wheels.
*/
-/*
- */
-
#include <linux/serio.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ade376bfb79f..29abfeeef9a5 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -6,9 +6,6 @@
* USB/RS232 I-Force joysticks and wheels.
*/
-/*
- */
-
#include <linux/usb.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 9cfa460466aa..6aa761ebbdf7 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -6,9 +6,6 @@
* USB/RS232 I-Force joysticks and wheels.
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 7c4f19dab34f..8e9c3ea9d5e7 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,6 +71,22 @@ config KEYBOARD_AMIGA
config ATARI_KBD_CORE
bool
+config KEYBOARD_APPLESPI
+ tristate "Apple SPI keyboard and trackpad"
+ depends on ACPI && EFI
+ depends on SPI
+ depends on X86 || COMPILE_TEST
+ help
+ Say Y here if you are running Linux on any Apple MacBook8,1 or later,
+ or any MacBookPro13,* or MacBookPro14,*.
+
+ You will also need to enable appropriate SPI master controllers:
+ spi_pxa2xx_platform and spi_pxa2xx_pci for MacBook8,1, and
+ spi_pxa2xx_platform and intel_lpss_pci for the rest.
+
+ To compile this driver as a module, choose M here: the
+ module will be called applespi.
+
config KEYBOARD_ATARI
tristate "Atari keyboard"
depends on ATARI
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index f0291ca39f62..06a0af6efeae 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
obj-$(CONFIG_KEYBOARD_ADP5589) += adp5589-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
+obj-$(CONFIG_KEYBOARD_APPLESPI) += applespi.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BCM) += bcm-keypad.o
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4c05c70a8cf3..4f96a4a99e5b 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -505,6 +505,7 @@ static int adp5589_gpio_add(struct adp5589_kpad *kpad)
if (!gpio_data)
return 0;
+ kpad->gc.parent = dev;
kpad->gc.ngpio = adp5589_build_gpiomap(kpad, pdata);
if (kpad->gc.ngpio == 0) {
dev_info(dev, "No unused gpios left to export\n");
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
new file mode 100644
index 000000000000..548737e7aeda
--- /dev/null
+++ b/drivers/input/keyboard/applespi.c
@@ -0,0 +1,1977 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MacBook (Pro) SPI keyboard and touchpad driver
+ *
+ * Copyright (c) 2015-2018 Federico Lorenzi
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ */
+
+/*
+ * The keyboard and touchpad controller on the MacBookAir6, MacBookPro12,
+ * MacBook8 and newer can be driven either by USB or SPI. However the USB
+ * pins are only connected on the MacBookAir6 and 7 and the MacBookPro12.
+ * All others need this driver. The interface is selected using ACPI methods:
+ *
+ * * UIEN ("USB Interface Enable"): If invoked with argument 1, disables SPI
+ * and enables USB. If invoked with argument 0, disables USB.
+ * * UIST ("USB Interface Status"): Returns 1 if USB is enabled, 0 otherwise.
+ * * SIEN ("SPI Interface Enable"): If invoked with argument 1, disables USB
+ * and enables SPI. If invoked with argument 0, disables SPI.
+ * * SIST ("SPI Interface Status"): Returns 1 if SPI is enabled, 0 otherwise.
+ * * ISOL: Resets the four GPIO pins used for SPI. Intended to be invoked with
+ * argument 1, then once more with argument 0.
+ *
+ * UIEN and UIST are only provided on models where the USB pins are connected.
+ *
+ * SPI-based Protocol
+ * ------------------
+ *
+ * The device and driver exchange messages (struct message); each message is
+ * encapsulated in one or more packets (struct spi_packet). There are two types
+ * of exchanges: reads, and writes. A read is signaled by a GPE, upon which one
+ * message can be read from the device. A write exchange consists of writing a
+ * command message, immediately reading a short status packet, and then, upon
+ * receiving a GPE, reading the response message. Write exchanges cannot be
+ * interleaved, i.e. a new write exchange must not be started till the previous
+ * write exchange is complete. Whether a received message is part of a read or
+ * write exchange is indicated in the encapsulating packet's flags field.
+ *
+ * A single message may be too large to fit in a single packet (which has a
+ * fixed, 256-byte size). In that case it will be split over multiple,
+ * consecutive packets.
+ */
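Editor's sketch: the ISOL reset described in the comment above amounts to two ACPI method calls. This is an illustration only; 'isol' is a hypothetical acpi_handle resolved by the caller, and any use the driver itself makes of ISOL is not part of this hunk.

static void example_isol_reset(acpi_handle isol)
{
	/* assert the GPIO reset, then release it, as the comment describes */
	acpi_execute_simple_method(isol, NULL, 1);
	acpi_execute_simple_method(isol, NULL, 0);
}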
+
+#include <linux/acpi.h>
+#include <linux/crc16.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/efi.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <asm/barrier.h>
+#include <asm/unaligned.h>
+
+#define CREATE_TRACE_POINTS
+#include "applespi.h"
+#include "applespi_trace.h"
+
+#define APPLESPI_PACKET_SIZE 256
+#define APPLESPI_STATUS_SIZE 4
+
+#define PACKET_TYPE_READ 0x20
+#define PACKET_TYPE_WRITE 0x40
+#define PACKET_DEV_KEYB 0x01
+#define PACKET_DEV_TPAD 0x02
+#define PACKET_DEV_INFO 0xd0
+
+#define MAX_ROLLOVER 6
+
+#define MAX_FINGERS 11
+#define MAX_FINGER_ORIENTATION 16384
+#define MAX_PKTS_PER_MSG 2
+
+#define KBD_BL_LEVEL_MIN 32U
+#define KBD_BL_LEVEL_MAX 255U
+#define KBD_BL_LEVEL_SCALE 1000000U
+#define KBD_BL_LEVEL_ADJ \
+ ((KBD_BL_LEVEL_MAX - KBD_BL_LEVEL_MIN) * KBD_BL_LEVEL_SCALE / 255U)
+
+#define EFI_BL_LEVEL_NAME L"KeyboardBacklightLevel"
+#define EFI_BL_LEVEL_GUID EFI_GUID(0xa076d2af, 0x9678, 0x4386, 0x8b, 0x58, 0x1f, 0xc8, 0xef, 0x04, 0x16, 0x19)
+
+#define APPLE_FLAG_FKEY 0x01
+
+#define SPI_RW_CHG_DELAY_US 100 /* from experimentation, in µs */
+
+#define SYNAPTICS_VENDOR_ID 0x06cb
+
+static unsigned int fnmode = 1;
+module_param(fnmode, uint, 0644);
+MODULE_PARM_DESC(fnmode, "Mode of Fn key on Apple keyboards (0 = disabled, [1] = fkeyslast, 2 = fkeysfirst)");
+
+static unsigned int fnremap;
+module_param(fnremap, uint, 0644);
+MODULE_PARM_DESC(fnremap, "Remap Fn key ([0] = no-remap; 1 = left-ctrl, 2 = left-shift, 3 = left-alt, 4 = left-meta, 6 = right-shift, 7 = right-alt, 8 = right-meta)");
+
+static bool iso_layout;
+module_param(iso_layout, bool, 0644);
+MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. ([0] = disabled, 1 = enabled)");
+
+static char touchpad_dimensions[40];
+module_param_string(touchpad_dimensions, touchpad_dimensions,
+ sizeof(touchpad_dimensions), 0444);
+MODULE_PARM_DESC(touchpad_dimensions, "The pixel dimensions of the touchpad, as XxY+W+H .");
+
+/**
+ * struct keyboard_protocol - keyboard message.
+ * message.type = 0x0110, message.length = 0x000a
+ *
+ * @unknown1: unknown
+ * @modifiers: bit-set of modifier/control keys pressed
+ * @unknown2: unknown
+ * @keys_pressed: the (non-modifier) keys currently pressed
+ * @fn_pressed: whether the fn key is currently pressed
+ * @crc16: crc over the whole message struct (message header +
+ * this struct) minus this @crc16 field
+ */
+struct keyboard_protocol {
+ u8 unknown1;
+ u8 modifiers;
+ u8 unknown2;
+ u8 keys_pressed[MAX_ROLLOVER];
+ u8 fn_pressed;
+ __le16 crc16;
+};
+
+/**
+ * struct tp_finger - single trackpad finger structure, le16-aligned
+ *
+ * @origin: zero when switching track finger
+ * @abs_x: absolute x coordinate
+ * @abs_y: absolute y coordinate
+ * @rel_x: relative x coordinate
+ * @rel_y: relative y coordinate
+ * @tool_major: tool area, major axis
+ * @tool_minor: tool area, minor axis
+ * @orientation: 16384 when point, else 15 bit angle
+ * @touch_major: touch area, major axis
+ * @touch_minor: touch area, minor axis
+ * @unused: zeros
+ * @pressure: pressure on forcetouch touchpad
+ * @multi: one finger: varies, more fingers: constant
+ * @crc16: on last finger: crc over the whole message struct
+ * (i.e. message header + this struct) minus the last
+ * @crc16 field; unknown on all other fingers.
+ */
+struct tp_finger {
+ __le16 origin;
+ __le16 abs_x;
+ __le16 abs_y;
+ __le16 rel_x;
+ __le16 rel_y;
+ __le16 tool_major;
+ __le16 tool_minor;
+ __le16 orientation;
+ __le16 touch_major;
+ __le16 touch_minor;
+ __le16 unused[2];
+ __le16 pressure;
+ __le16 multi;
+ __le16 crc16;
+};
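Editor's sketch (not the driver's actual report path): pulling one finger record into host byte order. Treating the coordinates as signed 16-bit values is an assumption, suggested by the negative x_min/y_min entries in applespi_tp_models further down.

static void example_decode_finger(const struct tp_finger *f,
				  int *x, int *y, bool *is_point)
{
	*x = (s16)le16_to_cpu(f->abs_x);
	*y = (s16)le16_to_cpu(f->abs_y);
	/* orientation is exactly MAX_FINGER_ORIENTATION for a point contact */
	*is_point = le16_to_cpu(f->orientation) == MAX_FINGER_ORIENTATION;
}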
+
+/**
+ * struct touchpad_protocol - touchpad message.
+ * message.type = 0x0210
+ *
+ * @unknown1: unknown
+ * @clicked: 1 if a button-click was detected, 0 otherwise
+ * @unknown2: unknown
+ * @number_of_fingers: the number of fingers being reported in @fingers
+ * @clicked2: same as @clicked
+ * @unknown3: unknown
+ * @fingers: the data for each finger
+ */
+struct touchpad_protocol {
+ u8 unknown1[1];
+ u8 clicked;
+ u8 unknown2[28];
+ u8 number_of_fingers;
+ u8 clicked2;
+ u8 unknown3[16];
+ struct tp_finger fingers[0];
+};
+
+/**
+ * struct command_protocol_tp_info - get touchpad info.
+ * message.type = 0x1020, message.length = 0x0000
+ *
+ * @crc16: crc over the whole message struct (message header +
+ * this struct) minus this @crc16 field
+ */
+struct command_protocol_tp_info {
+ __le16 crc16;
+};
+
+/**
+ * struct touchpad_info_protocol - touchpad info response.
+ * message.type = 0x1020, message.length = 0x006e
+ *
+ * @unknown1: unknown
+ * @model_flags: flags (vary by model number, but significance otherwise
+ * unknown)
+ * @model_no: the touchpad model number
+ * @unknown2: unknown
+ * @crc16: crc over the whole message struct (message header +
+ * this struct) minus this @crc16 field
+ */
+struct touchpad_info_protocol {
+ u8 unknown1[105];
+ u8 model_flags;
+ u8 model_no;
+ u8 unknown2[3];
+ __le16 crc16;
+};
+
+/**
+ * struct command_protocol_mt_init - initialize multitouch.
+ * message.type = 0x0252, message.length = 0x0002
+ *
+ * @cmd: value: 0x0102
+ * @crc16: crc over the whole message struct (message header +
+ * this struct) minus this @crc16 field
+ */
+struct command_protocol_mt_init {
+ __le16 cmd;
+ __le16 crc16;
+};
+
+/**
+ * struct command_protocol_capsl - toggle caps-lock led
+ * message.type = 0x0151, message.length = 0x0002
+ *
+ * @unknown: value: 0x01 (length?)
+ * @led: 0 off, 2 on
+ * @crc16: crc over the whole message struct (message header +
+ * this struct) minus this @crc16 field
+ */
+struct command_protocol_capsl {
+ u8 unknown;
+ u8 led;
+ __le16 crc16;
+};
+
+/**
+ * struct command_protocol_bl - set keyboard backlight brightness
+ * message.type = 0xB051, message.length = 0x0006
+ *
+ * @const1: value: 0x01B0
+ * @level: the brightness level to set
+ * @const2: value: 0x0001 (backlight off), 0x01F4 (backlight on)
+ * @crc16: crc over the whole message struct (message header +
+ * this struct) minus this @crc16 field
+ */
+struct command_protocol_bl {
+ __le16 const1;
+ __le16 level;
+ __le16 const2;
+ __le16 crc16;
+};
+
+/**
+ * struct message - a complete spi message.
+ *
+ * Each message begins with fixed header, followed by a message-type specific
+ * payload, and ends with a 16-bit crc. Because of the varying lengths of the
+ * payload, the crc is defined at the end of each payload struct, rather than
+ * in this struct.
+ *
+ * @type: the message type
+ * @zero: always 0
+ * @counter: incremented on each message, rolls over after 255; there is a
+ * separate counter for each message type.
+ * @rsp_buf_len:response buffer length (the exact nature of this field is quite
+ * speculative). On a request/write this is often the same as
+ * @length, though in some cases it has been seen to be much larger
+ * (e.g. 0x400); on a response/read this is the same as on the
+ * request; for reads that are not responses it is 0.
+ * @length: length of the remainder of the data in the whole message
+ * structure (after re-assembly in case of being split over
+ * multiple spi-packets), minus the trailing crc. The total size
+ * of the message struct is therefore @length + 10.
+ */
+struct message {
+ __le16 type;
+ u8 zero;
+ u8 counter;
+ __le16 rsp_buf_len;
+ __le16 length;
+ union {
+ struct keyboard_protocol keyboard;
+ struct touchpad_protocol touchpad;
+ struct touchpad_info_protocol tp_info;
+ struct command_protocol_tp_info tp_info_command;
+ struct command_protocol_mt_init init_mt_command;
+ struct command_protocol_capsl capsl_command;
+ struct command_protocol_bl bl_command;
+ u8 data[0];
+ };
+};
+
+/* type + zero + counter + rsp_buf_len + length */
+#define MSG_HEADER_SIZE 8
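Editor's worked example of the size bookkeeping documented above: a keyboard report has message.length = 0x000a, so the complete message occupies MSG_HEADER_SIZE (8) + 0x0a + 2 (trailing crc16) = 20 bytes, and the payload-plus-crc part is exactly sizeof(struct keyboard_protocol). A compile-time check one could add (illustrative, not part of the patch):

_Static_assert(sizeof(struct keyboard_protocol) == 0x000a + sizeof(__le16),
	       "keyboard payload is message.length bytes plus the crc16");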
+
+/**
+ * struct spi_packet - a complete spi packet; always 256 bytes. This carries
+ * the (parts of the) message in the data. But note that this does not
+ * necessarily contain a complete message, as in some cases (e.g. many
+ * fingers pressed) the message is split over multiple packets (see the
+ * @offset, @remaining, and @length fields). In general the data parts in
+ * spi_packet's are concatenated until @remaining is 0, and the result is a
+ * message.
+ *
+ * @flags: 0x40 = write (to device), 0x20 = read (from device); note that
+ * the response to a write still has 0x40.
+ * @device: 1 = keyboard, 2 = touchpad
+ * @offset: specifies the offset of this packet's data in the complete
+ * message; i.e. > 0 indicates this is a continuation packet (in
+ * the second packet for a message split over multiple packets
+ * this would then be the same as the @length in the first packet)
+ * @remaining: number of message bytes remaining in subsequent packets (in
+ * the first packet of a message split over two packets this would
+ * then be the same as the @length in the second packet)
+ * @length: length of the valid data in the @data in this packet
+ * @data: all or part of a message
+ * @crc16: crc over this whole structure minus this @crc16 field. This
+ * covers just this packet, even on multi-packet messages (in
+ * contrast to the crc in the message).
+ */
+struct spi_packet {
+ u8 flags;
+ u8 device;
+ __le16 offset;
+ __le16 remaining;
+ __le16 length;
+ u8 data[246];
+ __le16 crc16;
+};
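Editor's sketch of the reassembly rule described above: data from consecutive packets is appended at @offset until @remaining reaches zero, at which point the buffer holds one complete message. This is an illustration only, not the driver's receive path; the per-packet crc16 check documented above is omitted, and the -EAGAIN convention for "more packets follow" is the sketch's own.

static int example_reassemble(const struct spi_packet *p, u8 *buf,
			      size_t buf_size, size_t *msg_len)
{
	u16 offset = le16_to_cpu(p->offset);
	u16 length = le16_to_cpu(p->length);
	u16 remaining = le16_to_cpu(p->remaining);

	if (length > sizeof(p->data) || (size_t)offset + length > buf_size)
		return -EINVAL;

	memcpy(buf + offset, p->data, length);

	if (remaining)
		return -EAGAIN;		/* more packets of this message follow */

	*msg_len = offset + length;	/* buf now holds one complete message */
	return 0;
}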
+
+struct spi_settings {
+ u64 spi_cs_delay; /* cs-to-clk delay in us */
+ u64 reset_a2r_usec; /* active-to-receive delay? */
+ u64 reset_rec_usec; /* ? (cur val: 10) */
+};
+
+/* this mimics struct drm_rect */
+struct applespi_tp_info {
+ int x_min;
+ int y_min;
+ int x_max;
+ int y_max;
+};
+
+struct applespi_data {
+ struct spi_device *spi;
+ struct spi_settings spi_settings;
+ struct input_dev *keyboard_input_dev;
+ struct input_dev *touchpad_input_dev;
+
+ u8 *tx_buffer;
+ u8 *tx_status;
+ u8 *rx_buffer;
+
+ u8 *msg_buf;
+ unsigned int saved_msg_len;
+
+ struct applespi_tp_info tp_info;
+
+ u8 last_keys_pressed[MAX_ROLLOVER];
+ u8 last_keys_fn_pressed[MAX_ROLLOVER];
+ u8 last_fn_pressed;
+ struct input_mt_pos pos[MAX_FINGERS];
+ int slots[MAX_FINGERS];
+ int gpe;
+ acpi_handle sien;
+ acpi_handle sist;
+
+ struct spi_transfer dl_t;
+ struct spi_transfer rd_t;
+ struct spi_message rd_m;
+
+ struct spi_transfer ww_t;
+ struct spi_transfer wd_t;
+ struct spi_transfer wr_t;
+ struct spi_transfer st_t;
+ struct spi_message wr_m;
+
+ bool want_tp_info_cmd;
+ bool want_mt_init_cmd;
+ bool want_cl_led_on;
+ bool have_cl_led_on;
+ unsigned int want_bl_level;
+ unsigned int have_bl_level;
+ unsigned int cmd_msg_cntr;
+ /* lock to protect the above parameters and flags below */
+ spinlock_t cmd_msg_lock;
+ bool cmd_msg_queued;
+ enum applespi_evt_type cmd_evt_type;
+
+ struct led_classdev backlight_info;
+
+ bool suspended;
+ bool drain;
+ wait_queue_head_t drain_complete;
+ bool read_active;
+ bool write_active;
+
+ struct work_struct work;
+ struct touchpad_info_protocol rcvd_tp_info;
+
+ struct dentry *debugfs_root;
+ bool debug_tp_dim;
+ char tp_dim_val[40];
+ int tp_dim_min_x;
+ int tp_dim_max_x;
+ int tp_dim_min_y;
+ int tp_dim_max_y;
+};
+
+static const unsigned char applespi_scancodes[] = {
+ 0, 0, 0, 0,
+ KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_J,
+ KEY_K, KEY_L, KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T,
+ KEY_U, KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z,
+ KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_0,
+ KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE, KEY_MINUS,
+ KEY_EQUAL, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH, 0,
+ KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA, KEY_DOT, KEY_SLASH,
+ KEY_CAPSLOCK,
+ KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7, KEY_F8, KEY_F9,
+ KEY_F10, KEY_F11, KEY_F12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ KEY_RIGHT, KEY_LEFT, KEY_DOWN, KEY_UP,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_102ND,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_RO, 0, KEY_YEN, 0, 0, 0, 0, 0,
+ 0, KEY_KATAKANAHIRAGANA, KEY_MUHENKAN
+};
+
+/*
+ * This must have exactly as many entries as there are bits in
+ * struct keyboard_protocol.modifiers .
+ */
+static const unsigned char applespi_controlcodes[] = {
+ KEY_LEFTCTRL,
+ KEY_LEFTSHIFT,
+ KEY_LEFTALT,
+ KEY_LEFTMETA,
+ 0,
+ KEY_RIGHTSHIFT,
+ KEY_RIGHTALT,
+ KEY_RIGHTMETA
+};
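Editor's sketch: the requirement stated in the comment above (one entry per bit of keyboard_protocol.modifiers) could be enforced at compile time; this assertion is illustrative and not part of the original patch.

_Static_assert(sizeof(applespi_controlcodes) / sizeof(applespi_controlcodes[0]) ==
	       8 * sizeof(((struct keyboard_protocol *)0)->modifiers),
	       "one control code per modifier bit");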
+
+struct applespi_key_translation {
+ u16 from;
+ u16 to;
+ u8 flags;
+};
+
+static const struct applespi_key_translation applespi_fn_codes[] = {
+ { KEY_BACKSPACE, KEY_DELETE },
+ { KEY_ENTER, KEY_INSERT },
+ { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
+ { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
+ { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
+ { KEY_F4, KEY_DASHBOARD, APPLE_FLAG_FKEY },
+ { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY },
+ { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY },
+ { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
+ { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
+ { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
+ { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY },
+ { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
+ { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
+ { KEY_RIGHT, KEY_END },
+ { KEY_LEFT, KEY_HOME },
+ { KEY_DOWN, KEY_PAGEDOWN },
+ { KEY_UP, KEY_PAGEUP },
+ { }
+};
+
+static const struct applespi_key_translation apple_iso_keyboard[] = {
+ { KEY_GRAVE, KEY_102ND },
+ { KEY_102ND, KEY_GRAVE },
+ { }
+};
+
+struct applespi_tp_model_info {
+ u16 model;
+ struct applespi_tp_info tp_info;
+};
+
+static const struct applespi_tp_model_info applespi_tp_models[] = {
+ {
+ .model = 0x04, /* MB8 MB9 MB10 */
+ .tp_info = { -5087, -182, 5579, 6089 },
+ },
+ {
+ .model = 0x05, /* MBP13,1 MBP13,2 MBP14,1 MBP14,2 */
+ .tp_info = { -6243, -170, 6749, 7685 },
+ },
+ {
+ .model = 0x06, /* MBP13,3 MBP14,3 */
+ .tp_info = { -7456, -163, 7976, 9283 },
+ },
+ {}
+};
+
+typedef void (*applespi_trace_fun)(enum applespi_evt_type,
+ enum applespi_pkt_type, u8 *, size_t);
+
+static applespi_trace_fun applespi_get_trace_fun(enum applespi_evt_type type)
+{
+ switch (type) {
+ case ET_CMD_TP_INI:
+ return trace_applespi_tp_ini_cmd;
+ case ET_CMD_BL:
+ return trace_applespi_backlight_cmd;
+ case ET_CMD_CL:
+ return trace_applespi_caps_lock_cmd;
+ case ET_RD_KEYB:
+ return trace_applespi_keyboard_data;
+ case ET_RD_TPAD:
+ return trace_applespi_touchpad_data;
+ case ET_RD_UNKN:
+ return trace_applespi_unknown_data;
+ default:
+ WARN_ONCE(1, "Unknown msg type %d", type);
+ return trace_applespi_unknown_data;
+ }
+}
+
+static void applespi_setup_read_txfrs(struct applespi_data *applespi)
+{
+ struct spi_message *msg = &applespi->rd_m;
+ struct spi_transfer *dl_t = &applespi->dl_t;
+ struct spi_transfer *rd_t = &applespi->rd_t;
+
+ memset(dl_t, 0, sizeof(*dl_t));
+ memset(rd_t, 0, sizeof(*rd_t));
+
+ dl_t->delay_usecs = applespi->spi_settings.spi_cs_delay;
+
+ rd_t->rx_buf = applespi->rx_buffer;
+ rd_t->len = APPLESPI_PACKET_SIZE;
+
+ spi_message_init(msg);
+ spi_message_add_tail(dl_t, msg);
+ spi_message_add_tail(rd_t, msg);
+}
+
+static void applespi_setup_write_txfrs(struct applespi_data *applespi)
+{
+ struct spi_message *msg = &applespi->wr_m;
+ struct spi_transfer *wt_t = &applespi->ww_t;
+ struct spi_transfer *dl_t = &applespi->wd_t;
+ struct spi_transfer *wr_t = &applespi->wr_t;
+ struct spi_transfer *st_t = &applespi->st_t;
+
+ memset(wt_t, 0, sizeof(*wt_t));
+ memset(dl_t, 0, sizeof(*dl_t));
+ memset(wr_t, 0, sizeof(*wr_t));
+ memset(st_t, 0, sizeof(*st_t));
+
+ /*
+ * All we need here is a delay at the beginning of the message before
+ * asserting cs. But the current spi API doesn't support this, so we
+ * end up with an extra unnecessary (but harmless) cs assertion and
+ * deassertion.
+ */
+ wt_t->delay_usecs = SPI_RW_CHG_DELAY_US;
+ wt_t->cs_change = 1;
+
+ dl_t->delay_usecs = applespi->spi_settings.spi_cs_delay;
+
+ wr_t->tx_buf = applespi->tx_buffer;
+ wr_t->len = APPLESPI_PACKET_SIZE;
+ wr_t->delay_usecs = SPI_RW_CHG_DELAY_US;
+
+ st_t->rx_buf = applespi->tx_status;
+ st_t->len = APPLESPI_STATUS_SIZE;
+
+ spi_message_init(msg);
+ spi_message_add_tail(wt_t, msg);
+ spi_message_add_tail(dl_t, msg);
+ spi_message_add_tail(wr_t, msg);
+ spi_message_add_tail(st_t, msg);
+}
+
+static int applespi_async(struct applespi_data *applespi,
+ struct spi_message *message, void (*complete)(void *))
+{
+ message->complete = complete;
+ message->context = applespi;
+
+ return spi_async(applespi->spi, message);
+}
+
+static inline bool applespi_check_write_status(struct applespi_data *applespi,
+ int sts)
+{
+ static u8 status_ok[] = { 0xac, 0x27, 0x68, 0xd5 };
+
+ if (sts < 0) {
+ dev_warn(&applespi->spi->dev, "Error writing to device: %d\n",
+ sts);
+ return false;
+ }
+
+ if (memcmp(applespi->tx_status, status_ok, APPLESPI_STATUS_SIZE)) {
+ dev_warn(&applespi->spi->dev, "Error writing to device: %*ph\n",
+ APPLESPI_STATUS_SIZE, applespi->tx_status);
+ return false;
+ }
+
+ return true;
+}
+
+static int applespi_get_spi_settings(struct applespi_data *applespi)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&applespi->spi->dev);
+ const union acpi_object *o;
+ struct spi_settings *settings = &applespi->spi_settings;
+
+ if (!acpi_dev_get_property(adev, "spiCSDelay", ACPI_TYPE_BUFFER, &o))
+ settings->spi_cs_delay = *(u64 *)o->buffer.pointer;
+ else
+ dev_warn(&applespi->spi->dev,
+ "Property spiCSDelay not found\n");
+
+ if (!acpi_dev_get_property(adev, "resetA2RUsec", ACPI_TYPE_BUFFER, &o))
+ settings->reset_a2r_usec = *(u64 *)o->buffer.pointer;
+ else
+ dev_warn(&applespi->spi->dev,
+ "Property resetA2RUsec not found\n");
+
+ if (!acpi_dev_get_property(adev, "resetRecUsec", ACPI_TYPE_BUFFER, &o))
+ settings->reset_rec_usec = *(u64 *)o->buffer.pointer;
+ else
+ dev_warn(&applespi->spi->dev,
+ "Property resetRecUsec not found\n");
+
+ dev_dbg(&applespi->spi->dev,
+ "SPI settings: spi_cs_delay=%llu reset_a2r_usec=%llu reset_rec_usec=%llu\n",
+ settings->spi_cs_delay, settings->reset_a2r_usec,
+ settings->reset_rec_usec);
+
+ return 0;
+}
+
+static int applespi_setup_spi(struct applespi_data *applespi)
+{
+ int sts;
+
+ sts = applespi_get_spi_settings(applespi);
+ if (sts)
+ return sts;
+
+ spin_lock_init(&applespi->cmd_msg_lock);
+ init_waitqueue_head(&applespi->drain_complete);
+
+ return 0;
+}
+
+static int applespi_enable_spi(struct applespi_data *applespi)
+{
+ acpi_status acpi_sts;
+ unsigned long long spi_status;
+
+ /* check if SPI is already enabled, so we can skip the delay below */
+ acpi_sts = acpi_evaluate_integer(applespi->sist, NULL, NULL,
+ &spi_status);
+ if (ACPI_SUCCESS(acpi_sts) && spi_status)
+ return 0;
+
+ /* SIEN(1) will enable SPI communication */
+ acpi_sts = acpi_execute_simple_method(applespi->sien, NULL, 1);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(&applespi->spi->dev, "SIEN failed: %s\n",
+ acpi_format_exception(acpi_sts));
+ return -ENODEV;
+ }
+
+ /*
+ * Allow the SPI interface to come up before returning. Without this
+ * delay, the SPI commands to enable multitouch mode may not reach
+ * the trackpad controller, causing pointer movement to break upon
+ * resume from sleep.
+ */
+ msleep(50);
+
+ return 0;
+}
+
+static int applespi_send_cmd_msg(struct applespi_data *applespi);
+
+static void applespi_msg_complete(struct applespi_data *applespi,
+ bool is_write_msg, bool is_read_compl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ if (is_read_compl)
+ applespi->read_active = false;
+ if (is_write_msg)
+ applespi->write_active = false;
+
+ if (applespi->drain && !applespi->write_active)
+ wake_up_all(&applespi->drain_complete);
+
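+ /* the write slot is free again, so send the next pending command, if any */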
+ if (is_write_msg) {
+ applespi->cmd_msg_queued = false;
+ applespi_send_cmd_msg(applespi);
+ }
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+}
+
+static void applespi_async_write_complete(void *context)
+{
+ struct applespi_data *applespi = context;
+ enum applespi_evt_type evt_type = applespi->cmd_evt_type;
+
+ applespi_get_trace_fun(evt_type)(evt_type, PT_WRITE,
+ applespi->tx_buffer,
+ APPLESPI_PACKET_SIZE);
+ applespi_get_trace_fun(evt_type)(evt_type, PT_STATUS,
+ applespi->tx_status,
+ APPLESPI_STATUS_SIZE);
+
+ if (!applespi_check_write_status(applespi, applespi->wr_m.status)) {
+ /*
+ * If we got an error, we presumably won't get the expected
+ * response message either.
+ */
+ applespi_msg_complete(applespi, true, false);
+ }
+}
+
+static int applespi_send_cmd_msg(struct applespi_data *applespi)
+{
+ u16 crc;
+ int sts;
+ struct spi_packet *packet = (struct spi_packet *)applespi->tx_buffer;
+ struct message *message = (struct message *)packet->data;
+ u16 msg_len;
+ u8 device;
+
+ /* check if draining */
+ if (applespi->drain)
+ return 0;
+
+ /* check whether send is in progress */
+ if (applespi->cmd_msg_queued)
+ return 0;
+
+ /* set up packet */
+ memset(packet, 0, APPLESPI_PACKET_SIZE);
+
+ /* are we processing init commands? */
+ if (applespi->want_tp_info_cmd) {
+ applespi->want_tp_info_cmd = false;
+ applespi->want_mt_init_cmd = true;
+ applespi->cmd_evt_type = ET_CMD_TP_INI;
+
+ /* build init command */
+ device = PACKET_DEV_INFO;
+
+ message->type = cpu_to_le16(0x1020);
+ msg_len = sizeof(message->tp_info_command);
+
+ message->zero = 0x02;
+ message->rsp_buf_len = cpu_to_le16(0x0200);
+
+ } else if (applespi->want_mt_init_cmd) {
+ applespi->want_mt_init_cmd = false;
+ applespi->cmd_evt_type = ET_CMD_TP_INI;
+
+ /* build init command */
+ device = PACKET_DEV_TPAD;
+
+ message->type = cpu_to_le16(0x0252);
+ msg_len = sizeof(message->init_mt_command);
+
+ message->init_mt_command.cmd = cpu_to_le16(0x0102);
+
+ /* do we need caps-lock command? */
+ } else if (applespi->want_cl_led_on != applespi->have_cl_led_on) {
+ applespi->have_cl_led_on = applespi->want_cl_led_on;
+ applespi->cmd_evt_type = ET_CMD_CL;
+
+ /* build led command */
+ device = PACKET_DEV_KEYB;
+
+ message->type = cpu_to_le16(0x0151);
+ msg_len = sizeof(message->capsl_command);
+
+ message->capsl_command.unknown = 0x01;
+ message->capsl_command.led = applespi->have_cl_led_on ? 2 : 0;
+
+ /* do we need backlight command? */
+ } else if (applespi->want_bl_level != applespi->have_bl_level) {
+ applespi->have_bl_level = applespi->want_bl_level;
+ applespi->cmd_evt_type = ET_CMD_BL;
+
+ /* build command buffer */
+ device = PACKET_DEV_KEYB;
+
+ message->type = cpu_to_le16(0xB051);
+ msg_len = sizeof(message->bl_command);
+
+ message->bl_command.const1 = cpu_to_le16(0x01B0);
+ message->bl_command.level =
+ cpu_to_le16(applespi->have_bl_level);
+
+ if (applespi->have_bl_level > 0)
+ message->bl_command.const2 = cpu_to_le16(0x01F4);
+ else
+ message->bl_command.const2 = cpu_to_le16(0x0001);
+
+ /* everything's up-to-date */
+ } else {
+ return 0;
+ }
+
+ /* finalize packet */
+ packet->flags = PACKET_TYPE_WRITE;
+ packet->device = device;
+ packet->length = cpu_to_le16(MSG_HEADER_SIZE + msg_len);
+
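+ /* 8-bit sequence number that wraps around after 255 */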
+ message->counter = applespi->cmd_msg_cntr++ % (U8_MAX + 1);
+
+ message->length = cpu_to_le16(msg_len - 2);
+ if (!message->rsp_buf_len)
+ message->rsp_buf_len = message->length;
+
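+ /*
+ * Both the message and the enclosing packet carry a CRC: the message
+ * CRC covers the message header and payload and is stored in the last
+ * two bytes of the message, while the packet CRC covers everything up
+ * to the packet's trailing crc16 field.
+ */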
+ crc = crc16(0, (u8 *)message, le16_to_cpu(packet->length) - 2);
+ put_unaligned_le16(crc, &message->data[msg_len - 2]);
+
+ crc = crc16(0, (u8 *)packet, sizeof(*packet) - 2);
+ packet->crc16 = cpu_to_le16(crc);
+
+ /* send command */
+ sts = applespi_async(applespi, &applespi->wr_m,
+ applespi_async_write_complete);
+ if (sts) {
+ dev_warn(&applespi->spi->dev,
+ "Error queueing async write to device: %d\n", sts);
+ return sts;
+ }
+
+ applespi->cmd_msg_queued = true;
+ applespi->write_active = true;
+
+ return 0;
+}
+
+static void applespi_init(struct applespi_data *applespi, bool is_resume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ if (is_resume)
+ applespi->want_mt_init_cmd = true;
+ else
+ applespi->want_tp_info_cmd = true;
+ applespi_send_cmd_msg(applespi);
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+}
+
+static int applespi_set_capsl_led(struct applespi_data *applespi,
+ bool capslock_on)
+{
+ unsigned long flags;
+ int sts;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ applespi->want_cl_led_on = capslock_on;
+ sts = applespi_send_cmd_msg(applespi);
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+
+ return sts;
+}
+
+static void applespi_set_bl_level(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct applespi_data *applespi =
+ container_of(led_cdev, struct applespi_data, backlight_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ if (value == 0) {
+ applespi->want_bl_level = value;
+ } else {
+ /*
+ * The backlight does not turn on till level 32, so we scale
+ * the range here so that from a user's perspective it turns
+ * on at 1.
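+ * (E.g. with a 0..255 input range, a request of 1 maps to roughly
+ * KBD_BL_LEVEL_MIN and 255 to the maximum level; the exact mapping
+ * depends on KBD_BL_LEVEL_ADJ and KBD_BL_LEVEL_SCALE.)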
+ */
+ applespi->want_bl_level =
+ ((value * KBD_BL_LEVEL_ADJ) / KBD_BL_LEVEL_SCALE +
+ KBD_BL_LEVEL_MIN);
+ }
+
+ applespi_send_cmd_msg(applespi);
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+}
+
+static int applespi_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct applespi_data *applespi = input_get_drvdata(dev);
+
+ switch (type) {
+ case EV_LED:
+ applespi_set_capsl_led(applespi, !!test_bit(LED_CAPSL, dev->led));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* lifted from the BCM5974 driver and renamed from raw2int */
+/* convert 16-bit little endian to signed integer */
+static inline int le16_to_int(__le16 x)
+{
+ return (signed short)le16_to_cpu(x);
+}
+
+static void applespi_debug_update_dimensions(struct applespi_data *applespi,
+ const struct tp_finger *f)
+{
+ applespi->tp_dim_min_x = min_t(int, applespi->tp_dim_min_x, f->abs_x);
+ applespi->tp_dim_max_x = max_t(int, applespi->tp_dim_max_x, f->abs_x);
+ applespi->tp_dim_min_y = min_t(int, applespi->tp_dim_min_y, f->abs_y);
+ applespi->tp_dim_max_y = max_t(int, applespi->tp_dim_max_y, f->abs_y);
+}
+
+static int applespi_tp_dim_open(struct inode *inode, struct file *file)
+{
+ struct applespi_data *applespi = inode->i_private;
+
+ file->private_data = applespi;
+
+ snprintf(applespi->tp_dim_val, sizeof(applespi->tp_dim_val),
+ "0x%.4x %dx%d+%u+%u\n",
+ applespi->touchpad_input_dev->id.product,
+ applespi->tp_dim_min_x, applespi->tp_dim_min_y,
+ applespi->tp_dim_max_x - applespi->tp_dim_min_x,
+ applespi->tp_dim_max_y - applespi->tp_dim_min_y);
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t applespi_tp_dim_read(struct file *file, char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct applespi_data *applespi = file->private_data;
+
+ return simple_read_from_buffer(buf, len, off, applespi->tp_dim_val,
+ strlen(applespi->tp_dim_val));
+}
+
+static const struct file_operations applespi_tp_dim_fops = {
+ .owner = THIS_MODULE,
+ .open = applespi_tp_dim_open,
+ .read = applespi_tp_dim_read,
+ .llseek = no_llseek,
+};
+
+static void report_finger_data(struct input_dev *input, int slot,
+ const struct input_mt_pos *pos,
+ const struct tp_finger *f)
+{
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ le16_to_int(f->touch_major) << 1);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ le16_to_int(f->touch_minor) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ le16_to_int(f->tool_major) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MINOR,
+ le16_to_int(f->tool_minor) << 1);
+ input_report_abs(input, ABS_MT_ORIENTATION,
+ MAX_FINGER_ORIENTATION - le16_to_int(f->orientation));
+ input_report_abs(input, ABS_MT_POSITION_X, pos->x);
+ input_report_abs(input, ABS_MT_POSITION_Y, pos->y);
+}
+
+static void report_tp_state(struct applespi_data *applespi,
+ struct touchpad_protocol *t)
+{
+ const struct tp_finger *f;
+ struct input_dev *input;
+ const struct applespi_tp_info *tp_info = &applespi->tp_info;
+ int i, n;
+
+ /* touchpad_input_dev is set async in worker */
+ input = smp_load_acquire(&applespi->touchpad_input_dev);
+ if (!input)
+ return; /* touchpad isn't initialized yet */
+
+ n = 0;
+
+ for (i = 0; i < t->number_of_fingers; i++) {
+ f = &t->fingers[i];
+ if (le16_to_int(f->touch_major) == 0)
+ continue;
+ applespi->pos[n].x = le16_to_int(f->abs_x);
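+ /* the touchpad reports y with the axis inverted, so flip it here */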
+ applespi->pos[n].y = tp_info->y_min + tp_info->y_max -
+ le16_to_int(f->abs_y);
+ n++;
+
+ if (applespi->debug_tp_dim)
+ applespi_debug_update_dimensions(applespi, f);
+ }
+
+ input_mt_assign_slots(input, applespi->slots, applespi->pos, n, 0);
+
+ for (i = 0; i < n; i++)
+ report_finger_data(input, applespi->slots[i],
+ &applespi->pos[i], &t->fingers[i]);
+
+ input_mt_sync_frame(input);
+ input_report_key(input, BTN_LEFT, t->clicked);
+
+ input_sync(input);
+}
+
+static const struct applespi_key_translation *
+applespi_find_translation(const struct applespi_key_translation *table, u16 key)
+{
+ const struct applespi_key_translation *trans;
+
+ for (trans = table; trans->from; trans++)
+ if (trans->from == key)
+ return trans;
+
+ return NULL;
+}
+
+static unsigned int applespi_translate_fn_key(unsigned int key, int fn_pressed)
+{
+ const struct applespi_key_translation *trans;
+ int do_translate;
+
+ trans = applespi_find_translation(applespi_fn_codes, key);
+ if (trans) {
+ if (trans->flags & APPLE_FLAG_FKEY)
+ do_translate = (fnmode == 2 && fn_pressed) ||
+ (fnmode == 1 && !fn_pressed);
+ else
+ do_translate = fn_pressed;
+
+ if (do_translate)
+ key = trans->to;
+ }
+
+ return key;
+}
+
+static unsigned int applespi_translate_iso_layout(unsigned int key)
+{
+ const struct applespi_key_translation *trans;
+
+ trans = applespi_find_translation(apple_iso_keyboard, key);
+ if (trans)
+ key = trans->to;
+
+ return key;
+}
+
+static unsigned int applespi_code_to_key(u8 code, int fn_pressed)
+{
+ unsigned int key = applespi_scancodes[code];
+
+ if (fnmode)
+ key = applespi_translate_fn_key(key, fn_pressed);
+ if (iso_layout)
+ key = applespi_translate_iso_layout(key);
+ return key;
+}
+
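+ /*
+ * Swap the reported state of the Fn key with that of the modifier key
+ * selected by the fnremap module parameter.
+ */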
+static void
+applespi_remap_fn_key(struct keyboard_protocol *keyboard_protocol)
+{
+ unsigned char tmp;
+ u8 bit = BIT((fnremap - 1) & 0x07);
+
+ if (!fnremap || fnremap > ARRAY_SIZE(applespi_controlcodes) ||
+ !applespi_controlcodes[fnremap - 1])
+ return;
+
+ tmp = keyboard_protocol->fn_pressed;
+ keyboard_protocol->fn_pressed = !!(keyboard_protocol->modifiers & bit);
+ if (tmp)
+ keyboard_protocol->modifiers |= bit;
+ else
+ keyboard_protocol->modifiers &= ~bit;
+}
+
+static void
+applespi_handle_keyboard_event(struct applespi_data *applespi,
+ struct keyboard_protocol *keyboard_protocol)
+{
+ unsigned int key;
+ int i;
+
+ compiletime_assert(ARRAY_SIZE(applespi_controlcodes) ==
+ sizeof_field(struct keyboard_protocol, modifiers) * 8,
+ "applespi_controlcodes has wrong number of entries");
+
+ /* check for rollover overflow, which is signalled by all keys == 1 */
+ if (!memchr_inv(keyboard_protocol->keys_pressed, 1, MAX_ROLLOVER))
+ return;
+
+ /* remap fn key if desired */
+ applespi_remap_fn_key(keyboard_protocol);
+
+ /* check released keys */
+ for (i = 0; i < MAX_ROLLOVER; i++) {
+ if (memchr(keyboard_protocol->keys_pressed,
+ applespi->last_keys_pressed[i], MAX_ROLLOVER))
+ continue; /* key is still pressed */
+
+ key = applespi_code_to_key(applespi->last_keys_pressed[i],
+ applespi->last_keys_fn_pressed[i]);
+ input_report_key(applespi->keyboard_input_dev, key, 0);
+ applespi->last_keys_fn_pressed[i] = 0;
+ }
+
+ /* check pressed keys */
+ for (i = 0; i < MAX_ROLLOVER; i++) {
+ if (keyboard_protocol->keys_pressed[i] <
+ ARRAY_SIZE(applespi_scancodes) &&
+ keyboard_protocol->keys_pressed[i] > 0) {
+ key = applespi_code_to_key(
+ keyboard_protocol->keys_pressed[i],
+ keyboard_protocol->fn_pressed);
+ input_report_key(applespi->keyboard_input_dev, key, 1);
+ applespi->last_keys_fn_pressed[i] =
+ keyboard_protocol->fn_pressed;
+ }
+ }
+
+ /* check control keys */
+ for (i = 0; i < ARRAY_SIZE(applespi_controlcodes); i++) {
+ if (keyboard_protocol->modifiers & BIT(i))
+ input_report_key(applespi->keyboard_input_dev,
+ applespi_controlcodes[i], 1);
+ else
+ input_report_key(applespi->keyboard_input_dev,
+ applespi_controlcodes[i], 0);
+ }
+
+ /* check function key */
+ if (keyboard_protocol->fn_pressed && !applespi->last_fn_pressed)
+ input_report_key(applespi->keyboard_input_dev, KEY_FN, 1);
+ else if (!keyboard_protocol->fn_pressed && applespi->last_fn_pressed)
+ input_report_key(applespi->keyboard_input_dev, KEY_FN, 0);
+ applespi->last_fn_pressed = keyboard_protocol->fn_pressed;
+
+ /* done */
+ input_sync(applespi->keyboard_input_dev);
+ memcpy(&applespi->last_keys_pressed, keyboard_protocol->keys_pressed,
+ sizeof(applespi->last_keys_pressed));
+}
+
+static const struct applespi_tp_info *applespi_find_touchpad_info(u8 model)
+{
+ const struct applespi_tp_model_info *info;
+
+ for (info = applespi_tp_models; info->model; info++) {
+ if (info->model == model)
+ return &info->tp_info;
+ }
+
+ return NULL;
+}
+
+static int
+applespi_register_touchpad_device(struct applespi_data *applespi,
+ struct touchpad_info_protocol *rcvd_tp_info)
+{
+ const struct applespi_tp_info *tp_info;
+ struct input_dev *touchpad_input_dev;
+ int sts;
+
+ /* set up touchpad dimensions */
+ tp_info = applespi_find_touchpad_info(rcvd_tp_info->model_no);
+ if (!tp_info) {
+ dev_warn(&applespi->spi->dev,
+ "Unknown touchpad model %x - falling back to MB8 touchpad\n",
+ rcvd_tp_info->model_no);
+ tp_info = &applespi_tp_models[0].tp_info;
+ }
+
+ applespi->tp_info = *tp_info;
+
+ if (touchpad_dimensions[0]) {
+ int x, y, w, h;
+
+ sts = sscanf(touchpad_dimensions, "%dx%d+%u+%u", &x, &y, &w, &h);
+ if (sts == 4) {
+ dev_info(&applespi->spi->dev,
+ "Overriding touchpad dimensions from module param\n");
+ applespi->tp_info.x_min = x;
+ applespi->tp_info.y_min = y;
+ applespi->tp_info.x_max = x + w;
+ applespi->tp_info.y_max = y + h;
+ } else {
+ dev_warn(&applespi->spi->dev,
+ "Invalid touchpad dimensions '%s': must be in the form XxY+W+H\n",
+ touchpad_dimensions);
+ touchpad_dimensions[0] = '\0';
+ }
+ }
+ if (!touchpad_dimensions[0]) {
+ snprintf(touchpad_dimensions, sizeof(touchpad_dimensions),
+ "%dx%d+%u+%u",
+ applespi->tp_info.x_min,
+ applespi->tp_info.y_min,
+ applespi->tp_info.x_max - applespi->tp_info.x_min,
+ applespi->tp_info.y_max - applespi->tp_info.y_min);
+ }
+
+ /* create touchpad input device */
+ touchpad_input_dev = devm_input_allocate_device(&applespi->spi->dev);
+ if (!touchpad_input_dev) {
+ dev_err(&applespi->spi->dev,
+ "Failed to allocate touchpad input device\n");
+ return -ENOMEM;
+ }
+
+ touchpad_input_dev->name = "Apple SPI Touchpad";
+ touchpad_input_dev->phys = "applespi/input1";
+ touchpad_input_dev->dev.parent = &applespi->spi->dev;
+ touchpad_input_dev->id.bustype = BUS_SPI;
+ touchpad_input_dev->id.vendor = SYNAPTICS_VENDOR_ID;
+ touchpad_input_dev->id.product =
+ rcvd_tp_info->model_no << 8 | rcvd_tp_info->model_flags;
+
+ /* basic properties */
+ input_set_capability(touchpad_input_dev, EV_REL, REL_X);
+ input_set_capability(touchpad_input_dev, EV_REL, REL_Y);
+
+ __set_bit(INPUT_PROP_POINTER, touchpad_input_dev->propbit);
+ __set_bit(INPUT_PROP_BUTTONPAD, touchpad_input_dev->propbit);
+
+ /* finger touch area */
+ input_set_abs_params(touchpad_input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, 5000, 0, 0);
+ input_set_abs_params(touchpad_input_dev, ABS_MT_TOUCH_MINOR,
+ 0, 5000, 0, 0);
+
+ /* finger approach area */
+ input_set_abs_params(touchpad_input_dev, ABS_MT_WIDTH_MAJOR,
+ 0, 5000, 0, 0);
+ input_set_abs_params(touchpad_input_dev, ABS_MT_WIDTH_MINOR,
+ 0, 5000, 0, 0);
+
+ /* finger orientation */
+ input_set_abs_params(touchpad_input_dev, ABS_MT_ORIENTATION,
+ -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION,
+ 0, 0);
+
+ /* finger position */
+ input_set_abs_params(touchpad_input_dev, ABS_MT_POSITION_X,
+ applespi->tp_info.x_min, applespi->tp_info.x_max,
+ 0, 0);
+ input_set_abs_params(touchpad_input_dev, ABS_MT_POSITION_Y,
+ applespi->tp_info.y_min, applespi->tp_info.y_max,
+ 0, 0);
+
+ /* touchpad button */
+ input_set_capability(touchpad_input_dev, EV_KEY, BTN_LEFT);
+
+ /* multitouch */
+ sts = input_mt_init_slots(touchpad_input_dev, MAX_FINGERS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
+ if (sts) {
+ dev_err(&applespi->spi->dev,
+ "failed to initialize slots: %d", sts);
+ return sts;
+ }
+
+ /* register input device */
+ sts = input_register_device(touchpad_input_dev);
+ if (sts) {
+ dev_err(&applespi->spi->dev,
+ "Unable to register touchpad input device (%d)\n", sts);
+ return sts;
+ }
+
+ /* touchpad_input_dev is read async in spi callback */
+ smp_store_release(&applespi->touchpad_input_dev, touchpad_input_dev);
+
+ return 0;
+}
+
+static void applespi_worker(struct work_struct *work)
+{
+ struct applespi_data *applespi =
+ container_of(work, struct applespi_data, work);
+
+ applespi_register_touchpad_device(applespi, &applespi->rcvd_tp_info);
+}
+
+static void applespi_handle_cmd_response(struct applespi_data *applespi,
+ struct spi_packet *packet,
+ struct message *message)
+{
+ if (packet->device == PACKET_DEV_INFO &&
+ le16_to_cpu(message->type) == 0x1020) {
+ /*
+ * We're not allowed to sleep here, but registering an input
+ * device can sleep.
+ */
+ applespi->rcvd_tp_info = message->tp_info;
+ schedule_work(&applespi->work);
+ return;
+ }
+
+ if (le16_to_cpu(message->length) != 0x0000) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received unexpected write response: length=%x\n",
+ le16_to_cpu(message->length));
+ return;
+ }
+
+ if (packet->device == PACKET_DEV_TPAD &&
+ le16_to_cpu(message->type) == 0x0252 &&
+ le16_to_cpu(message->rsp_buf_len) == 0x0002)
+ dev_info(&applespi->spi->dev, "modeswitch done.\n");
+}
+
+static bool applespi_verify_crc(struct applespi_data *applespi, u8 *buffer,
+ size_t buflen)
+{
+ u16 crc;
+
+ crc = crc16(0, buffer, buflen);
+ if (crc) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received corrupted packet (crc mismatch)\n");
+ trace_applespi_bad_crc(ET_RD_CRC, READ, buffer, buflen);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void applespi_debug_print_read_packet(struct applespi_data *applespi,
+ struct spi_packet *packet)
+{
+ unsigned int evt_type;
+
+ if (packet->flags == PACKET_TYPE_READ &&
+ packet->device == PACKET_DEV_KEYB)
+ evt_type = ET_RD_KEYB;
+ else if (packet->flags == PACKET_TYPE_READ &&
+ packet->device == PACKET_DEV_TPAD)
+ evt_type = ET_RD_TPAD;
+ else if (packet->flags == PACKET_TYPE_WRITE)
+ evt_type = applespi->cmd_evt_type;
+ else
+ evt_type = ET_RD_UNKN;
+
+ applespi_get_trace_fun(evt_type)(evt_type, PT_READ, applespi->rx_buffer,
+ APPLESPI_PACKET_SIZE);
+}
+
+static void applespi_got_data(struct applespi_data *applespi)
+{
+ struct spi_packet *packet;
+ struct message *message;
+ unsigned int msg_len;
+ unsigned int off;
+ unsigned int rem;
+ unsigned int len;
+
+ /* process packet header */
+ if (!applespi_verify_crc(applespi, applespi->rx_buffer,
+ APPLESPI_PACKET_SIZE)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ if (applespi->drain) {
+ applespi->read_active = false;
+ applespi->write_active = false;
+
+ wake_up_all(&applespi->drain_complete);
+ }
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+
+ return;
+ }
+
+ packet = (struct spi_packet *)applespi->rx_buffer;
+
+ applespi_debug_print_read_packet(applespi, packet);
+
+ off = le16_to_cpu(packet->offset);
+ rem = le16_to_cpu(packet->remaining);
+ len = le16_to_cpu(packet->length);
+
+ if (len > sizeof(packet->data)) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received corrupted packet (invalid packet length %u)\n",
+ len);
+ goto msg_complete;
+ }
+
+ /* handle multi-packet messages */
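+ /*
+ * 'offset' is the number of message bytes already received and
+ * 'remaining' the number of payload bytes still expected after this
+ * packet; partial messages are accumulated in msg_buf until
+ * 'remaining' reaches zero.
+ */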
+ if (rem > 0 || off > 0) {
+ if (off != applespi->saved_msg_len) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received unexpected offset (got %u, expected %u)\n",
+ off, applespi->saved_msg_len);
+ goto msg_complete;
+ }
+
+ if (off + rem > MAX_PKTS_PER_MSG * APPLESPI_PACKET_SIZE) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received message too large (size %u)\n",
+ off + rem);
+ goto msg_complete;
+ }
+
+ if (off + len > MAX_PKTS_PER_MSG * APPLESPI_PACKET_SIZE) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received message too large (size %u)\n",
+ off + len);
+ goto msg_complete;
+ }
+
+ memcpy(applespi->msg_buf + off, &packet->data, len);
+ applespi->saved_msg_len += len;
+
+ if (rem > 0)
+ return;
+
+ message = (struct message *)applespi->msg_buf;
+ msg_len = applespi->saved_msg_len;
+ } else {
+ message = (struct message *)&packet->data;
+ msg_len = len;
+ }
+
+ /* got complete message - verify */
+ if (!applespi_verify_crc(applespi, (u8 *)message, msg_len))
+ goto msg_complete;
+
+ if (le16_to_cpu(message->length) != msg_len - MSG_HEADER_SIZE - 2) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received corrupted packet (invalid message length %u - expected %u)\n",
+ le16_to_cpu(message->length),
+ msg_len - MSG_HEADER_SIZE - 2);
+ goto msg_complete;
+ }
+
+ /* handle message */
+ if (packet->flags == PACKET_TYPE_READ &&
+ packet->device == PACKET_DEV_KEYB) {
+ applespi_handle_keyboard_event(applespi, &message->keyboard);
+
+ } else if (packet->flags == PACKET_TYPE_READ &&
+ packet->device == PACKET_DEV_TPAD) {
+ struct touchpad_protocol *tp;
+ size_t tp_len;
+
+ tp = &message->touchpad;
+ tp_len = sizeof(*tp) +
+ tp->number_of_fingers * sizeof(tp->fingers[0]);
+
+ if (le16_to_cpu(message->length) + 2 != tp_len) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Received corrupted packet (invalid message length %u - num-fingers %u, tp-len %zu)\n",
+ le16_to_cpu(message->length),
+ tp->number_of_fingers, tp_len);
+ goto msg_complete;
+ }
+
+ if (tp->number_of_fingers > MAX_FINGERS) {
+ dev_warn_ratelimited(&applespi->spi->dev,
+ "Number of reported fingers (%u) exceeds max (%u))\n",
+ tp->number_of_fingers,
+ MAX_FINGERS);
+ tp->number_of_fingers = MAX_FINGERS;
+ }
+
+ report_tp_state(applespi, tp);
+
+ } else if (packet->flags == PACKET_TYPE_WRITE) {
+ applespi_handle_cmd_response(applespi, packet, message);
+ }
+
+msg_complete:
+ applespi->saved_msg_len = 0;
+
+ applespi_msg_complete(applespi, packet->flags == PACKET_TYPE_WRITE,
+ true);
+}
+
+static void applespi_async_read_complete(void *context)
+{
+ struct applespi_data *applespi = context;
+
+ if (applespi->rd_m.status < 0) {
+ dev_warn(&applespi->spi->dev, "Error reading from device: %d\n",
+ applespi->rd_m.status);
+ /*
+ * We don't actually know whether this was a pure read or a response
+ * to a write. But this is a rare error condition that should never
+ * occur, so clear both flags here to avoid a deadlock.
+ */
+ applespi_msg_complete(applespi, true, true);
+ } else {
+ applespi_got_data(applespi);
+ }
+
+ acpi_finish_gpe(NULL, applespi->gpe);
+}
+
+static u32 applespi_notify(acpi_handle gpe_device, u32 gpe, void *context)
+{
+ struct applespi_data *applespi = context;
+ int sts;
+ unsigned long flags;
+
+ trace_applespi_irq_received(ET_RD_IRQ, PT_READ);
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ if (!applespi->suspended) {
+ sts = applespi_async(applespi, &applespi->rd_m,
+ applespi_async_read_complete);
+ if (sts)
+ dev_warn(&applespi->spi->dev,
+ "Error queueing async read to device: %d\n",
+ sts);
+ else
+ applespi->read_active = true;
+ }
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static int applespi_get_saved_bl_level(struct applespi_data *applespi)
+{
+ struct efivar_entry *efivar_entry;
+ u16 efi_data = 0;
+ unsigned long efi_data_len;
+ int sts;
+
+ efivar_entry = kmalloc(sizeof(*efivar_entry), GFP_KERNEL);
+ if (!efivar_entry)
+ return -ENOMEM;
+
+ memcpy(efivar_entry->var.VariableName, EFI_BL_LEVEL_NAME,
+ sizeof(EFI_BL_LEVEL_NAME));
+ efivar_entry->var.VendorGuid = EFI_BL_LEVEL_GUID;
+ efi_data_len = sizeof(efi_data);
+
+ sts = efivar_entry_get(efivar_entry, NULL, &efi_data_len, &efi_data);
+ if (sts && sts != -ENOENT)
+ dev_warn(&applespi->spi->dev,
+ "Error getting backlight level from EFI vars: %d\n",
+ sts);
+
+ kfree(efivar_entry);
+
+ return sts ? sts : efi_data;
+}
+
+static void applespi_save_bl_level(struct applespi_data *applespi,
+ unsigned int level)
+{
+ efi_guid_t efi_guid;
+ u32 efi_attr;
+ unsigned long efi_data_len;
+ u16 efi_data;
+ int sts;
+
+ /* Save keyboard backlight level */
+ efi_guid = EFI_BL_LEVEL_GUID;
+ efi_data = (u16)level;
+ efi_data_len = sizeof(efi_data);
+ efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
+
+ sts = efivar_entry_set_safe(EFI_BL_LEVEL_NAME, efi_guid, efi_attr, true,
+ efi_data_len, &efi_data);
+ if (sts)
+ dev_warn(&applespi->spi->dev,
+ "Error saving backlight level to EFI vars: %d\n", sts);
+}
+
+static int applespi_probe(struct spi_device *spi)
+{
+ struct applespi_data *applespi;
+ acpi_handle spi_handle = ACPI_HANDLE(&spi->dev);
+ acpi_status acpi_sts;
+ int sts, i;
+ unsigned long long gpe, usb_status;
+
+ /* check if the USB interface is present and enabled already */
+ acpi_sts = acpi_evaluate_integer(spi_handle, "UIST", NULL, &usb_status);
+ if (ACPI_SUCCESS(acpi_sts) && usb_status) {
+ /* let the USB driver take over instead */
+ dev_info(&spi->dev, "USB interface already enabled\n");
+ return -ENODEV;
+ }
+
+ /* allocate driver data */
+ applespi = devm_kzalloc(&spi->dev, sizeof(*applespi), GFP_KERNEL);
+ if (!applespi)
+ return -ENOMEM;
+
+ applespi->spi = spi;
+
+ INIT_WORK(&applespi->work, applespi_worker);
+
+ /* store the driver data */
+ spi_set_drvdata(spi, applespi);
+
+ /* create our buffers */
+ applespi->tx_buffer = devm_kmalloc(&spi->dev, APPLESPI_PACKET_SIZE,
+ GFP_KERNEL);
+ applespi->tx_status = devm_kmalloc(&spi->dev, APPLESPI_STATUS_SIZE,
+ GFP_KERNEL);
+ applespi->rx_buffer = devm_kmalloc(&spi->dev, APPLESPI_PACKET_SIZE,
+ GFP_KERNEL);
+ applespi->msg_buf = devm_kmalloc_array(&spi->dev, MAX_PKTS_PER_MSG,
+ APPLESPI_PACKET_SIZE,
+ GFP_KERNEL);
+
+ if (!applespi->tx_buffer || !applespi->tx_status ||
+ !applespi->rx_buffer || !applespi->msg_buf)
+ return -ENOMEM;
+
+ /* set up our spi messages */
+ applespi_setup_read_txfrs(applespi);
+ applespi_setup_write_txfrs(applespi);
+
+ /* cache ACPI method handles */
+ acpi_sts = acpi_get_handle(spi_handle, "SIEN", &applespi->sien);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(&applespi->spi->dev,
+ "Failed to get SIEN ACPI method handle: %s\n",
+ acpi_format_exception(acpi_sts));
+ return -ENODEV;
+ }
+
+ acpi_sts = acpi_get_handle(spi_handle, "SIST", &applespi->sist);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(&applespi->spi->dev,
+ "Failed to get SIST ACPI method handle: %s\n",
+ acpi_format_exception(acpi_sts));
+ return -ENODEV;
+ }
+
+ /* switch on the SPI interface */
+ sts = applespi_setup_spi(applespi);
+ if (sts)
+ return sts;
+
+ sts = applespi_enable_spi(applespi);
+ if (sts)
+ return sts;
+
+ /* setup the keyboard input dev */
+ applespi->keyboard_input_dev = devm_input_allocate_device(&spi->dev);
+
+ if (!applespi->keyboard_input_dev)
+ return -ENOMEM;
+
+ applespi->keyboard_input_dev->name = "Apple SPI Keyboard";
+ applespi->keyboard_input_dev->phys = "applespi/input0";
+ applespi->keyboard_input_dev->dev.parent = &spi->dev;
+ applespi->keyboard_input_dev->id.bustype = BUS_SPI;
+
+ applespi->keyboard_input_dev->evbit[0] =
+ BIT_MASK(EV_KEY) | BIT_MASK(EV_LED) | BIT_MASK(EV_REP);
+ applespi->keyboard_input_dev->ledbit[0] = BIT_MASK(LED_CAPSL);
+
+ input_set_drvdata(applespi->keyboard_input_dev, applespi);
+ applespi->keyboard_input_dev->event = applespi_event;
+
+ for (i = 0; i < ARRAY_SIZE(applespi_scancodes); i++)
+ if (applespi_scancodes[i])
+ input_set_capability(applespi->keyboard_input_dev,
+ EV_KEY, applespi_scancodes[i]);
+
+ for (i = 0; i < ARRAY_SIZE(applespi_controlcodes); i++)
+ if (applespi_controlcodes[i])
+ input_set_capability(applespi->keyboard_input_dev,
+ EV_KEY, applespi_controlcodes[i]);
+
+ for (i = 0; i < ARRAY_SIZE(applespi_fn_codes); i++)
+ if (applespi_fn_codes[i].to)
+ input_set_capability(applespi->keyboard_input_dev,
+ EV_KEY, applespi_fn_codes[i].to);
+
+ input_set_capability(applespi->keyboard_input_dev, EV_KEY, KEY_FN);
+
+ sts = input_register_device(applespi->keyboard_input_dev);
+ if (sts) {
+ dev_err(&applespi->spi->dev,
+ "Unable to register keyboard input device (%d)\n", sts);
+ return -ENODEV;
+ }
+
+ /*
+ * The applespi device doesn't send interrupts normally (as is described
+ * in its DSDT), but rather seems to use ACPI GPEs.
+ */
+ acpi_sts = acpi_evaluate_integer(spi_handle, "_GPE", NULL, &gpe);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(&applespi->spi->dev,
+ "Failed to obtain GPE for SPI slave device: %s\n",
+ acpi_format_exception(acpi_sts));
+ return -ENODEV;
+ }
+ applespi->gpe = (int)gpe;
+
+ acpi_sts = acpi_install_gpe_handler(NULL, applespi->gpe,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ applespi_notify, applespi);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(&applespi->spi->dev,
+ "Failed to install GPE handler for GPE %d: %s\n",
+ applespi->gpe, acpi_format_exception(acpi_sts));
+ return -ENODEV;
+ }
+
+ applespi->suspended = false;
+
+ acpi_sts = acpi_enable_gpe(NULL, applespi->gpe);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(&applespi->spi->dev,
+ "Failed to enable GPE handler for GPE %d: %s\n",
+ applespi->gpe, acpi_format_exception(acpi_sts));
+ acpi_remove_gpe_handler(NULL, applespi->gpe, applespi_notify);
+ return -ENODEV;
+ }
+
+ /* trigger touchpad setup */
+ applespi_init(applespi, false);
+
+ /*
+ * By default this device is not enabled for wakeup; but USB keyboards
+ * generally are, so the expectation is that by default the keyboard
+ * will wake the system.
+ */
+ device_wakeup_enable(&spi->dev);
+
+ /* set up keyboard-backlight */
+ sts = applespi_get_saved_bl_level(applespi);
+ if (sts >= 0)
+ applespi_set_bl_level(&applespi->backlight_info, sts);
+
+ applespi->backlight_info.name = "spi::kbd_backlight";
+ applespi->backlight_info.default_trigger = "kbd-backlight";
+ applespi->backlight_info.brightness_set = applespi_set_bl_level;
+
+ sts = devm_led_classdev_register(&spi->dev, &applespi->backlight_info);
+ if (sts)
+ dev_warn(&applespi->spi->dev,
+ "Unable to register keyboard backlight class dev (%d)\n",
+ sts);
+
+ /* set up debugfs entries for touchpad dimensions logging */
+ applespi->debugfs_root = debugfs_create_dir("applespi", NULL);
+ if (IS_ERR(applespi->debugfs_root)) {
+ if (PTR_ERR(applespi->debugfs_root) != -ENODEV)
+ dev_warn(&applespi->spi->dev,
+ "Error creating debugfs root entry (%ld)\n",
+ PTR_ERR(applespi->debugfs_root));
+ } else {
+ struct dentry *ret;
+
+ ret = debugfs_create_bool("enable_tp_dim", 0600,
+ applespi->debugfs_root,
+ &applespi->debug_tp_dim);
+ if (IS_ERR(ret))
+ dev_dbg(&applespi->spi->dev,
+ "Error creating debugfs entry enable_tp_dim (%ld)\n",
+ PTR_ERR(ret));
+
+ ret = debugfs_create_file("tp_dim", 0400,
+ applespi->debugfs_root, applespi,
+ &applespi_tp_dim_fops);
+ if (IS_ERR(ret))
+ dev_dbg(&applespi->spi->dev,
+ "Error creating debugfs entry tp_dim (%ld)\n",
+ PTR_ERR(ret));
+ }
+
+ return 0;
+}
+
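+ /*
+ * Stop queueing new command messages and wait until any in-flight
+ * write has completed.
+ */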
+static void applespi_drain_writes(struct applespi_data *applespi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ applespi->drain = true;
+ wait_event_lock_irq(applespi->drain_complete, !applespi->write_active,
+ applespi->cmd_msg_lock);
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+}
+
+static void applespi_drain_reads(struct applespi_data *applespi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ wait_event_lock_irq(applespi->drain_complete, !applespi->read_active,
+ applespi->cmd_msg_lock);
+
+ applespi->suspended = true;
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+}
+
+static int applespi_remove(struct spi_device *spi)
+{
+ struct applespi_data *applespi = spi_get_drvdata(spi);
+
+ applespi_drain_writes(applespi);
+
+ acpi_disable_gpe(NULL, applespi->gpe);
+ acpi_remove_gpe_handler(NULL, applespi->gpe, applespi_notify);
+ device_wakeup_disable(&spi->dev);
+
+ applespi_drain_reads(applespi);
+
+ debugfs_remove_recursive(applespi->debugfs_root);
+
+ return 0;
+}
+
+static void applespi_shutdown(struct spi_device *spi)
+{
+ struct applespi_data *applespi = spi_get_drvdata(spi);
+
+ applespi_save_bl_level(applespi, applespi->have_bl_level);
+}
+
+static int applespi_poweroff_late(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct applespi_data *applespi = spi_get_drvdata(spi);
+
+ applespi_save_bl_level(applespi, applespi->have_bl_level);
+
+ return 0;
+}
+
+static int __maybe_unused applespi_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct applespi_data *applespi = spi_get_drvdata(spi);
+ acpi_status acpi_sts;
+ int sts;
+
+ /* turn off caps-lock - it'll stay on otherwise */
+ sts = applespi_set_capsl_led(applespi, false);
+ if (sts)
+ dev_warn(&applespi->spi->dev,
+ "Failed to turn off caps-lock led (%d)\n", sts);
+
+ applespi_drain_writes(applespi);
+
+ /* disable the interrupt */
+ acpi_sts = acpi_disable_gpe(NULL, applespi->gpe);
+ if (ACPI_FAILURE(acpi_sts))
+ dev_err(&applespi->spi->dev,
+ "Failed to disable GPE handler for GPE %d: %s\n",
+ applespi->gpe, acpi_format_exception(acpi_sts));
+
+ applespi_drain_reads(applespi);
+
+ return 0;
+}
+
+static int __maybe_unused applespi_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct applespi_data *applespi = spi_get_drvdata(spi);
+ acpi_status acpi_sts;
+ unsigned long flags;
+
+ /* ensure our flags and state reflect a newly resumed device */
+ spin_lock_irqsave(&applespi->cmd_msg_lock, flags);
+
+ applespi->drain = false;
+ applespi->have_cl_led_on = false;
+ applespi->have_bl_level = 0;
+ applespi->cmd_msg_queued = false;
+ applespi->read_active = false;
+ applespi->write_active = false;
+
+ applespi->suspended = false;
+
+ spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags);
+
+ /* switch on the SPI interface */
+ applespi_enable_spi(applespi);
+
+ /* re-enable the interrupt */
+ acpi_sts = acpi_enable_gpe(NULL, applespi->gpe);
+ if (ACPI_FAILURE(acpi_sts))
+ dev_err(&applespi->spi->dev,
+ "Failed to re-enable GPE handler for GPE %d: %s\n",
+ applespi->gpe, acpi_format_exception(acpi_sts));
+
+ /* switch the touchpad into multitouch mode */
+ applespi_init(applespi, true);
+
+ return 0;
+}
+
+static const struct acpi_device_id applespi_acpi_match[] = {
+ { "APP000D", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, applespi_acpi_match);
+
+static const struct dev_pm_ops applespi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(applespi_suspend, applespi_resume)
+ .poweroff_late = applespi_poweroff_late,
+};
+
+static struct spi_driver applespi_driver = {
+ .driver = {
+ .name = "applespi",
+ .acpi_match_table = applespi_acpi_match,
+ .pm = &applespi_pm_ops,
+ },
+ .probe = applespi_probe,
+ .remove = applespi_remove,
+ .shutdown = applespi_shutdown,
+};
+
+module_spi_driver(applespi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MacBook(Pro) SPI Keyboard/Touchpad driver");
+MODULE_AUTHOR("Federico Lorenzi");
+MODULE_AUTHOR("Ronald Tschalär");
diff --git a/drivers/input/keyboard/applespi.h b/drivers/input/keyboard/applespi.h
new file mode 100644
index 000000000000..7f5ab10c597a
--- /dev/null
+++ b/drivers/input/keyboard/applespi.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MacBook (Pro) SPI keyboard and touchpad driver
+ *
+ * Copyright (c) 2015-2019 Federico Lorenzi
+ * Copyright (c) 2017-2019 Ronald Tschalär
+ */
+
+#ifndef _APPLESPI_H_
+#define _APPLESPI_H_
+
+enum applespi_evt_type {
+ ET_CMD_TP_INI = BIT(0),
+ ET_CMD_BL = BIT(1),
+ ET_CMD_CL = BIT(2),
+ ET_RD_KEYB = BIT(8),
+ ET_RD_TPAD = BIT(9),
+ ET_RD_UNKN = BIT(10),
+ ET_RD_IRQ = BIT(11),
+ ET_RD_CRC = BIT(12),
+};
+
+enum applespi_pkt_type {
+ PT_READ,
+ PT_WRITE,
+ PT_STATUS,
+};
+
+#endif /* _APPLESPI_H_ */
diff --git a/drivers/input/keyboard/applespi_trace.h b/drivers/input/keyboard/applespi_trace.h
new file mode 100644
index 000000000000..0ad1a3d79f50
--- /dev/null
+++ b/drivers/input/keyboard/applespi_trace.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MacBook (Pro) SPI keyboard and touchpad driver
+ *
+ * Copyright (c) 2015-2019 Federico Lorenzi
+ * Copyright (c) 2017-2019 Ronald Tschalär
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM applespi
+
+#if !defined(_APPLESPI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _APPLESPI_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include "applespi.h"
+
+DECLARE_EVENT_CLASS(dump_message_template,
+ TP_PROTO(enum applespi_evt_type evt_type,
+ enum applespi_pkt_type pkt_type,
+ u8 *buf,
+ size_t len),
+
+ TP_ARGS(evt_type, pkt_type, buf, len),
+
+ TP_STRUCT__entry(
+ __field(enum applespi_evt_type, evt_type)
+ __field(enum applespi_pkt_type, pkt_type)
+ __field(size_t, len)
+ __dynamic_array(u8, buf, len)
+ ),
+
+ TP_fast_assign(
+ __entry->evt_type = evt_type;
+ __entry->pkt_type = pkt_type;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
+ ),
+
+ TP_printk("%-6s: %s",
+ __print_symbolic(__entry->pkt_type,
+ { PT_READ, "read" },
+ { PT_WRITE, "write" },
+ { PT_STATUS, "status" }
+ ),
+ __print_hex(__get_dynamic_array(buf), __entry->len))
+);
+
+#define DEFINE_DUMP_MESSAGE_EVENT(name) \
+DEFINE_EVENT(dump_message_template, name, \
+ TP_PROTO(enum applespi_evt_type evt_type, \
+ enum applespi_pkt_type pkt_type, \
+ u8 *buf, \
+ size_t len), \
+ TP_ARGS(evt_type, pkt_type, buf, len) \
+)
+
+DEFINE_DUMP_MESSAGE_EVENT(applespi_tp_ini_cmd);
+DEFINE_DUMP_MESSAGE_EVENT(applespi_backlight_cmd);
+DEFINE_DUMP_MESSAGE_EVENT(applespi_caps_lock_cmd);
+DEFINE_DUMP_MESSAGE_EVENT(applespi_keyboard_data);
+DEFINE_DUMP_MESSAGE_EVENT(applespi_touchpad_data);
+DEFINE_DUMP_MESSAGE_EVENT(applespi_unknown_data);
+DEFINE_DUMP_MESSAGE_EVENT(applespi_bad_crc);
+
+TRACE_EVENT(applespi_irq_received,
+ TP_PROTO(enum applespi_evt_type evt_type,
+ enum applespi_pkt_type pkt_type),
+
+ TP_ARGS(evt_type, pkt_type),
+
+ TP_STRUCT__entry(
+ __field(enum applespi_evt_type, evt_type)
+ __field(enum applespi_pkt_type, pkt_type)
+ ),
+
+ TP_fast_assign(
+ __entry->evt_type = evt_type;
+ __entry->pkt_type = pkt_type;
+ ),
+
+ "\n"
+);
+
+#endif /* _APPLESPI_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/input/keyboard
+#define TRACE_INCLUDE_FILE applespi_trace
+#include <trace/define_trace.h>
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 746ff06eaf8d..62391d6c7da6 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -277,8 +277,10 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
keys->keys[index].regs = &mtk_pmic_regs->keys_regs[index];
keys->keys[index].irq = platform_get_irq(pdev, index);
- if (keys->keys[index].irq < 0)
+ if (keys->keys[index].irq < 0) {
+ of_node_put(child);
return keys->keys[index].irq;
+ }
error = of_property_read_u32(child,
"linux,keycodes", &keys->keys[index].keycode);
@@ -286,6 +288,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
dev_err(keys->dev,
"failed to read key:%d linux,keycode property: %d\n",
index, error);
+ of_node_put(child);
return error;
}
@@ -293,8 +296,10 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
keys->keys[index].wakeup = true;
error = mtk_pmic_key_setup(keys, &keys->keys[index]);
- if (error)
+ if (error) {
+ of_node_put(child);
return error;
+ }
index++;
}
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
index 6ffdc26b9c89..4a796bed48ac 100644
--- a/drivers/input/keyboard/sun4i-lradc-keys.c
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -198,18 +198,21 @@ static int sun4i_lradc_load_dt_keymap(struct device *dev,
error = of_property_read_u32(pp, "channel", &channel);
if (error || channel != 0) {
dev_err(dev, "%pOFn: Inval channel prop\n", pp);
+ of_node_put(pp);
return -EINVAL;
}
error = of_property_read_u32(pp, "voltage", &map->voltage);
if (error) {
dev_err(dev, "%pOFn: Inval voltage prop\n", pp);
+ of_node_put(pp);
return -EINVAL;
}
error = of_property_read_u32(pp, "linux,code", &map->keycode);
if (error) {
dev_err(dev, "%pOFn: Inval linux,code prop\n", pp);
+ of_node_put(pp);
return -EINVAL;
}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 8996323ce8d9..34700eda0429 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -21,6 +21,7 @@
#include "psmouse.h"
#include "alps.h"
+#include "trackpoint.h"
/*
* Definitions for ALPS version 3 and 4 command mode protocol
@@ -2861,6 +2862,23 @@ static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
return NULL;
}
+static bool alps_is_cs19_trackpoint(struct psmouse *psmouse)
+{
+ u8 param[2] = { 0 };
+
+ if (ps2_command(&psmouse->ps2dev,
+ param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ return false;
+
+ /*
+ * param[0] contains the trackpoint device variant_id while
+ * param[1] contains the firmware_id. So far all ALPS
+ * trackpoint-only devices have their variant_ids equal to
+ * TP_VARIANT_ALPS and their firmware_ids in the 0x20~0x2f range.
+ */
+ return param[0] == TP_VARIANT_ALPS && ((param[1] & 0xf0) == 0x20);
+}
+
static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
{
const struct alps_protocol_info *protocol;
@@ -3162,6 +3180,20 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
return error;
/*
+ * ALPS cs19 is a trackpoint-only device and uses a different
+ * protocol than DualPoint ones, so we return -EINVAL here and let
+ * trackpoint.c drive this device. If the trackpoint driver is not
+ * enabled, the device will fall back to a bare PS/2 mouse.
+ * If ps2_command() fails here, we rely on the psmouse_reset() that
+ * immediately follows to return the device to its normal state.
+ */
+ if (alps_is_cs19_trackpoint(psmouse)) {
+ psmouse_dbg(psmouse,
+ "ALPS CS19 trackpoint-only device detected, ignoring\n");
+ return -EINVAL;
+ }
+
+ /*
* Reset the device to make sure it is fully operational:
* on some laptops, like certain Dell Latitudes, we may
* fail to properly detect presence of trackstick if device
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 1080c0c49815..b1956ed4c0dd 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -176,6 +176,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
@@ -705,7 +706,7 @@ static void synaptics_pt_create(struct psmouse *psmouse)
serio->id.type = SERIO_PS_PSTHRU;
strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
- strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->name));
+ strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->phys));
serio->write = synaptics_pt_write;
serio->start = synaptics_pt_start;
serio->stop = synaptics_pt_stop;
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 0afffe8d824f..77110f3ec21d 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -158,7 +158,8 @@ struct trackpoint_data {
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
#else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+ bool set_properties)
{
return -ENOSYS;
}
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 8e457e50f837..88ae7c2ac3c8 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -75,8 +75,8 @@ struct synth_kbd_keystroke {
#define HK_MAXIMUM_MESSAGE_SIZE 256
-#define KBD_VSC_SEND_RING_BUFFER_SIZE (10 * PAGE_SIZE)
-#define KBD_VSC_RECV_RING_BUFFER_SIZE (10 * PAGE_SIZE)
+#define KBD_VSC_SEND_RING_BUFFER_SIZE (40 * 1024)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE (40 * 1024)
#define XTKBD_EMUL0 0xe0
#define XTKBD_EMUL1 0xe1
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 4b8b9d7aa75e..35031228a6d0 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com
/* Max size of a single report */
#define REPORT_MAX_SIZE 10
+#define MAX_COLLECTION_LEVELS 10
/* Bitmask whether pen is in range */
@@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
char maintype = 'x';
char globtype[12];
int indent = 0;
- char indentstr[10] = "";
-
+ char indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };
dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
@@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case TAG_MAIN_COL_START:
maintype = 'S';
+ if (indent == MAX_COLLECTION_LEVELS) {
+ dev_err(ddev, "Collection level %d would exceed limit of %d\n",
+ indent + 1,
+ MAX_COLLECTION_LEVELS);
+ break;
+ }
+
if (data == 0) {
dev_dbg(ddev, "======>>>>>> Physical\n");
strcpy(globtype, "Physical");
@@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
break;
case TAG_MAIN_COL_END:
- dev_dbg(ddev, "<<<<<<======\n");
maintype = 'E';
+
+ if (indent == 0) {
+ dev_err(ddev, "Collection level already at zero\n");
+ break;
+ }
+
+ dev_dbg(ddev, "<<<<<<======\n");
+
indent--;
for (x = 0; x < indent; x++)
indentstr[x] = '-';
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 8e48fbda487a..8e9f3b7b8180 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -602,9 +602,8 @@ static int auo_pixcir_probe(struct i2c_client *client,
return error;
}
- error = devm_add_action(&client->dev, auo_pixcir_reset, ts);
+ error = devm_add_action_or_reset(&client->dev, auo_pixcir_reset, ts);
if (error) {
- auo_pixcir_reset(ts);
dev_err(&client->dev, "failed to register reset action, %d\n",
error);
return error;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 73740b969e62..b607a92791d3 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2533,7 +2533,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
npages = sg_num_pages(dev, sglist, nelems);
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (address == DMA_MAPPING_ERROR)
+ if (!address)
goto out_err;
prot = dir2prot(direction);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 4c99739b937e..0e224232f746 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1955,6 +1955,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
+ if (idx > 15)
+ return -EIO;
+
if (ep_addr & 0x80)
idx++;
attr = ep->desc.bmAttributes;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 671c24332802..df2011de7be2 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -28,10 +28,27 @@
#include "dm-core.h"
-#define SUB_JOB_SIZE 128
#define SPLIT_COUNT 8
#define MIN_JOBS 8
-#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
+#define DEFAULT_SUB_JOB_SIZE_KB 512
+#define MAX_SUB_JOB_SIZE_KB 1024
+
+static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+
+module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
+
+static unsigned dm_get_kcopyd_subjob_size(void)
+{
+ unsigned sub_job_size_kb;
+
+ sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
+ DEFAULT_SUB_JOB_SIZE_KB,
+ MAX_SUB_JOB_SIZE_KB);
+
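+ /* convert from KiB to 512-byte sectors */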
+ return sub_job_size_kb << 1;
+}
/*-----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
@@ -41,6 +58,7 @@ struct dm_kcopyd_client {
struct page_list *pages;
unsigned nr_reserved_pages;
unsigned nr_free_pages;
+ unsigned sub_job_size;
struct dm_io_client *io_client;
@@ -693,8 +711,8 @@ static void segment_complete(int read_err, unsigned long write_err,
progress = job->progress;
count = job->source.count - progress;
if (count) {
- if (count > SUB_JOB_SIZE)
- count = SUB_JOB_SIZE;
+ if (count > kc->sub_job_size)
+ count = kc->sub_job_size;
job->progress += count;
}
@@ -821,7 +839,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->master_job = job;
job->write_offset = 0;
- if (job->source.count <= SUB_JOB_SIZE)
+ if (job->source.count <= kc->sub_job_size)
dispatch_job(job);
else {
job->progress = 0;
@@ -888,6 +906,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
+ unsigned reserve_pages;
struct dm_kcopyd_client *kc;
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
@@ -912,9 +931,12 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
goto bad_workqueue;
}
+ kc->sub_job_size = dm_get_kcopyd_subjob_size();
+ reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);
+
kc->pages = NULL;
kc->nr_reserved_pages = kc->nr_free_pages = 0;
- r = client_reserve_pages(kc, RESERVE_PAGES);
+ r = client_reserve_pages(kc, reserve_pages);
if (r)
goto bad_client_pages;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 63916e1dc569..f150f5c5492b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2072,6 +2072,12 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ /* Once merging, discards no longer effect change */
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
@@ -2331,6 +2337,8 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (snap->discard_zeroes_cow) {
struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+ down_read(&_origins_lock);
+
(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest)
snap = snap_src;
@@ -2338,6 +2346,8 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
/* All discards are split on chunk_size boundary */
limits->discard_granularity = snap->store->chunk_size;
limits->max_discard_sectors = snap->store->chunk_size;
+
+ up_read(&_origins_lock);
}
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 9faf3e49c7af..8545dcee9fd0 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1602,30 +1602,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
}
/*
- * Activate a zone (increment its reference count).
- */
-void dmz_activate_zone(struct dm_zone *zone)
-{
- set_bit(DMZ_ACTIVE, &zone->flags);
- atomic_inc(&zone->refcount);
-}
-
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * and clears the active state of the zone once the count reaches 0,
- * indicating that all BIOs to the zone have completed. Returns
- * true if the zone was deactivated.
- */
-void dmz_deactivate_zone(struct dm_zone *zone)
-{
- if (atomic_dec_and_test(&zone->refcount)) {
- WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
- clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
- smp_mb__after_atomic();
- }
-}
-
-/*
* Get the zone mapping a chunk, if the chunk is mapped already.
* If no mapping exist and the operation is WRITE, a zone is
* allocated and used to map the chunk.
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index 12419f0bfe78..ed8de49c9a08 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -115,7 +115,6 @@ enum {
DMZ_BUF,
/* Zone internal state */
- DMZ_ACTIVE,
DMZ_RECLAIM,
DMZ_SEQ_WRITE_ERR,
};
@@ -128,7 +127,6 @@ enum {
#define dmz_is_empty(z) ((z)->wp_block == 0)
#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
-#define dmz_is_active(z) test_bit(DMZ_ACTIVE, &(z)->flags)
#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
@@ -188,8 +186,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
-void dmz_activate_zone(struct dm_zone *zone);
-void dmz_deactivate_zone(struct dm_zone *zone);
+/*
+ * Activate a zone (increment its reference count).
+ */
+static inline void dmz_activate_zone(struct dm_zone *zone)
+{
+ atomic_inc(&zone->refcount);
+}
+
+/*
+ * Deactivate a zone. This decrements the zone reference counter;
+ * a count of 0 indicates that all BIOs to the zone have completed.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+ atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+ return atomic_read(&zone->refcount);
+}
int dmz_lock_zone_reclaim(struct dm_zone *zone);
void dmz_unlock_zone_reclaim(struct dm_zone *zone);
diff --git a/drivers/memory/.gitignore b/drivers/memory/.gitignore
new file mode 100644
index 000000000000..cbca8b028437
--- /dev/null
+++ b/drivers/memory/.gitignore
@@ -0,0 +1 @@
+ti-emif-asm-offsets.h
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index dbdee02bb592..9bddca292330 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -8,6 +8,14 @@ menuconfig MEMORY
if MEMORY
+config DDR
+ bool
+ help
+ Data from JEDEC specs for DDR SDRAM memories,
+ particularly the AC timing parameters and addressing
+ information. This data is useful for drivers handling
+ DDR SDRAM controllers.
+
config ARM_PL172_MPMC
tristate "ARM PL172 MPMC driver"
depends on ARM_AMBA && OF
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 91ae4eb0e913..27b493435e61 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -3,6 +3,7 @@
# Makefile for memory devices
#
+obj-$(CONFIG_DDR) += jedec_ddr_data.o
ifeq ($(CONFIG_DDR),y)
obj-$(CONFIG_OF) += of_memory.o
endif
@@ -28,9 +29,10 @@ ti-emif-sram-objs := ti-emif-pm.o ti-emif-sram-pm.o
AFLAGS_ti-emif-sram-pm.o :=-Wa,-march=armv7-a
-drivers/memory/ti-emif-sram-pm.o: include/generated/ti-emif-asm-offsets.h
+$(obj)/ti-emif-sram-pm.o: $(obj)/ti-emif-asm-offsets.h
-include/generated/ti-emif-asm-offsets.h: drivers/memory/emif-asm-offsets.s FORCE
+$(obj)/ti-emif-asm-offsets.h: $(obj)/emif-asm-offsets.s FORCE
$(call filechk,offsets,__TI_EMIF_ASM_OFFSETS_H__)
targets += emif-asm-offsets.s
+clean-files += ti-emif-asm-offsets.h
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 3065a8bc8fd6..6827ed484750 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -33,10 +33,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#define DRVNAME "brcmstb-dpfe"
-#define FIRMWARE_NAME "dpfe.bin"
/* DCPU register offsets */
#define REG_DCPU_RESET 0x0
@@ -59,6 +59,7 @@
#define DRAM_INFO_MR4 0x4
#define DRAM_INFO_ERROR 0x8
#define DRAM_INFO_MR4_MASK 0xff
+#define DRAM_INFO_MR4_SHIFT 24 /* We need to look at byte 3 */
/* DRAM MR4 Offsets & Masks */
#define DRAM_MR4_REFRESH 0x0 /* Refresh rate */
@@ -73,13 +74,23 @@
#define DRAM_MR4_TH_OFFS_MASK 0x3
#define DRAM_MR4_TUF_MASK 0x1
-/* DRAM Vendor Offsets & Masks */
+/* DRAM Vendor Offsets & Masks (API v2) */
#define DRAM_VENDOR_MR5 0x0
#define DRAM_VENDOR_MR6 0x4
#define DRAM_VENDOR_MR7 0x8
#define DRAM_VENDOR_MR8 0xc
#define DRAM_VENDOR_ERROR 0x10
#define DRAM_VENDOR_MASK 0xff
+#define DRAM_VENDOR_SHIFT 24 /* We need to look at byte 3 */
+
+/* DRAM Information Offsets & Masks (API v3) */
+#define DRAM_DDR_INFO_MR4 0x0
+#define DRAM_DDR_INFO_MR5 0x4
+#define DRAM_DDR_INFO_MR6 0x8
+#define DRAM_DDR_INFO_MR7 0xc
+#define DRAM_DDR_INFO_MR8 0x10
+#define DRAM_DDR_INFO_ERROR 0x14
+#define DRAM_DDR_INFO_MASK 0xff
/* Reset register bits & masks */
#define DCPU_RESET_SHIFT 0x0
@@ -109,7 +120,7 @@
#define DPFE_MSG_TYPE_COMMAND 1
#define DPFE_MSG_TYPE_RESPONSE 2
-#define DELAY_LOOP_MAX 200000
+#define DELAY_LOOP_MAX 1000
enum dpfe_msg_fields {
MSG_HEADER,
@@ -117,7 +128,7 @@ enum dpfe_msg_fields {
MSG_ARG_COUNT,
MSG_ARG0,
MSG_CHKSUM,
- MSG_FIELD_MAX /* Last entry */
+ MSG_FIELD_MAX = 16 /* Max number of arguments */
};
enum dpfe_commands {
@@ -127,14 +138,6 @@ enum dpfe_commands {
DPFE_CMD_MAX /* Last entry */
};
-struct dpfe_msg {
- u32 header;
- u32 command;
- u32 arg_count;
- u32 arg0;
- u32 chksum; /* This is the sum of all other entries. */
-};
-
/*
* Format of the binary firmware file:
*
@@ -168,12 +171,21 @@ struct init_data {
bool is_big_endian;
};
+/* API version and corresponding commands */
+struct dpfe_api {
+ int version;
+ const char *fw_name;
+ const struct attribute_group **sysfs_attrs;
+ u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];
+};
+
/* Things we need for as long as we are active. */
struct private_data {
void __iomem *regs;
void __iomem *dmem;
void __iomem *imem;
struct device *dev;
+ const struct dpfe_api *dpfe_api;
struct mutex lock;
};
@@ -182,28 +194,99 @@ static const char *error_text[] = {
"Incorrect checksum", "Malformed command", "Timed out",
};
-/* List of supported firmware commands */
-static const u32 dpfe_commands[DPFE_CMD_MAX][MSG_FIELD_MAX] = {
- [DPFE_CMD_GET_INFO] = {
- [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
- [MSG_COMMAND] = 1,
- [MSG_ARG_COUNT] = 1,
- [MSG_ARG0] = 1,
- [MSG_CHKSUM] = 4,
- },
- [DPFE_CMD_GET_REFRESH] = {
- [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
- [MSG_COMMAND] = 2,
- [MSG_ARG_COUNT] = 1,
- [MSG_ARG0] = 1,
- [MSG_CHKSUM] = 5,
- },
- [DPFE_CMD_GET_VENDOR] = {
- [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
- [MSG_COMMAND] = 2,
- [MSG_ARG_COUNT] = 1,
- [MSG_ARG0] = 2,
- [MSG_CHKSUM] = 6,
+/*
+ * Forward declaration of our sysfs attribute functions, so we can declare the
+ * attribute data structures early.
+ */
+static ssize_t show_info(struct device *, struct device_attribute *, char *);
+static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
+static ssize_t store_refresh(struct device *, struct device_attribute *,
+ const char *, size_t);
+static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
+static ssize_t show_dram(struct device *, struct device_attribute *, char *);
+
+/*
+ * Declare our attributes early, so they can be referenced in the API data
+ * structure. We need to do this, because the attributes depend on the API
+ * version.
+ */
+static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
+static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
+static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
+static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);
+
+/* API v2 sysfs attributes */
+static struct attribute *dpfe_v2_attrs[] = {
+ &dev_attr_dpfe_info.attr,
+ &dev_attr_dpfe_refresh.attr,
+ &dev_attr_dpfe_vendor.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dpfe_v2);
+
+/* API v3 sysfs attributes */
+static struct attribute *dpfe_v3_attrs[] = {
+ &dev_attr_dpfe_info.attr,
+ &dev_attr_dpfe_dram.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dpfe_v3);
+
+/* API v2 firmware commands */
+static const struct dpfe_api dpfe_api_v2 = {
+ .version = 2,
+ .fw_name = "dpfe.bin",
+ .sysfs_attrs = dpfe_v2_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 1,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 4,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 5,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 2,
+ [MSG_CHKSUM] = 6,
+ },
+ }
+};
+
+/* API v3 firmware commands */
+static const struct dpfe_api dpfe_api_v3 = {
+ .version = 3,
+ .fw_name = NULL, /* We expect the firmware to have been downloaded! */
+ .sysfs_attrs = dpfe_v3_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x0101,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 0x104,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x0202,
+ [MSG_ARG_COUNT] = 0,
+ /*
+ * This is a bit ugly. Without arguments, the checksum
+ * follows right after the argument count and not at
+ * offset MSG_CHKSUM.
+ */
+ [MSG_ARG0] = 0x203,
+ },
+ /* There's no GET_VENDOR command in API v3. */
},
};
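The v3 GET_REFRESH entry stores its checksum in MSG_ARG0 because the checksum always follows the last argument, and this command has none. A minimal sketch of that index computation, mirroring the chksum_idx expression added later in __send_command (the helper name is illustrative and assumes the MSG_* enum from this file):

/*
 * The checksum sits right after the last argument, so its index is a
 * function of the argument count.  With zero arguments it lands at
 * MSG_ARG_COUNT + 1 == MSG_ARG0; with one argument it lands at MSG_CHKSUM.
 */
static unsigned int msg_chksum_index(const u32 msg[])
{
	return msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
}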
@@ -248,13 +331,13 @@ static void __enable_dcpu(void __iomem *regs)
writel_relaxed(val, regs + REG_DCPU_RESET);
}
-static unsigned int get_msg_chksum(const u32 msg[])
+static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
{
unsigned int sum = 0;
unsigned int i;
/* Don't include the last field in the checksum. */
- for (i = 0; i < MSG_FIELD_MAX - 1; i++)
+ for (i = 0; i < max; i++)
sum += msg[i];
return sum;
@@ -267,6 +350,11 @@ static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
unsigned int offset;
void __iomem *ptr = NULL;
+ /* There is no need to use this function for API v3 or later. */
+ if (unlikely(priv->dpfe_api->version >= 3)) {
+ return NULL;
+ }
+
msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
@@ -294,12 +382,25 @@ static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
return ptr;
}
+static void __finalize_command(struct private_data *priv)
+{
+ unsigned int release_mbox;
+
+ /*
+ * Which MBOX register we need to write to signal completion
+ * depends on the API version.
+ */
+ release_mbox = (priv->dpfe_api->version < 3)
+ ? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
+ writel_relaxed(0, priv->regs + release_mbox);
+}
+
static int __send_command(struct private_data *priv, unsigned int cmd,
u32 result[])
{
- const u32 *msg = dpfe_commands[cmd];
+ const u32 *msg = priv->dpfe_api->command[cmd];
void __iomem *regs = priv->regs;
- unsigned int i, chksum;
+ unsigned int i, chksum, chksum_idx;
int ret = 0;
u32 resp;
@@ -308,6 +409,18 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
mutex_lock(&priv->lock);
+ /* Wait for DCPU to become ready */
+ for (i = 0; i < DELAY_LOOP_MAX; i++) {
+ resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
+ if (resp == 0)
+ break;
+ msleep(1);
+ }
+ if (resp != 0) {
+ mutex_unlock(&priv->lock);
+ return -ETIMEDOUT;
+ }
+
/* Write command and arguments to message area */
for (i = 0; i < MSG_FIELD_MAX; i++)
writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
@@ -321,7 +434,7 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
if (resp > 0)
break;
- udelay(5);
+ msleep(1);
}
if (i == DELAY_LOOP_MAX) {
@@ -331,10 +444,11 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
/* Read response data */
for (i = 0; i < MSG_FIELD_MAX; i++)
result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
+ chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
}
/* Tell DCPU we are done */
- writel_relaxed(0, regs + REG_TO_HOST_MBOX);
+ __finalize_command(priv);
mutex_unlock(&priv->lock);
@@ -342,8 +456,8 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
return ret;
/* Verify response */
- chksum = get_msg_chksum(result);
- if (chksum != result[MSG_CHKSUM])
+ chksum = get_msg_chksum(result, chksum_idx);
+ if (chksum != result[chksum_idx])
resp = DCPU_RET_ERR_CHKSUM;
if (resp != DCPU_RET_SUCCESS) {
@@ -484,7 +598,15 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
return 0;
}
- ret = request_firmware(&fw, FIRMWARE_NAME, dev);
+ /*
+ * If the firmware filename is NULL it means the boot firmware has to
+ * download the DCPU firmware for us. If that didn't work, we have to
+ * bail, since downloading it ourselves wouldn't work either.
+ */
+ if (!priv->dpfe_api->fw_name)
+ return -ENODEV;
+
+ ret = request_firmware(&fw, priv->dpfe_api->fw_name, dev);
/* request_firmware() prints its own error messages. */
if (ret)
return ret;
@@ -525,12 +647,10 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
}
static ssize_t generic_show(unsigned int command, u32 response[],
- struct device *dev, char *buf)
+ struct private_data *priv, char *buf)
{
- struct private_data *priv;
int ret;
- priv = dev_get_drvdata(dev);
if (!priv)
return sprintf(buf, "ERROR: driver private data not set\n");
@@ -545,10 +665,12 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
+ struct private_data *priv;
unsigned int info;
ssize_t ret;
- ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
if (ret)
return ret;
@@ -571,17 +693,17 @@ static ssize_t show_refresh(struct device *dev,
u32 mr4;
ssize_t ret;
- ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
if (ret)
return ret;
- priv = dev_get_drvdata(dev);
-
info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
if (!info)
return ret;
- mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
+ mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
+ DRAM_INFO_MR4_MASK;
refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
@@ -608,7 +730,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
return -EINVAL;
priv = dev_get_drvdata(dev);
-
ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
if (ret)
return ret;
@@ -623,30 +744,58 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
}
static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
- char *buf)
+ char *buf)
{
u32 response[MSG_FIELD_MAX];
struct private_data *priv;
void __iomem *info;
ssize_t ret;
+ u32 mr5, mr6, mr7, mr8, err;
- ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
if (ret)
return ret;
- priv = dev_get_drvdata(dev);
-
info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
if (!info)
return ret;
- return sprintf(buf, "%#x %#x %#x %#x %#x\n",
- readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
- readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
- readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
- readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
- readl_relaxed(info + DRAM_VENDOR_ERROR) &
- DRAM_VENDOR_MASK);
+ mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
+}
+
+static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct private_data *priv;
+ ssize_t ret;
+ u32 mr4, mr5, mr6, mr7, mr8, err;
+
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
+ if (ret)
+ return ret;
+
+ mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
+ mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
+ mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
+ mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
+ mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
+ err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
+ mr8, err);
}
static int brcmstb_dpfe_resume(struct platform_device *pdev)
@@ -656,17 +805,6 @@ static int brcmstb_dpfe_resume(struct platform_device *pdev)
return brcmstb_dpfe_download_firmware(pdev, &init);
}
-static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
-static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
-static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
-static struct attribute *dpfe_attrs[] = {
- &dev_attr_dpfe_info.attr,
- &dev_attr_dpfe_refresh.attr,
- &dev_attr_dpfe_vendor.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(dpfe);
-
static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -703,26 +841,47 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
return -ENOENT;
}
+ priv->dpfe_api = of_device_get_match_data(dev);
+ if (unlikely(!priv->dpfe_api)) {
+ /*
+ * It should be impossible to end up here, but to be safe we
+ * check anyway.
+ */
+ dev_err(dev, "Couldn't determine API\n");
+ return -ENOENT;
+ }
+
ret = brcmstb_dpfe_download_firmware(pdev, &init);
- if (ret)
+ if (ret) {
+ dev_err(dev, "Couldn't download firmware -- %d\n", ret);
return ret;
+ }
- ret = sysfs_create_groups(&pdev->dev.kobj, dpfe_groups);
+ ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
if (!ret)
- dev_info(dev, "registered.\n");
+ dev_info(dev, "registered with API v%d.\n",
+ priv->dpfe_api->version);
return ret;
}
static int brcmstb_dpfe_remove(struct platform_device *pdev)
{
- sysfs_remove_groups(&pdev->dev.kobj, dpfe_groups);
+ struct private_data *priv = dev_get_drvdata(&pdev->dev);
+
+ sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
return 0;
}
static const struct of_device_id brcmstb_dpfe_of_match[] = {
- { .compatible = "brcm,dpfe-cpu", },
+ /* Use legacy API v2 for a select number of chips */
+ { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_v2 },
+ { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_v2 },
+ { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_v2 },
+ { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_v2 },
+ /* API v3 is the default going forward */
+ { .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
{}
};
MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
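The probe path above selects the firmware API purely from the compatible string, via the .data pointer in the OF match table and of_device_get_match_data(). A condensed sketch of that pattern (example_probe is a hypothetical function; struct dpfe_api is the one defined in this driver, and error handling is trimmed):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	const struct dpfe_api *api;

	/* Returns the .data member of the matching of_device_id entry. */
	api = of_device_get_match_data(&pdev->dev);
	if (!api)
		return -ENOENT;

	dev_info(&pdev->dev, "using API v%d\n", api->version);
	return 0;
}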
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index ee67a9a5d775..402c6bc8e621 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -23,8 +23,9 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
-#include <memory/jedec_ddr.h>
+
#include "emif.h"
+#include "jedec_ddr.h"
#include "of_memory.h"
/**
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
new file mode 100644
index 000000000000..4a21b5044ff8
--- /dev/null
+++ b/drivers/memory/jedec_ddr.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definitions for DDR memories based on JEDEC specs
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ */
+#ifndef __JEDEC_DDR_H
+#define __JEDEC_DDR_H
+
+#include <linux/types.h>
+
+/* DDR Densities */
+#define DDR_DENSITY_64Mb 1
+#define DDR_DENSITY_128Mb 2
+#define DDR_DENSITY_256Mb 3
+#define DDR_DENSITY_512Mb 4
+#define DDR_DENSITY_1Gb 5
+#define DDR_DENSITY_2Gb 6
+#define DDR_DENSITY_4Gb 7
+#define DDR_DENSITY_8Gb 8
+#define DDR_DENSITY_16Gb 9
+#define DDR_DENSITY_32Gb 10
+
+/* DDR type */
+#define DDR_TYPE_DDR2 1
+#define DDR_TYPE_DDR3 2
+#define DDR_TYPE_LPDDR2_S4 3
+#define DDR_TYPE_LPDDR2_S2 4
+#define DDR_TYPE_LPDDR2_NVM 5
+
+/* DDR IO width */
+#define DDR_IO_WIDTH_4 1
+#define DDR_IO_WIDTH_8 2
+#define DDR_IO_WIDTH_16 3
+#define DDR_IO_WIDTH_32 4
+
+/* Number of Row bits */
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+#define R16 16
+
+/* Number of Column bits */
+#define C7 7
+#define C8 8
+#define C9 9
+#define C10 10
+#define C11 11
+#define C12 12
+
+/* Number of Banks */
+#define B1 0
+#define B2 1
+#define B4 2
+#define B8 3
+
+/* Refresh rate in nanoseconds */
+#define T_REFI_15_6 15600
+#define T_REFI_7_8 7800
+#define T_REFI_3_9 3900
+
+/* tRFC values */
+#define T_RFC_90 90000
+#define T_RFC_110 110000
+#define T_RFC_130 130000
+#define T_RFC_160 160000
+#define T_RFC_210 210000
+#define T_RFC_300 300000
+#define T_RFC_350 350000
+
+/* Mode register numbers */
+#define DDR_MR0 0
+#define DDR_MR1 1
+#define DDR_MR2 2
+#define DDR_MR3 3
+#define DDR_MR4 4
+#define DDR_MR5 5
+#define DDR_MR6 6
+#define DDR_MR7 7
+#define DDR_MR8 8
+#define DDR_MR9 9
+#define DDR_MR10 10
+#define DDR_MR11 11
+#define DDR_MR16 16
+#define DDR_MR17 17
+#define DDR_MR18 18
+
+/*
+ * LPDDR2 related defines
+ */
+
+/* MR4 register fields */
+#define MR4_SDRAM_REF_RATE_SHIFT 0
+#define MR4_SDRAM_REF_RATE_MASK 7
+#define MR4_TUF_SHIFT 7
+#define MR4_TUF_MASK (1 << 7)
+
+/* MR4 SDRAM Refresh Rate field values */
+#define SDRAM_TEMP_NOMINAL 0x3
+#define SDRAM_TEMP_RESERVED_4 0x4
+#define SDRAM_TEMP_HIGH_DERATE_REFRESH 0x5
+#define SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS 0x6
+#define SDRAM_TEMP_VERY_HIGH_SHUTDOWN 0x7
+
+#define NUM_DDR_ADDR_TABLE_ENTRIES 11
+#define NUM_DDR_TIMING_TABLE_ENTRIES 4
+
+/* Structure for DDR addressing info from the JEDEC spec */
+struct lpddr2_addressing {
+ u32 num_banks;
+ u32 tREFI_ns;
+ u32 tRFCab_ps;
+};
+
+/*
+ * Structure for timings from the LPDDR2 datasheet.
+ * All parameters are in picoseconds (ps) unless explicitly indicated
+ * with a suffix like tRAS_max_ns below.
+ */
+struct lpddr2_timings {
+ u32 max_freq;
+ u32 min_freq;
+ u32 tRPab;
+ u32 tRCD;
+ u32 tWR;
+ u32 tRAS_min;
+ u32 tRRD;
+ u32 tWTR;
+ u32 tXP;
+ u32 tRTP;
+ u32 tCKESR;
+ u32 tDQSCK_max;
+ u32 tDQSCK_max_derated;
+ u32 tFAW;
+ u32 tZQCS;
+ u32 tZQCL;
+ u32 tZQinit;
+ u32 tRAS_max_ns;
+};
+
+/*
+ * Minimum values for some parameters in terms of number of tCK cycles (nCK).
+ * Set parameters that are not valid for a given memory type to zero.
+ */
+struct lpddr2_min_tck {
+ u32 tRPab;
+ u32 tRCD;
+ u32 tWR;
+ u32 tRASmin;
+ u32 tRRD;
+ u32 tWTR;
+ u32 tXP;
+ u32 tRTP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tFAW;
+};
+
+extern const struct lpddr2_addressing
+ lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES];
+extern const struct lpddr2_timings
+ lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
+extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+
+#endif /* __JEDEC_DDR_H */
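To illustrate how the MR4 field definitions in this header are meant to be used, a consumer could decode the refresh-rate/temperature code like this. This is a hypothetical helper, not part of the patch; it assumes jedec_ddr.h is included from drivers/memory/:

#include <linux/types.h>
#include "jedec_ddr.h"

/* Extract the temperature/refresh-rate code from a raw MR4 value. */
static inline unsigned int lpddr2_mr4_temp(unsigned int mr4)
{
	return (mr4 & MR4_SDRAM_REF_RATE_MASK) >> MR4_SDRAM_REF_RATE_SHIFT;
}

/* Example check against one of the field values defined above. */
static inline bool lpddr2_temp_is_critical(unsigned int mr4)
{
	return lpddr2_mr4_temp(mr4) == SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
}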
diff --git a/drivers/memory/jedec_ddr_data.c b/drivers/memory/jedec_ddr_data.c
new file mode 100644
index 000000000000..ed601d813175
--- /dev/null
+++ b/drivers/memory/jedec_ddr_data.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DDR addressing details and AC timing parameters from JEDEC specs
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ */
+
+#include <linux/export.h>
+
+#include "jedec_ddr.h"
+
+/* LPDDR2 addressing details from JESD209-2 section 2.4 */
+const struct lpddr2_addressing
+ lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES] = {
+ {B4, T_REFI_15_6, T_RFC_90}, /* 64M */
+ {B4, T_REFI_15_6, T_RFC_90}, /* 128M */
+ {B4, T_REFI_7_8, T_RFC_90}, /* 256M */
+ {B4, T_REFI_7_8, T_RFC_90}, /* 512M */
+ {B8, T_REFI_7_8, T_RFC_130}, /* 1GS4 */
+ {B8, T_REFI_3_9, T_RFC_130}, /* 2GS4 */
+ {B8, T_REFI_3_9, T_RFC_130}, /* 4G */
+ {B8, T_REFI_3_9, T_RFC_210}, /* 8G */
+ {B4, T_REFI_7_8, T_RFC_130}, /* 1GS2 */
+ {B4, T_REFI_3_9, T_RFC_130}, /* 2GS2 */
+};
+EXPORT_SYMBOL_GPL(lpddr2_jedec_addressing_table);
+
+/* LPDDR2 AC timing parameters from JESD209-2 section 12 */
+const struct lpddr2_timings
+ lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES] = {
+ /* Speed bin 400(200 MHz) */
+ [0] = {
+ .max_freq = 200000000,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 10000,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 6000,
+ },
+ /* Speed bin 533(266 MHz) */
+ [1] = {
+ .max_freq = 266666666,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 7500,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 6000,
+ },
+ /* Speed bin 800(400 MHz) */
+ [2] = {
+ .max_freq = 400000000,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 7500,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 6000,
+ },
+ /* Speed bin 1066(533 MHz) */
+ [3] = {
+ .max_freq = 533333333,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 7500,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 5620,
+ },
+};
+EXPORT_SYMBOL_GPL(lpddr2_jedec_timings);
+
+const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
+ .tRPab = 3,
+ .tRCD = 3,
+ .tWR = 3,
+ .tRASmin = 3,
+ .tRRD = 2,
+ .tWTR = 2,
+ .tXP = 2,
+ .tRTP = 2,
+ .tCKE = 3,
+ .tCKESR = 3,
+ .tFAW = 8
+};
+EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
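A hypothetical consumer of the timings table above might pick the lowest speed bin that covers a target clock rate. This is illustrative only; the in-tree users (of_memory.c, emif.c) have their own selection logic:

/*
 * Return the first JEDEC speed-bin entry whose max_freq covers the
 * requested frequency, or NULL if the frequency is out of range.
 */
static const struct lpddr2_timings *lpddr2_find_timings(unsigned long freq_hz)
{
	int i;

	for (i = 0; i < NUM_DDR_TIMING_TABLE_ENTRIES; i++)
		if (freq_hz <= lpddr2_jedec_timings[i].max_freq)
			return &lpddr2_jedec_timings[i];

	return NULL;
}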
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 12a61f558644..46539b27a3fb 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -10,8 +10,9 @@
#include <linux/list.h>
#include <linux/of.h>
#include <linux/gfp.h>
-#include <memory/jedec_ddr.h>
#include <linux/export.h>
+
+#include "jedec_ddr.h"
#include "of_memory.h"
/**
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 41f08b2effd2..5d0ccb2be206 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -30,28 +30,6 @@
#define MC_EMEM_ARB_MISC1 0xdc
#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
-static const unsigned long tegra124_mc_emem_regs[] = {
- MC_EMEM_ARB_CFG,
- MC_EMEM_ARB_OUTSTANDING_REQ,
- MC_EMEM_ARB_TIMING_RCD,
- MC_EMEM_ARB_TIMING_RP,
- MC_EMEM_ARB_TIMING_RC,
- MC_EMEM_ARB_TIMING_RAS,
- MC_EMEM_ARB_TIMING_FAW,
- MC_EMEM_ARB_TIMING_RRD,
- MC_EMEM_ARB_TIMING_RAP2PRE,
- MC_EMEM_ARB_TIMING_WAP2PRE,
- MC_EMEM_ARB_TIMING_R2R,
- MC_EMEM_ARB_TIMING_W2W,
- MC_EMEM_ARB_TIMING_R2W,
- MC_EMEM_ARB_TIMING_W2R,
- MC_EMEM_ARB_DA_TURNS,
- MC_EMEM_ARB_DA_COVERS,
- MC_EMEM_ARB_MISC0,
- MC_EMEM_ARB_MISC1,
- MC_EMEM_ARB_RING1_THROTTLE
-};
-
static const struct tegra_mc_client tegra124_mc_clients[] = {
{
.id = 0x00,
@@ -1046,6 +1024,28 @@ static const struct tegra_mc_reset tegra124_mc_resets[] = {
};
#ifdef CONFIG_ARCH_TEGRA_124_SOC
+static const unsigned long tegra124_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_RING1_THROTTLE
+};
+
static const struct tegra_smmu_soc tegra124_smmu_soc = {
.clients = tegra124_mc_clients,
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
diff --git a/drivers/memory/ti-emif-sram-pm.S b/drivers/memory/ti-emif-sram-pm.S
index d75ae18efa7d..d1c83bd5b98e 100644
--- a/drivers/memory/ti-emif-sram-pm.S
+++ b/drivers/memory/ti-emif-sram-pm.S
@@ -14,12 +14,12 @@
* GNU General Public License for more details.
*/
-#include <generated/ti-emif-asm-offsets.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "emif.h"
+#include "ti-emif-asm-offsets.h"
#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES 0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK 0x00f0
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index a450694d5a62..b493de962153 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -9,6 +9,7 @@
#include <misc/cxl.h>
#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
@@ -33,21 +34,15 @@
static int cxl_fs_cnt;
static struct vfsmount *cxl_vfs_mount;
-static const struct dentry_operations cxl_fs_dops = {
- .d_dname = simple_dname,
-};
-
-static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int cxl_fs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops,
- CXL_PSEUDO_FS_MAGIC);
+ return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type cxl_fs_type = {
.name = "cxl",
.owner = THIS_MODULE,
- .mount = cxl_fs_mount,
+ .init_fs_context = cxl_fs_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 4989dcb2df14..35fec1bf1b3d 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -60,6 +60,7 @@
*/
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -74,13 +75,21 @@ static LIST_HEAD(service_processors);
static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode);
static void ibmasmfs_create_files (struct super_block *sb);
-static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent);
+static int ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc);
+static int ibmasmfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, ibmasmfs_fill_super);
+}
-static struct dentry *ibmasmfs_mount(struct file_system_type *fst,
- int flags, const char *name, void *data)
+static const struct fs_context_operations ibmasmfs_context_ops = {
+ .get_tree = ibmasmfs_get_tree,
+};
+
+static int ibmasmfs_init_fs_context(struct fs_context *fc)
{
- return mount_single(fst, flags, data, ibmasmfs_fill_super);
+ fc->ops = &ibmasmfs_context_ops;
+ return 0;
}
static const struct super_operations ibmasmfs_s_ops = {
@@ -93,12 +102,12 @@ static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
static struct file_system_type ibmasmfs_type = {
.owner = THIS_MODULE,
.name = "ibmasmfs",
- .mount = ibmasmfs_mount,
+ .init_fs_context = ibmasmfs_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("ibmasmfs");
-static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
+static int ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *root;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 97b58e7ad901..8840299420e0 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
@@ -1728,22 +1729,15 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
#ifdef CONFIG_BALLOON_COMPACTION
-static struct dentry *vmballoon_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int vmballoon_init_fs_context(struct fs_context *fc)
{
- static const struct dentry_operations ops = {
- .d_dname = simple_dname,
- };
-
- return mount_pseudo(fs_type, "balloon-vmware:", NULL, &ops,
- BALLOON_VMW_MAGIC);
+ return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type vmballoon_fs = {
- .name = "balloon-vmware",
- .mount = vmballoon_mount,
- .kill_sb = kill_anon_super,
+ .name = "balloon-vmware",
+ .init_fs_context = vmballoon_init_fs_context,
+ .kill_sb = kill_anon_super,
};
static struct vfsmount *vmballoon_mnt;
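Both the cxl and vmw_balloon conversions above follow the same shape: drop the old .mount callback built on mount_pseudo() and register an .init_fs_context hook that calls init_pseudo(). A bare-bones sketch of that shape (the filesystem name and magic value are placeholders):

#include <linux/fs.h>
#include <linux/pseudo_fs.h>

#define EXAMPLE_FS_MAGIC 0x45584d50	/* placeholder magic value */

static int example_init_fs_context(struct fs_context *fc)
{
	/* init_pseudo() installs fc->ops and the pseudo root for us. */
	return init_pseudo(fc, EXAMPLE_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type example_fs_type = {
	.name			= "example-pseudo",
	.init_fs_context	= example_init_fs_context,
	.kill_sb		= kill_anon_super,
};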
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b2f10b6ad6e5..bbb2575d4728 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, n, &cfhsi_list) {
cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdev(cfhsi->ndev);
+ unregister_netdevice(cfhsi->ndev);
}
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 72a57c6cd254..8b69d0d7e726 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -35,6 +35,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/io.h>
/* For our NAPI weight bigger does *NOT* mean better - it means more
* D-cache misses and lots more wasted cycles than we'll ever
@@ -1724,17 +1725,19 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
- if (!ag->stop_desc)
+ if (!ag->stop_desc) {
+ err = -ENOMEM;
goto err_free;
+ }
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
- if (!mac_addr || !is_valid_ether_addr(ndev->dev_addr)) {
+ if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_random_addr(ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7c767ce9aafa..b5c6dc914720 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1060,8 +1060,6 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
- memset(ring_header->desc, 0, ring_header->size);
-
/* init TPD ring */
tpd_ring->dma = ring_header->dma;
offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 3a3fb5ce0fee..3aba38322717 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -291,7 +291,6 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
&adapter->ring_dma);
if (!adapter->ring_vir_addr)
return -ENOMEM;
- memset(adapter->ring_vir_addr, 0, adapter->ring_size);
/* Init TXD Ring */
adapter->txd_dma = adapter->ring_dma ;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3f632028eff0..7134d2c3eb1c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2677,8 +2677,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
mapping = txr->tx_push_mapping +
sizeof(struct tx_push_bd);
txr->data_mapping = cpu_to_le64(mapping);
-
- memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
@@ -3077,7 +3075,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings;
#endif
@@ -7188,6 +7186,9 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
@@ -9647,7 +9648,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
return -ENOMEM;
vnics = 1;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
vnics += rx_rings;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
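The repeated test in this patch, (bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS, is the usual bit-mask idiom for "RFS is enabled and the chip is not P5". In isolation, with placeholder flag names:

#define FLAG_A	0x1
#define FLAG_B	0x2

/* True only when FLAG_A is set and FLAG_B is clear. */
static inline int a_set_b_clear(unsigned int flags)
{
	return (flags & (FLAG_A | FLAG_B)) == FLAG_A;
}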
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 34466b827dde..a2b57807453b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3083,39 +3083,42 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_tx_wake_all_queues(dev);
}
-#define MAX_MC_COUNT 16
+#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
unsigned char *addr,
- int *i,
- int *mc)
+ int *i)
{
- u32 reg;
-
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
UMAC_MDF_ADDR + (*i * 4));
bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
addr[4] << 8 | addr[5],
UMAC_MDF_ADDR + ((*i + 1) * 4));
- reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
- reg |= (1 << (MAX_MC_COUNT - *mc));
- bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
*i += 2;
- (*mc)++;
}
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- int i, mc;
+ int i, nfilter;
u32 reg;
netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
- /* Promiscuous mode */
+ /* Number of filters needed */
+ nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
+
+ /*
+ * Turn on promiscuous mode for three scenarios:
+ * 1. IFF_PROMISC flag is set
+ * 2. IFF_ALLMULTI flag is set
+ * 3. The number of filters needed exceeds the number of filters
+ * supported by the hardware.
+ */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (dev->flags & IFF_PROMISC) {
+ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ (nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@@ -3125,32 +3128,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
}
- /* UniMac doesn't support ALLMULTI */
- if (dev->flags & IFF_ALLMULTI) {
- netdev_warn(dev, "ALLMULTI is not supported\n");
- return;
- }
-
/* update MDF filter */
i = 0;
- mc = 0;
/* Broadcast */
- bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
/* my own address.*/
- bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
- /* Unicast list*/
- if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
- return;
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
- if (!netdev_uc_empty(dev))
- netdev_for_each_uc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
- /* Multicast */
- if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
- return;
+ /* Unicast */
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+ /* Multicast */
netdev_for_each_mc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
+ /* Enable filters */
+ reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
/* Set the hardware MAC address. */
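For the filter-enable write above, the driver now builds one contiguous bit mask covering the nfilter programmed MDF slots with GENMASK() instead of setting bits one at a time. A standalone sketch of the same computation (it assumes nfilter <= MAX_MDF_FILTER, which the promiscuous fallback above guarantees; which bit maps to which filter entry is the hardware's convention as used by the driver):

#include <linux/bits.h>
#include <linux/types.h>

#define MAX_MDF_FILTER	17

/* Contiguous run of nfilter bits, topmost bit = MAX_MDF_FILTER - 1. */
static inline u32 mdf_enable_mask(unsigned int nfilter)
{
	return GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
}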
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index fcf20a8f92d9..032224178b64 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -218,15 +218,13 @@ int octeon_setup_iq(struct octeon_device *oct,
return 0;
}
oct->instr_queue[iq_no] =
- vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+ vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
if (!oct->instr_queue[iq_no])
oct->instr_queue[iq_no] =
- vmalloc(sizeof(struct octeon_instr_queue));
+ vzalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[iq_no])
return 1;
- memset(oct->instr_queue[iq_no], 0,
- sizeof(struct octeon_instr_queue));
oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
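The liquidio change above is the common cleanup of replacing vmalloc() followed by memset(..., 0, ...) with vzalloc(), which returns already-zeroed memory (vzalloc_node() is the NUMA-aware variant used in the hunk). In its simplest form, with an illustrative structure name:

#include <linux/vmalloc.h>

struct example_queue { int q_index; void *app_ctx; };

static struct example_queue *alloc_example_queue(void)
{
	/* vzalloc() == vmalloc() + zeroing: no separate memset() needed. */
	return vzalloc(sizeof(struct example_queue));
}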
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index ba6c153ee45c..60218dc676a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -207,7 +207,6 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
goto out_err;
/* Bind queue to specified class */
- memset(qe, 0, sizeof(*qe));
qe->cntxt_id = qid;
memcpy(&qe->param, p, sizeof(qe->param));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 82015c8a5ed7..b7a246b33599 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4697,8 +4697,12 @@ int be_update_queues(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int status;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* device cannot transmit now, avoid dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+
be_close(netdev);
+ }
be_cancel_worker(adapter);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9d459ccf251d..e5610a4da539 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3144,8 +3144,6 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
}
- memset(cbd_base, 0, bd_size);
-
/* Get the Ethernet address */
fec_get_mac(ndev);
/* make sure MAC we just acquired is programmed into the hw */
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 24f16e3368cd..497298752381 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -232,7 +232,7 @@ abort_with_mgmt_vector:
abort_with_msix_enabled:
pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
- kfree(priv->msix_vectors);
+ kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
return err;
}
@@ -256,7 +256,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
priv->ntfy_blocks = NULL;
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
- kfree(priv->msix_vectors);
+ kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
}
@@ -445,12 +445,12 @@ static int gve_alloc_rings(struct gve_priv *priv)
return 0;
free_rx:
- kfree(priv->rx);
+ kvfree(priv->rx);
priv->rx = NULL;
free_tx_queue:
gve_tx_free_rings(priv);
free_tx:
- kfree(priv->tx);
+ kvfree(priv->tx);
priv->tx = NULL;
return err;
}
@@ -500,7 +500,7 @@ static void gve_free_rings(struct gve_priv *priv)
gve_remove_napi(priv, ntfy_idx);
}
gve_tx_free_rings(priv);
- kfree(priv->tx);
+ kvfree(priv->tx);
priv->tx = NULL;
}
if (priv->rx) {
@@ -509,7 +509,7 @@ static void gve_free_rings(struct gve_priv *priv)
gve_remove_napi(priv, ntfy_idx);
}
gve_rx_free_rings(priv);
- kfree(priv->rx);
+ kvfree(priv->rx);
priv->rx = NULL;
}
}
@@ -592,9 +592,9 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
gve_free_page(&priv->pdev->dev, qpl->pages[i],
qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
- kfree(qpl->page_buses);
+ kvfree(qpl->page_buses);
free_pages:
- kfree(qpl->pages);
+ kvfree(qpl->pages);
priv->num_registered_pages -= qpl->num_entries;
}
@@ -635,7 +635,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
free_qpls:
for (j = 0; j <= i; j++)
gve_free_queue_page_list(priv, j);
- kfree(priv->qpls);
+ kvfree(priv->qpls);
return err;
}
@@ -644,12 +644,12 @@ static void gve_free_qpls(struct gve_priv *priv)
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
- kfree(priv->qpl_cfg.qpl_id_map);
+ kvfree(priv->qpl_cfg.qpl_id_map);
for (i = 0; i < num_qpls; i++)
gve_free_queue_page_list(priv, i);
- kfree(priv->qpls);
+ kvfree(priv->qpls);
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1192,7 +1192,6 @@ abort_with_enabled:
pci_disable_device(pdev);
return -ENXIO;
}
-EXPORT_SYMBOL(gve_probe);
static void gve_remove(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index c1aeabd1c594..1914b8350da7 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -35,7 +35,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
- kfree(rx->data.page_info);
+ kvfree(rx->data.page_info);
slots = rx->data.mask + 1;
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -168,7 +168,7 @@ abort_with_q_resources:
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
abort_filled:
- kfree(rx->data.page_info);
+ kvfree(rx->data.page_info);
abort_with_slots:
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
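The gve hunks switch these frees to kvfree(), which handles both kmalloc()- and vmalloc()-backed pointers, making it the right counterpart when the allocation side may use kvmalloc()/kvzalloc(). A sketch of the pairing; the gve allocation sites themselves are not visible in this hunk, so the allocator shown is an assumption for illustration:

#include <linux/mm.h>
#include <linux/slab.h>

static int *alloc_big_table(size_t n)
{
	/* kvcalloc() falls back to vmalloc() transparently for large n. */
	return kvcalloc(n, sizeof(int), GFP_KERNEL);
}

static void free_big_table(int *t)
{
	/* kvfree() works regardless of which allocator backed the pointer. */
	kvfree(t);
}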
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 76b7b7b85e35..0b668357db4d 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -582,11 +582,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
- /*
- * Initialize Transmit Descriptors
- */
- memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
-
return 0;
err_free_txring:
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 35a92fd2cf39..9ac854c2b371 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2558,8 +2558,6 @@ static int skge_up(struct net_device *dev)
goto free_pci_mem;
}
- memset(skge->mem, 0, skge->mem_size);
-
err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
if (err)
goto free_pci_mem;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fe518c854d1f..f518312ffe69 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4917,6 +4917,13 @@ static const struct dmi_system_id msi_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
},
},
+ {
+ .ident = "ASUS P5W DH Deluxe",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b20b3a5a1ebb..c39d7f4ab1d4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2548,8 +2548,10 @@ static int mtk_probe(struct platform_device *pdev)
continue;
err = mtk_add_mac(eth, mac_np);
- if (err)
+ if (err) {
+ of_node_put(mac_np);
goto err_deinit_hw;
+ }
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
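The mtk_eth_soc fix above (and the ocelot one further down) addresses the same leak: the for_each_*_of_node iterators drop the reference on the current child only when advancing to the next one, so leaving the loop early must call of_node_put() on the node still being held. In outline, with a hypothetical per-child helper standing in for calls like mtk_add_mac():

#include <linux/of.h>

static int handle_child(struct device_node *child)
{
	return 0;	/* hypothetical per-child setup */
}

static int scan_children(struct device_node *parent)
{
	struct device_node *child;
	int err;

	for_each_available_child_of_node(parent, child) {
		err = handle_child(child);
		if (err) {
			/* Leaving the loop early: release the held reference. */
			of_node_put(child);
			return err;
		}
	}
	return 0;
}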
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a5be27772b8e..c790a5fcea73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1013,8 +1013,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
dma_list[i] = t;
eq->page_list[i].map = t;
-
- memset(eq->page_list[i].buf, 0, PAGE_SIZE);
}
eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2d6436257f9d..cc096f6011d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1499,7 +1499,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_NONE;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ ~(BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
@@ -1522,11 +1523,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (mlx5e_get_tc_tun(filter_dev)) {
if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
@@ -2647,6 +2644,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
+ if (!key.tc_tunnel) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
+ return -EOPNOTSUPP;
+ }
hash_key = hash_encap_info(&key);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3b04d8927fb1..1f3891fde2eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2450,7 +2450,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
- memset(out, 0, outlen);
err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
if (err)
goto free_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 957d9b09dc3f..089ae4d48a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1134,7 +1134,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
/* create send-to-vport group */
- memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
@@ -1293,8 +1292,6 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
return -ENOMEM;
/* create vport rx group */
- memset(flow_group_in, 0, inlen);
-
esw_set_flow_group_source_port(esw, flow_group_in);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2fe6923f7ce0..9314777d99e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -597,7 +597,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
err = devlink_fmsg_arr_pair_nest_end(fmsg);
free_data:
- kfree(cr_data);
+ kvfree(cr_data);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 051b19388a81..615455a21567 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -847,7 +847,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
&mem_item->mapaddr);
if (!mem_item->buf)
return -ENOMEM;
- memset(mem_item->buf, 0, mem_item->size);
q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
if (!q->elem_info) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a252b080dda9..131f62ce9297 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -830,6 +830,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b25048c6c761..21296fa7f7fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
- if (!have_dscp) {
- err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
- MLXSW_REG_QPTS_TRUST_STATE_PCP);
- if (err)
- netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
- return err;
- }
-
mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
&dscp_map);
err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
@@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
MLXSW_REG_QPTS_TRUST_STATE_DSCP);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 46baf3b44309..8df3cb21baa6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -126,6 +126,16 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
+{
+ enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
+
+ return fid_family->start_index == fid_index;
+}
+
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
{
return fid->fid_family->lag_vid_valid;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 50111f228d77..5ecb45118400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2468,6 +2468,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
+ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
+ goto just_remove;
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
@@ -2527,6 +2530,9 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
+ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
+ goto just_remove;
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 58bde1a9eacb..2451d4a96490 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -291,8 +291,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
- if (err)
+ if (err) {
+ of_node_put(portnp);
return err;
+ }
phy_mode = of_get_phy_mode(portnp);
if (phy_mode < 0)
@@ -318,6 +320,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
dev_err(ocelot->dev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
+ of_node_put(portnp);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3b2ae1a21678..e0b2bf327905 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -747,7 +747,6 @@ static int init_shared_mem(struct s2io_nic *nic)
return -ENOMEM;
}
mem_allocated += size;
- memset(tmp_v_addr, 0, size);
size = sizeof(struct rxd_info) *
rxd_count[nic->rxd_mode];
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 433052f734ed..5e9f8ee99800 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -442,10 +442,8 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
goto out_free_rq;
}
- memset(rq_addr, 0, rq_size);
prq = rq_addr;
- memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -755,7 +753,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
return -ENOMEM;
}
- memset(addr, 0, sizeof(struct netxen_ring_ctx));
recv_ctx->hwctx = addr;
recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
recv_ctx->hwctx->cmd_consumer_offset =
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index efef5453b94f..0637c6752a78 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4667,6 +4667,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2);
+
+ /* The following Realtek-provided magic fixes an issue with the RX unit
+ * getting confused after the PHY has been powered down.
+ */
+ r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
+ mdelay(3);
+ r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
+
+ r8168_mac_ocp_write(tp, 0xF800, 0xE008);
+ r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
+ r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
+ r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
+ r8168_mac_ocp_write(tp, 0xF808, 0xE027);
+ r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
+ r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
+ r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
+ r8168_mac_ocp_write(tp, 0xF810, 0xC602);
+ r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF814, 0x0000);
+ r8168_mac_ocp_write(tp, 0xF816, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
+ r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
+ r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
+ r8168_mac_ocp_write(tp, 0xF820, 0x080A);
+ r8168_mac_ocp_write(tp, 0xF822, 0x6420);
+ r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
+ r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
+ r8168_mac_ocp_write(tp, 0xF828, 0xC516);
+ r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
+ r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
+ r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
+ r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
+ r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
+ r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
+ r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
+ r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
+ r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
+ r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
+ r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
+ r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
+ r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
+ r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
+ r8168_mac_ocp_write(tp, 0xF846, 0xC404);
+ r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
+ r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
+ r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
+ r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
+ r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
+ r8168_mac_ocp_write(tp, 0xF852, 0xE434);
+ r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
+ r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
+ r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
+ r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
+ r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
+ r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
+ r8168_mac_ocp_write(tp, 0xF860, 0xF007);
+ r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
+ r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
+ r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
+ r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
+ r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
+ r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
+ r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
+ r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
+ r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
+ r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
+ r8168_mac_ocp_write(tp, 0xF876, 0xC516);
+ r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
+ r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
+ r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
+ r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
+ r8168_mac_ocp_write(tp, 0xF880, 0xC512);
+ r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
+ r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
+ r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
+ r8168_mac_ocp_write(tp, 0xF888, 0x483F);
+ r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
+ r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
+ r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
+ r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
+ r8168_mac_ocp_write(tp, 0xF892, 0xC505);
+ r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF896, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
+ r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
+ r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
+ r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
+ r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
+ r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
+ r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
+ r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
+ r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
+ r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
+ r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
+ r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
+ r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
+ r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
+ r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
+ r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
+ r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
+ r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
+ r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
+ r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
+ r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
+ r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
+ r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
+ r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
+ r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
+ r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
+ r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
+
+ r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
+
+ r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
+ r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
+ r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
+ r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
+ r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
+ r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
+ r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
+
rtl_hw_aspm_clkreq_enable(tp, true);
}
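
The long run of r8168_mac_ocp_write() calls in the hunk above loads Realtek's opaque firmware patch one 16-bit word at a time. Purely as a hedged sketch (not part of the patch), the same sequence could be kept as a static address/value table and replayed in a loop; r8168_mac_ocp_write(), struct rtl8169_private and mdelay() are the helpers already used above, while the table and function names below are invented for illustration:

/* Hypothetical table-driven form of the write sequence above. */
static const struct {
	u16 reg;
	u16 val;
} rtl8411_2_mac_patch[] = {
	{ 0xF800, 0xE008 }, { 0xF802, 0xE00A }, { 0xF804, 0xE00C },
	/* ... the remaining 0xF8xx words exactly as listed above ... */
	{ 0xF8DC, 0x0132 },
};

static void rtl8411_2_apply_mac_patch(struct rtl8169_private *tp)
{
	unsigned int i;

	/* Clear 0xFC28..0xFC36, wait, then clear 0xFC26, as above. */
	for (i = 0xFC28; i <= 0xFC36; i += 2)
		r8168_mac_ocp_write(tp, i, 0x0000);
	mdelay(3);
	r8168_mac_ocp_write(tp, 0xFC26, 0x0000);

	/* Replay the patch words. */
	for (i = 0; i < ARRAY_SIZE(rtl8411_2_mac_patch); i++)
		r8168_mac_ocp_write(tp, rtl8411_2_mac_patch[i].reg,
				    rtl8411_2_mac_patch[i].val);

	/* The re-enable writes (0xFC26 = 0x8000 and the 0xFC2A..0xFC36
	 * entry points) would follow here, unchanged from the original.
	 */
}
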
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index aba6eea72f15..6e07f5ebacfc 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -262,7 +262,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
/* check to see if we have sane EEPROM */
signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
if (signature == 0xffff || signature == 0x0000) {
- printk (KERN_WARNING "%s: Error EERPOM read %x\n",
+ printk (KERN_WARNING "%s: Error EEPROM read %x\n",
pci_name(pci_dev), signature);
return 0;
}
@@ -359,9 +359,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS962 or SiS963 model, use EEPROM to store MAC address. And EEPROM
* is shared by
- * LAN and 1394. When access EEPROM, send EEREQ signal to hardware first
+ * LAN and 1394. When accessing EEPROM, send EEREQ signal to hardware first
* and wait for EEGNT. If EEGNT is ON, EEPROM is permitted to be accessed
- * by LAN, otherwise is not. After MAC address is read from EEPROM, send
+ * by LAN, otherwise it is not. After MAC address is read from EEPROM, send
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f320f9a0de8b..32a89744972d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2570,7 +2570,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
ret = PTR_ERR(slave_data->ifphy);
dev_err(&pdev->dev,
"%d: Error retrieving port phy: %d\n", i, ret);
- return ret;
+ goto err_node_put;
}
slave_data->slave_node = slave_node;
@@ -2589,7 +2589,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
- return ret;
+ goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
@@ -2607,7 +2607,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
of_node_put(mdio_node);
if (!mdio) {
dev_err(&pdev->dev, "Missing mdio platform device\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_node_put;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
@@ -2622,7 +2623,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
- return slave_data->phy_if;
+ ret = slave_data->phy_if;
+ goto err_node_put;
}
no_phy_slave:
@@ -2633,7 +2635,7 @@ no_phy_slave:
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr);
if (ret)
- return ret;
+ goto err_node_put;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2648,11 +2650,17 @@ no_phy_slave:
}
i++;
- if (i == data->slaves)
- break;
+ if (i == data->slaves) {
+ ret = 0;
+ goto err_node_put;
+ }
}
return 0;
+
+err_node_put:
+ of_node_put(slave_node);
+ return ret;
}
static void cpsw_remove_dt(struct platform_device *pdev)
@@ -2675,8 +2683,10 @@ static void cpsw_remove_dt(struct platform_device *pdev)
of_node_put(slave_data->phy_node);
i++;
- if (i == data->slaves)
+ if (i == data->slaves) {
+ of_node_put(slave_node);
break;
+ }
}
of_platform_depopulate(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index b4ab1a5f6cd0..78f0f2d59e22 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -855,7 +855,6 @@ static int tlan_init(struct net_device *dev)
dev->name);
return -ENOMEM;
}
- memset(priv->dma_storage, 0, dma_size);
priv->rx_list = (struct tlan_list *)
ALIGN((unsigned long)priv->dma_storage, 8);
priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 7b9350dbebdd..2a6ec5394966 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1196,7 +1196,6 @@ static int rr_open(struct net_device *dev)
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
- memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));
rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
&dma_addr);
@@ -1205,7 +1204,6 @@ static int rr_open(struct net_device *dev)
goto error;
}
rrpriv->info_dma = dma_addr;
- memset(rrpriv->info, 0, sizeof(struct rr_info));
wmb();
spin_lock_irqsave(&rrpriv->lock, flags);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8b4ad10cf940..69e0a2acfcb0 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1292,6 +1292,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f48f05dd2a6..2a1918f25e47 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3430,7 +3430,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
err = -ENOMEM;
goto err_ver;
}
- memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
adapter->default_coal_mode = true;
}
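
The memset() removals in the tlan, rrunner and vmxnet3 hunks above all rest on the same fact: since the v5.0 dma-mapping changes, dma_alloc_coherent() (and therefore the pci_alloc_consistent() wrapper around it) returns zeroed memory, so clearing the buffer again is redundant. A minimal sketch of the resulting idiom, with purely illustrative names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct example_ring {
	void		*desc;
	dma_addr_t	desc_dma;
};

static int example_alloc_ring(struct device *dev, struct example_ring *ring,
			      size_t size)
{
	/* dma_alloc_coherent() hands back zeroed memory; no memset() needed. */
	ring->desc = dma_alloc_coherent(dev, size, &ring->desc_dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
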
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e43a566eef77..0606416dc971 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -7541,6 +7541,8 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
&vht_nss,
true);
update_bitrate_mask = false;
+ } else {
+ vht_pfr = 0;
}
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 93526dfaf791..1f500cddb3a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -80,7 +80,9 @@
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
@@ -109,6 +111,8 @@
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -256,6 +260,30 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
+ .name = "Intel(R) Wi-Fi 6 AX101",
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
@@ -372,6 +400,30 @@ const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
+const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9461",
+ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9462",
+ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560",
+ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
@@ -590,6 +642,7 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index bc267bd2c3b0..1c1bf1b281cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -565,10 +565,13 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
@@ -580,6 +583,10 @@ extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0;
+extern const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0;
extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 93da96a7247c..cb4c5514a556 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -328,6 +328,8 @@ enum {
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
+#define CSR_HW_REV_TYPE_QU_B0 (0x0000334)
+#define CSR_HW_REV_TYPE_QU_C0 (0x0000338)
#define CSR_HW_REV_TYPE_QUZ (0x0000354)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ccc83fd74649..ea2a03d4bf55 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -604,6 +604,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
@@ -971,6 +972,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
@@ -1037,6 +1039,27 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
iwl_trans->cfg = cfg;
}
+
+ /*
+ * This is a hack to switch from Qu B0 to Qu C0. We need to
+ * do this for all cfgs that use Qu B0. All this code is in
+ * urgent need of a refactor, but for now this is the easiest
+ * thing to do to support Qu C-step.
+ */
+ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
+ else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
+ else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
+ else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
+ else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
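
The comment in the hunk above already flags the B0-to-C0 switch as a hack. As a hedged alternative sketch (not what the patch does), the pointer-compare chain could be collapsed into a lookup table so the B0/C0 pairs live in one place; every cfg symbol used here is one of the declarations added to iwl-config.h above:

/* Hypothetical alternative to the if/else chain above. */
static const struct {
	const struct iwl_cfg *b0;
	const struct iwl_cfg *c0;
} iwl_qu_c0_map[] = {
	{ &iwl_ax101_cfg_qu_hr, &iwl_ax101_cfg_qu_c0_hr_b0 },
	{ &iwl_ax201_cfg_qu_hr, &iwl_ax201_cfg_qu_c0_hr_b0 },
	{ &iwl9461_2ac_cfg_qu_b0_jf_b0, &iwl9461_2ac_cfg_qu_c0_jf_b0 },
	{ &iwl9462_2ac_cfg_qu_b0_jf_b0, &iwl9462_2ac_cfg_qu_c0_jf_b0 },
	{ &iwl9560_2ac_cfg_qu_b0_jf_b0, &iwl9560_2ac_cfg_qu_c0_jf_b0 },
	{ &iwl9560_2ac_160_cfg_qu_b0_jf_b0, &iwl9560_2ac_160_cfg_qu_c0_jf_b0 },
};

static const struct iwl_cfg *iwl_qu_c0_cfg(const struct iwl_cfg *cfg)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(iwl_qu_c0_map); i++)
		if (cfg == iwl_qu_c0_map[i].b0)
			return iwl_qu_c0_map[i].c0;

	return cfg;	/* no C0 counterpart known, keep the B0 config */
}

The probe path would then reduce to iwl_trans->cfg = iwl_qu_c0_cfg(iwl_trans->cfg) under the existing CSR_HW_REV_TYPE_QU_C0 check.
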
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 67b81c7221c4..7e3a621b9c0d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -372,15 +372,10 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
/*
- * Report the frame as DMA done
- */
- rt2x00lib_dmadone(entry);
-
- /*
* Check if the received data is simply too small
* to be actually valid, or if the urb is signaling
* a problem.
@@ -389,6 +384,11 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
/*
+ * Report the frame as DMA done
+ */
+ rt2x00lib_dmadone(entry);
+
+ /*
* Schedule the delayed work for reading the RX status
* from the device.
*/
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
index c99eed87382a..df16c755b4da 100644
--- a/drivers/ntb/Kconfig
+++ b/drivers/ntb/Kconfig
@@ -13,6 +13,17 @@ menuconfig NTB
if NTB
+config NTB_MSI
+ bool "MSI Interrupt Support"
+ depends on PCI_MSI
+ help
+ Support using MSI interrupt forwarding instead of (or in addition to)
+ hardware doorbells. MSI interrupts typically offer lower latency
+ than doorbells and more MSI interrupts can be made available to
+ clients. However, this requires an extra memory window and support
+ in the hardware driver for creating the MSI interrupts.
+
+ If unsure, say N.
source "drivers/ntb/hw/Kconfig"
source "drivers/ntb/test/Kconfig"
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
index 5c64438d5b3f..3a6fa181ff99 100644
--- a/drivers/ntb/Makefile
+++ b/drivers/ntb/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NTB) += ntb.o hw/ test/
obj-$(CONFIG_NTB_TRANSPORT) += ntb_transport.o
+
+ntb-y := core.o
+ntb-$(CONFIG_NTB_MSI) += msi.o
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/core.c
index 2581ab724c34..2581ab724c34 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/core.c
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index efb214fc545a..2859cc99b73e 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -160,8 +160,8 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
}
/* set and verify setting the limit */
- write64(limit, mmio + limit_reg);
- reg_val = read64(mmio + limit_reg);
+ write64(limit, peer_mmio + limit_reg);
+ reg_val = read64(peer_mmio + limit_reg);
if (reg_val != limit) {
write64(base_addr, mmio + limit_reg);
write64(0, peer_mmio + xlat_reg);
@@ -183,8 +183,8 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
}
/* set and verify setting the limit */
- writel(limit, mmio + limit_reg);
- reg_val = readl(mmio + limit_reg);
+ writel(limit, peer_mmio + limit_reg);
+ reg_val = readl(peer_mmio + limit_reg);
if (reg_val != limit) {
writel(base_addr, mmio + limit_reg);
writel(0, peer_mmio + xlat_reg);
@@ -333,7 +333,7 @@ static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
if (db_vector < 0 || db_vector > ndev->db_count)
return 0;
- return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector);
+ return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
}
static u64 amd_ntb_db_read(struct ntb_dev *ntb)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
index f475b56a3f49..c3397160db7f 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -532,9 +532,9 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return 0;
}
-int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
- resource_size_t *db_size,
- u64 *db_data, int db_bit)
+static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit)
{
phys_addr_t db_addr_base;
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index db4967748e4d..f4959458d909 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -86,7 +86,8 @@ struct switchtec_ntb {
bool link_is_up;
enum ntb_speed link_speed;
enum ntb_width link_width;
- struct work_struct link_reinit_work;
+ struct work_struct check_link_status_work;
+ bool link_force_down;
};
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
@@ -485,33 +486,11 @@ enum switchtec_msg {
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
-static void link_reinit_work(struct work_struct *work)
-{
- struct switchtec_ntb *sndev;
-
- sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
-
- switchtec_ntb_reinit_peer(sndev);
-}
-
-static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
- enum switchtec_msg msg)
+static void switchtec_ntb_link_status_update(struct switchtec_ntb *sndev)
{
int link_sta;
int old = sndev->link_is_up;
- if (msg == MSG_LINK_FORCE_DOWN) {
- schedule_work(&sndev->link_reinit_work);
-
- if (sndev->link_is_up) {
- sndev->link_is_up = 0;
- ntb_link_event(&sndev->ntb);
- dev_info(&sndev->stdev->dev, "ntb link forced down\n");
- }
-
- return;
- }
-
link_sta = sndev->self_shared->link_sta;
if (link_sta) {
u64 peer = ioread64(&sndev->peer_shared->magic);
@@ -536,6 +515,38 @@ static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
}
}
+static void check_link_status_work(struct work_struct *work)
+{
+ struct switchtec_ntb *sndev;
+
+ sndev = container_of(work, struct switchtec_ntb,
+ check_link_status_work);
+
+ if (sndev->link_force_down) {
+ sndev->link_force_down = false;
+ switchtec_ntb_reinit_peer(sndev);
+
+ if (sndev->link_is_up) {
+ sndev->link_is_up = 0;
+ ntb_link_event(&sndev->ntb);
+ dev_info(&sndev->stdev->dev, "ntb link forced down\n");
+ }
+
+ return;
+ }
+
+ switchtec_ntb_link_status_update(sndev);
+}
+
+static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
+ enum switchtec_msg msg)
+{
+ if (msg == MSG_LINK_FORCE_DOWN)
+ sndev->link_force_down = true;
+
+ schedule_work(&sndev->check_link_status_work);
+}
+
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
struct switchtec_ntb *sndev = stdev->sndev;
@@ -568,7 +579,7 @@ static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
sndev->self_shared->link_sta = 1;
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
- switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
+ switchtec_ntb_link_status_update(sndev);
return 0;
}
@@ -582,7 +593,7 @@ static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
sndev->self_shared->link_sta = 0;
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
- switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
+ switchtec_ntb_link_status_update(sndev);
return 0;
}
@@ -835,7 +846,8 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
sndev->ntb.topo = NTB_TOPO_SWITCH;
sndev->ntb.ops = &switchtec_ntb_ops;
- INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
+ INIT_WORK(&sndev->check_link_status_work, check_link_status_work);
+ sndev->link_force_down = false;
sndev->self_partition = sndev->stdev->partition;
@@ -872,7 +884,7 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
}
sndev->peer_partition = ffs(tpart_vec) - 1;
- if (!(part_map & (1 << sndev->peer_partition))) {
+ if (!(part_map & (1ULL << sndev->peer_partition))) {
dev_err(&sndev->stdev->dev,
"ntb target partition is not NT partition\n");
return -ENODEV;
@@ -1448,10 +1460,16 @@ static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
- dev_info(&sndev->stdev->dev, "peer reinitialized\n");
- switchtec_ntb_deinit_shared_mw(sndev);
- switchtec_ntb_init_mw(sndev);
- return switchtec_ntb_init_shared_mw(sndev);
+ int rc;
+
+ if (crosslink_is_enabled(sndev))
+ return 0;
+
+ dev_info(&sndev->stdev->dev, "reinitialize shared memory window\n");
+ rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
+ sndev->self_partition,
+ sndev->self_shared_dma);
+ return rc;
}
static int switchtec_ntb_add(struct device *dev,
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
new file mode 100644
index 000000000000..9dddf133658f
--- /dev/null
+++ b/drivers/ntb/msi.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/ntb.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>");
+MODULE_DESCRIPTION("NTB MSI Interrupt Library");
+
+struct ntb_msi {
+ u64 base_addr;
+ u64 end_addr;
+
+ void (*desc_changed)(void *ctx);
+
+ u32 __iomem *peer_mws[];
+};
+
+/**
+ * ntb_msi_init() - Initialize the MSI context
+ * @ntb: NTB device context
+ * @desc_changed: Callback invoked whenever the locally allocated MSI
+ * descriptors change (may be NULL)
+ *
+ * This function must be called before any other ntb_msi function.
+ * It initializes the context for MSI operations and maps
+ * the peer memory windows.
+ *
+ * This function reserves the last N outbound memory windows (where N
+ * is the number of peers).
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+int ntb_msi_init(struct ntb_dev *ntb,
+ void (*desc_changed)(void *ctx))
+{
+ phys_addr_t mw_phys_addr;
+ resource_size_t mw_size;
+ size_t struct_size;
+ int peer_widx;
+ int peers;
+ int ret;
+ int i;
+
+ peers = ntb_peer_port_count(ntb);
+ if (peers <= 0)
+ return -EINVAL;
+
+ struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;
+
+ ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+ if (!ntb->msi)
+ return -ENOMEM;
+
+ ntb->msi->desc_changed = desc_changed;
+
+ for (i = 0; i < peers; i++) {
+ peer_widx = ntb_peer_mw_count(ntb) - 1 - i;
+
+ ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
+ &mw_size);
+ if (ret)
+ goto unroll;
+
+ ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
+ mw_size);
+ if (!ntb->msi->peer_mws[i]) {
+ ret = -EFAULT;
+ goto unroll;
+ }
+ }
+
+ return 0;
+
+unroll:
+ for (i = 0; i < peers; i++)
+ if (ntb->msi->peer_mws[i])
+ devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);
+
+ devm_kfree(&ntb->dev, ntb->msi);
+ ntb->msi = NULL;
+ return ret;
+}
+EXPORT_SYMBOL(ntb_msi_init);
+
+/**
+ * ntb_msi_setup_mws() - Initialize the MSI inbound memory windows
+ * @ntb: NTB device context
+ *
+ * This function sets up the required inbound memory windows. It should be
+ * called from a work function after a link up event.
+ *
+ * Over the entire network, this function will reserve the last N
+ * inbound memory windows for each peer (where N is the number of peers).
+ *
+ * ntb_msi_init() must be called before this function.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+int ntb_msi_setup_mws(struct ntb_dev *ntb)
+{
+ struct msi_desc *desc;
+ u64 addr;
+ int peer, peer_widx;
+ resource_size_t addr_align, size_align, size_max;
+ resource_size_t mw_size = SZ_32K;
+ resource_size_t mw_min_size = mw_size;
+ int i;
+ int ret;
+
+ if (!ntb->msi)
+ return -EINVAL;
+
+ desc = first_msi_entry(&ntb->pdev->dev);
+ addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+
+ for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
+ peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
+ if (peer_widx < 0)
+ return peer_widx;
+
+ ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ addr &= ~(addr_align - 1);
+ }
+
+ for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
+ peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
+ if (peer_widx < 0) {
+ ret = peer_widx;
+ goto error_out;
+ }
+
+ ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
+ &size_align, &size_max);
+ if (ret)
+ goto error_out;
+
+ mw_size = round_up(mw_size, size_align);
+ mw_size = max(mw_size, size_max);
+ if (mw_size < mw_min_size)
+ mw_min_size = mw_size;
+
+ ret = ntb_mw_set_trans(ntb, peer, peer_widx,
+ addr, mw_size);
+ if (ret)
+ goto error_out;
+ }
+
+ ntb->msi->base_addr = addr;
+ ntb->msi->end_addr = addr + mw_min_size;
+
+ return 0;
+
+error_out:
+ for (i = 0; i < peer; i++) {
+ peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
+ if (peer_widx < 0)
+ continue;
+
+ ntb_mw_clear_trans(ntb, i, peer_widx);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ntb_msi_setup_mws);
+
+/**
+ * ntb_msi_clear_mws() - Clear all inbound memory windows
+ * @ntb: NTB device context
+ *
+ * This function tears down the resources used by ntb_msi_setup_mws().
+ */
+void ntb_msi_clear_mws(struct ntb_dev *ntb)
+{
+ int peer;
+ int peer_widx;
+
+ for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
+ peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
+ if (peer_widx < 0)
+ continue;
+
+ ntb_mw_clear_trans(ntb, peer, peer_widx);
+ }
+}
+EXPORT_SYMBOL(ntb_msi_clear_mws);
+
+struct ntb_msi_devres {
+ struct ntb_dev *ntb;
+ struct msi_desc *entry;
+ struct ntb_msi_desc *msi_desc;
+};
+
+static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
+ struct ntb_msi_desc *msi_desc)
+{
+ u64 addr;
+
+ addr = entry->msg.address_lo +
+ ((uint64_t)entry->msg.address_hi << 32);
+
+ if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
+ dev_warn_once(&ntb->dev,
+ "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
+ entry->irq, addr, ntb->msi->base_addr,
+ ntb->msi->end_addr);
+ return -EFAULT;
+ }
+
+ msi_desc->addr_offset = addr - ntb->msi->base_addr;
+ msi_desc->data = entry->msg.data;
+
+ return 0;
+}
+
+static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
+{
+ struct ntb_msi_devres *dr = data;
+
+ WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));
+
+ if (dr->ntb->msi->desc_changed)
+ dr->ntb->msi->desc_changed(dr->ntb->ctx);
+}
+
+static void ntbm_msi_callback_release(struct device *dev, void *res)
+{
+ struct ntb_msi_devres *dr = res;
+
+ dr->entry->write_msi_msg = NULL;
+ dr->entry->write_msi_msg_data = NULL;
+}
+
+static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
+ struct ntb_msi_desc *msi_desc)
+{
+ struct ntb_msi_devres *dr;
+
+ dr = devres_alloc(ntbm_msi_callback_release,
+ sizeof(struct ntb_msi_devres), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->ntb = ntb;
+ dr->entry = entry;
+ dr->msi_desc = msi_desc;
+
+ devres_add(&ntb->dev, dr);
+
+ dr->entry->write_msi_msg = ntb_msi_write_msg;
+ dr->entry->write_msi_msg_data = dr;
+
+ return 0;
+}
+
+/**
+ * ntbm_msi_request_threaded_irq() - allocate an MSI interrupt
+ * @ntb: NTB device context
+ * @handler: Function to be called when the IRQ occurs
+ * @thread_fn: Function to be called in a threaded interrupt context. NULL
+ * for clients which handle everything in @handler
+ * @name: An ascii name for the claiming device, dev_name(dev) if NULL
+ * @dev_id: A cookie passed back to the handler function
+ * @msi_desc: Filled in with the MSI descriptor a peer can use to trigger
+ * the allocated interrupt
+ *
+ * This function assigns an interrupt handler to an unused
+ * MSI interrupt and returns the descriptor used to trigger
+ * it. The descriptor can then be sent to a peer to trigger
+ * the interrupt.
+ *
+ * The interrupt resource is managed with devres so it will
+ * be automatically freed when the NTB device is torn down.
+ *
+ * If an IRQ allocated with this function needs to be freed
+ * separately, ntbm_free_irq() must be used.
+ *
+ * Return: IRQ number assigned on success, otherwise a negative error number.
+ */
+int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc)
+{
+ struct msi_desc *entry;
+ struct irq_desc *desc;
+ int ret;
+
+ if (!ntb->msi)
+ return -EINVAL;
+
+ for_each_pci_msi_entry(entry, ntb->pdev) {
+ desc = irq_to_desc(entry->irq);
+ if (desc->action)
+ continue;
+
+ ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
+ thread_fn, 0, name, dev_id);
+ if (ret)
+ continue;
+
+ if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
+ devm_free_irq(&ntb->dev, entry->irq, dev_id);
+ continue;
+ }
+
+ ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
+ if (ret) {
+ devm_free_irq(&ntb->dev, entry->irq, dev_id);
+ return ret;
+ }
+
+
+ return entry->irq;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
+
+static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
+{
+ struct ntb_dev *ntb = dev_ntb(dev);
+ struct ntb_msi_devres *dr = res;
+
+ return dr->ntb == ntb && dr->entry == data;
+}
+
+/**
+ * ntbm_msi_free_irq() - free an interrupt
+ * @ntb: NTB device context
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * This function should be used to manually free IRQs allocated with
+ * ntbm_msi_request_[threaded_]irq().
+ */
+void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
+{
+ struct msi_desc *entry = irq_get_msi_desc(irq);
+
+ entry->write_msi_msg = NULL;
+ entry->write_msi_msg_data = NULL;
+
+ WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
+ ntbm_msi_callback_match, entry));
+
+ devm_free_irq(&ntb->dev, irq, dev_id);
+}
+EXPORT_SYMBOL(ntbm_msi_free_irq);
+
+/**
+ * ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer
+ * @ntb: NTB device context
+ * @peer: Peer index
+ * @desc: MSI descriptor data which triggers the interrupt
+ *
+ * This function triggers an interrupt on a peer. It requires
+ * the descriptor structure to have been passed from that peer
+ * by some other means.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc)
+{
+ int idx;
+
+ if (!ntb->msi)
+ return -EINVAL;
+
+ idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);
+
+ iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);
+
+ return 0;
+}
+EXPORT_SYMBOL(ntb_msi_peer_trigger);
+
+/**
+ * ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt
+ * @ntb: NTB device context
+ * @peer: Peer index
+ * @desc: MSI descriptor data which triggers the interrupt
+ * @msi_addr: Physical address to trigger the interrupt
+ *
+ * This function allows using DMA engines to trigger an interrupt
+ * (for example, triggering an interrupt to process the data after
+ * sending it). To trigger the interrupt, write @desc.data to the address
+ * returned in @msi_addr.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc,
+ phys_addr_t *msi_addr)
+{
+ int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
+ phys_addr_t mw_phys_addr;
+ int ret;
+
+ ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
+ if (ret)
+ return ret;
+
+ if (msi_addr)
+ *msi_addr = mw_phys_addr + desc->addr_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(ntb_msi_peer_addr);
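
ntb_msi_peer_addr() above exists so that a DMA engine can fire the peer's interrupt by writing desc->data to the returned physical address, for example as the completion of a data transfer. The hedged sketch below only illustrates that contract with a plain CPU write (hypothetical caller, assumes linux/io.h and linux/ntb.h); a real client would normally call ntb_msi_peer_trigger() or hand msi_addr to its DMA engine instead:

static int example_trigger_by_addr(struct ntb_dev *ntb, int peer,
				   struct ntb_msi_desc *desc)
{
	phys_addr_t msi_addr;
	void __iomem *p;
	int rc;

	rc = ntb_msi_peer_addr(ntb, peer, desc, &msi_addr);
	if (rc)
		return rc;

	/* Map just the 32-bit trigger location and write the MSI data. */
	p = ioremap(msi_addr, sizeof(u32));
	if (!p)
		return -ENOMEM;

	iowrite32(desc->data, p);
	iounmap(p);

	return 0;
}
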
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d4f39ba1d976..40c90ca10729 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -93,6 +93,12 @@ static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+static bool use_msi;
+#ifdef CONFIG_NTB_MSI
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");
+#endif
+
static struct dentry *nt_debugfs_dir;
/* Only two-ports NTB devices are supported */
@@ -188,6 +194,11 @@ struct ntb_transport_qp {
u64 tx_err_no_buf;
u64 tx_memcpy;
u64 tx_async;
+
+ bool use_msi;
+ int msi_irq;
+ struct ntb_msi_desc msi_desc;
+ struct ntb_msi_desc peer_msi_desc;
};
struct ntb_transport_mw {
@@ -221,6 +232,10 @@ struct ntb_transport_ctx {
u64 qp_bitmap;
u64 qp_bitmap_free;
+ bool use_msi;
+ unsigned int msi_spad_offset;
+ u64 msi_db_mask;
+
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
@@ -667,6 +682,114 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
return 0;
}
+static irqreturn_t ntb_transport_isr(int irq, void *dev)
+{
+ struct ntb_transport_qp *qp = dev;
+
+ tasklet_schedule(&qp->rxc_db_work);
+
+ return IRQ_HANDLED;
+}
+
+static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt,
+ unsigned int qp_num)
+{
+ struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
+ int spad = qp_num * 2 + nt->msi_spad_offset;
+
+ if (!nt->use_msi)
+ return;
+
+ if (spad >= ntb_spad_count(nt->ndev))
+ return;
+
+ qp->peer_msi_desc.addr_offset =
+ ntb_peer_spad_read(qp->ndev, PIDX, spad);
+ qp->peer_msi_desc.data =
+ ntb_peer_spad_read(qp->ndev, PIDX, spad + 1);
+
+ dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n",
+ qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data);
+
+ if (qp->peer_msi_desc.addr_offset) {
+ qp->use_msi = true;
+ dev_info(&qp->ndev->pdev->dev,
+ "Using MSI interrupts for QP%d\n", qp_num);
+ }
+}
+
+static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt,
+ unsigned int qp_num)
+{
+ struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
+ int spad = qp_num * 2 + nt->msi_spad_offset;
+ int rc;
+
+ if (!nt->use_msi)
+ return;
+
+ if (spad >= ntb_spad_count(nt->ndev)) {
+ dev_warn_once(&qp->ndev->pdev->dev,
+ "Not enough SPADS to use MSI interrupts\n");
+ return;
+ }
+
+ ntb_spad_write(qp->ndev, spad, 0);
+ ntb_spad_write(qp->ndev, spad + 1, 0);
+
+ if (!qp->msi_irq) {
+ qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr,
+ KBUILD_MODNAME, qp,
+ &qp->msi_desc);
+ if (qp->msi_irq < 0) {
+ dev_warn(&qp->ndev->pdev->dev,
+ "Unable to allocate MSI interrupt for qp%d\n",
+ qp_num);
+ return;
+ }
+ }
+
+ rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset);
+ if (rc)
+ goto err_free_interrupt;
+
+ rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data);
+ if (rc)
+ goto err_free_interrupt;
+
+ dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n",
+ qp_num, qp->msi_irq, qp->msi_desc.addr_offset,
+ qp->msi_desc.data);
+
+ return;
+
+err_free_interrupt:
+ devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
+}
+
+static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt)
+{
+ int i;
+
+ dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed");
+
+ for (i = 0; i < nt->qp_count; i++)
+ ntb_transport_setup_qp_peer_msi(nt, i);
+}
+
+static void ntb_transport_msi_desc_changed(void *data)
+{
+ struct ntb_transport_ctx *nt = data;
+ int i;
+
+ dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed");
+
+ for (i = 0; i < nt->qp_count; i++)
+ ntb_transport_setup_qp_msi(nt, i);
+
+ ntb_peer_db_set(nt->ndev, nt->msi_db_mask);
+}
+
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
@@ -905,6 +1028,20 @@ static void ntb_transport_link_work(struct work_struct *work)
int rc = 0, i, spad;
/* send the local info, in the opposite order of the way we read it */
+
+ if (nt->use_msi) {
+ rc = ntb_msi_setup_mws(ndev);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "Failed to register MSI memory window: %d\n",
+ rc);
+ nt->use_msi = false;
+ }
+ }
+
+ for (i = 0; i < nt->qp_count; i++)
+ ntb_transport_setup_qp_msi(nt, i);
+
for (i = 0; i < nt->mw_count; i++) {
size = nt->mw_vec[i].phys_size;
@@ -962,6 +1099,7 @@ static void ntb_transport_link_work(struct work_struct *work)
struct ntb_transport_qp *qp = &nt->qp_vec[i];
ntb_transport_setup_qp_mw(nt, i);
+ ntb_transport_setup_qp_peer_msi(nt, i);
if (qp->client_ready)
schedule_delayed_work(&qp->link_work, 0);
@@ -1135,6 +1273,19 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
return -ENOMEM;
nt->ndev = ndev;
+
+ /*
+ * If we are using MSI, and have at least one extra memory window,
+ * we will reserve the last MW for the MSI window.
+ */
+ if (use_msi && mw_count > 1) {
+ rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed);
+ if (!rc) {
+ mw_count -= 1;
+ nt->use_msi = true;
+ }
+ }
+
spad_count = ntb_spad_count(ndev);
/* Limit the MW's based on the availability of scratchpads */
@@ -1148,6 +1299,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
nt->mw_count = min(mw_count, max_mw_count_for_spads);
+ nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;
+
nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
GFP_KERNEL, node);
if (!nt->mw_vec) {
@@ -1178,6 +1331,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
qp_bitmap = ntb_db_valid_mask(ndev);
qp_count = ilog2(qp_bitmap);
+ if (nt->use_msi) {
+ qp_count -= 1;
+ nt->msi_db_mask = 1 << qp_count;
+ ntb_db_clear_mask(ndev, nt->msi_db_mask);
+ }
+
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
else if (nt->mw_count < qp_count)
@@ -1601,7 +1760,10 @@ static void ntb_tx_copy_callback(void *data,
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
- ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
+ if (qp->use_msi)
+ ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
+ else
+ ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
/* The entry length can only be zero if the packet is intended to be a
* "link down" or similar. Since no payload is being sent in these
@@ -1869,6 +2031,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
qp->rx_dma_chan = NULL;
}
+ qp->tx_mw_dma_addr = 0;
if (qp->tx_dma_chan) {
qp->tx_mw_dma_addr =
dma_map_resource(qp->tx_dma_chan->device->dev,
@@ -2268,6 +2431,11 @@ static void ntb_transport_doorbell_callback(void *data, int vector)
u64 db_bits;
unsigned int qp_num;
+ if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
+ ntb_transport_msi_peer_desc_changed(nt);
+ ntb_db_clear(nt->ndev, nt->msi_db_mask);
+ }
+
db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
ntb_db_vector_mask(nt->ndev, vector));
diff --git a/drivers/ntb/test/Kconfig b/drivers/ntb/test/Kconfig
index a8db00a7e087..516b991f33b9 100644
--- a/drivers/ntb/test/Kconfig
+++ b/drivers/ntb/test/Kconfig
@@ -26,3 +26,12 @@ config NTB_PERF
to and from the window without additional software interaction.
If unsure, say N.
+
+config NTB_MSI_TEST
+ tristate "NTB MSI Test Client"
+ depends on NTB_MSI
+ help
+ This tool demonstrates the use of the NTB MSI library to
+ send MSI interrupts between peers.
+
+ If unsure, say N.
diff --git a/drivers/ntb/test/Makefile b/drivers/ntb/test/Makefile
index cbfd67622ef7..19ed91d8a3b1 100644
--- a/drivers/ntb/test/Makefile
+++ b/drivers/ntb/test/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_NTB_PINGPONG) += ntb_pingpong.o
obj-$(CONFIG_NTB_TOOL) += ntb_tool.o
obj-$(CONFIG_NTB_PERF) += ntb_perf.o
+obj-$(CONFIG_NTB_MSI_TEST) += ntb_msi_test.o
diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
new file mode 100644
index 000000000000..99d826ed9c34
--- /dev/null
+++ b/drivers/ntb/test/ntb_msi_test.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/ntb.h>
+#include <linux/pci.h>
+#include <linux/radix-tree.h>
+#include <linux/workqueue.h>
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>");
+MODULE_DESCRIPTION("Test for sending MSI interrupts over an NTB memory window");
+
+static int num_irqs = 4;
+module_param(num_irqs, int, 0644);
+MODULE_PARM_DESC(num_irqs, "number of irqs to use");
+
+struct ntb_msit_ctx {
+ struct ntb_dev *ntb;
+ struct dentry *dbgfs_dir;
+ struct work_struct setup_work;
+
+ struct ntb_msit_isr_ctx {
+ int irq_idx;
+ int irq_num;
+ int occurrences;
+ struct ntb_msit_ctx *nm;
+ struct ntb_msi_desc desc;
+ } *isr_ctx;
+
+ struct ntb_msit_peer {
+ struct ntb_msit_ctx *nm;
+ int pidx;
+ int num_irqs;
+ struct completion init_comp;
+ struct ntb_msi_desc *msi_desc;
+ } peers[];
+};
+
+static struct dentry *ntb_msit_dbgfs_topdir;
+
+static irqreturn_t ntb_msit_isr(int irq, void *dev)
+{
+ struct ntb_msit_isr_ctx *isr_ctx = dev;
+ struct ntb_msit_ctx *nm = isr_ctx->nm;
+
+ dev_dbg(&nm->ntb->dev, "Interrupt Occurred: %d",
+ isr_ctx->irq_idx);
+
+ isr_ctx->occurrences++;
+
+ return IRQ_HANDLED;
+}
+
+static void ntb_msit_setup_work(struct work_struct *work)
+{
+ struct ntb_msit_ctx *nm = container_of(work, struct ntb_msit_ctx,
+ setup_work);
+ int irq_count = 0;
+ int irq;
+ int ret;
+ uintptr_t i;
+
+ ret = ntb_msi_setup_mws(nm->ntb);
+ if (ret) {
+ dev_err(&nm->ntb->dev, "Unable to setup MSI windows: %d\n",
+ ret);
+ return;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
+ nm->isr_ctx[i].irq_idx = i;
+ nm->isr_ctx[i].nm = nm;
+
+ if (!nm->isr_ctx[i].irq_num) {
+ irq = ntbm_msi_request_irq(nm->ntb, ntb_msit_isr,
+ KBUILD_MODNAME,
+ &nm->isr_ctx[i],
+ &nm->isr_ctx[i].desc);
+ if (irq < 0)
+ break;
+
+ nm->isr_ctx[i].irq_num = irq;
+ }
+
+ ret = ntb_spad_write(nm->ntb, 2 * i + 1,
+ nm->isr_ctx[i].desc.addr_offset);
+ if (ret)
+ break;
+
+ ret = ntb_spad_write(nm->ntb, 2 * i + 2,
+ nm->isr_ctx[i].desc.data);
+ if (ret)
+ break;
+
+ irq_count++;
+ }
+
+ ntb_spad_write(nm->ntb, 0, irq_count);
+ ntb_peer_db_set(nm->ntb, BIT(ntb_port_number(nm->ntb)));
+}
+
+static void ntb_msit_desc_changed(void *ctx)
+{
+ struct ntb_msit_ctx *nm = ctx;
+ int i;
+
+ dev_dbg(&nm->ntb->dev, "MSI Descriptors Changed\n");
+
+ for (i = 0; i < num_irqs; i++) {
+ ntb_spad_write(nm->ntb, 2 * i + 1,
+ nm->isr_ctx[i].desc.addr_offset);
+ ntb_spad_write(nm->ntb, 2 * i + 2,
+ nm->isr_ctx[i].desc.data);
+ }
+
+ ntb_peer_db_set(nm->ntb, BIT(ntb_port_number(nm->ntb)));
+}
+
+static void ntb_msit_link_event(void *ctx)
+{
+ struct ntb_msit_ctx *nm = ctx;
+
+ if (!ntb_link_is_up(nm->ntb, NULL, NULL))
+ return;
+
+ schedule_work(&nm->setup_work);
+}
+
+static void ntb_msit_copy_peer_desc(struct ntb_msit_ctx *nm, int peer)
+{
+ int i;
+ struct ntb_msi_desc *desc = nm->peers[peer].msi_desc;
+ int irq_count = nm->peers[peer].num_irqs;
+
+ for (i = 0; i < irq_count; i++) {
+ desc[i].addr_offset = ntb_peer_spad_read(nm->ntb, peer,
+ 2 * i + 1);
+ desc[i].data = ntb_peer_spad_read(nm->ntb, peer, 2 * i + 2);
+ }
+
+ dev_info(&nm->ntb->dev, "Found %d interrupts on peer %d\n",
+ irq_count, peer);
+
+ complete_all(&nm->peers[peer].init_comp);
+}
+
+static void ntb_msit_db_event(void *ctx, int vec)
+{
+ struct ntb_msit_ctx *nm = ctx;
+ struct ntb_msi_desc *desc;
+ u64 peer_mask = ntb_db_read(nm->ntb);
+ u32 irq_count;
+ int peer;
+
+ ntb_db_clear(nm->ntb, peer_mask);
+
+ for (peer = 0; peer < sizeof(peer_mask) * 8; peer++) {
+ if (!(peer_mask & BIT(peer)))
+ continue;
+
+ irq_count = ntb_peer_spad_read(nm->ntb, peer, 0);
+ if (irq_count == -1)
+ continue;
+
+ desc = kcalloc(irq_count, sizeof(*desc), GFP_ATOMIC);
+ if (!desc)
+ continue;
+
+ kfree(nm->peers[peer].msi_desc);
+ nm->peers[peer].msi_desc = desc;
+ nm->peers[peer].num_irqs = irq_count;
+
+ ntb_msit_copy_peer_desc(nm, peer);
+ }
+}
+
+static const struct ntb_ctx_ops ntb_msit_ops = {
+ .link_event = ntb_msit_link_event,
+ .db_event = ntb_msit_db_event,
+};
+
+static int ntb_msit_dbgfs_trigger(void *data, u64 idx)
+{
+ struct ntb_msit_peer *peer = data;
+
+ if (idx >= peer->num_irqs)
+ return -EINVAL;
+
+ dev_dbg(&peer->nm->ntb->dev, "trigger irq %llu on peer %u\n",
+ idx, peer->pidx);
+
+ return ntb_msi_peer_trigger(peer->nm->ntb, peer->pidx,
+ &peer->msi_desc[idx]);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_trigger_fops, NULL,
+ ntb_msit_dbgfs_trigger, "%llu\n");
+
+static int ntb_msit_dbgfs_port_get(void *data, u64 *port)
+{
+ struct ntb_msit_peer *peer = data;
+
+ *port = ntb_peer_port_number(peer->nm->ntb, peer->pidx);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_port_fops, ntb_msit_dbgfs_port_get,
+ NULL, "%llu\n");
+
+static int ntb_msit_dbgfs_count_get(void *data, u64 *count)
+{
+ struct ntb_msit_peer *peer = data;
+
+ *count = peer->num_irqs;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_count_fops, ntb_msit_dbgfs_count_get,
+ NULL, "%llu\n");
+
+static int ntb_msit_dbgfs_ready_get(void *data, u64 *ready)
+{
+ struct ntb_msit_peer *peer = data;
+
+ *ready = try_wait_for_completion(&peer->init_comp);
+
+ return 0;
+}
+
+static int ntb_msit_dbgfs_ready_set(void *data, u64 ready)
+{
+ struct ntb_msit_peer *peer = data;
+
+ return wait_for_completion_interruptible(&peer->init_comp);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_ready_fops, ntb_msit_dbgfs_ready_get,
+ ntb_msit_dbgfs_ready_set, "%llu\n");
+
+static int ntb_msit_dbgfs_occurrences_get(void *data, u64 *occurrences)
+{
+ struct ntb_msit_isr_ctx *isr_ctx = data;
+
+ *occurrences = isr_ctx->occurrences;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_occurrences_fops,
+ ntb_msit_dbgfs_occurrences_get,
+ NULL, "%llu\n");
+
+static int ntb_msit_dbgfs_local_port_get(void *data, u64 *port)
+{
+ struct ntb_msit_ctx *nm = data;
+
+ *port = ntb_port_number(nm->ntb);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_local_port_fops,
+ ntb_msit_dbgfs_local_port_get,
+ NULL, "%llu\n");
+
+static void ntb_msit_create_dbgfs(struct ntb_msit_ctx *nm)
+{
+ struct pci_dev *pdev = nm->ntb->pdev;
+ char buf[32];
+ int i;
+ struct dentry *peer_dir;
+
+ nm->dbgfs_dir = debugfs_create_dir(pci_name(pdev),
+ ntb_msit_dbgfs_topdir);
+ debugfs_create_file("port", 0400, nm->dbgfs_dir, nm,
+ &ntb_msit_local_port_fops);
+
+ for (i = 0; i < ntb_peer_port_count(nm->ntb); i++) {
+ nm->peers[i].pidx = i;
+ nm->peers[i].nm = nm;
+ init_completion(&nm->peers[i].init_comp);
+
+ snprintf(buf, sizeof(buf), "peer%d", i);
+ peer_dir = debugfs_create_dir(buf, nm->dbgfs_dir);
+
+ debugfs_create_file_unsafe("trigger", 0200, peer_dir,
+ &nm->peers[i],
+ &ntb_msit_trigger_fops);
+
+ debugfs_create_file_unsafe("port", 0400, peer_dir,
+ &nm->peers[i], &ntb_msit_port_fops);
+
+ debugfs_create_file_unsafe("count", 0400, peer_dir,
+ &nm->peers[i],
+ &ntb_msit_count_fops);
+
+ debugfs_create_file_unsafe("ready", 0600, peer_dir,
+ &nm->peers[i],
+ &ntb_msit_ready_fops);
+ }
+
+ for (i = 0; i < num_irqs; i++) {
+ snprintf(buf, sizeof(buf), "irq%d_occurrences", i);
+ debugfs_create_file_unsafe(buf, 0400, nm->dbgfs_dir,
+ &nm->isr_ctx[i],
+ &ntb_msit_occurrences_fops);
+ }
+}
+
+static void ntb_msit_remove_dbgfs(struct ntb_msit_ctx *nm)
+{
+ debugfs_remove_recursive(nm->dbgfs_dir);
+}
+
+static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
+{
+ struct ntb_msit_ctx *nm;
+ size_t struct_size;
+ int peers;
+ int ret;
+
+ peers = ntb_peer_port_count(ntb);
+ if (peers <= 0)
+ return -EINVAL;
+
+ if (ntb_spad_is_unsafe(ntb) || ntb_spad_count(ntb) < 2 * num_irqs + 1) {
+ dev_err(&ntb->dev, "NTB MSI test requires at least %d spads for %d irqs\n",
+ 2 * num_irqs + 1, num_irqs);
+ return -EFAULT;
+ }
+
+ ret = ntb_spad_write(ntb, 0, -1);
+ if (ret) {
+ dev_err(&ntb->dev, "Unable to write spads: %d\n", ret);
+ return ret;
+ }
+
+ ret = ntb_db_clear_mask(ntb, GENMASK(peers - 1, 0));
+ if (ret) {
+ dev_err(&ntb->dev, "Unable to clear doorbell mask: %d\n", ret);
+ return ret;
+ }
+
+ ret = ntb_msi_init(ntb, ntb_msit_desc_changed);
+ if (ret) {
+ dev_err(&ntb->dev, "Unable to initialize MSI library: %d\n",
+ ret);
+ return ret;
+ }
+
+ struct_size = sizeof(*nm) + sizeof(*nm->peers) * peers;
+
+ nm = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+ if (!nm)
+ return -ENOMEM;
+
+ nm->isr_ctx = devm_kcalloc(&ntb->dev, num_irqs, sizeof(*nm->isr_ctx),
+ GFP_KERNEL);
+ if (!nm->isr_ctx)
+ return -ENOMEM;
+
+ INIT_WORK(&nm->setup_work, ntb_msit_setup_work);
+ nm->ntb = ntb;
+
+ ntb_msit_create_dbgfs(nm);
+
+ ret = ntb_set_ctx(ntb, nm, &ntb_msit_ops);
+ if (ret)
+ goto remove_dbgfs;
+
+ if (!nm->isr_ctx)
+ goto remove_dbgfs;
+
+ ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+
+ return 0;
+
+remove_dbgfs:
+ ntb_msit_remove_dbgfs(nm);
+ devm_kfree(&ntb->dev, nm->isr_ctx);
+ devm_kfree(&ntb->dev, nm);
+ return ret;
+}
+
+static void ntb_msit_remove(struct ntb_client *client, struct ntb_dev *ntb)
+{
+ struct ntb_msit_ctx *nm = ntb->ctx;
+ int i;
+
+ ntb_link_disable(ntb);
+ ntb_db_set_mask(ntb, ntb_db_valid_mask(ntb));
+ ntb_msi_clear_mws(ntb);
+
+ for (i = 0; i < ntb_peer_port_count(ntb); i++)
+ kfree(nm->peers[i].msi_desc);
+
+ ntb_clear_ctx(ntb);
+ ntb_msit_remove_dbgfs(nm);
+}
+
+static struct ntb_client ntb_msit_client = {
+ .ops = {
+ .probe = ntb_msit_probe,
+ .remove = ntb_msit_remove
+ }
+};
+
+static int __init ntb_msit_init(void)
+{
+ int ret;
+
+ if (debugfs_initialized())
+ ntb_msit_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME,
+ NULL);
+
+ ret = ntb_register_client(&ntb_msit_client);
+ if (ret)
+ debugfs_remove_recursive(ntb_msit_dbgfs_topdir);
+
+ return ret;
+}
+module_init(ntb_msit_init);
+
+static void __exit ntb_msit_exit(void)
+{
+ ntb_unregister_client(&ntb_msit_client);
+ debugfs_remove_recursive(ntb_msit_dbgfs_topdir);
+}
+module_exit(ntb_msit_exit);
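
The test client above exposes its controls through debugfs: a directory per NTB device under ntb_msi_test/, a peer%d subdirectory with trigger/port/count/ready files, and one irq%d_occurrences counter per local interrupt. A hypothetical user-space helper that fires one of a peer's interrupts, assuming debugfs is mounted at /sys/kernel/debug and using an illustrative PCI name:

#include <stdio.h>

/* Write an IRQ index to peer<peer>/trigger under the device's debugfs dir. */
static int trigger_peer_irq(const char *pci_name, int peer, int idx)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/ntb_msi_test/%s/peer%d/trigger",
		 pci_name, peer);

	f = fopen(path, "w");
	if (!f)
		return -1;

	fprintf(f, "%d\n", idx);
	fclose(f);

	/* The remote host sees the hit in its irq<idx>_occurrences file. */
	return 0;
}

int main(void)
{
	/* "0000:03:00.1" is an illustrative PCI name, not a real device. */
	return trigger_peer_irq("0000:03:00.1", 0, 0) ? 1 : 0;
}
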
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 11a6cd374004..d028331558ea 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -100,7 +100,7 @@ MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
#define DMA_TRIES 100
#define DMA_MDELAY 10
-#define MSG_TRIES 500
+#define MSG_TRIES 1000
#define MSG_UDELAY_LOW 1000
#define MSG_UDELAY_HIGH 2000
@@ -734,8 +734,6 @@ static void perf_disable_service(struct perf_ctx *perf)
{
int pidx;
- ntb_link_disable(perf->ntb);
-
if (perf->cmd_send == perf_msg_cmd_send) {
u64 inbits;
@@ -752,6 +750,16 @@ static void perf_disable_service(struct perf_ctx *perf)
for (pidx = 0; pidx < perf->pcnt; pidx++)
flush_work(&perf->peers[pidx].service);
+
+ for (pidx = 0; pidx < perf->pcnt; pidx++) {
+ struct perf_peer *peer = &perf->peers[pidx];
+
+ ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
+ }
+
+ ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
+
+ ntb_link_disable(perf->ntb);
}
/*==============================================================================
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 49fc18ee0565..6d22b0f83b3b 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -118,7 +118,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!dax_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index f58b849e455b..7381673b7b70 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -28,22 +28,9 @@ struct nd_pfn_sb {
__le32 end_trunc;
/* minor-version-2 record the base alignment of the mapping */
__le32 align;
+ /* minor-version-3 guarantee the padding and flags are zero */
u8 padding[4000];
__le64 checksum;
};
-#ifdef CONFIG_SPARSEMEM
-#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
-#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
-#else
-/*
- * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
- * but we still want pmem to compile.
- */
-#define PFN_SECTION_ALIGN_DOWN(x) (x)
-#define PFN_SECTION_ALIGN_UP(x) (x)
-#endif
-
-#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
-#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
#endif /* __NVDIMM_PFN_H */
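
The section-alignment macros deleted above are replaced in pfn_devs.c below by the generic SUBSECTION_ALIGN_DOWN() helper, so padding is now computed against 2 MiB sub-sections rather than 128 MiB sections on x86_64. A hedged worked example of the new reserve calculation (made-up namespace base, assuming 4 KiB pages, 512 pages per sub-section and an 8 KiB info-block reserve):

/* Mirrors init_altmap_reserve() below with concrete numbers. */
static unsigned long example_altmap_reserve(void)
{
	unsigned long base_pfn = 0x108248000UL >> 12;	/* 0x108248 */
	unsigned long sub_base = base_pfn & ~511UL;	/* 0x108200 */
	unsigned long reserve = 2;			/* 8 KiB info block */

	/* Pad from the previous sub-section base up to the namespace base:
	 * 0x48 pages, i.e. 72 * 4 KiB = 288 KiB of extra reserve.
	 */
	reserve += base_pfn - sub_base;

	return reserve;		/* 2 + 72 = 74 pages */
}
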
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 55fb6b7433ed..df2bdbd22450 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -412,6 +412,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
return 0;
}
+/**
+ * nd_pfn_validate - read and validate info-block
+ * @nd_pfn: fsdax namespace runtime state / properties
+ * @sig: 'devdax' or 'fsdax' signature
+ *
+ * Upon return, the info-block buffer contents (->pfn_sb) are
+ * indeterminate when validation fails, and contain a coherent info-block
+ * otherwise.
+ */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -557,7 +566,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!pfn_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
@@ -578,14 +587,14 @@ static u32 info_block_reserve(void)
}
/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
+ * We hotplug memory at sub-section granularity, pad the reserved area
+ * from the previous section base to the namespace base address.
*/
static unsigned long init_altmap_base(resource_size_t base)
{
unsigned long base_pfn = PHYS_PFN(base);
- return PFN_SECTION_ALIGN_DOWN(base_pfn);
+ return SUBSECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
@@ -593,7 +602,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
- reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+ reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
return reserve;
}
@@ -623,8 +632,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
- - offset) / PAGE_SIZE);
+ nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
@@ -640,60 +648,20 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
return 0;
}
-static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
-{
- return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
- ALIGN_DOWN(phys, nd_pfn->align));
-}
-
-/*
- * Check if pmem collides with 'System RAM', or other regions when
- * section aligned. Trim it accordingly.
- */
-static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
-{
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
- const resource_size_t start = nsio->res.start;
- const resource_size_t end = start + resource_size(&nsio->res);
- resource_size_t adjust, size;
-
- *start_pad = 0;
- *end_trunc = 0;
-
- adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
- size = resource_size(&nsio->res) + adjust;
- if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || nd_region_conflict(nd_region, start - adjust, size))
- *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-
- /* Now check that end of the range does not collide. */
- adjust = PHYS_SECTION_ALIGN_UP(end) - end;
- size = resource_size(&nsio->res) + adjust;
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || !IS_ALIGNED(end, nd_pfn->align)
- || nd_region_conflict(nd_region, start, size))
- *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
-}
-
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- u32 start_pad, end_trunc, reserve = info_block_reserve();
resource_size_t start, size;
struct nd_region *nd_region;
+ unsigned long npfns, align;
struct nd_pfn_sb *pfn_sb;
- unsigned long npfns;
phys_addr_t offset;
const char *sig;
u64 checksum;
int rc;
- pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb)
return -ENOMEM;
@@ -702,11 +670,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = DAX_SIG;
else
sig = PFN_SIG;
+
rc = nd_pfn_validate(nd_pfn, sig);
if (rc != -ENODEV)
return rc;
/* no info block, do init */
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
@@ -715,43 +686,35 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- memset(pfn_sb, 0, sizeof(*pfn_sb));
-
- trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
- if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
- dev_name(&ndns->dev), start_pad + end_trunc);
-
/*
* Note, we use 64 here for the standard size of struct page,
 * debugging options may cause it to be larger, in which case the
* implementation will limit the pfns advertised through
* ->direct_access() to those that are included in the memmap.
*/
- start = nsio->res.start + start_pad;
+ start = nsio->res.start;
size = resource_size(&nsio->res);
- npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
- / PAGE_SIZE);
+ npfns = PHYS_PFN(size - SZ_8K);
+ align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*/
- offset = ALIGN(start + reserve + 64 * npfns,
- max(nd_pfn->align, PMD_SIZE)) - start;
+ offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + reserve, nd_pfn->align) - start;
+ offset = ALIGN(start + SZ_8K, align) - start;
else
return -ENXIO;
- if (offset + start_pad + end_trunc >= size) {
+ if (offset >= size) {
dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
dev_name(&ndns->dev));
return -ENXIO;
}
- npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+ npfns = PHYS_PFN(size - offset);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
@@ -759,9 +722,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(2);
- pfn_sb->start_pad = cpu_to_le32(start_pad);
- pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
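Two things interact in the hunks above: the info-block buffer is now allocated with devm_kmalloc() because nd_pfn_validate() either fills it coherently from media or leaves it indeterminate, and it is only zeroed explicitly when no info block exists and a fresh v1.3 block (whose padding is guaranteed zero) is about to be written. A condensed sketch of that allocate/validate/initialize flow, with example_validate() standing in for nd_pfn_validate():

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "pfn.h"        /* struct nd_pfn_sb, from drivers/nvdimm/ */

int example_validate(struct nd_pfn_sb *sb);     /* hypothetical stand-in */

static int example_pfn_init(struct device *dev, struct nd_pfn_sb **out)
{
        struct nd_pfn_sb *sb;
        int rc;

        sb = devm_kmalloc(dev, sizeof(*sb), GFP_KERNEL);  /* no zeroing */
        if (!sb)
                return -ENOMEM;
        *out = sb;              /* mirrors nd_pfn->pfn_sb = pfn_sb */

        rc = example_validate(sb);
        if (rc != -ENODEV)
                return rc;      /* 0: coherent block read; else hard error */

        /*
         * No info block on media: start from a known-zero state so the
         * minor-version-3 guarantee (padding and unused flags are zero)
         * actually holds for the block we are about to write.
         */
        memset(sb, 0, sizeof(*sb));
        sb->version_major = cpu_to_le16(1);
        sb->version_minor = cpu_to_le16(3);
        /* remaining fields, checksum and media write-back elided */
        return 0;
}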
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 4ea08979312c..0875f2f122b3 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/oprofile.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
@@ -238,7 +239,7 @@ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
}
-static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
+static int oprofilefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *root_inode;
@@ -263,18 +264,25 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-
-static struct dentry *oprofilefs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int oprofilefs_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, oprofilefs_fill_super);
+ return get_tree_single(fc, oprofilefs_fill_super);
}
+static const struct fs_context_operations oprofilefs_context_ops = {
+ .get_tree = oprofilefs_get_tree,
+};
+
+static int oprofilefs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &oprofilefs_context_ops;
+ return 0;
+}
static struct file_system_type oprofilefs_type = {
.owner = THIS_MODULE,
.name = "oprofilefs",
- .mount = oprofilefs_mount,
+ .init_fs_context = oprofilefs_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("oprofilefs");
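This is the stock conversion of a single-instance filesystem to the new mount API: the legacy .mount hook and mount_single() become an init_fs_context hook whose ->get_tree calls get_tree_single() with the same fill_super callback, now taking a struct fs_context instead of the old (data, silent) pair. A minimal self-contained sketch of the pattern (the filesystem name, magic and super-block setup are placeholders):

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/module.h>

#define EXAMPLEFS_MAGIC 0x12345678      /* placeholder magic */

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* set sb->s_magic, sb->s_op, allocate the root inode/dentry, ... */
        return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
        /* single-instance filesystem: one superblock shared by all mounts */
        return get_tree_single(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
        .get_tree       = examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
        fc->ops = &examplefs_context_ops;
        return 0;
}

static struct file_system_type examplefs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .init_fs_context = examplefs_init_fs_context,
        .kill_sb        = kill_litter_super,
};

A filesystem with mount options would additionally supply a ->parse_param operation; oprofilefs takes none, so get_tree_single() alone is enough.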
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 59a6d232f77a..0884bedcfc7a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -192,6 +192,9 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
+ if (desc->msi_attrib.is_virtual)
+ return NULL;
+
return desc->mask_base +
desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
}
@@ -206,14 +209,19 @@ static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
+ void __iomem *desc_addr;
if (pci_msi_ignore_mask)
return 0;
+ desc_addr = pci_msix_desc_addr(desc);
+ if (!desc_addr)
+ return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
if (flag)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
- writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+ writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
return mask_bits;
}
@@ -273,6 +281,11 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
+ if (!base) {
+ WARN_ON(1);
+ return;
+ }
+
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
@@ -303,6 +316,9 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
+ if (!base)
+ goto skip;
+
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
@@ -327,7 +343,13 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
msg->data);
}
}
+
+skip:
entry->msg = *msg;
+
+ if (entry->write_msi_msg)
+ entry->write_msi_msg(entry, entry->write_msi_msg_data);
+
}
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
@@ -550,6 +572,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
entry->msi_attrib.is_msix = 0;
entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+ entry->msi_attrib.is_virtual = 0;
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
@@ -674,6 +697,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
+ int vec_count = pci_msix_vec_count(dev);
if (affd)
masks = irq_create_affinity_masks(nvec, affd);
@@ -696,6 +720,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.entry_nr = entries[i].entry;
else
entry->msi_attrib.entry_nr = i;
+
+ entry->msi_attrib.is_virtual =
+ entry->msi_attrib.entry_nr >= vec_count;
+
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
@@ -714,12 +742,19 @@ static void msix_program_entries(struct pci_dev *dev,
{
struct msi_desc *entry;
int i = 0;
+ void __iomem *desc_addr;
for_each_pci_msi_entry(entry, dev) {
if (entries)
entries[i++].vector = entry->irq;
- entry->masked = readl(pci_msix_desc_addr(entry) +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+ desc_addr = pci_msix_desc_addr(entry);
+ if (desc_addr)
+ entry->masked = readl(desc_addr +
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
+ else
+ entry->masked = 0;
+
msix_mask_irq(entry, 1);
}
}
@@ -932,7 +967,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
+ int nvec, struct irq_affinity *affd, int flags)
{
int nr_entries;
int i, j;
@@ -943,7 +978,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
nr_entries = pci_msix_vec_count(dev);
if (nr_entries < 0)
return nr_entries;
- if (nvec > nr_entries)
+ if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL))
return nr_entries;
if (entries) {
@@ -1079,7 +1114,8 @@ EXPORT_SYMBOL(pci_enable_msi);
static int __pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec,
- int maxvec, struct irq_affinity *affd)
+ int maxvec, struct irq_affinity *affd,
+ int flags)
{
int rc, nvec = maxvec;
@@ -1096,7 +1132,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
return -ENOSPC;
}
- rc = __pci_enable_msix(dev, entries, nvec, affd);
+ rc = __pci_enable_msix(dev, entries, nvec, affd, flags);
if (rc == 0)
return nvec;
@@ -1127,7 +1163,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0);
}
EXPORT_SYMBOL(pci_enable_msix_range);
@@ -1167,7 +1203,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
if (flags & PCI_IRQ_MSIX) {
msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
- max_vecs, affd);
+ max_vecs, affd, flags);
if (msix_vecs > 0)
return msix_vecs;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index bebbde4ebec0..8c94cd3fd1f2 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -30,6 +30,10 @@ module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
"Enable the use of the DMA MRPC feature");
+static int nirqs = 32;
+module_param(nirqs, int, 0644);
+MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");
+
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
@@ -1263,8 +1267,12 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
int dma_mrpc_irq;
int rc;
- nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
- PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nirqs < 4)
+ nirqs = 4;
+
+ nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI |
+ PCI_IRQ_VIRTUAL);
if (nvecs < 0)
return nvecs;
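PCI_IRQ_VIRTUAL (introduced by the msi.c changes above) lets a driver request more MSI-X vectors than the function's MSI-X table provides. The surplus "virtual" vectors have no table entry behind them — pci_msix_desc_addr() returns NULL for them — so the device itself cannot raise or mask them; they exist for cases like NTB, where a peer delivers the interrupt by writing the MSI message directly. A hedged sketch of the allocation pattern from a probe path (helper and handler names are placeholders; error unwinding via pci_free_irq_vectors() is elided):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: request up to 'want' vectors, tolerating virtual ones. */
static int example_setup_irqs(struct pci_dev *pdev, int want,
                              irq_handler_t handler, void *data)
{
        int nvecs, i, rc;

        nvecs = pci_alloc_irq_vectors(pdev, 1, want,
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                      PCI_IRQ_VIRTUAL);
        if (nvecs < 0)
                return nvecs;

        for (i = 0; i < nvecs; i++) {
                rc = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
                                      handler, 0, KBUILD_MODNAME, data);
                if (rc)
                        return rc;      /* pci_free_irq_vectors() elided */
        }
        return nvecs;
}

Ordinary table-backed vectors occupy the low indices, so the flag is harmless when the request fits the MSI-X table; switchtec merely raises the ceiling via the nirqs module parameter.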
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index d506d32385fc..21efb7d39d62 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -118,7 +118,7 @@ config RESET_QCOM_PDC
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
+ default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,
@@ -130,6 +130,7 @@ config RESET_SIMPLE
- RCC reset controller in STM32 MCUs
- Allwinner SoCs
- ZTE's zx2967 family
+ - Bitmain BM1880 SoC
config RESET_STM32MP157
bool "STM32MP157 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 21b9bd5692e1..213ff40dda11 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -690,9 +690,6 @@ __reset_control_get_from_lookup(struct device *dev, const char *con_id,
const char *dev_id = dev_name(dev);
struct reset_control *rstc = NULL;
- if (!dev)
- return ERR_PTR(-EINVAL);
-
mutex_lock(&reset_lookup_mutex);
list_for_each_entry(lookup, &reset_lookup_list, list) {
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 7e48b9c05ecd..1154f7b1f4dd 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -125,6 +125,8 @@ static const struct of_device_id reset_simple_dt_ids[] = {
.data = &reset_simple_active_low },
{ .compatible = "aspeed,ast2400-lpc-reset" },
{ .compatible = "aspeed,ast2500-lpc-reset" },
+ { .compatible = "bitmain,bm1880-reset",
+ .data = &reset_simple_active_low },
{ /* sentinel */ },
};
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e8fc28dba8df..96f0d34e9459 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
+#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
@@ -217,6 +218,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev;
+ if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+ need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+ return NULL;
+
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index d94496ee6883..296bbc3c4606 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
@@ -741,6 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
+ const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
int req_id = req->req_id;
@@ -757,8 +759,20 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
return -EIO;
}
+ /*
+ * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
+ * ONLY TOUCH SYNC req AGAIN ON req->completion.
+ *
+ * The request might complete and be freed concurrently at any point
+ * now. This is not protected by the QDIO-lock (req_q_lock). So any
+ * uncontrolled access after this might result in a use-after-free bug.
+ * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
+ * when it is completed via req->completion, is it safe to use req
+ * again.
+ */
+
/* Don't increase for unsolicited status */
- if (!zfcp_fsf_req_is_status_read_buffer(req))
+ if (!is_srb)
adapter->fsf_req_seq_no++;
adapter->req_no++;
@@ -805,6 +819,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
retval = zfcp_fsf_req_send(req);
if (retval)
goto failed_req_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -914,8 +929,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
req->qtcb->bottom.support.req_handle = (u64) old_req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
- if (!zfcp_fsf_req_send(req))
+ if (!zfcp_fsf_req_send(req)) {
+ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
+ }
out_error_free:
zfcp_fsf_req_free(req);
@@ -1098,6 +1115,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -1198,6 +1216,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -1243,6 +1262,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1279,8 +1299,10 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
- if (!retval)
+ if (!retval) {
+ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+ }
zfcp_fsf_req_free(req);
return retval;
@@ -1330,6 +1352,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1372,8 +1395,10 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
- if (!retval)
+ if (!retval) {
+ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+ }
zfcp_fsf_req_free(req);
@@ -1493,6 +1518,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req_id = 0;
put_device(&port->dev);
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1557,6 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1600,6 +1627,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
+ unsigned long req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1622,14 +1650,17 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;
+ req_id = req->req_id;
+
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
return retval;
}
@@ -1655,6 +1686,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
+ unsigned long req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1677,14 +1709,17 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
req->data = wka_port;
req->qtcb->header.port_handle = wka_port->handle;
+ req_id = req->req_id;
+
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
return retval;
}
@@ -1776,6 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1899,6 +1935,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1987,6 +2024,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -2299,6 +2337,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
@@ -2373,8 +2412,10 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
- if (!zfcp_fsf_req_send(req))
+ if (!zfcp_fsf_req_send(req)) {
+ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
+ }
zfcp_fsf_req_free(req);
req = NULL;
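The recurring "DO NOT TOUCH req PAST THIS POINT" notes all restate one rule: once zfcp_fsf_req_send() succeeds, an asynchronous request may complete and be freed concurrently, so anything the caller still needs afterwards (for example the request ID saved for the WKA-port trace calls) must be copied out before the send, and a synchronous request may only be touched again after its completion fires. A generic sketch of that discipline, with hypothetical helpers standing in for the zfcp internals:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_adapter;

struct example_req {
        unsigned long id;
        struct completion done;
};

/* Hypothetical helpers standing in for the driver's own: */
struct example_req *example_req_create(struct example_adapter *ad);
int example_submit(struct example_req *req);
void example_req_free(struct example_req *req);
void example_trace(struct example_adapter *ad, unsigned long req_id);

static int example_issue(struct example_adapter *ad, bool sync)
{
        struct example_req *req;
        unsigned long req_id;
        int rc;

        req = example_req_create(ad);
        if (!req)
                return -ENOMEM;

        req_id = req->id;               /* copy before submit */

        rc = example_submit(req);
        if (rc) {
                example_req_free(req);  /* never reached the hardware */
                return rc;
        }
        /* Async: do not dereference req from here on. */

        if (sync) {
                /* Sync: req is only valid again once it completes. */
                wait_for_completion(&req->done);
                example_req_free(req);
        }

        example_trace(ad, req_id);      /* uses the saved copy, not req */
        return 0;
}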
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index aeda53901064..c00e3dd57990 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -185,7 +185,7 @@ zalon7xx-objs := zalon.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
-$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
+$(obj)/53c700.o: $(obj)/53c700_d.h
$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 96740c6fcd92..7018cd802569 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -12,6 +12,7 @@
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
@@ -31,31 +32,15 @@
static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;
-static const struct dentry_operations ocxlflash_fs_dops = {
- .d_dname = simple_dname,
-};
-
-/*
- * ocxlflash_fs_mount() - mount the pseudo-filesystem
- * @fs_type: File system type.
- * @flags: Flags for the filesystem.
- * @dev_name: Device name associated with the filesystem.
- * @data: Data pointer.
- *
- * Return: pointer to the directory entry structure
- */
-static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
- OCXLFLASH_FS_MAGIC);
+ return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type ocxlflash_fs_type = {
.name = "ocxlflash",
.owner = THIS_MODULE,
- .mount = ocxlflash_fs_mount,
+ .init_fs_context = ocxlflash_fs_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ff0d8c6a8d0c..55522b7162d3 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -462,6 +462,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
+ if (sht->virt_boundary_mask)
+ shost->virt_boundary_mask = sht->virt_boundary_mask;
+
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 8e1053bdd843..52e866659853 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2591,7 +2591,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
- FC_LPORT_DBG(lport, "Receiving frames for an lport that "
+ FC_LIBFC_DBG("Receiving frames for an lport that "
"has not been initialized correctly\n");
fc_frame_free(fp);
return;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 4f339f939a51..bec83eb8ab87 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -414,7 +414,6 @@ static void sas_wait_eh(struct domain_device *dev)
goto retry;
}
}
-EXPORT_SYMBOL(sas_wait_eh);
static int sas_queue_reset(struct domain_device *dev, int reset_type,
u64 lun, int wait)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 2322ddb085c0..34070874616d 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -330,7 +330,7 @@ enum {
* This function dumps an entry indexed by @idx from a queue specified by the
* queue descriptor @q.
**/
-static inline void
+static void
lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
{
char line_buf[LPFC_LBUF_SZ];
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ca724fe91b8d..a14e8344822b 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.710.06.00-rc1"
-#define MEGASAS_RELDATE "June 18, 2019"
+#define MEGASAS_VERSION "07.710.50.00-rc1"
+#define MEGASAS_RELDATE "June 28, 2019"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 80ab9700f1de..b2339d04a700 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -105,6 +105,10 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:
"default mode is 'balanced'"
);
+int event_log_level = MFI_EVT_CLASS_CRITICAL;
+module_param(event_log_level, int, 0644);
+MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -280,7 +284,7 @@ void megasas_set_dma_settings(struct megasas_instance *instance,
}
}
-void
+static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
@@ -404,7 +408,13 @@ megasas_decode_evt(struct megasas_instance *instance)
union megasas_evt_class_locale class_locale;
class_locale.word = le32_to_cpu(evt_detail->cl.word);
- if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
+ if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+ (event_log_level > MFI_EVT_CLASS_DEAD)) {
+ printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ event_log_level = MFI_EVT_CLASS_CRITICAL;
+ }
+
+ if (class_locale.members.class >= event_log_level)
dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
le32_to_cpu(evt_detail->seq_num),
format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
@@ -2237,7 +2247,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
static void
process_fw_state_change_wq(struct work_struct *work);
-void megasas_do_ocr(struct megasas_instance *instance)
+static void megasas_do_ocr(struct megasas_instance *instance)
{
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
@@ -3303,7 +3313,7 @@ static DEVICE_ATTR_RO(fw_cmds_outstanding);
static DEVICE_ATTR_RO(dump_system_regs);
static DEVICE_ATTR_RO(raid_map_id);
-struct device_attribute *megaraid_host_attrs[] = {
+static struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
&dev_attr_fw_crash_buffer,
&dev_attr_fw_crash_state,
@@ -3334,6 +3344,7 @@ static struct scsi_host_template megasas_template = {
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.change_queue_depth = scsi_change_queue_depth,
+ .max_segment_size = 0xffffffff,
.no_write_same = 1,
};
@@ -5933,7 +5944,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
- if (!instance->msix_combined) {
+ if (instance->adapter_type >= INVADER_SERIES &&
+ !instance->msix_combined) {
instance->msix_load_balance = true;
instance->smp_affinity_enable = false;
}
@@ -6546,7 +6558,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
int ret;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- u16 targetId = (sdev->channel % 2) + sdev->id;
+ u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
cmd = megasas_get_cmd(instance);
@@ -8748,6 +8761,12 @@ static int __init megasas_init(void)
goto err_pcidrv;
}
+ if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+ (event_log_level > MFI_EVT_CLASS_DEAD)) {
+ printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ event_log_level = MFI_EVT_CLASS_CRITICAL;
+ }
+
rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_version);
if (rval)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 27c731a3fb49..717ba0845a2a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -10238,6 +10238,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.this_id = -1,
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
+ .max_segment_size = 0xffffffff,
.cmd_per_lun = 7,
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index dd38c356a1a4..9453705f643a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -888,6 +888,8 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
+ while (pm8001_dev->running_req)
+ msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
@@ -1256,8 +1258,10 @@ int pm8001_abort_task(struct sas_task *task)
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
wait_for_completion(&completion_reset);
- if (phy->port_reset_status)
+ if (phy->port_reset_status) {
+ pm8001_dev_gone_notify(dev);
goto out;
+ }
/*
* 4. SATA Abort ALL
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 1128d86d241a..73261902d75d 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -604,7 +604,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
0x0000ffff;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
- 0x140000;
+ CHIP_8006_PORT_RECOVERY_TIMEOUT;
}
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 84d7426441bf..dc9ab7689060 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -230,6 +230,8 @@
#define SAS_MAX_AIP 0x200000
#define IT_NEXUS_TIMEOUT 0x7D0
#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
+/* Port recovery timeout, 10000 ms for PM8006 controller */
+#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000
#ifdef __LITTLE_ENDIAN_BITFIELD
struct sas_identify_frame_local {
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a08ff3bd6310..df14597752ec 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -239,6 +239,8 @@ static struct {
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
+ BLIST_INQUIRY_36},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e1da8c70a266..9381171c2fc0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -84,11 +84,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
struct kmem_cache *cache;
int ret = 0;
+ mutex_lock(&scsi_sense_cache_mutex);
cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
- return 0;
+ goto exit;
- mutex_lock(&scsi_sense_cache_mutex);
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
@@ -104,7 +104,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
if (!scsi_sense_cache)
ret = -ENOMEM;
}
-
+ exit:
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
@@ -1452,7 +1452,7 @@ static void scsi_softirq_done(struct request *rq)
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- sdev_printk(KERN_ERR, cmd->device,
+ scmd_printk(KERN_ERR, cmd,
"timing out command, waited %lus\n",
wait_for/HZ);
disposition = SUCCESS;
@@ -1784,6 +1784,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dev) << SECTOR_SHIFT);
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
@@ -1791,7 +1793,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
dma_set_seg_boundary(dev, shost->dma_boundary);
blk_queue_max_segment_size(q, shost->max_segment_size);
- dma_set_max_seg_size(dev, shost->max_segment_size);
+ blk_queue_virt_boundary(q, shost->virt_boundary_mask);
+ dma_set_max_seg_size(dev, queue_max_segment_size(q));
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index db16c19e05c4..5d6ff3931632 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -461,7 +461,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
struct gendisk *disk = sdkp->disk;
unsigned int nr_zones;
- u32 zone_blocks;
+ u32 zone_blocks = 0;
int ret;
if (!sd_is_zoned(sdkp))
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index c2b6a0ca6933..ed8b9ac805e6 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1423,9 +1423,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
- /* Ensure there are no gaps in presented sgls */
- blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);
-
sdevice->no_write_same = 1;
/*
@@ -1698,6 +1695,8 @@ static struct scsi_host_template scsi_driver = {
.this_id = -1,
/* Make sure we don't get an sg segment that crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
+ /* Ensure there are no gaps in presented sgls */
+ .virt_boundary_mask = PAGE_SIZE-1,
.no_write_same = 1,
.track_queue_depth = 1,
.change_queue_depth = storvsc_change_queue_depth,
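Together with the scsi_lib.c and hosts.c hunks earlier in this series, this converts the "no gaps in the scatterlist" requirement from an imperative blk_queue_virt_boundary() call in ->slave_configure into a declarative virt_boundary_mask field on the host template, which __scsi_init_queue() now applies to every request queue of the host. A sketch of a template expressing the same constraints declaratively (mandatory hooks such as .queuecommand are omitted; the driver name is a placeholder):

#include <linux/mm.h>
#include <linux/module.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_host_template = {
        .module                 = THIS_MODULE,
        .name                   = "example_hba",
        .this_id                = -1,
        /* keep each SG element within one page */
        .dma_boundary           = PAGE_SIZE - 1,
        /* new in this series: no gaps between SG elements */
        .virt_boundary_mask     = PAGE_SIZE - 1,
        /* optional cap on an individual SG element's length */
        .max_segment_size       = 0xffffffff,
        .no_write_same          = 1,
        /* .queuecommand and the other mandatory hooks omitted */
};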
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 04d3686511c8..e274053109d0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4587,8 +4587,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
-
return 0;
}
@@ -7022,6 +7020,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
+ .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index be95a37c3fec..c655f5f92b12 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -35,6 +35,7 @@ struct meson_canvas {
void __iomem *reg_base;
spinlock_t lock; /* canvas device lock */
u8 used[NUM_CANVAS];
+ bool supports_endianness;
};
static void canvas_write(struct meson_canvas *canvas, u32 reg, u32 val)
@@ -86,6 +87,12 @@ int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index,
{
unsigned long flags;
+ if (endian && !canvas->supports_endianness) {
+ dev_err(canvas->dev,
+ "Endianness is not supported on this SoC\n");
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&canvas->lock, flags);
if (!canvas->used[canvas_index]) {
dev_err(canvas->dev,
@@ -172,6 +179,8 @@ static int meson_canvas_probe(struct platform_device *pdev)
if (IS_ERR(canvas->reg_base))
return PTR_ERR(canvas->reg_base);
+ canvas->supports_endianness = of_device_get_match_data(dev);
+
canvas->dev = dev;
spin_lock_init(&canvas->lock);
dev_set_drvdata(dev, canvas);
@@ -180,7 +189,10 @@ static int meson_canvas_probe(struct platform_device *pdev)
}
static const struct of_device_id canvas_dt_match[] = {
- { .compatible = "amlogic,canvas" },
+ { .compatible = "amlogic,meson8-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,meson8b-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,meson8m2-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,canvas", .data = (void *)true, },
{}
};
MODULE_DEVICE_TABLE(of, canvas_dt_match);
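The canvas driver now hangs a capability flag off the OF match table: each compatible's .data field is a boolean retrieved in probe with of_device_get_match_data(), so the Meson8 family can reject the endianness bits cleanly. The same idiom in a generic, self-contained form (driver and compatible names are placeholders):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_priv {
        bool supports_feature_x;
};

static int example_probe(struct platform_device *pdev)
{
        struct example_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* .data from the matched of_device_id entry, interpreted as a bool */
        priv->supports_feature_x =
                (bool)of_device_get_match_data(&pdev->dev);

        platform_set_drvdata(pdev, priv);
        return 0;
}

static const struct of_device_id example_dt_match[] = {
        { .compatible = "vendor,old-ip", .data = (void *)false },
        { .compatible = "vendor,new-ip", .data = (void *)true  },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_match);

Carrying the flag in .data avoids sprinkling of_device_is_compatible() checks through the driver and keeps new SoC support down to a one-line table entry.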
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 61276ec692f8..01ed21e8bfee 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -64,6 +64,7 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
unsigned long param)
{
struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
+ struct device *dev = file->private_data;
void __user *p = (void __user *)param;
struct aspeed_lpc_ctrl_mapping map;
u32 addr;
@@ -86,6 +87,12 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
if (map.window_id != 0)
return -EINVAL;
+ /* If memory-region is not described in device tree */
+ if (!lpc_ctrl->mem_size) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ return -ENXIO;
+ }
+
map.size = lpc_ctrl->mem_size;
return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
@@ -122,9 +129,18 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
+ if (!lpc_ctrl->pnor_size) {
+ dev_dbg(dev, "Didn't find host pnor flash\n");
+ return -ENXIO;
+ }
addr = lpc_ctrl->pnor_base;
size = lpc_ctrl->pnor_size;
} else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
+ /* If memory-region is not described in device tree */
+ if (!lpc_ctrl->mem_size) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ return -ENXIO;
+ }
addr = lpc_ctrl->mem_base;
size = lpc_ctrl->mem_size;
} else {
@@ -192,40 +208,41 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
if (!lpc_ctrl)
return -ENOMEM;
+ /* If flash is described in device tree then store */
node = of_parse_phandle(dev->of_node, "flash", 0);
if (!node) {
- dev_err(dev, "Didn't find host pnor flash node\n");
- return -ENODEV;
- }
+ dev_dbg(dev, "Didn't find host pnor flash node\n");
+ } else {
+ rc = of_address_to_resource(node, 1, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for flash\n");
+ return rc;
+ }
- rc = of_address_to_resource(node, 1, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for flash\n");
- return rc;
+ lpc_ctrl->pnor_size = resource_size(&resm);
+ lpc_ctrl->pnor_base = resm.start;
}
- lpc_ctrl->pnor_size = resource_size(&resm);
- lpc_ctrl->pnor_base = resm.start;
dev_set_drvdata(&pdev->dev, lpc_ctrl);
+ /* If memory-region is described in device tree then store */
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
- dev_err(dev, "Didn't find reserved memory\n");
- return -EINVAL;
- }
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ } else {
+ rc = of_address_to_resource(node, 0, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for reserved memory\n");
+ return -ENXIO;
+ }
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for reserved memory\n");
- return -ENOMEM;
+ lpc_ctrl->mem_size = resource_size(&resm);
+ lpc_ctrl->mem_base = resm.start;
}
- lpc_ctrl->mem_size = resource_size(&resm);
- lpc_ctrl->mem_base = resm.start;
-
lpc_ctrl->regmap = syscon_node_to_regmap(
pdev->dev.parent->of_node);
if (IS_ERR(lpc_ctrl->regmap)) {
@@ -254,8 +271,6 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
goto err;
}
- dev_info(dev, "Loaded at %pr\n", &resm);
-
return 0;
err:
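The probe rework treats both the "flash" and "memory-region" phandles as optional: a missing phandle simply leaves the corresponding window size at zero (and the ioctl paths above answer -ENXIO when that window is asked for), while a phandle that is present but cannot be translated remains a hard failure. A condensed, generic sketch of that "optional phandle" shape (property name and resource index are parameters; aspeed-lpc-ctrl uses index 1 for the flash node):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>

/*
 * Resolve an optional phandle-referenced region: returns 0 with
 * *base/*size zeroed when the property is absent, 0 with them filled
 * when present, or a negative error when the phandle exists but
 * cannot be translated into a resource.
 */
static int example_get_optional_region(struct device *dev, const char *prop,
                                       int index, u64 *base, u64 *size)
{
        struct device_node *node;
        struct resource res;
        int rc;

        *base = 0;
        *size = 0;

        node = of_parse_phandle(dev->of_node, prop, 0);
        if (!node) {
                dev_dbg(dev, "no %s region described\n", prop);
                return 0;               /* optional: not an error */
        }

        rc = of_address_to_resource(node, index, &res);
        of_node_put(node);
        if (rc) {
                dev_err(dev, "cannot translate %s to a resource\n", prop);
                return rc;              /* present but malformed */
        }

        *base = res.start;
        *size = resource_size(&res);
        return 0;
}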
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 217f7752cf2c..f9ad8ad54a7d 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -30,4 +30,14 @@ config FSL_MC_DPIO
other DPAA2 objects. This driver does not expose the DPIO
objects individually, but groups them under a service layer
API.
+
+config DPAA2_CONSOLE
+ tristate "QorIQ DPAA2 console driver"
+ depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ default y
+ help
+ Console driver for DPAA2 platforms. Exports 2 char devices,
+ /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
+ which can be used to dump the Management Complex and AIOP
+ firmware logs.
endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 158541a83d26..71dee8d0d1f0 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/
+obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
new file mode 100644
index 000000000000..9168d8ddc932
--- /dev/null
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Freescale DPAA2 Platforms Console Driver
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2018 NXP
+ */
+
+#define pr_fmt(fmt) "dpaa2-console: " fmt
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+/* MC firmware base low/high registers indexes */
+#define MCFBALR_OFFSET 0
+#define MCFBAHR_OFFSET 1
+
+/* Bit masks used to get the most/least significant part of the MC base addr */
+#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
+#define MC_FW_ADDR_MASK_LOW 0xE0000000
+
+#define MC_BUFFER_OFFSET 0x01000000
+#define MC_BUFFER_SIZE (1024 * 1024 * 16)
+#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
+
+#define AIOP_BUFFER_OFFSET 0x06000000
+#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
+#define AIOP_OFFSET_DELTA 0
+
+#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
+#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
+
+/* MC and AIOP Magic words */
+#define MAGIC_MC 0x4d430100
+#define MAGIC_AIOP 0x41494F50
+
+struct log_header {
+ __le32 magic_word;
+ char reserved[4];
+ __le32 buf_start;
+ __le32 buf_length;
+ __le32 last_byte;
+};
+
+struct console_data {
+ void __iomem *map_addr;
+ struct log_header __iomem *hdr;
+ void __iomem *start_addr;
+ void __iomem *end_addr;
+ void __iomem *end_of_data;
+ void __iomem *cur_ptr;
+};
+
+static struct resource mc_base_addr;
+
+static inline void adjust_end(struct console_data *cd)
+{
+ u32 last_byte = readl(&cd->hdr->last_byte);
+
+ cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte);
+}
+
+static u64 get_mc_fw_base_address(void)
+{
+ u64 mcfwbase = 0ULL;
+ u32 __iomem *mcfbaregs;
+
+ mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
+ if (!mcfbaregs) {
+ pr_err("could not map MC Firmaware Base registers\n");
+ return 0;
+ }
+
+ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
+ MC_FW_ADDR_MASK_HIGH;
+ mcfwbase <<= 32;
+ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
+ iounmap(mcfbaregs);
+
+ pr_debug("MC base address at 0x%016llx\n", mcfwbase);
+ return mcfwbase;
+}
+
+static ssize_t dpaa2_console_size(struct console_data *cd)
+{
+ ssize_t size;
+
+ if (cd->cur_ptr <= cd->end_of_data)
+ size = cd->end_of_data - cd->cur_ptr;
+ else
+ size = (cd->end_addr - cd->cur_ptr) +
+ (cd->end_of_data - cd->start_addr);
+
+ return size;
+}
+
+static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
+ u64 offset, u64 size,
+ u32 expected_magic,
+ u32 offset_delta)
+{
+ u32 read_magic, wrapped, last_byte, buf_start, buf_length;
+ struct console_data *cd;
+ u64 base_addr;
+ int err;
+
+ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ base_addr = get_mc_fw_base_address();
+ if (!base_addr) {
+ err = -EIO;
+ goto err_fwba;
+ }
+
+ cd->map_addr = ioremap(base_addr + offset, size);
+ if (!cd->map_addr) {
+ pr_err("cannot map console log memory\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ cd->hdr = (struct log_header __iomem *)cd->map_addr;
+ read_magic = readl(&cd->hdr->magic_word);
+ last_byte = readl(&cd->hdr->last_byte);
+ buf_start = readl(&cd->hdr->buf_start);
+ buf_length = readl(&cd->hdr->buf_length);
+
+ if (read_magic != expected_magic) {
+ pr_warn("expected = %08x, read = %08x\n",
+ expected_magic, read_magic);
+ err = -EIO;
+ goto err_magic;
+ }
+
+ cd->start_addr = cd->map_addr + buf_start - offset_delta;
+ cd->end_addr = cd->start_addr + buf_length;
+
+ wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
+
+ adjust_end(cd);
+ if (wrapped && cd->end_of_data != cd->end_addr)
+ cd->cur_ptr = cd->end_of_data + 1;
+ else
+ cd->cur_ptr = cd->start_addr;
+
+ fp->private_data = cd;
+
+ return 0;
+
+err_magic:
+ iounmap(cd->map_addr);
+
+err_ioremap:
+err_fwba:
+ kfree(cd);
+
+ return err;
+}
+
+static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
+ MAGIC_MC, MC_OFFSET_DELTA);
+}
+
+static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
+ MAGIC_AIOP, AIOP_OFFSET_DELTA);
+}
+
+static int dpaa2_console_close(struct inode *node, struct file *fp)
+{
+ struct console_data *cd = fp->private_data;
+
+ iounmap(cd->map_addr);
+ kfree(cd);
+ return 0;
+}
+
+static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct console_data *cd = fp->private_data;
+ size_t bytes = dpaa2_console_size(cd);
+ size_t bytes_end = cd->end_addr - cd->cur_ptr;
+ size_t written = 0;
+ void *kbuf;
+ int err;
+
+ /* Check if we need to adjust the end of data addr */
+ adjust_end(cd);
+
+ if (cd->end_of_data == cd->cur_ptr)
+ return 0;
+
+ if (count < bytes)
+ bytes = count;
+
+ kbuf = kmalloc(bytes, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (bytes > bytes_end) {
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
+ if (copy_to_user(buf, kbuf, bytes_end)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ buf += bytes_end;
+ cd->cur_ptr = cd->start_addr;
+ bytes -= bytes_end;
+ written += bytes_end;
+ }
+
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes);
+ if (copy_to_user(buf, kbuf, bytes)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ cd->cur_ptr += bytes;
+ written += bytes;
+
+ kfree(kbuf);
+
+ return written;
+
+err_free_buf:
+ kfree(kbuf);
+
+ return err;
+}
+
+static const struct file_operations dpaa2_mc_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_mc_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_mc_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_mc_console",
+ .fops = &dpaa2_mc_console_fops
+};
+
+static const struct file_operations dpaa2_aiop_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_aiop_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_aiop_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_aiop_console",
+ .fops = &dpaa2_aiop_console_fops
+};
+
+static int dpaa2_console_probe(struct platform_device *pdev)
+{
+ int error;
+
+ error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
+ if (error < 0) {
+ pr_err("of_address_to_resource() failed for %pOF with %d\n",
+ pdev->dev.of_node, error);
+ return error;
+ }
+
+ error = misc_register(&dpaa2_mc_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_mc_console_dev.name);
+ goto err_register_mc;
+ }
+
+ error = misc_register(&dpaa2_aiop_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_aiop_console_dev.name);
+ goto err_register_aiop;
+ }
+
+ return 0;
+
+err_register_aiop:
+ misc_deregister(&dpaa2_mc_console_dev);
+err_register_mc:
+ return error;
+}
+
+static int dpaa2_console_remove(struct platform_device *pdev)
+{
+ misc_deregister(&dpaa2_mc_console_dev);
+ misc_deregister(&dpaa2_aiop_console_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dpaa2_console_match_table[] = {
+ { .compatible = "fsl,dpaa2-console",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);
+
+static struct platform_driver dpaa2_console_driver = {
+ .driver = {
+ .name = "dpaa2-console",
+ .pm = NULL,
+ .of_match_table = dpaa2_console_match_table,
+ },
+ .probe = dpaa2_console_probe,
+ .remove = dpaa2_console_remove,
+};
+module_platform_driver(dpaa2_console_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
+MODULE_DESCRIPTION("DPAA2 console driver");
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index c0cdc8946031..70014ecce2a7 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -197,13 +197,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
desc.cpu);
}
- /*
- * Set the CENA regs to be the cache inhibited area of the portal to
- * avoid coherency issues if a user migrates to another core.
- */
- desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]),
- MEMREMAP_WC);
+ if (dpio_dev->obj_desc.region_count < 3) {
+ /* No support for DDR backed portals, use classic mapping */
+ /*
+ * Set the CENA regs to be the cache inhibited area of the
+ * portal to avoid coherency issues if a user migrates to
+ * another core.
+ */
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ } else {
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
+ resource_size(&dpio_dev->regions[2]),
+ MEMREMAP_WB);
+ }
+
if (IS_ERR(desc.regs_cena)) {
dev_err(dev, "devm_memremap failed\n");
err = PTR_ERR(desc.regs_cena);
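The probe now keys the CENA mapping off how many regions the MC describes for the DPIO object: classic portals expose only the cache-inhibited area, which stays mapped write-combining, while newer memory-backed portals add a third, cacheable region that is mapped write-back. The decision, isolated as a small sketch (the IS_ERR() check that follows in the hunk still applies to the returned pointer):

#include <linux/fsl/mc.h>
#include <linux/io.h>

/* Sketch: pick the CENA mapping for a DPIO portal. */
static void *example_map_cena(struct device *dev,
                              struct fsl_mc_device *dpio_dev)
{
        if (dpio_dev->obj_desc.region_count < 3)
                /* no DDR-backed portal: map the cache-inhibited area WC */
                return devm_memremap(dev, dpio_dev->regions[1].start,
                                     resource_size(&dpio_dev->regions[1]),
                                     MEMREMAP_WC);

        /* memory-backed portal: map the cacheable region write-back */
        return devm_memremap(dev, dpio_dev->regions[2].start,
                             resource_size(&dpio_dev->regions[2]),
                             MEMREMAP_WB);
}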
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index d02013556a1b..c66f5b73777c 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -15,6 +15,8 @@
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
#define QMAN_REV_MASK 0xffff0000
/* All QBMan command and result structures use this "valid bit" encoding */
@@ -25,10 +27,17 @@
#define QBMAN_WQCHAN_CONFIGURE 0x46
/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
@@ -43,6 +52,13 @@
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
+
/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
@@ -96,10 +112,13 @@ static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
@@ -125,6 +144,8 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
ep << SWP_CFG_EP_SHIFT);
}
+#define QMAN_RT_MODE 0x00000100
+
/**
* qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor.
@@ -146,6 +167,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ p->mr.valid_bit = QB_VALID_BIT;
atomic_set(&p->vdq.available, 1);
p->vdq.valid_bit = QB_VALID_BIT;
@@ -163,6 +186,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ memset(p->addr_cena, 0, 64 * 1024);
+
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
1, /* Writes Non-cacheable */
0, /* EQCR_CI stashing threshold */
@@ -175,6 +201,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -184,6 +214,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
return NULL;
}
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+ qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+ }
/*
* SDQCR needs to be initialized to 0 when no channels are
* being dequeued from or else the QMan HW will indicate an
@@ -278,7 +312,10 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
*/
void *qbman_swp_mc_start(struct qbman_swp *p)
{
- return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+ else
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}
/*
@@ -289,8 +326,14 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
u8 *v = cmd;
- dma_wmb();
- *v = cmd_verb | p->mc.valid_bit;
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+ } else {
+ *v = cmd_verb | p->mc.valid_bit;
+ dma_wmb();
+ qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+ }
}
/*
@@ -301,13 +344,27 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
{
u32 *ret, verb;
- ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed if the rest
+ * is non-zero.
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ } else {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
+ /* Command completed if the valid bit is toggled */
+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+ return NULL;
+ /* Command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mr.valid_bit ^= QB_VALID_BIT;
+ }
- /* Remove the valid-bit - command completed if the rest is non-zero */
- verb = ret[0] & ~QB_VALID_BIT;
- if (!verb)
- return NULL;
- p->mc.valid_bit ^= QB_VALID_BIT;
return ret;
}
@@ -384,6 +441,18 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
+ u8 idx)
+{
+ if (idx < 16)
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
+ QMAN_RT_MODE);
+ else
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
+ (idx - 16) * 4,
+ QMAN_RT_MODE);
+}
+
/**
* qbman_swp_enqueue() - Issue an enqueue command
* @s: the software portal used for enqueue
@@ -408,9 +477,15 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
memcpy(&p->dca, &d->dca, 31);
memcpy(&p->fd, fd, sizeof(*fd));
- /* Set the verb byte, have to substitute in the valid-bit */
- dma_wmb();
- p->verb = d->verb | EQAR_VB(eqar);
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ /* Set the verb byte, have to substitute in the valid-bit */
+ dma_wmb();
+ p->verb = d->verb | EQAR_VB(eqar);
+ } else {
+ p->verb = d->verb | EQAR_VB(eqar);
+ dma_wmb();
+ qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+ }
return 0;
}
@@ -587,17 +662,27 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
return -EBUSY;
}
s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
p->numf = d->numf;
p->tok = QMAN_DQ_TOKEN_VALID;
p->dq_src = d->dq_src;
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
- dma_wmb();
- /* Set the verb byte, have to substitute in the valid-bit */
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ } else {
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+ }
return 0;
}
@@ -655,7 +740,10 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/*
@@ -807,18 +895,28 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
return -EBUSY;
/* Start the release command */
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
- /*
- * Set the verb byte, have to substitute in the valid-bit and the number
- * of buffers.
- */
- dma_wmb();
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ } else {
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
+ }
return 0;
}
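Every data-path hunk in the qbman-portal.c change above applies the same split: on portals older than QMan 5.0 the verb byte is written after a dma_wmb() and carries the valid bit, while on QMan 5.0 and later (memory-backed portals) the verb is written first, the barrier follows, and a write to a CINH *_RT register actually triggers the command. The branch repeated at every call site is a single version test; a minimal stand-alone restatement of it, with the constants copied from this hunk and the helper name being my own:

/*
 * Illustration only: the revision test the branches above rely on.
 * Constants mirror the hunk; the helper name is mine.
 */
#include <stdint.h>
#include <stdio.h>

#define QMAN_REV_4101	0x04010001
#define QMAN_REV_5000	0x05000000
#define QMAN_REV_MASK	0xffff0000

static int qman_is_memory_backed(uint32_t qman_version)
{
	return (qman_version & QMAN_REV_MASK) >= QMAN_REV_5000;
}

int main(void)
{
	printf("4.1.1 memory-backed: %d\n", qman_is_memory_backed(QMAN_REV_4101)); /* 0 */
	printf("5.0.0 memory-backed: %d\n", qman_is_memory_backed(QMAN_REV_5000)); /* 1 */
	return 0;
}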
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index fa35fc1afeaa..f3ec5d2044fb 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#ifndef __FSL_QBMAN_PORTAL_H
@@ -110,6 +110,11 @@ struct qbman_swp {
u32 valid_bit; /* 0x00 or 0x80 */
} mc;
+ /* Management response */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mr;
+
/* Push dequeues */
u32 sdq;
@@ -428,7 +433,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
u8 cmd_verb)
{
- int loopvar = 1000;
+ int loopvar = 2000;
qbman_swp_mc_submit(swp, cmd, cmd_verb);
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 78607da7320e..1ef8068c8dd3 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -97,6 +97,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
.svr = 0x87000000,
.mask = 0xfff70000,
},
+ /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
+ { .die = "LX2160A",
+ .svr = 0x87360000,
+ .mask = 0xff3f0000,
+ },
{ },
};
@@ -218,6 +223,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1088a-dcfg", },
{ .compatible = "fsl,ls1012a-dcfg", },
{ .compatible = "fsl,ls1046a-dcfg", },
+ { .compatible = "fsl,lx2160a-dcfg", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
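The new LX2160A entry is matched the same way the existing die entries behave: the SVR read from the DCFG block is masked and compared against the entry's svr value, so one entry covers the LX2160A/LX2120A/LX2080A personalities. A small stand-alone sketch of that comparison; only the table entry itself comes from the hunk, the comparison is a restatement and the sample SVR value is hypothetical:

/*
 * Sketch, not driver code: masks a raw SVR the way the die table above is
 * normally matched ((svr & mask) == entry.svr). The sample SVR is hypothetical.
 */
#include <stdio.h>

struct die_attr {
	const char *die;
	unsigned int svr;
	unsigned int mask;
};

static const struct die_attr lx2160a = {
	.die  = "LX2160A",
	.svr  = 0x87360000,
	.mask = 0xff3f0000,
};

int main(void)
{
	unsigned int svr = 0x87360010;	/* hypothetical LX2160A rev 1.0 SVR */

	if ((svr & lx2160a.mask) == lx2160a.svr)
		printf("SVR 0x%08x matches die %s\n", svr, lx2160a.die);
	return 0;
}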
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 2c95cf59f3e7..cf4f10d6f590 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -32,6 +32,7 @@
static struct bman_portal *affine_bportals[NR_CPUS];
static struct cpumask portal_cpus;
+static int __bman_portals_probed;
/* protect bman global registers and global data shared among portals */
static DEFINE_SPINLOCK(bman_lock);
@@ -87,6 +88,12 @@ static int bman_online_cpu(unsigned int cpu)
return 0;
}
+int bman_portals_probed(void)
+{
+ return __bman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(bman_portals_probed);
+
static int bman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -104,8 +111,10 @@ static int bman_portal_probe(struct platform_device *pdev)
}
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg)
+ if (!pcfg) {
+ __bman_portals_probed = -1;
return -ENOMEM;
+ }
pcfg->dev = dev;
@@ -113,14 +122,14 @@ static int bman_portal_probe(struct platform_device *pdev)
DPAA_PORTAL_CE);
if (!addr_phys[0]) {
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI);
if (!addr_phys[1]) {
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
pcfg->cpu = -1;
@@ -128,7 +137,7 @@ static int bman_portal_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "Can't get %pOF IRQ'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
pcfg->irq = irq;
@@ -150,6 +159,7 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
+ __bman_portals_probed = 1;
/* unassigned portal, skip init */
spin_unlock(&bman_lock);
return 0;
@@ -175,6 +185,8 @@ err_portal_init:
err_ioremap2:
memunmap(pcfg->addr_virt_ce);
err_ioremap1:
+ __bman_portals_probed = -1;
+
return -ENXIO;
}
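The new bman_portals_probed() export gives dependent drivers a tri-state view of portal probing, following the assignments in the hunks above: the flag stays 0 while portals are still being probed, becomes 1 once the per-CPU portals have been handed out, and is set to -1 on any probe failure. A user-space sketch of that contract, with bman_portals_probed() stubbed out; how a real consumer reacts (for example by deferring its own probe) is an assumption on my part:

/*
 * User-space sketch of the tri-state contract suggested by the hunks above;
 * bman_portals_probed() is stubbed for illustration.
 */
#include <stdio.h>

#define PORTALS_PENDING	 0	/* probing not finished, caller should defer */
#define PORTALS_READY	 1	/* per-CPU portals have been handed out */
#define PORTALS_FAILED	-1	/* a portal probe failed */

static int bman_portals_probed_stub(void)
{
	return PORTALS_READY;	/* pretend probing completed successfully */
}

int main(void)
{
	switch (bman_portals_probed_stub()) {
	case PORTALS_PENDING:
		puts("defer: portals not probed yet");
		break;
	case PORTALS_FAILED:
		puts("give up: a portal probe failed");
		break;
	default:
		puts("proceed: BMan portals available");
	}
	return 0;
}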
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 109b38de3176..a6bb43007d03 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -596,7 +596,7 @@ static int qman_init_ccsr(struct device *dev)
}
#define LIO_CFG_LIODN_MASK 0x0fff0000
-void qman_liodn_fixup(u16 channel)
+void __qman_liodn_fixup(u16 channel)
{
static int done;
static u32 liodn_offset;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 661c9b234d32..e2186b681d87 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(qman_dma_portal);
#define CONFIG_FSL_DPA_PIRQ_FAST 1
static struct cpumask portal_cpus;
+static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);
@@ -220,6 +221,12 @@ static int qman_online_cpu(unsigned int cpu)
return 0;
}
+int qman_portals_probed(void)
+{
+ return __qman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(qman_portals_probed);
+
static int qman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -238,8 +245,10 @@ static int qman_portal_probe(struct platform_device *pdev)
}
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg)
+ if (!pcfg) {
+ __qman_portals_probed = -1;
return -ENOMEM;
+ }
pcfg->dev = dev;
@@ -247,19 +256,20 @@ static int qman_portal_probe(struct platform_device *pdev)
DPAA_PORTAL_CE);
if (!addr_phys[0]) {
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI);
if (!addr_phys[1]) {
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
err = of_property_read_u32(node, "cell-index", &val);
if (err) {
dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
+ __qman_portals_probed = -1;
return err;
}
pcfg->channel = val;
@@ -267,7 +277,7 @@ static int qman_portal_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "Can't get %pOF IRQ\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
pcfg->irq = irq;
@@ -291,6 +301,7 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_lock(&qman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
+ __qman_portals_probed = 1;
/* unassigned portal, skip init */
spin_unlock(&qman_lock);
return 0;
@@ -321,6 +332,8 @@ err_portal_init:
err_ioremap2:
memunmap(pcfg->addr_virt_ce);
err_ioremap1:
+ __qman_portals_probed = -1;
+
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 75a8f905f8f7..04515718cfd9 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -193,7 +193,14 @@ extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);
int qman_wq_alloc(void);
-void qman_liodn_fixup(u16 channel);
+#ifdef CONFIG_FSL_PAMU
+#define qman_liodn_fixup __qman_liodn_fixup
+#else
+static inline void qman_liodn_fixup(u16 channel)
+{
+}
+#endif
+void __qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);
struct qman_portal *qman_create_affine_portal(
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index ade1b46d669c..8aaebf13e2e6 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -8,4 +8,13 @@ config IMX_GPCV2_PM_DOMAINS
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
+config IMX_SCU_SOC
+ bool "i.MX System Controller Unit SoC info support"
+ depends on IMX_SCU
+ select SOC_BUS
+ help
+ If you say yes here, you get support for the NXP i.MX System
+ Controller Unit SoC info module. It provides SoC information such
+ as the SoC family, ID and revision.
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index caa8653600f2..cf9ca42ff739 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
+obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
new file mode 100644
index 000000000000..676f612f6488
--- /dev/null
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc"
+
+static struct imx_sc_ipc *soc_ipc_handle;
+
+struct imx_sc_msg_misc_get_soc_id {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u32 control;
+ u16 resource;
+ } __packed req;
+ struct {
+ u32 id;
+ } resp;
+ } data;
+} __packed;
+
+static int imx_scu_soc_id(void)
+{
+ struct imx_sc_msg_misc_get_soc_id msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
+ hdr->size = 3;
+
+ msg.data.req.control = IMX_SC_C_ID;
+ msg.data.req.resource = IMX_SC_R_SYSTEM;
+
+ ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ return msg.data.resp.id;
+}
+
+static int imx_scu_soc_probe(struct platform_device *pdev)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ int id, ret;
+ u32 val;
+
+ ret = imx_scu_get_handle(&soc_ipc_handle);
+ if (ret)
+ return ret;
+
+ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
+ GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ ret = of_property_read_string(of_root,
+ "model",
+ &soc_dev_attr->machine);
+ if (ret)
+ return ret;
+
+ id = imx_scu_soc_id();
+ if (id < 0)
+ return -EINVAL;
+
+ /* format soc_id value passed from SCU firmware */
+ val = id & 0x1f;
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ /* format revision value passed from SCU firmware */
+ val = (id >> 5) & 0xf;
+ val = (((val >> 2) + 1) << 4) | (val & 0x3);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL,
+ "%d.%d",
+ (val >> 4) & 0xf,
+ val & 0xf);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc_id;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_revision;
+ }
+
+ return 0;
+
+free_revision:
+ kfree(soc_dev_attr->revision);
+free_soc_id:
+ kfree(soc_dev_attr->soc_id);
+ return ret;
+}
+
+static struct platform_driver imx_scu_soc_driver = {
+ .driver = {
+ .name = IMX_SCU_SOC_DRIVER_NAME,
+ },
+ .probe = imx_scu_soc_probe,
+};
+
+static int __init imx_scu_soc_init(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
+
+ ret = platform_driver_register(&imx_scu_soc_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(pdev))
+ platform_driver_unregister(&imx_scu_soc_driver);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+device_initcall(imx_scu_soc_init);
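imx_scu_soc_probe() above splits the value returned by the SCU into a 5-bit SoC ID and a 4-bit raw revision, then rewrites the revision so the top two bits plus one become the major number and the bottom two bits the minor number. A stand-alone walk-through of that arithmetic; the field layout is inferred from the masks used in the probe and the sample value is made up:

/*
 * Stand-alone walk-through of the id decoding in imx_scu_soc_probe();
 * the sample value is made up purely to exercise the arithmetic.
 */
#include <stdio.h>

int main(void)
{
	unsigned int id = 0x2a;			/* hypothetical value from the SCU */
	unsigned int soc_id = id & 0x1f;	/* low 5 bits: SoC ID */
	unsigned int rev = (id >> 5) & 0xf;	/* next 4 bits: raw revision */

	/* upper two bits + 1 become the major number, lower two the minor */
	rev = (((rev >> 2) + 1) << 4) | (rev & 0x3);

	printf("soc_id 0x%x, revision %u.%u\n",
	       soc_id, (rev >> 4) & 0xf, rev & 0xf);	/* 0xa, 1.1 */
	return 0;
}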
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index b1bd8e2543ac..f924ae8c6514 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -16,6 +16,9 @@
#define IMX8MQ_SW_INFO_B1 0x40
#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+/* Same as ANADIG_DIGPROG_IMX7D */
+#define ANADIG_DIGPROG_IMX8MM 0x800
+
struct imx8_soc_data {
char *name;
u32 (*soc_revision)(void);
@@ -46,13 +49,45 @@ out:
return rev;
}
+static u32 __init imx8mm_soc_revision(void)
+{
+ struct device_node *np;
+ void __iomem *anatop_base;
+ u32 rev;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ if (!np)
+ return 0;
+
+ anatop_base = of_iomap(np, 0);
+ WARN_ON(!anatop_base);
+
+ rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+
+ iounmap(anatop_base);
+ of_node_put(np);
+ return rev;
+}
+
static const struct imx8_soc_data imx8mq_soc_data = {
.name = "i.MX8MQ",
.soc_revision = imx8mq_soc_revision,
};
+static const struct imx8_soc_data imx8mm_soc_data = {
+ .name = "i.MX8MM",
+ .soc_revision = imx8mm_soc_revision,
+};
+
+static const struct imx8_soc_data imx8mn_soc_data = {
+ .name = "i.MX8MN",
+ .soc_revision = imx8mm_soc_revision,
+};
+
static const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
+ { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
+ { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
{ }
};
@@ -65,7 +100,6 @@ static int __init imx8_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
- struct device_node *root;
const struct of_device_id *id;
u32 soc_rev = 0;
const struct imx8_soc_data *data;
@@ -73,20 +107,19 @@ static int __init imx8_soc_init(void)
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
- return -ENODEV;
+ return -ENOMEM;
soc_dev_attr->family = "Freescale i.MX";
- root = of_find_node_by_path("/");
- ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
if (ret)
goto free_soc;
- id = of_match_node(imx8_soc_match, root);
- if (!id)
+ id = of_match_node(imx8_soc_match, of_root);
+ if (!id) {
+ ret = -ENODEV;
goto free_soc;
-
- of_node_put(root);
+ }
data = id->data;
if (data) {
@@ -96,12 +129,16 @@ static int __init imx8_soc_init(void)
}
soc_dev_attr->revision = imx8_revision(soc_rev);
- if (!soc_dev_attr->revision)
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
goto free_soc;
+ }
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev))
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
goto free_rev;
+ }
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
@@ -109,10 +146,10 @@ static int __init imx8_soc_init(void)
return 0;
free_rev:
- kfree(soc_dev_attr->revision);
+ if (strcmp(soc_dev_attr->revision, "unknown"))
+ kfree(soc_dev_attr->revision);
free_soc:
kfree(soc_dev_attr);
- of_node_put(root);
- return -ENODEV;
+ return ret;
}
device_initcall(imx8_soc_init);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 880cf0290962..a6d1bfb17279 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -4,6 +4,18 @@
#
menu "Qualcomm SoC drivers"
+config QCOM_AOSS_QMP
+ tristate "Qualcomm AOSS Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on MAILBOX
+ depends on COMMON_CLK && PM
+ select PM_GENERIC_DOMAINS
+ help
+ This driver provides the means of communicating with, and controlling
+ the low-power state of, resources related to the remoteproc subsystems,
+ as well as the debug clocks exposed by the Always On Subsystem (AOSS),
+ using the Qualcomm Messaging Protocol (QMP).
+
config QCOM_COMMAND_DB
bool "Qualcomm Command DB"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ffe519b0cb66..eeb088beb15f 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 74f8b9607daa..4fcc32420c47 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
#include <linux/rpmsg.h>
@@ -17,8 +18,18 @@ struct apr {
struct rpmsg_endpoint *ch;
struct device *dev;
spinlock_t svcs_lock;
+ spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
+ struct workqueue_struct *rxwq;
+ struct work_struct rx_work;
+ struct list_head rx_list;
+};
+
+struct apr_rx_buf {
+ struct list_head node;
+ int len;
+ uint8_t buf[];
};
/**
@@ -62,11 +73,7 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
int len, void *priv, u32 addr)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);
- uint16_t hdr_size, msg_type, ver, svc_id;
- struct apr_device *svc = NULL;
- struct apr_driver *adrv = NULL;
- struct apr_resp_pkt resp;
- struct apr_hdr *hdr;
+ struct apr_rx_buf *abuf;
unsigned long flags;
if (len <= APR_HDR_SIZE) {
@@ -75,6 +82,34 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
return -EINVAL;
}
+ abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC);
+ if (!abuf)
+ return -ENOMEM;
+
+ abuf->len = len;
+ memcpy(abuf->buf, buf, len);
+
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_add_tail(&abuf->node, &apr->rx_list);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+
+ queue_work(apr->rxwq, &apr->rx_work);
+
+ return 0;
+}
+
+
+static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct apr_device *svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
hdr = buf;
ver = APR_HDR_FIELD_VER(hdr->hdr_field);
if (ver > APR_PKT_VER + 1)
@@ -132,6 +167,23 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
return 0;
}
+static void apr_rxwq(struct work_struct *work)
+{
+ struct apr *apr = container_of(work, struct apr, rx_work);
+ struct apr_rx_buf *abuf, *b;
+ unsigned long flags;
+
+ if (!list_empty(&apr->rx_list)) {
+ list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
+ apr_do_rx_callback(apr, abuf);
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_del(&abuf->node);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+ kfree(abuf);
+ }
+ }
+}
+
static int apr_device_match(struct device *dev, struct device_driver *drv)
{
struct apr_device *adev = to_apr_device(dev);
@@ -276,7 +328,7 @@ static int apr_probe(struct rpmsg_device *rpdev)
if (!apr)
return -ENOMEM;
- ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id);
if (ret) {
dev_err(dev, "APR Domain ID not specified in DT\n");
return ret;
@@ -285,6 +337,14 @@ static int apr_probe(struct rpmsg_device *rpdev)
dev_set_drvdata(dev, apr);
apr->ch = rpdev->ept;
apr->dev = dev;
+ apr->rxwq = create_singlethread_workqueue("qcom_apr_rx");
+ if (!apr->rxwq) {
+ dev_err(apr->dev, "Failed to start Rx WQ\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&apr->rx_work, apr_rxwq);
+ INIT_LIST_HEAD(&apr->rx_list);
+ spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
of_register_apr_devices(dev);
@@ -303,7 +363,11 @@ static int apr_remove_device(struct device *dev, void *null)
static void apr_remove(struct rpmsg_device *rpdev)
{
+ struct apr *apr = dev_get_drvdata(&rpdev->dev);
+
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+ flush_workqueue(apr->rxwq);
+ destroy_workqueue(apr->rxwq);
}
/*
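The apr change above stops parsing packets inside the rpmsg callback: apr_callback() now only copies the payload (GFP_ATOMIC), links it onto rx_list under rx_lock and queues rx_work, while apr_do_rx_callback() runs later from the workqueue in process context. A user-space sketch of that producer/consumer split; locking and the workqueue itself are omitted here, so this only illustrates the data flow, not the concurrency:

/*
 * User-space sketch of the deferral pattern: the callback only copies and
 * queues the payload, a worker drains the queue later. Locking and the
 * workqueue are omitted, so only the data flow is shown.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rx_buf {
	struct rx_buf *next;
	int len;
	unsigned char data[];
};

static struct rx_buf *rx_head, *rx_tail;

/* stands in for apr_callback(): do no parsing here */
static int rx_enqueue(const void *buf, int len)
{
	struct rx_buf *b = malloc(sizeof(*b) + len);

	if (!b)
		return -1;
	b->next = NULL;
	b->len = len;
	memcpy(b->data, buf, len);
	if (rx_tail)
		rx_tail->next = b;
	else
		rx_head = b;
	rx_tail = b;
	return 0;
}

/* stands in for apr_rxwq(): dispatch in sleepable context */
static void rx_drain(void)
{
	while (rx_head) {
		struct rx_buf *b = rx_head;

		rx_head = b->next;
		if (!rx_head)
			rx_tail = NULL;
		printf("processing %d byte packet\n", b->len);
		free(b);
	}
}

int main(void)
{
	rx_enqueue("hello", 5);
	rx_enqueue("world!", 6);
	rx_drain();
	return 0;
}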
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
new file mode 100644
index 000000000000..5f885196f4d0
--- /dev/null
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Ltd
+ */
+#include <dt-bindings/power/qcom-aoss-qmp.h>
+#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+#define QMP_DESC_MAGIC 0x0
+#define QMP_DESC_VERSION 0x4
+#define QMP_DESC_FEATURES 0x8
+
+/* AOP-side offsets */
+#define QMP_DESC_UCORE_LINK_STATE 0xc
+#define QMP_DESC_UCORE_LINK_STATE_ACK 0x10
+#define QMP_DESC_UCORE_CH_STATE 0x14
+#define QMP_DESC_UCORE_CH_STATE_ACK 0x18
+#define QMP_DESC_UCORE_MBOX_SIZE 0x1c
+#define QMP_DESC_UCORE_MBOX_OFFSET 0x20
+
+/* Linux-side offsets */
+#define QMP_DESC_MCORE_LINK_STATE 0x24
+#define QMP_DESC_MCORE_LINK_STATE_ACK 0x28
+#define QMP_DESC_MCORE_CH_STATE 0x2c
+#define QMP_DESC_MCORE_CH_STATE_ACK 0x30
+#define QMP_DESC_MCORE_MBOX_SIZE 0x34
+#define QMP_DESC_MCORE_MBOX_OFFSET 0x38
+
+#define QMP_STATE_UP GENMASK(15, 0)
+#define QMP_STATE_DOWN GENMASK(31, 16)
+
+#define QMP_MAGIC 0x4d41494c /* mail */
+#define QMP_VERSION 1
+
+/* 64 bytes is enough to store the requests and pads them to a multiple of 4 bytes */
+#define QMP_MSG_LEN 64
+
+/**
+ * struct qmp - driver state for QMP implementation
+ * @msgram: iomem referencing the message RAM used for communication
+ * @dev: reference to QMP device
+ * @mbox_client: mailbox client used to ring the doorbell on transmit
+ * @mbox_chan: mailbox channel used to ring the doorbell on transmit
+ * @offset: offset within @msgram where messages should be written
+ * @size: maximum size of the messages to be transmitted
+ * @event: wait_queue for synchronization with the IRQ
+ * @tx_lock: provides synchronization between multiple callers of qmp_send()
+ * @qdss_clk: QDSS clock hw struct
+ * @pd_data: genpd data
+ */
+struct qmp {
+ void __iomem *msgram;
+ struct device *dev;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ size_t offset;
+ size_t size;
+
+ wait_queue_head_t event;
+
+ struct mutex tx_lock;
+
+ struct clk_hw qdss_clk;
+ struct genpd_onecell_data pd_data;
+};
+
+struct qmp_pd {
+ struct qmp *qmp;
+ struct generic_pm_domain pd;
+};
+
+#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
+
+static void qmp_kick(struct qmp *qmp)
+{
+ mbox_send_message(qmp->mbox_chan, NULL);
+ mbox_client_txdone(qmp->mbox_chan, 0);
+}
+
+static bool qmp_magic_valid(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
+}
+
+static bool qmp_link_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_mcore_channel_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_ucore_channel_up(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
+}
+
+static int qmp_open(struct qmp *qmp)
+{
+ int ret;
+ u32 val;
+
+ if (!qmp_magic_valid(qmp)) {
+ dev_err(qmp->dev, "QMP magic doesn't match\n");
+ return -EINVAL;
+ }
+
+ val = readl(qmp->msgram + QMP_DESC_VERSION);
+ if (val != QMP_VERSION) {
+ dev_err(qmp->dev, "unsupported QMP version %d\n", val);
+ return -EINVAL;
+ }
+
+ qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
+ qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
+ if (!qmp->size) {
+ dev_err(qmp->dev, "invalid mailbox size\n");
+ return -EINVAL;
+ }
+
+ /* Ack remote core's link state */
+ val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
+ writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
+
+ /* Set local core's link state to up */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack link\n");
+ goto timeout_close_link;
+ }
+
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't open channel\n");
+ goto timeout_close_channel;
+ }
+
+ /* Ack remote core's channel state */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack channel\n");
+ goto timeout_close_channel;
+ }
+
+ return 0;
+
+timeout_close_channel:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+timeout_close_link:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+
+ return -ETIMEDOUT;
+}
+
+static void qmp_close(struct qmp *qmp)
+{
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+}
+
+static irqreturn_t qmp_intr(int irq, void *data)
+{
+ struct qmp *qmp = data;
+
+ wake_up_interruptible_all(&qmp->event);
+
+ return IRQ_HANDLED;
+}
+
+static bool qmp_message_empty(struct qmp *qmp)
+{
+ return readl(qmp->msgram + qmp->offset) == 0;
+}
+
+/**
+ * qmp_send() - send a message to the AOSS
+ * @qmp: qmp context
+ * @data: message to be sent
+ * @len: length of the message
+ *
+ * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
+ * @len must be a multiple of 4 and not longer than the mailbox size. Access is
+ * synchronized by this implementation.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int qmp_send(struct qmp *qmp, const void *data, size_t len)
+{
+ long time_left;
+ int ret;
+
+ if (WARN_ON(len + sizeof(u32) > qmp->size))
+ return -EINVAL;
+
+ if (WARN_ON(len % sizeof(u32)))
+ return -EINVAL;
+
+ mutex_lock(&qmp->tx_lock);
+
+ /* The message RAM only implements 32-bit accesses */
+ __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
+ data, len / sizeof(u32));
+ writel(len, qmp->msgram + qmp->offset);
+ qmp_kick(qmp);
+
+ time_left = wait_event_interruptible_timeout(qmp->event,
+ qmp_message_empty(qmp), HZ);
+ if (!time_left) {
+ dev_err(qmp->dev, "ucore did not ack channel\n");
+ ret = -ETIMEDOUT;
+
+ /* Clear message from buffer */
+ writel(0, qmp->msgram + qmp->offset);
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&qmp->tx_lock);
+
+ return ret;
+}
+
+static int qmp_qdss_clk_prepare(struct clk_hw *hw)
+{
+ static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ return qmp_send(qmp, buf, sizeof(buf));
+}
+
+static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
+{
+ static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ qmp_send(qmp, buf, sizeof(buf));
+}
+
+static const struct clk_ops qmp_qdss_clk_ops = {
+ .prepare = qmp_qdss_clk_prepare,
+ .unprepare = qmp_qdss_clk_unprepare,
+};
+
+static int qmp_qdss_clk_add(struct qmp *qmp)
+{
+ static const struct clk_init_data qdss_init = {
+ .ops = &qmp_qdss_clk_ops,
+ .name = "qdss",
+ };
+ int ret;
+
+ qmp->qdss_clk.init = &qdss_init;
+ ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "failed to register qdss clock\n");
+ return ret;
+ }
+
+ ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
+ &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "unable to register of clk hw provider\n");
+ clk_hw_unregister(&qmp->qdss_clk);
+ }
+
+ return ret;
+}
+
+static void qmp_qdss_clk_remove(struct qmp *qmp)
+{
+ of_clk_del_provider(qmp->dev->of_node);
+ clk_hw_unregister(&qmp->qdss_clk);
+}
+
+static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
+{
+ char buf[QMP_MSG_LEN] = {};
+
+ snprintf(buf, sizeof(buf),
+ "{class: image, res: load_state, name: %s, val: %s}",
+ res->pd.name, enable ? "on" : "off");
+ return qmp_send(res->qmp, buf, sizeof(buf));
+}
+
+static int qmp_pd_power_on(struct generic_pm_domain *domain)
+{
+ return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
+}
+
+static int qmp_pd_power_off(struct generic_pm_domain *domain)
+{
+ return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
+}
+
+static const char * const sdm845_resources[] = {
+ [AOSS_QMP_LS_CDSP] = "cdsp",
+ [AOSS_QMP_LS_LPASS] = "adsp",
+ [AOSS_QMP_LS_MODEM] = "modem",
+ [AOSS_QMP_LS_SLPI] = "slpi",
+ [AOSS_QMP_LS_SPSS] = "spss",
+ [AOSS_QMP_LS_VENUS] = "venus",
+};
+
+static int qmp_pd_add(struct qmp *qmp)
+{
+ struct genpd_onecell_data *data = &qmp->pd_data;
+ struct device *dev = qmp->dev;
+ struct qmp_pd *res;
+ size_t num = ARRAY_SIZE(sdm845_resources);
+ int ret;
+ int i;
+
+ res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ res[i].qmp = qmp;
+ res[i].pd.name = sdm845_resources[i];
+ res[i].pd.power_on = qmp_pd_power_on;
+ res[i].pd.power_off = qmp_pd_power_off;
+
+ ret = pm_genpd_init(&res[i].pd, NULL, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to init genpd\n");
+ goto unroll_genpds;
+ }
+
+ data->domains[i] = &res[i].pd;
+ }
+
+ data->num_domains = i;
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, data);
+ if (ret < 0)
+ goto unroll_genpds;
+
+ return 0;
+
+unroll_genpds:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(data->domains[i]);
+
+ return ret;
+}
+
+static void qmp_pd_remove(struct qmp *qmp)
+{
+ struct genpd_onecell_data *data = &qmp->pd_data;
+ struct device *dev = qmp->dev;
+ int i;
+
+ of_genpd_del_provider(dev->of_node);
+
+ for (i = 0; i < data->num_domains; i++)
+ pm_genpd_remove(data->domains[i]);
+}
+
+static int qmp_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct qmp *qmp;
+ int irq;
+ int ret;
+
+ qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = &pdev->dev;
+ init_waitqueue_head(&qmp->event);
+ mutex_init(&qmp->tx_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qmp->msgram))
+ return PTR_ERR(qmp->msgram);
+
+ qmp->mbox_client.dev = &pdev->dev;
+ qmp->mbox_client.knows_txdone = true;
+ qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
+ if (IS_ERR(qmp->mbox_chan)) {
+ dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
+ return PTR_ERR(qmp->mbox_chan);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
+ "aoss-qmp", qmp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto err_free_mbox;
+ }
+
+ ret = qmp_open(qmp);
+ if (ret < 0)
+ goto err_free_mbox;
+
+ ret = qmp_qdss_clk_add(qmp);
+ if (ret)
+ goto err_close_qmp;
+
+ ret = qmp_pd_add(qmp);
+ if (ret)
+ goto err_remove_qdss_clk;
+
+ platform_set_drvdata(pdev, qmp);
+
+ return 0;
+
+err_remove_qdss_clk:
+ qmp_qdss_clk_remove(qmp);
+err_close_qmp:
+ qmp_close(qmp);
+err_free_mbox:
+ mbox_free_channel(qmp->mbox_chan);
+
+ return ret;
+}
+
+static int qmp_remove(struct platform_device *pdev)
+{
+ struct qmp *qmp = platform_get_drvdata(pdev);
+
+ qmp_qdss_clk_remove(qmp);
+ qmp_pd_remove(qmp);
+
+ qmp_close(qmp);
+ mbox_free_channel(qmp->mbox_chan);
+
+ return 0;
+}
+
+static const struct of_device_id qmp_dt_match[] = {
+ { .compatible = "qcom,sdm845-aoss-qmp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qmp_dt_match);
+
+static struct platform_driver qmp_driver = {
+ .driver = {
+ .name = "qcom_aoss_qmp",
+ .of_match_table = qmp_dt_match,
+ },
+ .probe = qmp_probe,
+ .remove = qmp_remove,
+};
+module_platform_driver(qmp_driver);
+
+MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
+MODULE_LICENSE("GPL v2");
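Requests to the AOSS are plain text placed in a fixed QMP_MSG_LEN (64 byte) buffer, as qmp_qdss_clk_prepare() and qmp_pd_power_toggle() above show, and the whole buffer is handed to qmp_send() so the length is always a multiple of four. A stand-alone sketch of how such a message is framed; it only formats and prints the buffer, and "adsp" is simply one of the sdm845 resource names listed above:

/*
 * Stand-alone sketch of how a QMP request is framed: a short key/value
 * string in a fixed 64-byte buffer that is sent whole. Nothing is written
 * to message RAM here; the buffer is only printed.
 */
#include <stdio.h>
#include <string.h>

#define QMP_MSG_LEN 64

int main(void)
{
	char buf[QMP_MSG_LEN] = { 0 };

	snprintf(buf, sizeof(buf),
		 "{class: image, res: load_state, name: %s, val: %s}",
		 "adsp", "on");
	printf("sending %zu bytes: %s\n", sizeof(buf), buf);
	return 0;
}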
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 005326050c23..3c1a55cf25d6 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -16,56 +16,76 @@
#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
-/* Resource types */
+/* Resource types:
+ * RPMPD_X is X encoded as a little-endian, lower-case, ASCII string */
#define RPMPD_SMPA 0x61706d73
#define RPMPD_LDOA 0x616f646c
+#define RPMPD_RWCX 0x78637772
+#define RPMPD_RWMX 0x786d7772
+#define RPMPD_RWLC 0x636c7772
+#define RPMPD_RWLM 0x6d6c7772
+#define RPMPD_RWSC 0x63737772
+#define RPMPD_RWSM 0x6d737772
/* Operation Keys */
#define KEY_CORNER 0x6e726f63 /* corn */
#define KEY_ENABLE 0x6e657773 /* swen */
#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
+#define KEY_LEVEL 0x6c766c76 /* vlvl */
-#define MAX_RPMPD_STATE 6
+#define MAX_8996_RPMPD_STATE 6
-#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \
+#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
+ r_id) \
static struct rpmpd _platform##_##_active; \
static struct rpmpd _platform##_##_name = { \
.pd = { .name = #_name, }, \
.peer = &_platform##_##_active, \
- .res_type = RPMPD_SMPA, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
- .key = KEY_CORNER, \
+ .key = KEY_##r_key, \
}; \
static struct rpmpd _platform##_##_active = { \
.pd = { .name = #_active, }, \
.peer = &_platform##_##_name, \
.active_only = true, \
- .res_type = RPMPD_SMPA, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
- .key = KEY_CORNER, \
+ .key = KEY_##r_key, \
}
-#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \
+#define DEFINE_RPMPD_CORNER(_platform, _name, r_type, r_id) \
static struct rpmpd _platform##_##_name = { \
.pd = { .name = #_name, }, \
- .res_type = RPMPD_LDOA, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
.key = KEY_CORNER, \
}
-#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \
+#define DEFINE_RPMPD_LEVEL(_platform, _name, r_type, r_id) \
static struct rpmpd _platform##_##_name = { \
.pd = { .name = #_name, }, \
- .res_type = r_type, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
- .key = KEY_FLOOR_CORNER, \
+ .key = KEY_LEVEL, \
}
-#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \
- DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+#define DEFINE_RPMPD_VFC(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
-#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \
- DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+#define DEFINE_RPMPD_VFL(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_LEVEL, \
+ }
struct rpmpd_req {
__le32 key;
@@ -83,23 +103,25 @@ struct rpmpd {
const int res_type;
const int res_id;
struct qcom_smd_rpm *rpm;
+ unsigned int max_state;
__le32 key;
};
struct rpmpd_desc {
struct rpmpd **rpmpds;
size_t num_pds;
+ unsigned int max_state;
};
static DEFINE_MUTEX(rpmpd_lock);
/* msm8996 RPM Power domains */
-DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
-DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
-DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_CORNER(msm8996, vddsscx, LDOA, 26);
-DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
-DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+DEFINE_RPMPD_VFC(msm8996, vddcx_vfc, SMPA, 1);
+DEFINE_RPMPD_VFC(msm8996, vddsscx_vfc, LDOA, 26);
static struct rpmpd *msm8996_rpmpds[] = {
[MSM8996_VDDCX] = &msm8996_vddcx,
@@ -114,10 +136,71 @@ static struct rpmpd *msm8996_rpmpds[] = {
static const struct rpmpd_desc msm8996_desc = {
.rpmpds = msm8996_rpmpds,
.num_pds = ARRAY_SIZE(msm8996_rpmpds),
+ .max_state = MAX_8996_RPMPD_STATE,
+};
+
+/* msm8998 RPM Power domains */
+DEFINE_RPMPD_PAIR(msm8998, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(msm8998, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(msm8998, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(msm8998, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(msm8998, vdd_ssccx, RWSC, 0);
+DEFINE_RPMPD_VFL(msm8998, vdd_ssccx_vfl, RWSC, 0);
+
+DEFINE_RPMPD_LEVEL(msm8998, vdd_sscmx, RWSM, 0);
+DEFINE_RPMPD_VFL(msm8998, vdd_sscmx_vfl, RWSM, 0);
+
+static struct rpmpd *msm8998_rpmpds[] = {
+ [MSM8998_VDDCX] = &msm8998_vddcx,
+ [MSM8998_VDDCX_AO] = &msm8998_vddcx_ao,
+ [MSM8998_VDDCX_VFL] = &msm8998_vddcx_vfl,
+ [MSM8998_VDDMX] = &msm8998_vddmx,
+ [MSM8998_VDDMX_AO] = &msm8998_vddmx_ao,
+ [MSM8998_VDDMX_VFL] = &msm8998_vddmx_vfl,
+ [MSM8998_SSCCX] = &msm8998_vdd_ssccx,
+ [MSM8998_SSCCX_VFL] = &msm8998_vdd_ssccx_vfl,
+ [MSM8998_SSCMX] = &msm8998_vdd_sscmx,
+ [MSM8998_SSCMX_VFL] = &msm8998_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc msm8998_desc = {
+ .rpmpds = msm8998_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8998_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
+};
+
+/* qcs404 RPM Power domains */
+DEFINE_RPMPD_PAIR(qcs404, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(qcs404, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(qcs404, vdd_lpicx, RWLC, 0);
+DEFINE_RPMPD_VFL(qcs404, vdd_lpicx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(qcs404, vdd_lpimx, RWLM, 0);
+DEFINE_RPMPD_VFL(qcs404, vdd_lpimx_vfl, RWLM, 0);
+
+static struct rpmpd *qcs404_rpmpds[] = {
+ [QCS404_VDDMX] = &qcs404_vddmx,
+ [QCS404_VDDMX_AO] = &qcs404_vddmx_ao,
+ [QCS404_VDDMX_VFL] = &qcs404_vddmx_vfl,
+ [QCS404_LPICX] = &qcs404_vdd_lpicx,
+ [QCS404_LPICX_VFL] = &qcs404_vdd_lpicx_vfl,
+ [QCS404_LPIMX] = &qcs404_vdd_lpimx,
+ [QCS404_LPIMX_VFL] = &qcs404_vdd_lpimx_vfl,
+};
+
+static const struct rpmpd_desc qcs404_desc = {
+ .rpmpds = qcs404_rpmpds,
+ .num_pds = ARRAY_SIZE(qcs404_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
};
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
+ { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ }
};
@@ -225,14 +308,16 @@ static int rpmpd_set_performance(struct generic_pm_domain *domain,
int ret = 0;
struct rpmpd *pd = domain_to_rpmpd(domain);
- if (state > MAX_RPMPD_STATE)
- goto out;
+ if (state > pd->max_state)
+ state = pd->max_state;
mutex_lock(&rpmpd_lock);
pd->corner = state;
- if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+ /* Always send updates for vfc and vfl */
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER &&
+ pd->key != KEY_FLOOR_LEVEL)
goto out;
ret = rpmpd_aggregate_corner(pd);
@@ -287,6 +372,7 @@ static int rpmpd_probe(struct platform_device *pdev)
}
rpmpds[i]->rpm = rpm;
+ rpmpds[i]->max_state = desc->max_state;
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
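The new comment in this hunk says the RPMPD_* resource types are the lower-case resource names packed as little-endian ASCII. A stand-alone check of that encoding, which reproduces the RPMPD_SMPA and RPMPD_RWCX constants defined above (it assumes a little-endian host):

/*
 * Stand-alone check of the encoding described by the new comment above:
 * a lower-case resource name packed little-endian yields the RPMPD_*
 * constants. Assumes a little-endian host.
 */
#include <stdio.h>
#include <string.h>

static unsigned int encode(const char *name)
{
	unsigned int v = 0;

	memcpy(&v, name, 4);
	return v;
}

int main(void)
{
	printf("smpa -> 0x%08x\n", encode("smpa"));	/* RPMPD_SMPA 0x61706d73 */
	printf("rwcx -> 0x%08x\n", encode("rwcx"));	/* RPMPD_RWCX 0x78637772 */
	return 0;
}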
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 68bfca6f20dd..2bbf49e5d441 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -57,14 +57,16 @@ config ARCH_R7S72100
bool "RZ/A1H (R7S72100)"
select PM
select PM_GENERIC_DOMAINS
- select SYS_SUPPORTS_SH_MTU2
select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
+ select SYS_SUPPORTS_SH_MTU2
config ARCH_R7S9210
bool "RZ/A2 (R7S9210)"
select PM
select PM_GENERIC_DOMAINS
select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
config ARCH_R8A73A4
bool "R-Mobile APE6 (R8A73A40)"
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 3342332cc007..54eb6cfc5d5b 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -86,47 +86,47 @@ struct rockchip_pmu {
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
-{ \
- .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
- .status_mask = (status >= 0) ? BIT(status) : 0, \
- .req_mask = (req >= 0) ? BIT(req) : 0, \
- .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
- .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
- .active_wakeup = wakeup, \
+{ \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
+ .active_wakeup = (wakeup), \
}
#define DOMAIN_M(pwr, status, req, idle, ack, wakeup) \
{ \
- .pwr_w_mask = (pwr >= 0) ? BIT(pwr + 16) : 0, \
- .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
- .status_mask = (status >= 0) ? BIT(status) : 0, \
- .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \
- .req_mask = (req >= 0) ? BIT(req) : 0, \
- .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
- .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .pwr_w_mask = (pwr) << 16, \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .req_w_mask = (req) << 16, \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
.active_wakeup = wakeup, \
}
#define DOMAIN_RK3036(req, ack, idle, wakeup) \
{ \
- .req_mask = (req >= 0) ? BIT(req) : 0, \
- .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \
- .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
- .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
+ .req_mask = (req), \
+ .req_w_mask = (req) << 16, \
+ .ack_mask = (ack), \
+ .idle_mask = (idle), \
.active_wakeup = wakeup, \
}
#define DOMAIN_PX30(pwr, status, req, wakeup) \
- DOMAIN_M(pwr, status, req, (req) + 16, req, wakeup)
+ DOMAIN_M(pwr, status, req, (req) << 16, req, wakeup)
#define DOMAIN_RK3288(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, req, (req) + 16, wakeup)
+ DOMAIN(pwr, status, req, req, (req) << 16, wakeup)
#define DOMAIN_RK3328(pwr, status, req, wakeup) \
- DOMAIN_M(pwr, pwr, req, (req) + 10, req, wakeup)
+ DOMAIN_M(pwr, pwr, req, (req) << 10, req, wakeup)
#define DOMAIN_RK3368(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, (req) + 16, req, wakeup)
+ DOMAIN(pwr, status, req, (req) << 16, req, wakeup)
#define DOMAIN_RK3399(pwr, status, req, wakeup) \
DOMAIN(pwr, status, req, req, req, wakeup)
@@ -716,129 +716,129 @@ err_out:
}
static const struct rockchip_domain_info px30_pm_domains[] = {
- [PX30_PD_USB] = DOMAIN_PX30(5, 5, 10, false),
- [PX30_PD_SDCARD] = DOMAIN_PX30(8, 8, 9, false),
- [PX30_PD_GMAC] = DOMAIN_PX30(10, 10, 6, false),
- [PX30_PD_MMC_NAND] = DOMAIN_PX30(11, 11, 5, false),
- [PX30_PD_VPU] = DOMAIN_PX30(12, 12, 14, false),
- [PX30_PD_VO] = DOMAIN_PX30(13, 13, 7, false),
- [PX30_PD_VI] = DOMAIN_PX30(14, 14, 8, false),
- [PX30_PD_GPU] = DOMAIN_PX30(15, 15, 2, false),
+ [PX30_PD_USB] = DOMAIN_PX30(BIT(5), BIT(5), BIT(10), false),
+ [PX30_PD_SDCARD] = DOMAIN_PX30(BIT(8), BIT(8), BIT(9), false),
+ [PX30_PD_GMAC] = DOMAIN_PX30(BIT(10), BIT(10), BIT(6), false),
+ [PX30_PD_MMC_NAND] = DOMAIN_PX30(BIT(11), BIT(11), BIT(5), false),
+ [PX30_PD_VPU] = DOMAIN_PX30(BIT(12), BIT(12), BIT(14), false),
+ [PX30_PD_VO] = DOMAIN_PX30(BIT(13), BIT(13), BIT(7), false),
+ [PX30_PD_VI] = DOMAIN_PX30(BIT(14), BIT(14), BIT(8), false),
+ [PX30_PD_GPU] = DOMAIN_PX30(BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3036_pm_domains[] = {
- [RK3036_PD_MSCH] = DOMAIN_RK3036(14, 23, 30, true),
- [RK3036_PD_CORE] = DOMAIN_RK3036(13, 17, 24, false),
- [RK3036_PD_PERI] = DOMAIN_RK3036(12, 18, 25, false),
- [RK3036_PD_VIO] = DOMAIN_RK3036(11, 19, 26, false),
- [RK3036_PD_VPU] = DOMAIN_RK3036(10, 20, 27, false),
- [RK3036_PD_GPU] = DOMAIN_RK3036(9, 21, 28, false),
- [RK3036_PD_SYS] = DOMAIN_RK3036(8, 22, 29, false),
+ [RK3036_PD_MSCH] = DOMAIN_RK3036(BIT(14), BIT(23), BIT(30), true),
+ [RK3036_PD_CORE] = DOMAIN_RK3036(BIT(13), BIT(17), BIT(24), false),
+ [RK3036_PD_PERI] = DOMAIN_RK3036(BIT(12), BIT(18), BIT(25), false),
+ [RK3036_PD_VIO] = DOMAIN_RK3036(BIT(11), BIT(19), BIT(26), false),
+ [RK3036_PD_VPU] = DOMAIN_RK3036(BIT(10), BIT(20), BIT(27), false),
+ [RK3036_PD_GPU] = DOMAIN_RK3036(BIT(9), BIT(21), BIT(28), false),
+ [RK3036_PD_SYS] = DOMAIN_RK3036(BIT(8), BIT(22), BIT(29), false),
};
static const struct rockchip_domain_info rk3066_pm_domains[] = {
- [RK3066_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false),
- [RK3066_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false),
- [RK3066_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false),
- [RK3066_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false),
- [RK3066_PD_CPU] = DOMAIN(-1, 5, 1, 26, 31, false),
+ [RK3066_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
+ [RK3066_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
+ [RK3066_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
+ [RK3066_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
+ [RK3066_PD_CPU] = DOMAIN(0, BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3128_pm_domains[] = {
- [RK3128_PD_CORE] = DOMAIN_RK3288(0, 0, 4, false),
- [RK3128_PD_MSCH] = DOMAIN_RK3288(-1, -1, 6, true),
- [RK3128_PD_VIO] = DOMAIN_RK3288(3, 3, 2, false),
- [RK3128_PD_VIDEO] = DOMAIN_RK3288(2, 2, 1, false),
- [RK3128_PD_GPU] = DOMAIN_RK3288(1, 1, 3, false),
+ [RK3128_PD_CORE] = DOMAIN_RK3288(BIT(0), BIT(0), BIT(4), false),
+ [RK3128_PD_MSCH] = DOMAIN_RK3288(0, 0, BIT(6), true),
+ [RK3128_PD_VIO] = DOMAIN_RK3288(BIT(3), BIT(3), BIT(2), false),
+ [RK3128_PD_VIDEO] = DOMAIN_RK3288(BIT(2), BIT(2), BIT(1), false),
+ [RK3128_PD_GPU] = DOMAIN_RK3288(BIT(1), BIT(1), BIT(3), false),
};
static const struct rockchip_domain_info rk3188_pm_domains[] = {
- [RK3188_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false),
- [RK3188_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false),
- [RK3188_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false),
- [RK3188_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false),
- [RK3188_PD_CPU] = DOMAIN(5, 5, 1, 26, 31, false),
+ [RK3188_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
+ [RK3188_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
+ [RK3188_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
+ [RK3188_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
+ [RK3188_PD_CPU] = DOMAIN(BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3228_pm_domains[] = {
- [RK3228_PD_CORE] = DOMAIN_RK3036(0, 0, 16, true),
- [RK3228_PD_MSCH] = DOMAIN_RK3036(1, 1, 17, true),
- [RK3228_PD_BUS] = DOMAIN_RK3036(2, 2, 18, true),
- [RK3228_PD_SYS] = DOMAIN_RK3036(3, 3, 19, true),
- [RK3228_PD_VIO] = DOMAIN_RK3036(4, 4, 20, false),
- [RK3228_PD_VOP] = DOMAIN_RK3036(5, 5, 21, false),
- [RK3228_PD_VPU] = DOMAIN_RK3036(6, 6, 22, false),
- [RK3228_PD_RKVDEC] = DOMAIN_RK3036(7, 7, 23, false),
- [RK3228_PD_GPU] = DOMAIN_RK3036(8, 8, 24, false),
- [RK3228_PD_PERI] = DOMAIN_RK3036(9, 9, 25, true),
- [RK3228_PD_GMAC] = DOMAIN_RK3036(10, 10, 26, false),
+ [RK3228_PD_CORE] = DOMAIN_RK3036(BIT(0), BIT(0), BIT(16), true),
+ [RK3228_PD_MSCH] = DOMAIN_RK3036(BIT(1), BIT(1), BIT(17), true),
+ [RK3228_PD_BUS] = DOMAIN_RK3036(BIT(2), BIT(2), BIT(18), true),
+ [RK3228_PD_SYS] = DOMAIN_RK3036(BIT(3), BIT(3), BIT(19), true),
+ [RK3228_PD_VIO] = DOMAIN_RK3036(BIT(4), BIT(4), BIT(20), false),
+ [RK3228_PD_VOP] = DOMAIN_RK3036(BIT(5), BIT(5), BIT(21), false),
+ [RK3228_PD_VPU] = DOMAIN_RK3036(BIT(6), BIT(6), BIT(22), false),
+ [RK3228_PD_RKVDEC] = DOMAIN_RK3036(BIT(7), BIT(7), BIT(23), false),
+ [RK3228_PD_GPU] = DOMAIN_RK3036(BIT(8), BIT(8), BIT(24), false),
+ [RK3228_PD_PERI] = DOMAIN_RK3036(BIT(9), BIT(9), BIT(25), true),
+ [RK3228_PD_GMAC] = DOMAIN_RK3036(BIT(10), BIT(10), BIT(26), false),
};
static const struct rockchip_domain_info rk3288_pm_domains[] = {
- [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false),
- [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false),
- [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false),
- [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false),
+ [RK3288_PD_VIO] = DOMAIN_RK3288(BIT(7), BIT(7), BIT(4), false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(BIT(14), BIT(10), BIT(9), false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(BIT(8), BIT(8), BIT(3), false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(BIT(9), BIT(9), BIT(2), false),
};
static const struct rockchip_domain_info rk3328_pm_domains[] = {
- [RK3328_PD_CORE] = DOMAIN_RK3328(-1, 0, 0, false),
- [RK3328_PD_GPU] = DOMAIN_RK3328(-1, 1, 1, false),
- [RK3328_PD_BUS] = DOMAIN_RK3328(-1, 2, 2, true),
- [RK3328_PD_MSCH] = DOMAIN_RK3328(-1, 3, 3, true),
- [RK3328_PD_PERI] = DOMAIN_RK3328(-1, 4, 4, true),
- [RK3328_PD_VIDEO] = DOMAIN_RK3328(-1, 5, 5, false),
- [RK3328_PD_HEVC] = DOMAIN_RK3328(-1, 6, 6, false),
- [RK3328_PD_VIO] = DOMAIN_RK3328(-1, 8, 8, false),
- [RK3328_PD_VPU] = DOMAIN_RK3328(-1, 9, 9, false),
+ [RK3328_PD_CORE] = DOMAIN_RK3328(0, BIT(0), BIT(0), false),
+ [RK3328_PD_GPU] = DOMAIN_RK3328(0, BIT(1), BIT(1), false),
+ [RK3328_PD_BUS] = DOMAIN_RK3328(0, BIT(2), BIT(2), true),
+ [RK3328_PD_MSCH] = DOMAIN_RK3328(0, BIT(3), BIT(3), true),
+ [RK3328_PD_PERI] = DOMAIN_RK3328(0, BIT(4), BIT(4), true),
+ [RK3328_PD_VIDEO] = DOMAIN_RK3328(0, BIT(5), BIT(5), false),
+ [RK3328_PD_HEVC] = DOMAIN_RK3328(0, BIT(6), BIT(6), false),
+ [RK3328_PD_VIO] = DOMAIN_RK3328(0, BIT(8), BIT(8), false),
+ [RK3328_PD_VPU] = DOMAIN_RK3328(0, BIT(9), BIT(9), false),
};
static const struct rockchip_domain_info rk3366_pm_domains[] = {
- [RK3366_PD_PERI] = DOMAIN_RK3368(10, 10, 6, true),
- [RK3366_PD_VIO] = DOMAIN_RK3368(14, 14, 8, false),
- [RK3366_PD_VIDEO] = DOMAIN_RK3368(13, 13, 7, false),
- [RK3366_PD_RKVDEC] = DOMAIN_RK3368(11, 11, 7, false),
- [RK3366_PD_WIFIBT] = DOMAIN_RK3368(8, 8, 9, false),
- [RK3366_PD_VPU] = DOMAIN_RK3368(12, 12, 7, false),
- [RK3366_PD_GPU] = DOMAIN_RK3368(15, 15, 2, false),
+ [RK3366_PD_PERI] = DOMAIN_RK3368(BIT(10), BIT(10), BIT(6), true),
+ [RK3366_PD_VIO] = DOMAIN_RK3368(BIT(14), BIT(14), BIT(8), false),
+ [RK3366_PD_VIDEO] = DOMAIN_RK3368(BIT(13), BIT(13), BIT(7), false),
+ [RK3366_PD_RKVDEC] = DOMAIN_RK3368(BIT(11), BIT(11), BIT(7), false),
+ [RK3366_PD_WIFIBT] = DOMAIN_RK3368(BIT(8), BIT(8), BIT(9), false),
+ [RK3366_PD_VPU] = DOMAIN_RK3368(BIT(12), BIT(12), BIT(7), false),
+ [RK3366_PD_GPU] = DOMAIN_RK3368(BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
- [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true),
- [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false),
- [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false),
- [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false),
- [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false),
+ [RK3368_PD_PERI] = DOMAIN_RK3368(BIT(13), BIT(12), BIT(6), true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(BIT(15), BIT(14), BIT(8), false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(BIT(14), BIT(13), BIT(7), false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(BIT(16), BIT(15), BIT(2), false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(BIT(17), BIT(16), BIT(2), false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
- [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false),
- [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false),
- [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true),
- [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true),
- [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true),
- [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true),
- [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true),
- [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true),
- [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false),
- [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false),
- [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false),
- [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false),
- [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false),
- [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false),
- [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false),
- [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false),
- [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false),
- [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false),
- [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false),
- [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false),
- [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true),
- [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true),
- [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true),
- [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false),
- [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true),
- [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true),
- [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true),
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(BIT(8), BIT(8), 0, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(BIT(9), BIT(9), 0, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(BIT(10), BIT(10), 0, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(0, 0, BIT(15), true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(0, 0, BIT(16), true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(BIT(11), BIT(11), BIT(1), true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(BIT(12), BIT(12), BIT(2), true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(BIT(13), BIT(13), BIT(14), true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(BIT(14), BIT(14), BIT(17), false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(BIT(15), BIT(15), BIT(0), false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(BIT(16), BIT(16), BIT(3), false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(BIT(17), BIT(17), BIT(4), false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(BIT(18), BIT(18), BIT(5), false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(BIT(19), BIT(19), BIT(6), false),
+ [RK3399_PD_VO] = DOMAIN_RK3399(BIT(20), BIT(20), 0, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(0, 0, BIT(7), false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(0, 0, BIT(8), false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(BIT(22), BIT(22), BIT(9), false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(BIT(23), BIT(23), BIT(10), false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(BIT(24), BIT(24), BIT(11), false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(BIT(25), BIT(25), BIT(23), true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(BIT(26), BIT(26), BIT(24), true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(BIT(27), BIT(27), BIT(12), true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(BIT(28), BIT(28), BIT(22), false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(BIT(29), BIT(29), BIT(27), true),
+ [RK3399_PD_SD] = DOMAIN_RK3399(BIT(30), BIT(30), BIT(28), true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(BIT(31), BIT(31), BIT(29), true),
};
static const struct rockchip_pmu_info px30_pmu = {
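The Rockchip hunks above switch the power-domain tables from raw bit indices (with -1 as a "not present" sentinel) to ready-made BIT() masks, where a mask of 0 now simply means "no bit to program". Below is a small standalone sketch of why that shape helps; the names (struct pd_info, request_power_off) are stand-ins, not the driver's real DOMAIN_RK* macros or register layout.

/* Minimal sketch (not the driver's real definitions): storing ready-made
 * masks lets the write path OR them into the register without re-deriving
 * BIT(n) or special-casing a -1 "not present" index.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))

struct pd_info {
	uint32_t req_mask;	/* 0 means "domain has no request bit" */
	uint32_t status_mask;
};

static const struct pd_info pd_gpu  = { BIT(1), BIT(1) };
static const struct pd_info pd_vopb = { 0,      BIT(7) };

static void request_power_off(uint32_t *reg, const struct pd_info *pd)
{
	if (!pd->req_mask)	/* nothing to request for this domain */
		return;
	*reg |= pd->req_mask;
}

int main(void)
{
	uint32_t reg = 0;

	request_power_off(&reg, &pd_gpu);
	request_power_off(&reg, &pd_vopb);	/* no-op, mask is 0 */
	printf("reg = 0x%08x\n", reg);		/* prints 0x00000002 */
	return 0;
}

Storing the mask itself means the write path never re-derives BIT(n) and the only sentinel check left is "is the mask zero".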
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index fbfce48ffb0d..c8ef05d6b8c7 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -109,6 +109,7 @@ config ARCH_TEGRA_186_SOC
config ARCH_TEGRA_194_SOC
bool "NVIDIA Tegra194 SoC"
select MAILBOX
+ select PINCTRL_TEGRA194
select TEGRA_BPMP
select TEGRA_HSP_MBOX
select TEGRA_IVC
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 9b84bcc356d0..3eb44e65b326 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -133,8 +133,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
fuse->clk = devm_clk_get(&pdev->dev, "fuse");
if (IS_ERR(fuse->clk)) {
- dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
- PTR_ERR(fuse->clk));
+ if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
+ PTR_ERR(fuse->clk));
+
fuse->base = base;
return PTR_ERR(fuse->clk);
}
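The fuse-tegra hunk above keeps the error message but skips it when the clock lookup returns -EPROBE_DEFER, since deferral only means the clock provider has not probed yet and the core will retry. Below is a standalone sketch of that pattern; get_clock() and EPROBE_DEFER_ERR are illustrative stand-ins, not kernel APIs.

/* Standalone sketch of the "stay quiet on probe deferral" pattern. */
#include <stdio.h>

#define EPROBE_DEFER_ERR	(-517)	/* mirrors the kernel's -EPROBE_DEFER */

static int get_clock(void)
{
	return EPROBE_DEFER_ERR;	/* pretend the clock isn't ready yet */
}

static int probe(void)
{
	int err = get_clock();

	if (err < 0) {
		/* Deferral is normal: the device will be probed again once
		 * the clock provider shows up, so don't alarm the user.
		 */
		if (err != EPROBE_DEFER_ERR)
			fprintf(stderr, "failed to get clock: %d\n", err);
		return err;
	}
	return 0;
}

int main(void)
{
	return probe() == EPROBE_DEFER_ERR ? 0 : 1;
}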
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 17e7796a832b..9f9c1c677cf4 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -232,6 +232,11 @@ struct tegra_pmc_soc {
const char * const *reset_levels;
unsigned int num_reset_levels;
+ /*
+ * These describe events that can wake the system from sleep (i.e.
+ * LP0 or SC7). Wakeup from other sleep states (such as LP1 or LP2)
+	 * is dealt with in the LIC.
+ */
const struct tegra_wake_event *wake_events;
unsigned int num_wake_events;
};
@@ -1855,6 +1860,9 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int i;
int err = 0;
+ if (WARN_ON(num_irqs > 1))
+ return -EINVAL;
+
for (i = 0; i < soc->num_wake_events; i++) {
const struct tegra_wake_event *event = &soc->wake_events[i];
@@ -1895,6 +1903,11 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
}
}
+ /*
+ * For interrupts that don't have associated wake events, assign a
+ * dummy hardware IRQ number. This is used in the ->irq_set_type()
+ * and ->irq_set_wake() callbacks to return early for these IRQs.
+ */
if (i == soc->num_wake_events)
err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
&pmc->irq, pmc);
@@ -1913,6 +1926,10 @@ static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
unsigned int offset, bit;
u32 value;
+ /* nothing to do if there's no associated wake event */
+ if (WARN_ON(data->hwirq == ULONG_MAX))
+ return 0;
+
offset = data->hwirq / 32;
bit = data->hwirq % 32;
@@ -1940,6 +1957,7 @@ static int tegra_pmc_irq_set_type(struct irq_data *data, unsigned int type)
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
u32 value;
+ /* nothing to do if there's no associated wake event */
if (data->hwirq == ULONG_MAX)
return 0;
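The pmc.c hunks above hang together around one convention: interrupts with no associated wake event get ULONG_MAX as their hardware IRQ number, and the ->irq_set_wake()/->irq_set_type() callbacks bail out early when they see it. Below is a standalone sketch of that sentinel scheme, using a stand-in structure instead of struct irq_data.

/* Sketch of the "sentinel hwirq" convention: ULONG_MAX marks interrupts
 * that have no associated wake event, so the wake path does nothing.
 */
#include <limits.h>
#include <stdio.h>

struct fake_irq_data {
	unsigned long hwirq;
};

static int set_wake(struct fake_irq_data *data, unsigned int on)
{
	/* nothing to do if there's no associated wake event */
	if (data->hwirq == ULONG_MAX)
		return 0;

	printf("programming wake word %lu, bit %lu, on=%u\n",
	       data->hwirq / 32, data->hwirq % 32, on);
	return 0;
}

int main(void)
{
	struct fake_irq_data with_event = { .hwirq = 35 };
	struct fake_irq_data without_event = { .hwirq = ULONG_MAX };

	set_wake(&with_event, 1);	/* touches word 1, bit 3 */
	set_wake(&without_event, 1);	/* early return, prints nothing */
	return 0;
}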
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index d7d50d48d05d..cf545f428d03 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -9,6 +9,11 @@ config ARCH_K3_AM6_SOC
help
Enable support for TI's AM6 SoC Family support
+config ARCH_K3_J721E_SOC
+ bool "K3 J721E SoC"
+ help
+	  Enable support for TI's J721E SoC family.
+
endif
endif
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index fc5802ccb1c0..bb77c220b6f8 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -178,6 +178,7 @@ static int am33xx_pm_suspend(suspend_state_t suspend_state)
suspend_wfi_flags);
suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY;
+ dev_info(pm33xx_dev, "Entering RTC Only mode with DDR in self-refresh\n");
if (!ret) {
clk_restore_context();
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 04c23951b831..fd385c8c53a5 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -497,6 +497,7 @@ config SERIAL_SA1100
bool "SA1100 serial port support"
depends on ARCH_SA1100
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
help
If you have a machine based on a SA1100/SA1110 StrongARM(R) CPU you
can enable its onboard serial port by enabling this option.
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index a399772be3fc..8e618129e65c 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -28,6 +28,8 @@
#include <mach/hardware.h>
#include <mach/irqs.h>
+#include "serial_mctrl_gpio.h"
+
/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_SA1100_MAJOR 204
#define MINOR_START 5
@@ -77,6 +79,7 @@ struct sa1100_port {
struct uart_port port;
struct timer_list timer;
unsigned int old_status;
+ struct mctrl_gpios *gpios;
};
/*
@@ -174,6 +177,8 @@ static void sa1100_enable_ms(struct uart_port *port)
container_of(port, struct sa1100_port, port);
mod_timer(&sport->timer, jiffies);
+
+ mctrl_gpio_enable_ms(sport->gpios);
}
static void
@@ -322,11 +327,21 @@ static unsigned int sa1100_tx_empty(struct uart_port *port)
static unsigned int sa1100_get_mctrl(struct uart_port *port)
{
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
+ int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ mctrl_gpio_get(sport->gpios, &ret);
+
+ return ret;
}
static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
+
+ mctrl_gpio_set(sport->gpios, mctrl);
}
/*
@@ -842,6 +857,31 @@ static int sa1100_serial_resume(struct platform_device *dev)
return 0;
}
+static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform_device *dev)
+{
+ sport->port.dev = &dev->dev;
+
+	/* mctrl_gpio_init() requires that the GPIO driver supports interrupts,
+	 * but we need to support GPIO drivers for hardware that has no such
+	 * interrupts. Use mctrl_gpio_init_noauto() instead. */
+ sport->gpios = mctrl_gpio_init_noauto(sport->port.dev, 0);
+ if (IS_ERR(sport->gpios)) {
+ int err = PTR_ERR(sport->gpios);
+
+ dev_err(sport->port.dev, "failed to get mctrl gpios: %d\n",
+ err);
+
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ sport->gpios = NULL;
+ }
+
+ platform_set_drvdata(dev, sport);
+
+ return uart_add_one_port(&sa1100_reg, &sport->port);
+}
+
static int sa1100_serial_probe(struct platform_device *dev)
{
struct resource *res = dev->resource;
@@ -856,9 +896,7 @@ static int sa1100_serial_probe(struct platform_device *dev)
if (sa1100_ports[i].port.mapbase != res->start)
continue;
- sa1100_ports[i].port.dev = &dev->dev;
- uart_add_one_port(&sa1100_reg, &sa1100_ports[i].port);
- platform_set_drvdata(dev, &sa1100_ports[i]);
+ sa1100_serial_add_one_port(&sa1100_ports[i], dev);
break;
}
}
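The sa1100 changes above keep the historical always-asserted CTS/DSR/DCD default and let optional modem-control GPIOs extend it; mctrl_gpio_init_noauto() is used so boards whose GPIO controllers cannot raise interrupts still work, with delta detection left to the existing timer. Below is a standalone sketch of the "fold optional GPIO state into a default" idea; gpio_bits() is a simplified stand-in for mctrl_gpio_get(), not its real behaviour.

/* Fold optional GPIO-provided modem signals into a fixed default. */
#include <stdio.h>

#define TIOCM_CTS_BIT	0x020
#define TIOCM_CAR_BIT	0x040
#define TIOCM_RI_BIT	0x080
#define TIOCM_DSR_BIT	0x100

static void gpio_bits(const unsigned int *gpios, unsigned int *mctrl)
{
	if (!gpios)		/* no GPIOs described: leave defaults alone */
		return;
	*mctrl |= *gpios;	/* OR in whatever the GPIO lines report */
}

int main(void)
{
	unsigned int ri = TIOCM_RI_BIT;
	unsigned int mctrl = TIOCM_CTS_BIT | TIOCM_DSR_BIT | TIOCM_CAR_BIT;

	gpio_bits(&ri, &mctrl);
	printf("mctrl = 0x%03x\n", mctrl);	/* 0x1e0: defaults plus RI */
	return 0;
}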
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index fde8d4073e74..4c49f53afa3e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -855,8 +855,6 @@ void tty_ldisc_deinit(struct tty_struct *tty)
tty->ldisc = NULL;
}
-static int zero;
-static int one = 1;
static struct ctl_table tty_table[] = {
{
.procname = "ldisc_autoload",
@@ -864,8 +862,8 @@ static struct ctl_table tty_table[] = {
.maxlen = sizeof(tty_ldisc_autoload),
.mode = 0644,
.proc_handler = proc_dointvec,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
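The sysctl hunk above (and the matching one in the Xen balloon driver later in this series) drops per-file static zero/one variables in favour of the shared SYSCTL_ZERO/SYSCTL_ONE bounds: the table only stores pointers to the limits, so one pair of read-only objects can serve every 0..1 knob. Below is a standalone sketch of that pointer-to-bounds shape; clamp_write() is a stand-in for proc_dointvec_minmax(), not the kernel handler.

/* One shared pair of bounds, referenced by pointer from every table entry. */
#include <stdio.h>

static const int shared_zero = 0;
static const int shared_one = 1;

struct knob {
	const char *name;
	int *value;
	const int *min;
	const int *max;
};

static int clamp_write(const struct knob *k, int new_value)
{
	if (new_value < *k->min || new_value > *k->max)
		return -1;	/* out of range, mimicking -EINVAL */
	*k->value = new_value;
	return 0;
}

int main(void)
{
	int autoload = 1;
	struct knob k = { "ldisc_autoload", &autoload,
			  &shared_zero, &shared_one };

	printf("write 1 -> %d\n", clamp_write(&k, 1));	/* accepted */
	printf("write 2 -> %d\n", clamp_write(&k, 2));	/* rejected */
	return 0;
}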
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 249277d0e53f..b47938dff1a2 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/uts.h>
#include <linux/wait.h>
@@ -1990,7 +1991,7 @@ static const struct super_operations gadget_fs_operations = {
};
static int
-gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
+gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct dev_data *dev;
@@ -2044,11 +2045,19 @@ Enomem:
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
-static struct dentry *
-gadgetfs_mount (struct file_system_type *t, int flags,
- const char *path, void *opts)
+static int gadgetfs_get_tree(struct fs_context *fc)
{
- return mount_single (t, flags, opts, gadgetfs_fill_super);
+ return get_tree_single(fc, gadgetfs_fill_super);
+}
+
+static const struct fs_context_operations gadgetfs_context_ops = {
+ .get_tree = gadgetfs_get_tree,
+};
+
+static int gadgetfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &gadgetfs_context_ops;
+ return 0;
}
static void
@@ -2068,7 +2077,7 @@ gadgetfs_kill_sb (struct super_block *sb)
static struct file_system_type gadgetfs_type = {
.owner = THIS_MODULE,
.name = shortname,
- .mount = gadgetfs_mount,
+ .init_fs_context = gadgetfs_init_fs_context,
.kill_sb = gadgetfs_kill_sb,
};
MODULE_ALIAS_FS("gadgetfs");
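The gadgetfs conversion above (and the xenfs one near the end of this series) follows the usual new-mount-API recipe: .mount goes away, .init_fs_context installs an fs_context_operations table, and its .get_tree hook feeds the old fill_super routine to get_tree_single(). Below is a standalone sketch of that call chain; all the types and helpers here are stand-ins, not the kernel's fs_context definitions.

/* Sketch of the init_fs_context -> get_tree -> fill_super indirection. */
#include <stdio.h>

struct fs_ctx;

struct fs_ctx_ops {
	int (*get_tree)(struct fs_ctx *fc);
};

struct fs_ctx {
	const struct fs_ctx_ops *ops;
};

static int fill_super(struct fs_ctx *fc)
{
	printf("building the single superblock\n");
	return 0;
}

/* plays the role of get_tree_single(fc, fill_super) */
static int get_tree_single_sketch(struct fs_ctx *fc,
				  int (*fill)(struct fs_ctx *))
{
	return fill(fc);
}

static int my_get_tree(struct fs_ctx *fc)
{
	return get_tree_single_sketch(fc, fill_super);
}

static const struct fs_ctx_ops my_ctx_ops = {
	.get_tree = my_get_tree,
};

static int my_init_fs_context(struct fs_ctx *fc)
{
	fc->ops = &my_ctx_ops;
	return 0;
}

int main(void)
{
	struct fs_ctx fc = { 0 };

	my_init_fs_context(&fc);	/* what context creation would call */
	return fc.ops->get_tree(&fc);	/* what the mount path would call */
}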
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 44339fc87cc7..226fbb995fb0 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>
+#include <linux/pseudo_fs.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -745,20 +746,14 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
return MIGRATEPAGE_SUCCESS;
}
-static struct dentry *balloon_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int balloon_init_fs_context(struct fs_context *fc)
{
- static const struct dentry_operations ops = {
- .d_dname = simple_dname,
- };
-
- return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
- BALLOON_KVM_MAGIC);
+ return init_pseudo(fc, BALLOON_KVM_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type balloon_fs = {
.name = "balloon-kvm",
- .mount = balloon_mount,
+ .init_fs_context = balloon_init_fs_context,
.kill_sb = kill_anon_super,
};
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index ec6558b79e9d..79cc75096f42 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -10,21 +10,6 @@ config XEN_BALLOON
the system to expand the domain's memory allocation, or alternatively
return unneeded memory to the system.
-config XEN_SELFBALLOONING
- bool "Dynamically self-balloon kernel memory to target"
- depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
- help
- Self-ballooning dynamically balloons available kernel memory driven
- by the current usage of anonymous memory ("committed AS") and
- controlled by various sysfs-settable parameters. Configuring
- FRONTSWAP is highly recommended; if it is not configured, self-
- ballooning is disabled by default. If FRONTSWAP is configured,
- frontswap-selfshrinking is enabled by default but can be disabled
- with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
- is enabled by default but can be disabled with the 'tmem.selfballooning=0'
- kernel boot parameter. Note that systems without a sufficiently
- large swap device should not enable self-ballooning.
-
config XEN_BALLOON_MEMORY_HOTPLUG
bool "Memory hotplug support for Xen balloon driver"
depends on XEN_BALLOON && MEMORY_HOTPLUG
@@ -191,14 +176,6 @@ config SWIOTLB_XEN
def_bool y
select SWIOTLB
-config XEN_TMEM
- tristate
- depends on !ARM && !ARM64
- default m if (CLEANCACHE || FRONTSWAP)
- help
- Shim to interface in-kernel Transcendent Memory hooks
- (e.g. cleancache and frontswap) to Xen tmem hypercalls.
-
config XEN_PCIDEV_BACKEND
tristate "Xen PCI-device backend driver"
depends on PCI && X86 && XEN
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index ad3844d9f876..0c4efa6fe450 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,14 +17,12 @@ dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
-obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
-obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d37dd5bb7a8f..4e11de6cde81 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -77,9 +77,6 @@ static int xen_hotplug_unpopulated;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-static int zero;
-static int one = 1;
-
static struct ctl_table balloon_table[] = {
{
.procname = "hotplug_unpopulated",
@@ -87,8 +84,8 @@ static struct ctl_table balloon_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
@@ -538,8 +535,15 @@ static void balloon_process(struct work_struct *work)
state = reserve_additional_memory();
}
- if (credit < 0)
- state = decrease_reservation(-credit, GFP_BALLOON);
+ if (credit < 0) {
+ long n_pages;
+
+ n_pages = min(-credit, si_mem_available());
+ state = decrease_reservation(n_pages, GFP_BALLOON);
+ if (state == BP_DONE && n_pages != -credit &&
+ n_pages < totalreserve_pages)
+ state = BP_EAGAIN;
+ }
state = update_schedule(state);
@@ -578,6 +582,9 @@ static int add_ballooned_pages(int nr_pages)
}
}
+ if (si_mem_available() < nr_pages)
+ return -ENOMEM;
+
st = decrease_reservation(nr_pages, GFP_USER);
if (st != BP_DONE)
return -ENOMEM;
@@ -710,7 +717,7 @@ static int __init balloon_init(void)
balloon_stats.schedule_delay = 1;
balloon_stats.max_schedule_delay = 32;
balloon_stats.retry_count = 1;
- balloon_stats.max_retry_count = RETRY_UNLIMITED;
+ balloon_stats.max_retry_count = 4;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
set_online_page_callback(&xen_online_page);
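The balloon.c hunks above stop the driver from trying to give back more pages than si_mem_available() reports, return BP_EAGAIN so the worker retries when the clamped amount fell short, and cap retries at 4 instead of retrying forever. Below is a simplified standalone sketch of the clamp-and-retry step; it omits the totalreserve_pages check and uses stand-in names rather than Xen APIs.

/* Clamp a requested deflation to available memory and flag a retry. */
#include <stdio.h>

enum bp_state { BP_DONE, BP_EAGAIN };

static long mem_available_pages = 1000;	/* plays si_mem_available() */

static enum bp_state decrease_reservation(long n_pages)
{
	printf("ballooning out %ld pages\n", n_pages);
	return BP_DONE;
}

static enum bp_state balloon_step(long credit)
{
	enum bp_state state = BP_DONE;

	if (credit < 0) {
		long n_pages = -credit < mem_available_pages ?
			       -credit : mem_available_pages;

		state = decrease_reservation(n_pages);
		if (state == BP_DONE && n_pages != -credit)
			state = BP_EAGAIN;	/* couldn't do it all, retry */
	}
	return state;
}

int main(void)
{
	printf("state = %d\n", balloon_step(-500));	/* fits: BP_DONE */
	printf("state = %d\n", balloon_step(-5000));	/* clamped: BP_EAGAIN */
	return 0;
}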
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index ff9b51055b14..2e8570c09789 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1294,7 +1294,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
+static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int masked;
@@ -1328,7 +1328,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
return 0;
}
-EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
@@ -1342,6 +1341,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
return ret;
}
+/* To be called with desc->lock held. */
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+
+ return set_affinity_irq(d, cpumask_of(tcpu), false);
+}
+EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
+
static void enable_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index f341b016672f..052b55a14ebc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn)
this_cpu_write(bind_last_selected_cpu, selected_cpu);
/* unmask expects irqs to be disabled */
- xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
+ xen_set_affinity_evtchn(desc, selected_cpu);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index d53f3493a6b9..cfbe46785a3b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -402,7 +402,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
attrs);
- if (map == DMA_MAPPING_ERROR)
+ if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
dev_addr = xen_phys_to_bus(map);
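The swiotlb-xen hunk above only adds a cast, but it matters on builds where dma_addr_t is wider than phys_addr_t: swiotlb_tbl_map_single() returns a phys_addr_t, so comparing it against the all-ones DMA_MAPPING_ERROR can never succeed unless the constant is truncated to the same width. Below is a standalone illustration with fixed-width stand-ins for the two types.

/* Why the narrower value must be compared against a truncated constant. */
#include <stdint.h>
#include <stdio.h>

#define MAPPING_ERROR	(~(uint64_t)0)		/* like DMA_MAPPING_ERROR */

int main(void)
{
	uint32_t map = (uint32_t)MAPPING_ERROR;	/* what a 32-bit API returns */

	printf("compare without cast: %d\n", (uint64_t)map == MAPPING_ERROR);
	printf("compare with cast:    %d\n", map == (uint32_t)MAPPING_ERROR);
	return 0;
}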
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
deleted file mode 100644
index 64d7479ad5ad..000000000000
--- a/drivers/xen/tmem.c
+++ /dev/null
@@ -1,419 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Xen implementation for transcendent memory (tmem)
- *
- * Copyright (C) 2009-2011 Oracle Corp. All rights reserved.
- * Author: Dan Magenheimer
- */
-
-#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/cleancache.h>
-#include <linux/frontswap.h>
-
-#include <xen/xen.h>
-#include <xen/interface/xen.h>
-#include <xen/page.h>
-#include <asm/xen/hypercall.h>
-#include <asm/xen/hypervisor.h>
-#include <xen/tmem.h>
-
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
- tmem_enabled = true;
- return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
-
-#ifdef CONFIG_CLEANCACHE
-static bool cleancache __read_mostly = true;
-module_param(cleancache, bool, S_IRUGO);
-static bool selfballooning __read_mostly = true;
-module_param(selfballooning, bool, S_IRUGO);
-#endif /* CONFIG_CLEANCACHE */
-
-#ifdef CONFIG_FRONTSWAP
-static bool frontswap __read_mostly = true;
-module_param(frontswap, bool, S_IRUGO);
-#else /* CONFIG_FRONTSWAP */
-#define frontswap (0)
-#endif /* CONFIG_FRONTSWAP */
-
-#ifdef CONFIG_XEN_SELFBALLOONING
-static bool selfshrinking __read_mostly = true;
-module_param(selfshrinking, bool, S_IRUGO);
-#endif /* CONFIG_XEN_SELFBALLOONING */
-
-#define TMEM_CONTROL 0
-#define TMEM_NEW_POOL 1
-#define TMEM_DESTROY_POOL 2
-#define TMEM_NEW_PAGE 3
-#define TMEM_PUT_PAGE 4
-#define TMEM_GET_PAGE 5
-#define TMEM_FLUSH_PAGE 6
-#define TMEM_FLUSH_OBJECT 7
-#define TMEM_READ 8
-#define TMEM_WRITE 9
-#define TMEM_XCHG 10
-
-/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
-#define TMEM_POOL_PERSIST 1
-#define TMEM_POOL_SHARED 2
-#define TMEM_POOL_PAGESIZE_SHIFT 4
-#define TMEM_VERSION_SHIFT 24
-
-
-struct tmem_pool_uuid {
- u64 uuid_lo;
- u64 uuid_hi;
-};
-
-struct tmem_oid {
- u64 oid[3];
-};
-
-#define TMEM_POOL_PRIVATE_UUID { 0, 0 }
-
-/* flags for tmem_ops.new_pool */
-#define TMEM_POOL_PERSIST 1
-#define TMEM_POOL_SHARED 2
-
-/* xen tmem foundation ops/hypercalls */
-
-static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
- u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
-{
- struct tmem_op op;
- int rc = 0;
-
- op.cmd = tmem_cmd;
- op.pool_id = tmem_pool;
- op.u.gen.oid[0] = oid.oid[0];
- op.u.gen.oid[1] = oid.oid[1];
- op.u.gen.oid[2] = oid.oid[2];
- op.u.gen.index = index;
- op.u.gen.tmem_offset = tmem_offset;
- op.u.gen.pfn_offset = pfn_offset;
- op.u.gen.len = len;
- set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
- rc = HYPERVISOR_tmem_op(&op);
- return rc;
-}
-
-static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
- u32 flags, unsigned long pagesize)
-{
- struct tmem_op op;
- int rc = 0, pageshift;
-
- for (pageshift = 0; pagesize != 1; pageshift++)
- pagesize >>= 1;
- flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
- flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
- op.cmd = TMEM_NEW_POOL;
- op.u.new.uuid[0] = uuid.uuid_lo;
- op.u.new.uuid[1] = uuid.uuid_hi;
- op.u.new.flags = flags;
- rc = HYPERVISOR_tmem_op(&op);
- return rc;
-}
-
-/* xen generic tmem ops */
-
-static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
- u32 index, struct page *page)
-{
- return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
- xen_page_to_gfn(page), 0, 0, 0);
-}
-
-static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
- u32 index, struct page *page)
-{
- return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
- xen_page_to_gfn(page), 0, 0, 0);
-}
-
-static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
-{
- return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
- 0, 0, 0, 0);
-}
-
-static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
-{
- return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
-}
-
-
-#ifdef CONFIG_CLEANCACHE
-static int xen_tmem_destroy_pool(u32 pool_id)
-{
- struct tmem_oid oid = { { 0 } };
-
- return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
-}
-
-/* cleancache ops */
-
-static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (pool < 0)
- return;
- if (ind != index)
- return;
- mb(); /* ensure page is quiescent; tmem may address it with an alias */
- (void)xen_tmem_put_page((u32)pool, oid, ind, page);
-}
-
-static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
- int ret;
-
- /* translate return values to linux semantics */
- if (pool < 0)
- return -1;
- if (ind != index)
- return -1;
- ret = xen_tmem_get_page((u32)pool, oid, ind, page);
- if (ret == 1)
- return 0;
- else
- return -1;
-}
-
-static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
- pgoff_t index)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (pool < 0)
- return;
- if (ind != index)
- return;
- (void)xen_tmem_flush_page((u32)pool, oid, ind);
-}
-
-static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
-{
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (pool < 0)
- return;
- (void)xen_tmem_flush_object((u32)pool, oid);
-}
-
-static void tmem_cleancache_flush_fs(int pool)
-{
- if (pool < 0)
- return;
- (void)xen_tmem_destroy_pool((u32)pool);
-}
-
-static int tmem_cleancache_init_fs(size_t pagesize)
-{
- struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;
-
- return xen_tmem_new_pool(uuid_private, 0, pagesize);
-}
-
-static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
-{
- struct tmem_pool_uuid shared_uuid;
-
- shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
- shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
- return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
-}
-
-static const struct cleancache_ops tmem_cleancache_ops = {
- .put_page = tmem_cleancache_put_page,
- .get_page = tmem_cleancache_get_page,
- .invalidate_page = tmem_cleancache_flush_page,
- .invalidate_inode = tmem_cleancache_flush_inode,
- .invalidate_fs = tmem_cleancache_flush_fs,
- .init_shared_fs = tmem_cleancache_init_shared_fs,
- .init_fs = tmem_cleancache_init_fs
-};
-#endif
-
-#ifdef CONFIG_FRONTSWAP
-/* frontswap tmem operations */
-
-/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
-static int tmem_frontswap_poolid;
-
-/*
- * Swizzling increases objects per swaptype, increasing tmem concurrency
- * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
- */
-#define SWIZ_BITS 4
-#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
-#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
-#define iswiz(_ind) (_ind >> SWIZ_BITS)
-
-static inline struct tmem_oid oswiz(unsigned type, u32 ind)
-{
- struct tmem_oid oid = { .oid = { 0 } };
- oid.oid[0] = _oswiz(type, ind);
- return oid;
-}
-
-/* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_store(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- int pool = tmem_frontswap_poolid;
- int ret;
-
- /* THP isn't supported */
- if (PageTransHuge(page))
- return -1;
-
- if (pool < 0)
- return -1;
- if (ind64 != ind)
- return -1;
- mb(); /* ensure page is quiescent; tmem may address it with an alias */
- ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
- /* translate Xen tmem return values to linux semantics */
- if (ret == 1)
- return 0;
- else
- return -1;
-}
-
-/*
- * returns 0 if the page was successfully gotten from frontswap, -1 if
- * was not present (should never happen!)
- */
-static int tmem_frontswap_load(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- int pool = tmem_frontswap_poolid;
- int ret;
-
- if (pool < 0)
- return -1;
- if (ind64 != ind)
- return -1;
- ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
- /* translate Xen tmem return values to linux semantics */
- if (ret == 1)
- return 0;
- else
- return -1;
-}
-
-/* flush a single page from frontswap */
-static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- int pool = tmem_frontswap_poolid;
-
- if (pool < 0)
- return;
- if (ind64 != ind)
- return;
- (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
-}
-
-/* flush all pages from the passed swaptype */
-static void tmem_frontswap_flush_area(unsigned type)
-{
- int pool = tmem_frontswap_poolid;
- int ind;
-
- if (pool < 0)
- return;
- for (ind = SWIZ_MASK; ind >= 0; ind--)
- (void)xen_tmem_flush_object(pool, oswiz(type, ind));
-}
-
-static void tmem_frontswap_init(unsigned ignored)
-{
- struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;
-
- /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
- if (tmem_frontswap_poolid < 0)
- tmem_frontswap_poolid =
- xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
-}
-
-static struct frontswap_ops tmem_frontswap_ops = {
- .store = tmem_frontswap_store,
- .load = tmem_frontswap_load,
- .invalidate_page = tmem_frontswap_flush_page,
- .invalidate_area = tmem_frontswap_flush_area,
- .init = tmem_frontswap_init
-};
-#endif
-
-static int __init xen_tmem_init(void)
-{
- if (!xen_domain())
- return 0;
-#ifdef CONFIG_FRONTSWAP
- if (tmem_enabled && frontswap) {
- char *s = "";
-
- tmem_frontswap_poolid = -1;
- frontswap_register_ops(&tmem_frontswap_ops);
- pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
- s);
- }
-#endif
-#ifdef CONFIG_CLEANCACHE
- BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
- if (tmem_enabled && cleancache) {
- int err;
-
- err = cleancache_register_ops(&tmem_cleancache_ops);
- if (err)
- pr_warn("xen-tmem: failed to enable cleancache: %d\n",
- err);
- else
- pr_info("cleancache enabled, RAM provided by "
- "Xen Transcendent Memory\n");
- }
-#endif
-#ifdef CONFIG_XEN_SELFBALLOONING
- /*
- * There is no point of driving pages to the swap system if they
- * aren't going anywhere in tmem universe.
- */
- if (!frontswap) {
- selfshrinking = false;
- selfballooning = false;
- }
- xen_selfballoon_init(selfballooning, selfshrinking);
-#endif
- return 0;
-}
-
-module_init(xen_tmem_init)
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
-MODULE_DESCRIPTION("Shim to Xen transcendent memory");
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index a67236b02452..6d12fc368210 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -129,8 +129,6 @@ void xen_balloon_init(void)
{
register_balloon(&balloon_dev);
- register_xen_selfballooning(&balloon_dev);
-
register_xenstore_notifier(&xenstore_notifier);
}
EXPORT_SYMBOL_GPL(xen_balloon_init);
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
deleted file mode 100644
index 246f6122c9ee..000000000000
--- a/drivers/xen/xen-selfballoon.c
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- * Xen selfballoon driver (and optional frontswap self-shrinking driver)
- *
- * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
- *
- * This code complements the cleancache and frontswap patchsets to optimize
- * support for Xen Transcendent Memory ("tmem"). The policy it implements
- * is rudimentary and will likely improve over time, but it does work well
- * enough today.
- *
- * Two functionalities are implemented here which both use "control theory"
- * (feedback) to optimize memory utilization. In a virtualized environment
- * such as Xen, RAM is often a scarce resource and we would like to ensure
- * that each of a possibly large number of virtual machines is using RAM
- * efficiently, i.e. using as little as possible when under light load
- * and obtaining as much as possible when memory demands are high.
- * Since RAM needs vary highly dynamically and sometimes dramatically,
- * "hysteresis" is used, that is, memory target is determined not just
- * on current data but also on past data stored in the system.
- *
- * "Selfballooning" creates memory pressure by managing the Xen balloon
- * driver to decrease and increase available kernel memory, driven
- * largely by the target value of "Committed_AS" (see /proc/meminfo).
- * Since Committed_AS does not account for clean mapped pages (i.e. pages
- * in RAM that are identical to pages on disk), selfballooning has the
- * affect of pushing less frequently used clean pagecache pages out of
- * kernel RAM and, presumably using cleancache, into Xen tmem where
- * Xen can more efficiently optimize RAM utilization for such pages.
- *
- * When kernel memory demand unexpectedly increases faster than Xen, via
- * the selfballoon driver, is able to (or chooses to) provide usable RAM,
- * the kernel may invoke swapping. In most cases, frontswap is able
- * to absorb this swapping into Xen tmem. However, due to the fact
- * that the kernel swap subsystem assumes swapping occurs to a disk,
- * swapped pages may sit on the disk for a very long time; even if
- * the kernel knows the page will never be used again. This is because
- * the disk space costs very little and can be overwritten when
- * necessary. When such stale pages are in frontswap, however, they
- * are taking up valuable real estate. "Frontswap selfshrinking" works
- * to resolve this: When frontswap activity is otherwise stable
- * and the guest kernel is not under memory pressure, the "frontswap
- * selfshrinking" accounts for this by providing pressure to remove some
- * pages from frontswap and return them to kernel memory.
- *
- * For both "selfballooning" and "frontswap-selfshrinking", a worker
- * thread is used and sysfs tunables are provided to adjust the frequency
- * and rate of adjustments to achieve the goal, as well as to disable one
- * or both functions independently.
- *
- * While some argue that this functionality can and should be implemented
- * in userspace, it has been observed that bad things happen (e.g. OOMs).
- *
- * System configuration note: Selfballooning should not be enabled on
- * systems without a sufficiently large swap device configured; for best
- * results, it is recommended that total swap be increased by the size
- * of the guest memory. Note, that selfballooning should be disabled by default
- * if frontswap is not configured. Similarly selfballooning should be enabled
- * by default if frontswap is configured and can be disabled with the
- * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is
- * configured, frontswap-selfshrinking can be disabled with the
- * "tmem.selfshrink=0" kernel boot option.
- *
- * Selfballooning is disallowed in domain0 and force-disabled.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/workqueue.h>
-#include <linux/device.h>
-#include <xen/balloon.h>
-#include <xen/tmem.h>
-#include <xen/xen.h>
-
-/* Enable/disable with sysfs. */
-static int xen_selfballooning_enabled __read_mostly;
-
-/*
- * Controls rate at which memory target (this iteration) approaches
- * ultimate goal when memory need is increasing (up-hysteresis) or
- * decreasing (down-hysteresis). Higher values of hysteresis cause
- * slower increases/decreases. The default values for the various
- * parameters were deemed reasonable by experimentation, may be
- * workload-dependent, and can all be adjusted via sysfs.
- */
-static unsigned int selfballoon_downhysteresis __read_mostly = 8;
-static unsigned int selfballoon_uphysteresis __read_mostly = 1;
-
-/* In HZ, controls frequency of worker invocation. */
-static unsigned int selfballoon_interval __read_mostly = 5;
-
-/*
- * Minimum usable RAM in MB for selfballooning target for balloon.
- * If non-zero, it is added to totalreserve_pages and self-ballooning
- * will not balloon below the sum. If zero, a piecewise linear function
- * is calculated as a minimum and added to totalreserve_pages. Note that
- * setting this value indiscriminately may cause OOMs and crashes.
- */
-static unsigned int selfballoon_min_usable_mb;
-
-/*
- * Amount of RAM in MB to add to the target number of pages.
- * Can be used to reserve some more room for caches and the like.
- */
-static unsigned int selfballoon_reserved_mb;
-
-static void selfballoon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
-
-#ifdef CONFIG_FRONTSWAP
-#include <linux/frontswap.h>
-
-/* Enable/disable with sysfs. */
-static bool frontswap_selfshrinking __read_mostly;
-
-/*
- * The default values for the following parameters were deemed reasonable
- * by experimentation, may be workload-dependent, and can all be
- * adjusted via sysfs.
- */
-
-/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
-static unsigned int frontswap_hysteresis __read_mostly = 20;
-
-/*
- * Number of selfballoon worker invocations to wait before observing that
- * frontswap selfshrinking should commence. Note that selfshrinking does
- * not use a separate worker thread.
- */
-static unsigned int frontswap_inertia __read_mostly = 3;
-
-/* Countdown to next invocation of frontswap_shrink() */
-static unsigned long frontswap_inertia_counter;
-
-/*
- * Invoked by the selfballoon worker thread, uses current number of pages
- * in frontswap (frontswap_curr_pages()), previous status, and control
- * values (hysteresis and inertia) to determine if frontswap should be
- * shrunk and what the new frontswap size should be. Note that
- * frontswap_shrink is essentially a partial swapoff that immediately
- * transfers pages from the "swap device" (frontswap) back into kernel
- * RAM; despite the name, frontswap "shrinking" is very different from
- * the "shrinker" interface used by the kernel MM subsystem to reclaim
- * memory.
- */
-static void frontswap_selfshrink(void)
-{
- static unsigned long cur_frontswap_pages;
- unsigned long last_frontswap_pages;
- unsigned long tgt_frontswap_pages;
-
- last_frontswap_pages = cur_frontswap_pages;
- cur_frontswap_pages = frontswap_curr_pages();
- if (!cur_frontswap_pages ||
- (cur_frontswap_pages > last_frontswap_pages)) {
- frontswap_inertia_counter = frontswap_inertia;
- return;
- }
- if (frontswap_inertia_counter && --frontswap_inertia_counter)
- return;
- if (cur_frontswap_pages <= frontswap_hysteresis)
- tgt_frontswap_pages = 0;
- else
- tgt_frontswap_pages = cur_frontswap_pages -
- (cur_frontswap_pages / frontswap_hysteresis);
- frontswap_shrink(tgt_frontswap_pages);
- frontswap_inertia_counter = frontswap_inertia;
-}
-
-#endif /* CONFIG_FRONTSWAP */
-
-#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
-#define PAGES2MB(pages) ((pages) >> (20 - PAGE_SHIFT))
-
-/*
- * Use current balloon size, the goal (vm_committed_as), and hysteresis
- * parameters to set a new target balloon size
- */
-static void selfballoon_process(struct work_struct *work)
-{
- unsigned long cur_pages, goal_pages, tgt_pages, floor_pages;
- unsigned long useful_pages;
- bool reset_timer = false;
-
- if (xen_selfballooning_enabled) {
- cur_pages = totalram_pages();
- tgt_pages = cur_pages; /* default is no change */
- goal_pages = vm_memory_committed() +
- totalreserve_pages +
- MB2PAGES(selfballoon_reserved_mb);
-#ifdef CONFIG_FRONTSWAP
- /* allow space for frontswap pages to be repatriated */
- if (frontswap_selfshrinking)
- goal_pages += frontswap_curr_pages();
-#endif
- if (cur_pages > goal_pages)
- tgt_pages = cur_pages -
- ((cur_pages - goal_pages) /
- selfballoon_downhysteresis);
- else if (cur_pages < goal_pages)
- tgt_pages = cur_pages +
- ((goal_pages - cur_pages) /
- selfballoon_uphysteresis);
- /* else if cur_pages == goal_pages, no change */
- useful_pages = max_pfn - totalreserve_pages;
- if (selfballoon_min_usable_mb != 0)
- floor_pages = totalreserve_pages +
- MB2PAGES(selfballoon_min_usable_mb);
- /* piecewise linear function ending in ~3% slope */
- else if (useful_pages < MB2PAGES(16))
- floor_pages = max_pfn; /* not worth ballooning */
- else if (useful_pages < MB2PAGES(64))
- floor_pages = totalreserve_pages + MB2PAGES(16) +
- ((useful_pages - MB2PAGES(16)) >> 1);
- else if (useful_pages < MB2PAGES(512))
- floor_pages = totalreserve_pages + MB2PAGES(40) +
- ((useful_pages - MB2PAGES(40)) >> 3);
- else /* useful_pages >= MB2PAGES(512) */
- floor_pages = totalreserve_pages + MB2PAGES(99) +
- ((useful_pages - MB2PAGES(99)) >> 5);
- if (tgt_pages < floor_pages)
- tgt_pages = floor_pages;
- balloon_set_new_target(tgt_pages +
- balloon_stats.current_pages - totalram_pages());
- reset_timer = true;
- }
-#ifdef CONFIG_FRONTSWAP
- if (frontswap_selfshrinking) {
- frontswap_selfshrink();
- reset_timer = true;
- }
-#endif
- if (reset_timer)
- schedule_delayed_work(&selfballoon_worker,
- selfballoon_interval * HZ);
-}
-
-#ifdef CONFIG_SYSFS
-
-#include <linux/capability.h>
-
-#define SELFBALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
- { \
- return sprintf(buf, format, ##args); \
- }
-
-SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
-
-static ssize_t store_selfballooning(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- bool was_enabled = xen_selfballooning_enabled;
- unsigned long tmp;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = kstrtoul(buf, 10, &tmp);
- if (err)
- return err;
- if ((tmp != 0) && (tmp != 1))
- return -EINVAL;
-
- xen_selfballooning_enabled = !!tmp;
- if (!was_enabled && xen_selfballooning_enabled)
- schedule_delayed_work(&selfballoon_worker,
- selfballoon_interval * HZ);
-
- return count;
-}
-
-static DEVICE_ATTR(selfballooning, S_IRUGO | S_IWUSR,
- show_selfballooning, store_selfballooning);
-
-SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
-
-static ssize_t store_selfballoon_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- selfballoon_interval = val;
- return count;
-}
-
-static DEVICE_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
- show_selfballoon_interval, store_selfballoon_interval);
-
-SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
-
-static ssize_t store_selfballoon_downhys(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- selfballoon_downhysteresis = val;
- return count;
-}
-
-static DEVICE_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
- show_selfballoon_downhys, store_selfballoon_downhys);
-
-
-SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
-
-static ssize_t store_selfballoon_uphys(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- selfballoon_uphysteresis = val;
- return count;
-}
-
-static DEVICE_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
- show_selfballoon_uphys, store_selfballoon_uphys);
-
-SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
- selfballoon_min_usable_mb);
-
-static ssize_t store_selfballoon_min_usable_mb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- selfballoon_min_usable_mb = val;
- return count;
-}
-
-static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
- show_selfballoon_min_usable_mb,
- store_selfballoon_min_usable_mb);
-
-SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
- selfballoon_reserved_mb);
-
-static ssize_t store_selfballoon_reserved_mb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- selfballoon_reserved_mb = val;
- return count;
-}
-
-static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
- show_selfballoon_reserved_mb,
- store_selfballoon_reserved_mb);
-
-
-#ifdef CONFIG_FRONTSWAP
-SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
-
-static ssize_t store_frontswap_selfshrinking(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- bool was_enabled = frontswap_selfshrinking;
- unsigned long tmp;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &tmp);
- if (err)
- return err;
- if ((tmp != 0) && (tmp != 1))
- return -EINVAL;
- frontswap_selfshrinking = !!tmp;
- if (!was_enabled && !xen_selfballooning_enabled &&
- frontswap_selfshrinking)
- schedule_delayed_work(&selfballoon_worker,
- selfballoon_interval * HZ);
-
- return count;
-}
-
-static DEVICE_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
- show_frontswap_selfshrinking, store_frontswap_selfshrinking);
-
-SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
-
-static ssize_t store_frontswap_inertia(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- frontswap_inertia = val;
- frontswap_inertia_counter = val;
- return count;
-}
-
-static DEVICE_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
- show_frontswap_inertia, store_frontswap_inertia);
-
-SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
-
-static ssize_t store_frontswap_hysteresis(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long val;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- if (val == 0)
- return -EINVAL;
- frontswap_hysteresis = val;
- return count;
-}
-
-static DEVICE_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
- show_frontswap_hysteresis, store_frontswap_hysteresis);
-
-#endif /* CONFIG_FRONTSWAP */
-
-static struct attribute *selfballoon_attrs[] = {
- &dev_attr_selfballooning.attr,
- &dev_attr_selfballoon_interval.attr,
- &dev_attr_selfballoon_downhysteresis.attr,
- &dev_attr_selfballoon_uphysteresis.attr,
- &dev_attr_selfballoon_min_usable_mb.attr,
- &dev_attr_selfballoon_reserved_mb.attr,
-#ifdef CONFIG_FRONTSWAP
- &dev_attr_frontswap_selfshrinking.attr,
- &dev_attr_frontswap_hysteresis.attr,
- &dev_attr_frontswap_inertia.attr,
-#endif
- NULL
-};
-
-static const struct attribute_group selfballoon_group = {
- .name = "selfballoon",
- .attrs = selfballoon_attrs
-};
-#endif
-
-int register_xen_selfballooning(struct device *dev)
-{
- int error = -1;
-
-#ifdef CONFIG_SYSFS
- error = sysfs_create_group(&dev->kobj, &selfballoon_group);
-#endif
- return error;
-}
-EXPORT_SYMBOL(register_xen_selfballooning);
-
-int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
-{
- bool enable = false;
- unsigned long reserve_pages;
-
- if (!xen_domain())
- return -ENODEV;
-
- if (xen_initial_domain()) {
- pr_info("Xen selfballooning driver disabled for domain0\n");
- return -ENODEV;
- }
-
- xen_selfballooning_enabled = tmem_enabled && use_selfballooning;
- if (xen_selfballooning_enabled) {
- pr_info("Initializing Xen selfballooning driver\n");
- enable = true;
- }
-#ifdef CONFIG_FRONTSWAP
- frontswap_selfshrinking = tmem_enabled && use_frontswap_selfshrink;
- if (frontswap_selfshrinking) {
- pr_info("Initializing frontswap selfshrinking driver\n");
- enable = true;
- }
-#endif
- if (!enable)
- return -ENODEV;
-
- /*
- * Give selfballoon_reserved_mb a default value(10% of total ram pages)
- * to make selfballoon not so aggressive.
- *
- * There are mainly two reasons:
- * 1) The original goal_page didn't consider some pages used by kernel
- * space, like slab pages and memory used by device drivers.
- *
- * 2) The balloon driver may not give back memory to guest OS fast
- * enough when the workload suddenly aquries a lot of physical memory.
- *
- * In both cases, the guest OS will suffer from memory pressure and
- * OOM killer may be triggered.
- * By reserving extra 10% of total ram pages, we can keep the system
- * much more reliably and response faster in some cases.
- */
- if (!selfballoon_reserved_mb) {
- reserve_pages = totalram_pages() / 10;
- selfballoon_reserved_mb = PAGES2MB(reserve_pages);
- }
- schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
-
- return 0;
-}
-EXPORT_SYMBOL(xen_selfballoon_init);
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 20c1448f1ce7..d7d64235010d 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/magic.h>
#include <xen/xen.h>
@@ -43,7 +44,7 @@ static const struct file_operations capabilities_file_ops = {
.llseek = default_llseek,
};
-static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
+static int xenfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr xenfs_files[] = {
[2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
@@ -68,17 +69,25 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
xen_initial_domain() ? xenfs_init_files : xenfs_files);
}
-static struct dentry *xenfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int xenfs_get_tree(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, xenfs_fill_super);
+ return get_tree_single(fc, xenfs_fill_super);
+}
+
+static const struct fs_context_operations xenfs_context_ops = {
+ .get_tree = xenfs_get_tree,
+};
+
+static int xenfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &xenfs_context_ops;
+ return 0;
}
static struct file_system_type xenfs_type = {
.owner = THIS_MODULE,
.name = "xenfs",
- .mount = xenfs_mount,
+ .init_fs_context = xenfs_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("xenfs");