author     Arnd Bergmann <arnd@arndb.de>  2018-03-09 14:13:42 +0100
committer  Arnd Bergmann <arnd@arndb.de>  2018-03-16 10:56:03 +0100
commit     bb9d812643d8a121df7d614a2b9c60193a92deb0 (patch)
tree       419096f57ca0501d8813151a5236387074edb4ea
parent     4ba66a9760722ccbb691b8f7116cad2f791cca7b (diff)
download   linux-bb9d812643d8a121df7d614a2b9c60193a92deb0.tar.gz
           linux-bb9d812643d8a121df7d614a2b9c60193a92deb0.tar.bz2
           linux-bb9d812643d8a121df7d614a2b9c60193a92deb0.zip
arch: remove tile port
The Tile architecture port was added by Chris Metcalf in 2010, and maintained
until early 2018 when he orphaned it due to his departure from Mellanox, and
nobody else stepped up to maintain it. The product line is still around in the
form of the BlueField SoC, but no longer uses the Tile architecture.

There are also still products for sale with Tile-GX SoCs, notably the Mikrotik
CCR router family. The products all use old (linux-3.3) kernels with lots of
patches and won't be upgraded by their manufacturers. There have been efforts
to port both OpenWRT and Debian to these, but both projects have stalled and
are very unlikely to be continued in the future.

Given that we are reasonably sure that nobody is still using the port with an
upstream kernel any more, it seems better to remove it now while the port is
in a good shape than to let it bitrot for a few years first.

Cc: Chris Metcalf <chris.d.metcalf@gmail.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Link: http://www.mellanox.com/page/npu_multicore_overview
Link: https://jenkins.debian.net/view/rebootstrap/job/rebootstrap_tilegx_gcc7/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r-- Documentation/ioctl/ioctl-number.txt | 1
-rw-r--r-- MAINTAINERS | 13
-rw-r--r-- Makefile | 8
-rw-r--r-- arch/tile/Kbuild | 3
-rw-r--r-- arch/tile/Kconfig | 481
-rw-r--r-- arch/tile/Kconfig.debug | 26
-rw-r--r-- arch/tile/Makefile | 77
-rw-r--r-- arch/tile/configs/tilegx_defconfig | 411
-rw-r--r-- arch/tile/configs/tilepro_defconfig | 524
-rw-r--r-- arch/tile/gxio/Kconfig | 34
-rw-r--r-- arch/tile/gxio/Makefile | 11
-rw-r--r-- arch/tile/gxio/dma_queue.c | 176
-rw-r--r-- arch/tile/gxio/iorpc_globals.c | 89
-rw-r--r-- arch/tile/gxio/iorpc_mpipe.c | 593
-rw-r--r-- arch/tile/gxio/iorpc_mpipe_info.c | 102
-rw-r--r-- arch/tile/gxio/iorpc_trio.c | 350
-rw-r--r-- arch/tile/gxio/iorpc_uart.c | 77
-rw-r--r-- arch/tile/gxio/iorpc_usb_host.c | 99
-rw-r--r-- arch/tile/gxio/kiorpc.c | 61
-rw-r--r-- arch/tile/gxio/mpipe.c | 584
-rw-r--r-- arch/tile/gxio/trio.c | 49
-rw-r--r-- arch/tile/gxio/uart.c | 87
-rw-r--r-- arch/tile/gxio/usb_host.c | 91
-rw-r--r-- arch/tile/include/arch/mpipe.h | 371
-rw-r--r-- arch/tile/include/arch/mpipe_constants.h | 42
-rw-r--r-- arch/tile/include/arch/mpipe_def.h | 39
-rw-r--r-- arch/tile/include/arch/mpipe_shm.h | 521
-rw-r--r-- arch/tile/include/arch/mpipe_shm_def.h | 23
-rw-r--r-- arch/tile/include/arch/spr_def.h | 109
-rw-r--r-- arch/tile/include/arch/trio.h | 111
-rw-r--r-- arch/tile/include/arch/trio_constants.h | 36
-rw-r--r-- arch/tile/include/arch/trio_def.h | 41
-rw-r--r-- arch/tile/include/arch/trio_pcie_intfc.h | 229
-rw-r--r-- arch/tile/include/arch/trio_pcie_intfc_def.h | 32
-rw-r--r-- arch/tile/include/arch/trio_pcie_rc.h | 156
-rw-r--r-- arch/tile/include/arch/trio_pcie_rc_def.h | 24
-rw-r--r-- arch/tile/include/arch/trio_shm.h | 125
-rw-r--r-- arch/tile/include/arch/trio_shm_def.h | 19
-rw-r--r-- arch/tile/include/arch/uart.h | 300
-rw-r--r-- arch/tile/include/arch/uart_def.h | 120
-rw-r--r-- arch/tile/include/arch/usb_host.h | 26
-rw-r--r-- arch/tile/include/arch/usb_host_def.h | 19
-rw-r--r-- arch/tile/include/asm/Kbuild | 18
-rw-r--r-- arch/tile/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/tile/include/asm/atomic.h | 210
-rw-r--r-- arch/tile/include/asm/atomic_32.h | 297
-rw-r--r-- arch/tile/include/asm/atomic_64.h | 200
-rw-r--r-- arch/tile/include/asm/backtrace.h | 162
-rw-r--r-- arch/tile/include/asm/barrier.h | 100
-rw-r--r-- arch/tile/include/asm/bitops.h | 94
-rw-r--r-- arch/tile/include/asm/bitops_32.h | 126
-rw-r--r-- arch/tile/include/asm/bitops_64.h | 95
-rw-r--r-- arch/tile/include/asm/cache.h | 64
-rw-r--r-- arch/tile/include/asm/cacheflush.h | 160
-rw-r--r-- arch/tile/include/asm/checksum.h | 42
-rw-r--r-- arch/tile/include/asm/cmpxchg.h | 132
-rw-r--r-- arch/tile/include/asm/compat.h | 233
-rw-r--r-- arch/tile/include/asm/current.h | 31
-rw-r--r-- arch/tile/include/asm/delay.h | 34
-rw-r--r-- arch/tile/include/asm/device.h | 33
-rw-r--r-- arch/tile/include/asm/div64.h | 17
-rw-r--r-- arch/tile/include/asm/dma-mapping.h | 50
-rw-r--r-- arch/tile/include/asm/dma.h | 25
-rw-r--r-- arch/tile/include/asm/elf.h | 182
-rw-r--r-- arch/tile/include/asm/fixmap.h | 87
-rw-r--r-- arch/tile/include/asm/ftrace.h | 42
-rw-r--r-- arch/tile/include/asm/futex.h | 166
-rw-r--r-- arch/tile/include/asm/hardirq.h | 45
-rw-r--r-- arch/tile/include/asm/hardwall.h | 30
-rw-r--r-- arch/tile/include/asm/highmem.h | 71
-rw-r--r-- arch/tile/include/asm/homecache.h | 123
-rw-r--r-- arch/tile/include/asm/hugetlb.h | 122
-rw-r--r-- arch/tile/include/asm/hv_driver.h | 60
-rw-r--r-- arch/tile/include/asm/ide.h | 25
-rw-r--r-- arch/tile/include/asm/insn.h | 59
-rw-r--r-- arch/tile/include/asm/io.h | 509
-rw-r--r-- arch/tile/include/asm/irq.h | 87
-rw-r--r-- arch/tile/include/asm/irq_work.h | 15
-rw-r--r-- arch/tile/include/asm/irqflags.h | 311
-rw-r--r-- arch/tile/include/asm/jump_label.h | 58
-rw-r--r-- arch/tile/include/asm/kdebug.h | 28
-rw-r--r-- arch/tile/include/asm/kexec.h | 65
-rw-r--r-- arch/tile/include/asm/kgdb.h | 71
-rw-r--r-- arch/tile/include/asm/kmap_types.h | 28
-rw-r--r-- arch/tile/include/asm/kprobes.h | 83
-rw-r--r-- arch/tile/include/asm/linkage.h | 51
-rw-r--r-- arch/tile/include/asm/mmu.h | 32
-rw-r--r-- arch/tile/include/asm/mmu_context.h | 137
-rw-r--r-- arch/tile/include/asm/mmzone.h | 70
-rw-r--r-- arch/tile/include/asm/module.h | 40
-rw-r--r-- arch/tile/include/asm/page.h | 345
-rw-r--r-- arch/tile/include/asm/pci.h | 229
-rw-r--r-- arch/tile/include/asm/percpu.h | 52
-rw-r--r-- arch/tile/include/asm/perf_event.h | 22
-rw-r--r-- arch/tile/include/asm/pgalloc.h | 164
-rw-r--r-- arch/tile/include/asm/pgtable.h | 518
-rw-r--r-- arch/tile/include/asm/pgtable_32.h | 122
-rw-r--r-- arch/tile/include/asm/pgtable_64.h | 172
-rw-r--r-- arch/tile/include/asm/pmc.h | 64
-rw-r--r-- arch/tile/include/asm/processor.h | 368
-rw-r--r-- arch/tile/include/asm/ptrace.h | 97
-rw-r--r-- arch/tile/include/asm/sections.h | 44
-rw-r--r-- arch/tile/include/asm/setup.h | 57
-rw-r--r-- arch/tile/include/asm/sigframe.h | 33
-rw-r--r-- arch/tile/include/asm/signal.h | 29
-rw-r--r-- arch/tile/include/asm/smp.h | 139
-rw-r--r-- arch/tile/include/asm/spinlock.h | 24
-rw-r--r-- arch/tile/include/asm/spinlock_32.h | 109
-rw-r--r-- arch/tile/include/asm/spinlock_64.h | 138
-rw-r--r-- arch/tile/include/asm/spinlock_types.h | 60
-rw-r--r-- arch/tile/include/asm/stack.h | 73
-rw-r--r-- arch/tile/include/asm/string.h | 34
-rw-r--r-- arch/tile/include/asm/switch_to.h | 77
-rw-r--r-- arch/tile/include/asm/syscall.h | 111
-rw-r--r-- arch/tile/include/asm/syscalls.h | 70
-rw-r--r-- arch/tile/include/asm/thread_info.h | 167
-rw-r--r-- arch/tile/include/asm/tile-desc.h | 19
-rw-r--r-- arch/tile/include/asm/tile-desc_32.h | 553
-rw-r--r-- arch/tile/include/asm/tile-desc_64.h | 483
-rw-r--r-- arch/tile/include/asm/timex.h | 52
-rw-r--r-- arch/tile/include/asm/tlb.h | 25
-rw-r--r-- arch/tile/include/asm/tlbflush.h | 123
-rw-r--r-- arch/tile/include/asm/topology.h | 52
-rw-r--r-- arch/tile/include/asm/traps.h | 93
-rw-r--r-- arch/tile/include/asm/uaccess.h | 411
-rw-r--r-- arch/tile/include/asm/unaligned.h | 43
-rw-r--r-- arch/tile/include/asm/unistd.h | 20
-rw-r--r-- arch/tile/include/asm/user.h | 21
-rw-r--r-- arch/tile/include/asm/vdso.h | 55
-rw-r--r-- arch/tile/include/asm/vga.h | 39
-rw-r--r-- arch/tile/include/asm/word-at-a-time.h | 43
-rw-r--r-- arch/tile/include/gxio/common.h | 40
-rw-r--r-- arch/tile/include/gxio/dma_queue.h | 161
-rw-r--r-- arch/tile/include/gxio/iorpc_globals.h | 38
-rw-r--r-- arch/tile/include/gxio/iorpc_mpipe.h | 144
-rw-r--r-- arch/tile/include/gxio/iorpc_mpipe_info.h | 50
-rw-r--r-- arch/tile/include/gxio/iorpc_trio.h | 104
-rw-r--r-- arch/tile/include/gxio/iorpc_uart.h | 40
-rw-r--r-- arch/tile/include/gxio/iorpc_usb_host.h | 46
-rw-r--r-- arch/tile/include/gxio/kiorpc.h | 29
-rw-r--r-- arch/tile/include/gxio/mpipe.h | 1871
-rw-r--r-- arch/tile/include/gxio/trio.h | 298
-rw-r--r-- arch/tile/include/gxio/uart.h | 105
-rw-r--r-- arch/tile/include/gxio/usb_host.h | 87
-rw-r--r-- arch/tile/include/hv/drv_mpipe_intf.h | 605
-rw-r--r-- arch/tile/include/hv/drv_mshim_intf.h | 50
-rw-r--r-- arch/tile/include/hv/drv_pcie_rc_intf.h | 38
-rw-r--r-- arch/tile/include/hv/drv_srom_intf.h | 41
-rw-r--r-- arch/tile/include/hv/drv_trio_intf.h | 199
-rw-r--r-- arch/tile/include/hv/drv_uart_intf.h | 33
-rw-r--r-- arch/tile/include/hv/drv_usb_host_intf.h | 39
-rw-r--r-- arch/tile/include/hv/drv_xgbe_impl.h | 300
-rw-r--r-- arch/tile/include/hv/drv_xgbe_intf.h | 615
-rw-r--r-- arch/tile/include/hv/hypervisor.h | 2656
-rw-r--r-- arch/tile/include/hv/iorpc.h | 714
-rw-r--r-- arch/tile/include/hv/netio_errors.h | 122
-rw-r--r-- arch/tile/include/hv/netio_intf.h | 2975
-rw-r--r-- arch/tile/include/hv/syscall_public.h | 42
-rw-r--r-- arch/tile/include/uapi/arch/abi.h | 101
-rw-r--r-- arch/tile/include/uapi/arch/chip.h | 22
-rw-r--r-- arch/tile/include/uapi/arch/chip_tilegx.h | 259
-rw-r--r-- arch/tile/include/uapi/arch/chip_tilepro.h | 259
-rw-r--r-- arch/tile/include/uapi/arch/icache.h | 94
-rw-r--r-- arch/tile/include/uapi/arch/interrupts.h | 20
-rw-r--r-- arch/tile/include/uapi/arch/interrupts_32.h | 310
-rw-r--r-- arch/tile/include/uapi/arch/interrupts_64.h | 279
-rw-r--r-- arch/tile/include/uapi/arch/intreg.h | 71
-rw-r--r-- arch/tile/include/uapi/arch/opcode.h | 22
-rw-r--r-- arch/tile/include/uapi/arch/opcode_tilegx.h | 1407
-rw-r--r-- arch/tile/include/uapi/arch/opcode_tilepro.h | 1473
-rw-r--r-- arch/tile/include/uapi/arch/sim.h | 644
-rw-r--r-- arch/tile/include/uapi/arch/sim_def.h | 506
-rw-r--r-- arch/tile/include/uapi/arch/spr_def.h | 27
-rw-r--r-- arch/tile/include/uapi/arch/spr_def_32.h | 256
-rw-r--r-- arch/tile/include/uapi/arch/spr_def_64.h | 217
-rw-r--r-- arch/tile/include/uapi/asm/Kbuild | 24
-rw-r--r-- arch/tile/include/uapi/asm/auxvec.h | 24
-rw-r--r-- arch/tile/include/uapi/asm/bitsperlong.h | 27
-rw-r--r-- arch/tile/include/uapi/asm/byteorder.h | 20
-rw-r--r-- arch/tile/include/uapi/asm/cachectl.h | 43
-rw-r--r-- arch/tile/include/uapi/asm/hardwall.h | 52
-rw-r--r-- arch/tile/include/uapi/asm/kvm_para.h | 2
-rw-r--r-- arch/tile/include/uapi/asm/mman.h | 43
-rw-r--r-- arch/tile/include/uapi/asm/ptrace.h | 99
-rw-r--r-- arch/tile/include/uapi/asm/setup.h | 22
-rw-r--r-- arch/tile/include/uapi/asm/sigcontext.h | 44
-rw-r--r-- arch/tile/include/uapi/asm/siginfo.h | 27
-rw-r--r-- arch/tile/include/uapi/asm/signal.h | 28
-rw-r--r-- arch/tile/include/uapi/asm/stat.h | 5
-rw-r--r-- arch/tile/include/uapi/asm/swab.h | 24
-rw-r--r-- arch/tile/include/uapi/asm/unistd.h | 38
-rw-r--r-- arch/tile/kernel/Makefile | 38
-rw-r--r-- arch/tile/kernel/asm-offsets.c | 84
-rw-r--r-- arch/tile/kernel/backtrace.c | 683
-rw-r--r-- arch/tile/kernel/compat.c | 117
-rw-r--r-- arch/tile/kernel/compat_signal.c | 172
-rw-r--r-- arch/tile/kernel/early_printk.c | 75
-rw-r--r-- arch/tile/kernel/entry.S | 64
-rw-r--r-- arch/tile/kernel/ftrace.c | 239
-rw-r--r-- arch/tile/kernel/hardwall.c | 1096
-rw-r--r-- arch/tile/kernel/head_32.S | 183
-rw-r--r-- arch/tile/kernel/head_64.S | 279
-rw-r--r-- arch/tile/kernel/hvglue.S | 76
-rw-r--r-- arch/tile/kernel/hvglue_trace.c | 270
-rw-r--r-- arch/tile/kernel/intvec_32.S | 1906
-rw-r--r-- arch/tile/kernel/intvec_64.S | 1564
-rw-r--r-- arch/tile/kernel/irq.c | 280
-rw-r--r-- arch/tile/kernel/jump_label.c | 62
-rw-r--r-- arch/tile/kernel/kgdb.c | 497
-rw-r--r-- arch/tile/kernel/kprobes.c | 527
-rw-r--r-- arch/tile/kernel/machine_kexec.c | 298
-rw-r--r-- arch/tile/kernel/mcount_64.S | 211
-rw-r--r-- arch/tile/kernel/messaging.c | 115
-rw-r--r-- arch/tile/kernel/module.c | 231
-rw-r--r-- arch/tile/kernel/pci-dma.c | 607
-rw-r--r-- arch/tile/kernel/pci.c | 592
-rw-r--r-- arch/tile/kernel/pci_gx.c | 1592
-rw-r--r-- arch/tile/kernel/perf_event.c | 1005
-rw-r--r-- arch/tile/kernel/pmc.c | 118
-rw-r--r-- arch/tile/kernel/proc.c | 160
-rw-r--r-- arch/tile/kernel/process.c | 659
-rw-r--r-- arch/tile/kernel/ptrace.c | 316
-rw-r--r-- arch/tile/kernel/reboot.c | 51
-rw-r--r-- arch/tile/kernel/regs_32.S | 145
-rw-r--r-- arch/tile/kernel/regs_64.S | 145
-rw-r--r-- arch/tile/kernel/relocate_kernel_32.S | 269
-rw-r--r-- arch/tile/kernel/relocate_kernel_64.S | 263
-rw-r--r-- arch/tile/kernel/setup.c | 1743
-rw-r--r-- arch/tile/kernel/signal.c | 411
-rw-r--r-- arch/tile/kernel/single_step.c | 786
-rw-r--r-- arch/tile/kernel/smp.c | 287
-rw-r--r-- arch/tile/kernel/smpboot.c | 269
-rw-r--r-- arch/tile/kernel/stack.c | 539
-rw-r--r-- arch/tile/kernel/sys.c | 130
-rw-r--r-- arch/tile/kernel/sysfs.c | 266
-rw-r--r-- arch/tile/kernel/tile-desc_32.c | 2605
-rw-r--r-- arch/tile/kernel/tile-desc_64.c | 2218
-rw-r--r-- arch/tile/kernel/time.c | 306
-rw-r--r-- arch/tile/kernel/tlb.c | 104
-rw-r--r-- arch/tile/kernel/traps.c | 421
-rw-r--r-- arch/tile/kernel/unaligned.c | 1603
-rw-r--r-- arch/tile/kernel/usb.c | 71
-rw-r--r-- arch/tile/kernel/vdso.c | 197
-rw-r--r-- arch/tile/kernel/vdso/Makefile | 117
-rw-r--r-- arch/tile/kernel/vdso/vdso.S | 28
-rw-r--r-- arch/tile/kernel/vdso/vdso.lds.S | 89
-rw-r--r-- arch/tile/kernel/vdso/vdso32.S | 28
-rw-r--r-- arch/tile/kernel/vdso/vgettimeofday.c | 198
-rw-r--r-- arch/tile/kernel/vdso/vrt_sigreturn.S | 30
-rw-r--r-- arch/tile/kernel/vmlinux.lds.S | 105
-rw-r--r-- arch/tile/kvm/Kconfig | 39
-rw-r--r-- arch/tile/lib/Makefile | 19
-rw-r--r-- arch/tile/lib/atomic_32.c | 206
-rw-r--r-- arch/tile/lib/atomic_asm_32.S | 205
-rw-r--r-- arch/tile/lib/cacheflush.c | 167
-rw-r--r-- arch/tile/lib/checksum.c | 89
-rw-r--r-- arch/tile/lib/cpumask.c | 54
-rw-r--r-- arch/tile/lib/delay.c | 45
-rw-r--r-- arch/tile/lib/exports.c | 94
-rw-r--r-- arch/tile/lib/memchr_32.c | 71
-rw-r--r-- arch/tile/lib/memchr_64.c | 69
-rw-r--r-- arch/tile/lib/memcpy_32.S | 544
-rw-r--r-- arch/tile/lib/memcpy_64.c | 367
-rw-r--r-- arch/tile/lib/memcpy_user_64.c | 85
-rw-r--r-- arch/tile/lib/memmove.c | 63
-rw-r--r-- arch/tile/lib/memset_32.c | 143
-rw-r--r-- arch/tile/lib/memset_64.c | 142
-rw-r--r-- arch/tile/lib/spinlock_32.c | 251
-rw-r--r-- arch/tile/lib/spinlock_64.c | 97
-rw-r--r-- arch/tile/lib/spinlock_common.h | 64
-rw-r--r-- arch/tile/lib/strchr_32.c | 64
-rw-r--r-- arch/tile/lib/strchr_64.c | 62
-rw-r--r-- arch/tile/lib/string-endian.h | 44
-rw-r--r-- arch/tile/lib/strlen_32.c | 36
-rw-r--r-- arch/tile/lib/strlen_64.c | 35
-rw-r--r-- arch/tile/lib/strnlen_32.c | 47
-rw-r--r-- arch/tile/lib/strnlen_64.c | 48
-rw-r--r-- arch/tile/lib/uaccess.c | 24
-rw-r--r-- arch/tile/lib/usercopy_32.S | 89
-rw-r--r-- arch/tile/lib/usercopy_64.S | 89
-rw-r--r-- arch/tile/mm/Makefile | 9
-rw-r--r-- arch/tile/mm/elf.c | 165
-rw-r--r-- arch/tile/mm/extable.c | 30
-rw-r--r-- arch/tile/mm/fault.c | 924
-rw-r--r-- arch/tile/mm/highmem.c | 277
-rw-r--r-- arch/tile/mm/homecache.c | 428
-rw-r--r-- arch/tile/mm/hugetlbpage.c | 348
-rw-r--r-- arch/tile/mm/init.c | 956
-rw-r--r-- arch/tile/mm/migrate.h | 56
-rw-r--r-- arch/tile/mm/migrate_32.S | 192
-rw-r--r-- arch/tile/mm/migrate_64.S | 167
-rw-r--r-- arch/tile/mm/mmap.c | 93
-rw-r--r-- arch/tile/mm/pgtable.c | 550
-rw-r--r-- drivers/pci/quirks.c | 19
-rw-r--r-- samples/kprobes/kprobe_example.c | 8
-rw-r--r-- tools/arch/tile/include/asm/barrier.h | 16
-rw-r--r-- tools/arch/tile/include/uapi/asm/bitsperlong.h | 27
-rw-r--r-- tools/arch/tile/include/uapi/asm/mman.h | 16
-rw-r--r-- tools/scripts/Makefile.arch | 11
-rwxr-xr-x tools/testing/ktest/ktest.pl | 2
300 files changed, 1 insertion(+), 69477 deletions(-)
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 6501389d55b9..84bb74dcae12 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -305,7 +305,6 @@ Code Seq#(hex) Include File Comments
0xA0 all linux/sdp/sdp.h Industrial Device Project
<mailto:kenji@bitgate.com>
0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver
-0xA2 00-0F arch/tile/include/asm/hardwall.h
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
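
The removed line above frees the 0xA2 ioctl magic-number range that tile's
hardwall driver had reserved. As a reminder of what such a reservation means
in practice, here is a minimal hedged sketch of how a driver defines commands
inside a reserved range using the kernel's _IO()/_IOW() macros; the EXAMPLE_*
names are hypothetical, not the contents of the removed hardwall.h.

    /*
     * Hypothetical sketch: defining ioctl commands inside a reserved
     * magic range such as 0xA2 00-0F. The EXAMPLE_* names are
     * illustrative only, not the removed arch/tile hardwall interface.
     */
    #include <linux/ioctl.h>

    #define EXAMPLE_IOCTL_BASE      0xa2    /* magic listed in ioctl-number.txt */

    /* Command 0x01 in the range, taking no argument. */
    #define EXAMPLE_IOCTL_START     _IO(EXAMPLE_IOCTL_BASE, 0x01)

    /* Command 0x02, copying an int from user space into the kernel. */
    #define EXAMPLE_IOCTL_SET_MASK  _IOW(EXAMPLE_IOCTL_BASE, 0x02, int)

Keeping the range listed in ioctl-number.txt is what prevented two drivers
from handing out clashing command numbers; deleting the driver frees 0xA2
00-0F for future reuse.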
diff --git a/MAINTAINERS b/MAINTAINERS
index 9e0c097824f5..ac6083ae4f94 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13840,19 +13840,6 @@ S: Orphan
F: drivers/net/wireless/ti/
F: include/linux/wl12xx.h
-TILE ARCHITECTURE
-W: http://www.mellanox.com/repository/solutions/tile-scm/
-S: Orphan
-F: arch/tile/
-F: drivers/char/tile-srom.c
-F: drivers/edac/tile_edac.c
-F: drivers/net/ethernet/tile/
-F: drivers/rtc/rtc-tile.c
-F: drivers/tty/hvc/hvc_tile.c
-F: drivers/tty/serial/tilegx.c
-F: drivers/usb/host/*-tilegx.c
-F: include/linux/usb/tilegx.h
-
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
M: John Stultz <john.stultz@linaro.org>
M: Thomas Gleixner <tglx@linutronix.de>
diff --git a/Makefile b/Makefile
index c4322dea3ca2..4114da991ae3 100644
--- a/Makefile
+++ b/Makefile
@@ -339,14 +339,6 @@ ifeq ($(ARCH),sh64)
SRCARCH := sh
endif
-# Additional ARCH settings for tile
-ifeq ($(ARCH),tilepro)
- SRCARCH := tile
-endif
-ifeq ($(ARCH),tilegx)
- SRCARCH := tile
-endif
-
KCONFIG_CONFIG ?= .config
export KCONFIG_CONFIG
diff --git a/arch/tile/Kbuild b/arch/tile/Kbuild
deleted file mode 100644
index a9b922716092..000000000000
--- a/arch/tile/Kbuild
+++ /dev/null
@@ -1,3 +0,0 @@
-
-obj-y += kernel/
-obj-y += mm/
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
deleted file mode 100644
index ef9d403cbbe4..000000000000
--- a/arch/tile/Kconfig
+++ /dev/null
@@ -1,481 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-config TILE
- def_bool y
- select ARCH_HAS_DEVMEM_IS_ALLOWED
- select ARCH_HAVE_NMI_SAFE_CMPXCHG
- select ARCH_WANT_FRAME_POINTERS
- select CC_OPTIMIZE_FOR_SIZE
- select EDAC_SUPPORT
- select GENERIC_CLOCKEVENTS
- select GENERIC_FIND_FIRST_BIT
- select GENERIC_IRQ_PROBE
- select GENERIC_IRQ_SHOW
- select GENERIC_PENDING_IRQ if SMP
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
- select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_TRACEHOOK
- select HAVE_CONTEXT_TRACKING
- select HAVE_DEBUG_BUGVERBOSE
- select HAVE_DEBUG_KMEMLEAK
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_DMA_API_DEBUG
- select HAVE_EXIT_THREAD
- select HAVE_KVM if !TILEGX
- select HAVE_NMI if USE_PMC
- select HAVE_PERF_EVENTS
- select HAVE_SYSCALL_TRACEPOINTS
- select MODULES_USE_ELF_RELA
- select SYSCTL_EXCEPTION_TRACE
- select SYS_HYPERVISOR
- select USER_STACKTRACE_SUPPORT
- select USE_PMC if PERF_EVENTS
- select VIRT_TO_BUS
-
-config MMU
- def_bool y
-
-config GENERIC_CSUM
- def_bool y
-
-config HAVE_ARCH_ALLOC_REMAP
- def_bool y
-
-config HAVE_SETUP_PER_CPU_AREA
- def_bool y
-
-config NEED_PER_CPU_PAGE_FIRST_CHUNK
- def_bool y
-
-config SYS_SUPPORTS_HUGETLBFS
- def_bool y
-
-# Support for additional huge page sizes besides HPAGE_SIZE.
-# The software support is currently only present in the TILE-Gx
-# hypervisor. TILEPro in any case does not support page sizes
-# larger than the default HPAGE_SIZE.
-config HUGETLB_SUPER_PAGES
- depends on HUGETLB_PAGE && TILEGX
- def_bool y
-
-config GENERIC_TIME_VSYSCALL
- def_bool y
-
-# Enable PMC if PERF_EVENTS, OPROFILE, or WATCHPOINTS are enabled.
-config USE_PMC
- bool
-
-# FIXME: tilegx can implement a more efficient rwsem.
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-# We only support gcc 4.4 and above, so this should work.
-config ARCH_SUPPORTS_OPTIMIZED_INLINING
- def_bool y
-
-config ARCH_PHYS_ADDR_T_64BIT
- def_bool y
-
-config ARCH_DMA_ADDR_T_64BIT
- def_bool y
-
-config NEED_DMA_MAP_STATE
- def_bool y
-
-config ARCH_HAS_DMA_SET_COHERENT_MASK
- bool
-
-config LOCKDEP_SUPPORT
- def_bool y
-
-config STACKTRACE_SUPPORT
- def_bool y
- select STACKTRACE
-
-# We use discontigmem for now; at some point we may want to switch
-# to sparsemem (Tilera bug 7996).
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
-
-config ARCH_DISCONTIGMEM_DEFAULT
- def_bool y
-
-config TRACE_IRQFLAGS_SUPPORT
- def_bool y
-
-# SMP is required for Tilera Linux.
-config SMP
- def_bool y
-
-config HVC_TILE
- depends on TTY
- select HVC_DRIVER
- select HVC_IRQ if TILEGX
- def_bool y
-
-# Building with ARCH=tilegx (or ARCH=tile) implies using the
-# 64-bit TILE-Gx toolchain, so force CONFIG_TILEGX on.
-config TILEGX
- def_bool ARCH != "tilepro"
- select ARCH_SUPPORTS_ATOMIC_RMW
- select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KGDB
- select HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
- select HAVE_KPROBES
- select HAVE_KRETPROBES
- select SPARSE_IRQ
-
-config TILEPRO
- def_bool !TILEGX
-
-config 64BIT
- def_bool TILEGX
-
-config ARCH_DEFCONFIG
- string
- default "arch/tile/configs/tilepro_defconfig" if !TILEGX
- default "arch/tile/configs/tilegx_defconfig" if TILEGX
-
-config PGTABLE_LEVELS
- int
- default 3 if 64BIT
- default 2
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-menu "Tilera-specific configuration"
-
-config NR_CPUS
- int "Maximum number of tiles (2-255)"
- range 2 255
- depends on SMP
- default "64"
- ---help---
- Building with 64 is the recommended value, but a slightly
- smaller kernel memory footprint results from using a smaller
- value on chips with fewer tiles.
-
-choice
- prompt "Kernel page size"
- default PAGE_SIZE_64KB
- help
- This lets you select the page size of the kernel. For best
- performance on memory-intensive applications, a page size of 64KB
- is recommended. For workloads involving many small files, many
- connections, etc., it may be better to select 16KB, which uses
- memory more efficiently at some cost in TLB performance.
-
- Note that for TILEPro, you must also rebuild the hypervisor
- with a matching page size.
-
-config PAGE_SIZE_4KB
- bool "4KB" if TILEPRO
-
-config PAGE_SIZE_16KB
- bool "16KB"
-
-config PAGE_SIZE_64KB
- bool "64KB"
-
-endchoice
-
-source "kernel/Kconfig.hz"
-
-config KEXEC
- bool "kexec system call"
- select KEXEC_CORE
- ---help---
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
- but it is independent of the system firmware. It is used
- to implement the "mboot" Tilera booter.
-
- The name comes from the similarity to the exec system call.
-
-config COMPAT
- bool "Support 32-bit TILE-Gx binaries in addition to 64-bit"
- depends on TILEGX
- select COMPAT_BINFMT_ELF
- default y
- ---help---
- If enabled, the kernel will support running TILE-Gx binaries
- that were built with the -m32 option.
-
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl, it cannot be disabled and the task is only
- allowed to execute a few safe syscalls defined by each seccomp
- mode.
-
- If unsure, say N.
-
-config SYSVIPC_COMPAT
- def_bool y
- depends on COMPAT && SYSVIPC
-
-# We do not currently support disabling HIGHMEM on tilepro.
-config HIGHMEM
- bool # "Support for more than 512 MB of RAM"
- default !TILEGX
- ---help---
- Linux can use the full amount of RAM in the system by
- default. However, the address space of TILE processors is
- only 4 Gigabytes large. That means that, if you have a large
- amount of physical memory, not all of it can be "permanently
- mapped" by the kernel. The physical memory that's not
- permanently mapped is called "high memory".
-
- If you are compiling a kernel which will never run on a
- machine with more than 512 MB total physical RAM, answer
- "false" here. This will result in the kernel mapping all of
- physical memory into the top 1 GB of virtual memory space.
-
- If unsure, say "true".
-
-config ZONE_DMA32
- def_bool y
-
-config IOMMU_HELPER
- bool
-
-config NEED_SG_DMA_LENGTH
- bool
-
-config SWIOTLB
- bool
- default TILEGX
- select DMA_DIRECT_OPS
- select IOMMU_HELPER
- select NEED_SG_DMA_LENGTH
- select ARCH_HAS_DMA_SET_COHERENT_MASK
-
-# We do not currently support disabling NUMA.
-config NUMA
- bool # "NUMA Memory Allocation and Scheduler Support"
- depends on SMP && DISCONTIGMEM
- default y
- ---help---
- NUMA memory allocation is required for TILE processors
- unless booting with memory striping enabled in the
- hypervisor, or with only a single memory controller.
- It is recommended that this option always be enabled.
-
-config NODES_SHIFT
- int "Log base 2 of the max number of memory controllers"
- default 2
- depends on NEED_MULTIPLE_NODES
- ---help---
- By default, 2, i.e. 2^2 == 4 DDR2 controllers.
- In a system with more controllers, this value should be raised.
-
-choice
- depends on !TILEGX
- prompt "Memory split" if EXPERT
- default VMSPLIT_3G
- ---help---
- Select the desired split between kernel and user memory.
-
- If the address range available to the kernel is less than the
- physical memory installed, the remaining memory will be available
- as "high memory". Accessing high memory is a little more costly
- than low memory, as it needs to be mapped into the kernel first.
- Note that increasing the kernel address space limits the range
- available to user programs, making the address space there
- tighter. Selecting anything other than the default 3G/1G split
- will also likely make your kernel incompatible with binary-only
- kernel modules.
-
- If you are not absolutely sure what you are doing, leave this
- option alone!
-
- config VMSPLIT_3_75G
- bool "3.75G/0.25G user/kernel split (no kernel networking)"
- config VMSPLIT_3_5G
- bool "3.5G/0.5G user/kernel split"
- config VMSPLIT_3G
- bool "3G/1G user/kernel split"
- config VMSPLIT_2_75G
- bool "2.75G/1.25G user/kernel split (for full 1G low memory)"
- config VMSPLIT_2_5G
- bool "2.5G/1.5G user/kernel split"
- config VMSPLIT_2_25G
- bool "2.25G/1.75G user/kernel split"
- config VMSPLIT_2G
- bool "2G/2G user/kernel split"
- config VMSPLIT_1G
- bool "1G/3G user/kernel split"
-endchoice
-
-config PAGE_OFFSET
- hex
- depends on !64BIT
- default 0xF0000000 if VMSPLIT_3_75G
- default 0xE0000000 if VMSPLIT_3_5G
- default 0xB0000000 if VMSPLIT_2_75G
- default 0xA0000000 if VMSPLIT_2_5G
- default 0x90000000 if VMSPLIT_2_25G
- default 0x80000000 if VMSPLIT_2G
- default 0x40000000 if VMSPLIT_1G
- default 0xC0000000
-
-source "mm/Kconfig"
-
-source "kernel/Kconfig.preempt"
-
-config CMDLINE_BOOL
- bool "Built-in kernel command line"
- default n
- ---help---
- Allow for specifying boot arguments to the kernel at
- build time. On some systems (e.g. embedded ones), it is
- necessary or convenient to provide some or all of the
- kernel boot arguments with the kernel itself (that is,
- to not rely on the boot loader to provide them.)
-
- To compile command line arguments into the kernel,
- set this option to 'Y', then fill in
- the boot arguments in CONFIG_CMDLINE.
-
- Systems with fully functional boot loaders (e.g. mboot, or
- if booting over PCI) should leave this option set to 'N'.
-
-config CMDLINE
- string "Built-in kernel command string"
- depends on CMDLINE_BOOL
- default ""
- ---help---
- Enter arguments here that should be compiled into the kernel
- image and used at boot time. If the boot loader provides a
- command line at boot time, it is appended to this string to
- form the full kernel command line, when the system boots.
-
- However, you can use the CONFIG_CMDLINE_OVERRIDE option to
- change this behavior.
-
- In most cases, the command line (whether built-in or provided
- by the boot loader) should specify the device for the root
- file system.
-
-config CMDLINE_OVERRIDE
- bool "Built-in command line overrides boot loader arguments"
- default n
- depends on CMDLINE_BOOL
- ---help---
- Set this option to 'Y' to have the kernel ignore the boot loader
- command line, and use ONLY the built-in command line.
-
- This is used to work around broken boot loaders. This should
- be set to 'N' under normal conditions.
-
-config VMALLOC_RESERVE
- hex
- default 0x2000000
-
-config HARDWALL
- bool "Hardwall support to allow access to user dynamic network"
- default y
-
-config KERNEL_PL
- int "Processor protection level for kernel"
- range 1 2
- default 2 if TILEGX
- default 1 if !TILEGX
- ---help---
- Since MDE 4.2, the Tilera hypervisor runs the kernel
- at PL2 by default. If running under an older hypervisor,
- or as a KVM guest, you must run at PL1. (The current
- hypervisor may also be recompiled with "make HV_PL=2" to
- allow it to run a kernel at PL1, but clients running at PL1
- are not expected to be supported indefinitely.)
-
- If you're not sure, don't change the default.
-
-source "arch/tile/gxio/Kconfig"
-
-endmenu # Tilera-specific configuration
-
-menu "Bus options"
-
-config PCI
- bool "PCI support"
- default y
- select PCI_DOMAINS
- select GENERIC_PCI_IOMAP
- select TILE_GXIO_TRIO if TILEGX
- select PCI_MSI if TILEGX
- ---help---
- Enable PCI root complex support, so PCIe endpoint devices can
- be attached to the Tile chip. Many, but not all, PCI devices
- are supported under Tilera's root complex driver.
-
-config PCI_DOMAINS
- bool
-
-config NO_IOMEM
- def_bool !PCI
-
-config NO_IOPORT_MAP
- def_bool !PCI
-
-config TILE_PCI_IO
- bool "PCI I/O space support"
- default n
- depends on PCI
- depends on TILEGX
- ---help---
- Enable PCI I/O space support on TILEGx. Since the PCI I/O space
- is used by few modern PCIe endpoint devices, its support is disabled
- by default to save the TRIO PIO Region resource for other purposes.
-
-source "drivers/pci/Kconfig"
-
-config TILE_USB
- tristate "Tilera USB host adapter support"
- default y
- depends on USB
- depends on TILEGX
- select TILE_GXIO_USB_HOST
- ---help---
- Provides USB host adapter support for the built-in EHCI and OHCI
- interfaces on TILE-Gx chips.
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/tile/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-source "arch/tile/kvm/Kconfig"
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
deleted file mode 100644
index 9f665d1a805f..000000000000
--- a/arch/tile/Kconfig.debug
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-config EARLY_PRINTK
- bool "Early printk" if EXPERT && DEBUG_KERNEL
- default y
- help
- Write kernel log output directly via the hypervisor console.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation
- it is not recommended because it looks ugly and doesn't cooperate
- with klogd/syslogd. You should normally say N here,
- unless you want to debug such a crash.
-
-config TILE_HVGLUE_TRACE
- bool "Provide wrapper functions for hypervisor ABI calls"
- default n
- help
- Provide wrapper functions for the hypervisor ABI calls
- defined in arch/tile/kernel/hvglue.S. This allows tracing
- mechanisms, etc., to have visibility into those calls.
-
-endmenu
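
TILE_HVGLUE_TRACE's help text describes wrapping the raw hypervisor ABI entry
points so that tracing machinery can observe them. A hedged sketch of that
wrapper pattern follows; every name in it (hv_raw_console_write, trace_hv_call,
hv_console_write) is hypothetical, not a symbol from the removed
arch/tile/kernel/hvglue_trace.c.

    /*
     * Sketch of the wrapper pattern TILE_HVGLUE_TRACE describes: each
     * raw hypervisor ABI call gets a C wrapper that tracing can see.
     * All names are hypothetical; the stub stands in for assembly glue.
     */
    #include <stdio.h>

    /* Stub standing in for the raw assembly entry point. */
    static long hv_raw_console_write(const char *buf, unsigned long len)
    {
            (void)buf;
            return (long)len;       /* pretend every byte was accepted */
    }

    /* Tracing hook; a real kernel would fire a tracepoint here. */
    static void trace_hv_call(const char *name, long ret)
    {
            fprintf(stderr, "hv call %s -> %ld\n", name, ret);
    }

    /* Traceable wrapper with the same signature as the raw call. */
    static long hv_console_write(const char *buf, unsigned long len)
    {
            long ret = hv_raw_console_write(buf, len);

            trace_hv_call("console_write", ret);
            return ret;
    }

    int main(void)
    {
            return hv_console_write("hello\n", 6) == 6 ? 0 : 1;
    }

The point of the indirection is that the raw ABI calls live in assembly and
are invisible to C-level instrumentation; routing them through one wrapper
per call gives ftrace and friends a hookable symbol.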
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
deleted file mode 100644
index 8fa0befba32b..000000000000
--- a/arch/tile/Makefile
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to have actions
-# for "archclean" and "archdep" for cleaning up and making dependencies for
-# this architecture.
-
-# If building with TILERA_ROOT set (i.e. using the Tilera Multicore
-# Development Environment) we can set CROSS_COMPILE based on that.
-# If we're not cross-compiling, make sure we're on the right architecture.
-# Only bother to test for a few common targets, to avoid useless errors.
-ifeq ($(CROSS_COMPILE),)
- ifdef TILERA_ROOT
- CROSS_COMPILE := $(TILERA_ROOT)/bin/tile-
- else
- goals := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), all)
- ifneq ($(strip $(filter vmlinux modules all,$(goals))),)
- HOST_ARCH := $(shell uname -m)
- ifneq ($(HOST_ARCH),$(ARCH))
-$(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
- endif
- endif
- endif
-endif
-
-# The tile compiler may emit .eh_frame information for backtracing.
-# In kernel modules, this causes load failures due to unsupported relocations.
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-
-LIBGCC_PATH := \
- $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
-
-# Provide the path to use for "make defconfig".
-# We default to the newer TILE-Gx architecture if only "tile" is given.
-ifeq ($(ARCH),tile)
- KBUILD_DEFCONFIG := tilegx_defconfig
-else
- KBUILD_DEFCONFIG := $(ARCH)_defconfig
-endif
-
-# Used as a file extension when useful, e.g. head_$(BITS).o
-# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
-# uses the right default anyway.
-export BITS
-ifeq ($(CONFIG_TILEGX),y)
-BITS := 64
-else
-BITS := 32
-endif
-
-CHECKFLAGS += -m$(BITS)
-
-head-y := arch/tile/kernel/head_$(BITS).o
-
-libs-y += arch/tile/lib/
-libs-y += $(LIBGCC_PATH)
-
-# See arch/tile/Kbuild for content of core part of the kernel
-core-y += arch/tile/
-
-core-$(CONFIG_TILE_GXIO) += arch/tile/gxio/
-
-ifdef TILERA_ROOT
-INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
-endif
-
-install:
- install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
- install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
- install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
-
-define archhelp
- echo ' install - install kernel into $(INSTALL_PATH)'
-endef
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
deleted file mode 100644
index 357a4c271ad4..000000000000
--- a/arch/tile/configs/tilegx_defconfig
+++ /dev/null
@@ -1,411 +0,0 @@
-CONFIG_TILEGX=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_LOG_BUF_SHIFT=19
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
-CONFIG_NAMESPACES=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_XZ=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_PROFILING=y
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_NR_CPUS=100
-CONFIG_HZ_100=y
-# CONFIG_COMPACTION is not set
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_TILE_PCI_IO=y
-CONFIG_PCI_DEBUG=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_KEY=m
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_NET_IPIP=m
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-CONFIG_TCP_CONG_YEAH=m
-CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_PIMSM_V2=y
-CONFIG_NETLABEL=y
-CONFIG_RDS=m
-CONFIG_RDS_TCP=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_PHONET=m
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_DRR=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_CGROUP=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_NET_CLS_IND=y
-CONFIG_DCB=y
-CONFIG_DNS_RESOLVER=y
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_CONNECTOR=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_ATA_OVER_ETH=m
-CONFIG_RAID_ATTRS=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SAS_ATA=y
-CONFIG_ISCSI_TCP=m
-CONFIG_SCSI_MVSAS=y
-# CONFIG_SCSI_MVSAS_DEBUG is not set
-CONFIG_SCSI_MVSAS_TASKLET=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_SIL24=y
-# CONFIG_ATA_SFF is not set
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_DEBUG=y
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-CONFIG_DM_UEVENT=y
-CONFIG_TARGET_CORE=m
-CONFIG_TCM_IBLOCK=m
-CONFIG_TCM_FILEIO=m
-CONFIG_TCM_PSCSI=m
-CONFIG_LOOPBACK_TARGET=m
-CONFIG_ISCSI_TARGET=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SAS=y
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_IFB=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_TUN=y
-CONFIG_VETH=m
-CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6XXX=y
-CONFIG_SKY2=y
-CONFIG_PTP_1588_CLOCK_TILEGX=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_TILEGX=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_TIMERIOMEM=m
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-# CONFIG_VGA_ARB is not set
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_VIA=m
-CONFIG_DRM_SAVAGE=m
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_TILE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_DLM=y
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=m
-CONFIG_FSCACHE=m
-CONFIG_FSCACHE_STATS=y
-CONFIG_CACHEFILES=m
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_ECRYPT_FS=m
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_FSCACHE=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_FSCACHE=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_DLM_DEBUG=y
-CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_REDUCED=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
-# CONFIG_FRAME_POINTER is not set
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_CREDENTIALS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_KGDB=y
-CONFIG_SECURITY=y
-CONFIG_SECURITYFS=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_NETWORK_XFRM=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
deleted file mode 100644
index da2858755fa1..000000000000
--- a/arch/tile/configs/tilepro_defconfig
+++ /dev/null
@@ -1,524 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_LOG_BUF_SHIFT=19
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
-CONFIG_NAMESPACES=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_XZ=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_PROFILING=y
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_HZ_100=y
-# CONFIG_COMPACTION is not set
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_PCI_DEBUG=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_KEY=m
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_NET_IPIP=m
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-CONFIG_TCP_CONG_YEAH=m
-CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_PIMSM_V2=y
-CONFIG_NETLABEL=y
-CONFIG_NETFILTER=y
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_ZONES=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
-CONFIG_NETFILTER_XT_TARGET_TEE=m
-CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_OSF=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_IPV6=y
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-CONFIG_IP_VS_PROTO_SCTP=y
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_IP6_NF_SECURITY=m
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_IP6=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
-CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_RDS=m
-CONFIG_RDS_TCP=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_PHONET=m
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_DRR=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_CGROUP=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_NET_CLS_IND=y
-CONFIG_DCB=y
-CONFIG_DNS_RESOLVER=y
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_CONNECTOR=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_ATA_OVER_ETH=m
-CONFIG_RAID_ATTRS=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_SIL24=y
-# CONFIG_ATA_SFF is not set
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_DEBUG=y
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-CONFIG_DM_UEVENT=y
-CONFIG_FUSION=y
-CONFIG_FUSION_SAS=y
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_IFB=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_TUN=y
-CONFIG_VETH=m
-CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6XXX=y
-# CONFIG_NET_VENDOR_3COM is not set
-CONFIG_E1000E=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_TIMERIOMEM=m
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-# CONFIG_VGA_ARB is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_TILE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_DLM=y
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=m
-CONFIG_FSCACHE=m
-CONFIG_FSCACHE_STATS=y
-CONFIG_CACHEFILES=m
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_CONFIGFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_FSCACHE=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_FSCACHE=y
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_DLM_DEBUG=y
-CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_REDUCED=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_FRAME_WARN=2048
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
-# CONFIG_FRAME_POINTER is not set
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_CREDENTIALS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_SECURITY=y
-CONFIG_SECURITYFS=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_NETWORK_XFRM=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_CCITT=m
-CONFIG_CRC7=m
diff --git a/arch/tile/gxio/Kconfig b/arch/tile/gxio/Kconfig
deleted file mode 100644
index 903c8646bdd7..000000000000
--- a/arch/tile/gxio/Kconfig
+++ /dev/null
@@ -1,34 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Support direct access to TILE-Gx hardware from user space, via the
-# gxio library, or from kernel space, via kernel IORPC support.
-config TILE_GXIO
- bool
- depends on TILEGX
-
-# Support direct access to the common I/O DMA facility within the
-# TILE-Gx mPIPE and Trio hardware from kernel space.
-config TILE_GXIO_DMA
- bool
- select TILE_GXIO
-
-# Support direct access to the TILE-Gx mPIPE hardware from kernel space.
-config TILE_GXIO_MPIPE
- bool
- select TILE_GXIO
- select TILE_GXIO_DMA
-
-# Support direct access to the TILE-Gx TRIO hardware from kernel space.
-config TILE_GXIO_TRIO
- bool
- select TILE_GXIO
- select TILE_GXIO_DMA
-
-# Support direct access to the TILE-Gx USB hardware from kernel space.
-config TILE_GXIO_USB_HOST
- bool
- select TILE_GXIO
-
-# Support direct access to the TILE-Gx UART hardware from kernel space.
-config TILE_GXIO_UART
- bool
- select TILE_GXIO
diff --git a/arch/tile/gxio/Makefile b/arch/tile/gxio/Makefile
deleted file mode 100644
index fcc903c4cf87..000000000000
--- a/arch/tile/gxio/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Tile-Gx device access support.
-#
-
-obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
-obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
-obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
-obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
-obj-$(CONFIG_TILE_GXIO_UART) += uart.o iorpc_uart.o
-obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
deleted file mode 100644
index b7ba577d82ca..000000000000
--- a/arch/tile/gxio/dma_queue.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/io.h>
-#include <linux/atomic.h>
-#include <linux/module.h>
-#include <gxio/dma_queue.h>
-
-/* Wait for a memory read to complete. */
-#define wait_for_value(val) \
- __asm__ __volatile__("move %0, %0" :: "r"(val))
-
-/* The index is in the low 16. */
-#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)
-
-/*
- * The hardware descriptor-ring type.
- * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
- * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
- * See those types for more documentation on the individual fields.
- */
-typedef union {
- struct {
-#ifndef __BIG_ENDIAN__
- uint64_t ring_idx:16;
- uint64_t count:16;
- uint64_t gen:1;
- uint64_t __reserved:31;
-#else
- uint64_t __reserved:31;
- uint64_t gen:1;
- uint64_t count:16;
- uint64_t ring_idx:16;
-#endif
- };
- uint64_t word;
-} __gxio_ring_t;
-
-void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
- void *post_region_addr, unsigned int num_entries)
-{
- /*
- * Limit 65536 entry rings to 65535 credits because we only have a
- * 16 bit completion counter.
- */
- int64_t credits = (num_entries < 65536) ? num_entries : 65535;
-
- memset(dma_queue, 0, sizeof(*dma_queue));
-
- dma_queue->post_region_addr = post_region_addr;
- dma_queue->hw_complete_count = 0;
- dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
-}
-
-EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
-
-void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
-{
- __gxio_ring_t val;
- uint64_t count;
- uint64_t delta;
- uint64_t new_count;
-
- /*
- * Read the 64-bit completion count without touching the cache, so
- * we later avoid having to evict any sharers of this cache line
- * when we update it below.
- */
- uint64_t orig_hw_complete_count =
- cmpxchg(&dma_queue->hw_complete_count,
- -1, -1);
-
- /* Make sure the load completes before we access the hardware. */
- wait_for_value(orig_hw_complete_count);
-
- /* Read the 16-bit count of how many packets it has completed. */
- val.word = __gxio_mmio_read(dma_queue->post_region_addr);
- count = val.count;
-
- /*
- * Calculate the number of completions since we last updated the
- * 64-bit counter. It's safe to ignore the high bits because the
- * maximum credit value is 65535.
- */
- delta = (count - orig_hw_complete_count) & 0xffff;
- if (delta == 0)
- return;
-
- /*
- * Try to write back the count, advanced by delta. If we race with
- * another thread, this might fail, in which case we return
- * immediately on the assumption that some credits are (or at least
- * were) available.
- */
- new_count = orig_hw_complete_count + delta;
- if (cmpxchg(&dma_queue->hw_complete_count,
- orig_hw_complete_count,
- new_count) != orig_hw_complete_count)
- return;
-
- /*
- * We succeeded in advancing the completion count; add back the
- * corresponding number of egress credits.
- */
- __insn_fetchadd(&dma_queue->credits_and_next_index,
- (delta << DMA_QUEUE_CREDIT_SHIFT));
-}
-
-EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
-
-/*
- * A separate 'blocked' method for put() so that backtraces and
- * profiles will clearly indicate that we're wasting time spinning on
- * egress availability rather than actually posting commands.
- */
-int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
- int64_t modifier)
-{
- int backoff = 16;
- int64_t old;
-
- do {
- int i;
- /* Back off to avoid spamming memory networks. */
- for (i = backoff; i > 0; i--)
- __insn_mfspr(SPR_PASS);
-
- /* Check credits again. */
- __gxio_dma_queue_update_credits(dma_queue);
- old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
- modifier);
-
- /* Calculate bounded exponential backoff for next iteration. */
- if (backoff < 256)
- backoff *= 2;
- } while (old + modifier < 0);
-
- return old;
-}
-
-EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);
-
-int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
- unsigned int num, int wait)
-{
- return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
-}
-
-EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);
-
-int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
- int64_t completion_slot, int update)
-{
- if (update) {
- if (READ_ONCE(dma_queue->hw_complete_count) >
- completion_slot)
- return 1;
-
- __gxio_dma_queue_update_credits(dma_queue);
- }
-
- return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
-}
-
-EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
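
For reference, the credit update above hinges on 16-bit modular arithmetic: the hardware exposes only a 16-bit completion counter, so the driver reconciles it against its 64-bit software count by masking the difference. A standalone sketch of that delta computation, with made-up values and no Tile headers:

#include <stdint.h>
#include <stdio.h>

/* Advance a 64-bit software completion count from the hardware's
 * 16-bit counter, tolerating wraparound (the delta math above). */
static uint64_t advance(uint64_t sw_count, uint64_t hw_count16)
{
	/* High bits of the subtraction are noise; mask to 16 bits.
	 * Safe because at most 65535 commands are ever outstanding. */
	uint64_t delta = (hw_count16 - sw_count) & 0xffff;

	return sw_count + delta;
}

int main(void)
{
	/* Hardware counter wrapped from 0xffff around to 0x0005. */
	printf("0x%llx\n", (unsigned long long)advance(0xfff0, 0x0005));
	return 0;	/* prints 0x10005 */
}

Because at most 65535 commands are outstanding, the masked difference is always the true number of new completions, even across counter wraparound.
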
diff --git a/arch/tile/gxio/iorpc_globals.c b/arch/tile/gxio/iorpc_globals.c
deleted file mode 100644
index e178e90805a2..000000000000
--- a/arch/tile/gxio/iorpc_globals.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#include "gxio/iorpc_globals.h"
-
-struct arm_pollfd_param {
- union iorpc_pollfd pollfd;
-};
-
-int __iorpc_arm_pollfd(int fd, int pollfd_cookie)
-{
- struct arm_pollfd_param temp;
- struct arm_pollfd_param *params = &temp;
-
- params->pollfd.kernel.cookie = pollfd_cookie;
-
- return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
- IORPC_OP_ARM_POLLFD);
-}
-
-EXPORT_SYMBOL(__iorpc_arm_pollfd);
-
-struct close_pollfd_param {
- union iorpc_pollfd pollfd;
-};
-
-int __iorpc_close_pollfd(int fd, int pollfd_cookie)
-{
- struct close_pollfd_param temp;
- struct close_pollfd_param *params = &temp;
-
- params->pollfd.kernel.cookie = pollfd_cookie;
-
- return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
- IORPC_OP_CLOSE_POLLFD);
-}
-
-EXPORT_SYMBOL(__iorpc_close_pollfd);
-
-struct get_mmio_base_param {
- HV_PTE base;
-};
-
-int __iorpc_get_mmio_base(int fd, HV_PTE *base)
-{
- int __result;
- struct get_mmio_base_param temp;
- struct get_mmio_base_param *params = &temp;
-
- __result =
- hv_dev_pread(fd, 0, (HV_VirtAddr) params, sizeof(*params),
- IORPC_OP_GET_MMIO_BASE);
- *base = params->base;
-
- return __result;
-}
-
-EXPORT_SYMBOL(__iorpc_get_mmio_base);
-
-struct check_mmio_offset_param {
- unsigned long offset;
- unsigned long size;
-};
-
-int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size)
-{
- struct check_mmio_offset_param temp;
- struct check_mmio_offset_param *params = &temp;
-
- params->offset = offset;
- params->size = size;
-
- return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
- IORPC_OP_CHECK_MMIO_OFFSET);
-}
-
-EXPORT_SYMBOL(__iorpc_check_mmio_offset);
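
Every stub in these machine-generated iorpc files follows the same shape: copy the arguments into a flat parameter struct, then hand that struct to the hypervisor tagged with a per-operation opcode (hv_dev_pwrite for one-way calls, hv_dev_pread for calls that return data). A minimal sketch of the pattern with a stubbed transport; the opcode value and xport_write() are hypothetical stand-ins, not the Tile API:

#include <stdint.h>
#include <stdio.h>

#define OP_CHECK_MMIO_OFFSET 0x1000	/* hypothetical opcode */

struct check_mmio_offset_param {
	unsigned long offset;
	unsigned long size;
};

/* Stand-in for hv_dev_pwrite(): just logs the request. */
static int xport_write(int fd, const void *buf, size_t len, uint64_t op)
{
	(void)buf;
	printf("fd=%d op=0x%llx len=%zu\n", fd, (unsigned long long)op, len);
	return 0;
}

static int check_mmio_offset(int fd, unsigned long offset, unsigned long size)
{
	struct check_mmio_offset_param params;

	params.offset = offset;
	params.size = size;
	return xport_write(fd, &params, sizeof(params), OP_CHECK_MMIO_OFFSET);
}

int main(void)
{
	return check_mmio_offset(3, 0x1000, 0x100);
}
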
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
deleted file mode 100644
index e19325c4c431..000000000000
--- a/arch/tile/gxio/iorpc_mpipe.c
+++ /dev/null
@@ -1,593 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#include "gxio/iorpc_mpipe.h"
-
-struct alloc_buffer_stacks_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_buffer_stacks_param temp;
- struct alloc_buffer_stacks_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_alloc_buffer_stacks);
-
-struct init_buffer_stack_aux_param {
- union iorpc_mem_buffer buffer;
- unsigned int stack;
- unsigned int buffer_size_enum;
-};
-
-int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
- void *mem_va, size_t mem_size,
- unsigned int mem_flags, unsigned int stack,
- unsigned int buffer_size_enum)
-{
- int __result;
- unsigned long long __cpa;
- pte_t __pte;
- struct init_buffer_stack_aux_param temp;
- struct init_buffer_stack_aux_param *params = &temp;
-
- __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
- if (__result != 0)
- return __result;
- params->buffer.kernel.cpa = __cpa;
- params->buffer.kernel.size = mem_size;
- params->buffer.kernel.pte = __pte;
- params->buffer.kernel.flags = mem_flags;
- params->stack = stack;
- params->buffer_size_enum = buffer_size_enum;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_init_buffer_stack_aux);
-
-
-struct alloc_notif_rings_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_notif_rings_param temp;
- struct alloc_notif_rings_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_alloc_notif_rings);
-
-struct init_notif_ring_aux_param {
- union iorpc_mem_buffer buffer;
- unsigned int ring;
-};
-
-int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
- size_t mem_size, unsigned int mem_flags,
- unsigned int ring)
-{
- int __result;
- unsigned long long __cpa;
- pte_t __pte;
- struct init_notif_ring_aux_param temp;
- struct init_notif_ring_aux_param *params = &temp;
-
- __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
- if (__result != 0)
- return __result;
- params->buffer.kernel.cpa = __cpa;
- params->buffer.kernel.size = mem_size;
- params->buffer.kernel.pte = __pte;
- params->buffer.kernel.flags = mem_flags;
- params->ring = ring;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_init_notif_ring_aux);
-
-struct request_notif_ring_interrupt_param {
- union iorpc_interrupt interrupt;
- unsigned int ring;
-};
-
-int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
- int inter_x, int inter_y,
- int inter_ipi, int inter_event,
- unsigned int ring)
-{
- struct request_notif_ring_interrupt_param temp;
- struct request_notif_ring_interrupt_param *params = &temp;
-
- params->interrupt.kernel.x = inter_x;
- params->interrupt.kernel.y = inter_y;
- params->interrupt.kernel.ipi = inter_ipi;
- params->interrupt.kernel.event = inter_event;
- params->ring = ring;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_request_notif_ring_interrupt);
-
-struct enable_notif_ring_interrupt_param {
- unsigned int ring;
-};
-
-int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
- unsigned int ring)
-{
- struct enable_notif_ring_interrupt_param temp;
- struct enable_notif_ring_interrupt_param *params = &temp;
-
- params->ring = ring;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_enable_notif_ring_interrupt);
-
-struct alloc_notif_groups_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_notif_groups_param temp;
- struct alloc_notif_groups_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_alloc_notif_groups);
-
-struct init_notif_group_param {
- unsigned int group;
- gxio_mpipe_notif_group_bits_t bits;
-};
-
-int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
- unsigned int group,
- gxio_mpipe_notif_group_bits_t bits)
-{
- struct init_notif_group_param temp;
- struct init_notif_group_param *params = &temp;
-
- params->group = group;
- params->bits = bits;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_INIT_NOTIF_GROUP);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_init_notif_group);
-
-struct alloc_buckets_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
- unsigned int first, unsigned int flags)
-{
- struct alloc_buckets_param temp;
- struct alloc_buckets_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_ALLOC_BUCKETS);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_alloc_buckets);
-
-struct init_bucket_param {
- unsigned int bucket;
- MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
-};
-
-int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
- MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
-{
- struct init_bucket_param temp;
- struct init_bucket_param *params = &temp;
-
- params->bucket = bucket;
- params->bucket_info = bucket_info;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_INIT_BUCKET);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_init_bucket);
-
-struct alloc_edma_rings_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_edma_rings_param temp;
- struct alloc_edma_rings_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_ALLOC_EDMA_RINGS);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_alloc_edma_rings);
-
-struct init_edma_ring_aux_param {
- union iorpc_mem_buffer buffer;
- unsigned int ring;
- unsigned int channel;
-};
-
-int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
- size_t mem_size, unsigned int mem_flags,
- unsigned int ring, unsigned int channel)
-{
- int __result;
- unsigned long long __cpa;
- pte_t __pte;
- struct init_edma_ring_aux_param temp;
- struct init_edma_ring_aux_param *params = &temp;
-
- __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
- if (__result != 0)
- return __result;
- params->buffer.kernel.cpa = __cpa;
- params->buffer.kernel.size = mem_size;
- params->buffer.kernel.pte = __pte;
- params->buffer.kernel.flags = mem_flags;
- params->ring = ring;
- params->channel = channel;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_INIT_EDMA_RING_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
-
-
-int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
- size_t blob_size)
-{
- const void *params = blob;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size,
- GXIO_MPIPE_OP_COMMIT_RULES);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_commit_rules);
-
-struct register_client_memory_param {
- unsigned int iotlb;
- HV_PTE pte;
- unsigned int flags;
-};
-
-int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
- unsigned int iotlb, HV_PTE pte,
- unsigned int flags)
-{
- struct register_client_memory_param temp;
- struct register_client_memory_param *params = &temp;
-
- params->iotlb = iotlb;
- params->pte = pte;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_register_client_memory);
-
-struct link_open_aux_param {
- _gxio_mpipe_link_name_t name;
- unsigned int flags;
-};
-
-int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
- _gxio_mpipe_link_name_t name, unsigned int flags)
-{
- struct link_open_aux_param temp;
- struct link_open_aux_param *params = &temp;
-
- params->name = name;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_LINK_OPEN_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_link_open_aux);
-
-struct link_close_aux_param {
- int mac;
-};
-
-int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac)
-{
- struct link_close_aux_param temp;
- struct link_close_aux_param *params = &temp;
-
- params->mac = mac;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_LINK_CLOSE_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
-
-struct link_set_attr_aux_param {
- int mac;
- uint32_t attr;
- int64_t val;
-};
-
-int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
- uint32_t attr, int64_t val)
-{
- struct link_set_attr_aux_param temp;
- struct link_set_attr_aux_param *params = &temp;
-
- params->mac = mac;
- params->attr = attr;
- params->val = val;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
-
-struct get_timestamp_aux_param {
- uint64_t sec;
- uint64_t nsec;
- uint64_t cycles;
-};
-
-int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
- uint64_t *nsec, uint64_t *cycles)
-{
- int __result;
- struct get_timestamp_aux_param temp;
- struct get_timestamp_aux_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_MPIPE_OP_GET_TIMESTAMP_AUX);
- *sec = params->sec;
- *nsec = params->nsec;
- *cycles = params->cycles;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_mpipe_get_timestamp_aux);
-
-struct set_timestamp_aux_param {
- uint64_t sec;
- uint64_t nsec;
- uint64_t cycles;
-};
-
-int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
- uint64_t nsec, uint64_t cycles)
-{
- struct set_timestamp_aux_param temp;
- struct set_timestamp_aux_param *params = &temp;
-
- params->sec = sec;
- params->nsec = nsec;
- params->cycles = cycles;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_SET_TIMESTAMP_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_set_timestamp_aux);
-
-struct adjust_timestamp_aux_param {
- int64_t nsec;
-};
-
-int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec)
-{
- struct adjust_timestamp_aux_param temp;
- struct adjust_timestamp_aux_param *params = &temp;
-
- params->nsec = nsec;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
-
-struct config_edma_ring_blks_param {
- unsigned int ering;
- unsigned int max_blks;
- unsigned int min_snf_blks;
- unsigned int db;
-};
-
-int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
- unsigned int ering, unsigned int max_blks,
- unsigned int min_snf_blks, unsigned int db)
-{
- struct config_edma_ring_blks_param temp;
- struct config_edma_ring_blks_param *params = &temp;
-
- params->ering = ering;
- params->max_blks = max_blks;
- params->min_snf_blks = min_snf_blks;
- params->db = db;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
-
-struct adjust_timestamp_freq_param {
- int32_t ppb;
-};
-
-int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb)
-{
- struct adjust_timestamp_freq_param temp;
- struct adjust_timestamp_freq_param *params = &temp;
-
- params->ppb = ppb;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
-
-struct arm_pollfd_param {
- union iorpc_pollfd pollfd;
-};
-
-int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
-{
- struct arm_pollfd_param temp;
- struct arm_pollfd_param *params = &temp;
-
- params->pollfd.kernel.cookie = pollfd_cookie;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_ARM_POLLFD);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_arm_pollfd);
-
-struct close_pollfd_param {
- union iorpc_pollfd pollfd;
-};
-
-int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
-{
- struct close_pollfd_param temp;
- struct close_pollfd_param *params = &temp;
-
- params->pollfd.kernel.cookie = pollfd_cookie;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_CLOSE_POLLFD);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_close_pollfd);
-
-struct get_mmio_base_param {
- HV_PTE base;
-};
-
-int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base)
-{
- int __result;
- struct get_mmio_base_param temp;
- struct get_mmio_base_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_MPIPE_OP_GET_MMIO_BASE);
- *base = params->base;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_mpipe_get_mmio_base);
-
-struct check_mmio_offset_param {
- unsigned long offset;
- unsigned long size;
-};
-
-int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
- unsigned long offset, unsigned long size)
-{
- struct check_mmio_offset_param temp;
- struct check_mmio_offset_param *params = &temp;
-
- params->offset = offset;
- params->size = size;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_OP_CHECK_MMIO_OFFSET);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_check_mmio_offset);
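
Unlike the fixed-size stubs, gxio_mpipe_commit_rules() above ships a caller-built variable-length blob, sized as a fixed header plus however many rule bytes are in use; gxio_mpipe_rules_commit() in mpipe.c further down computes that size with offsetof. A sketch of the sizing idiom; the struct fields are illustrative, not the real gxio_mpipe_rules_list_t layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A commit_rules-style variable-length message: fixed header plus
 * only the bytes of rule data actually in use. Fields illustrative. */
struct rules_list {
	uint16_t tail;			/* bytes of rules[] in use */
	uint16_t head;			/* offset of the current rule */
	uint8_t rules[4096];
};

int main(void)
{
	struct rules_list list = { .tail = 48 };
	size_t size = offsetof(struct rules_list, rules) + list.tail;

	printf("transmit %zu bytes\n", size);	/* header + used payload */
	return 0;
}
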
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
deleted file mode 100644
index 77019c6e9b4a..000000000000
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#include "gxio/iorpc_mpipe_info.h"
-
-struct instance_aux_param {
- _gxio_mpipe_link_name_t name;
-};
-
-int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
- _gxio_mpipe_link_name_t name)
-{
- struct instance_aux_param temp;
- struct instance_aux_param *params = &temp;
-
- params->name = name;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
-
-struct enumerate_aux_param {
- _gxio_mpipe_link_name_t name;
- _gxio_mpipe_link_mac_t mac;
-};
-
-int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
- unsigned int idx,
- _gxio_mpipe_link_name_t *name,
- _gxio_mpipe_link_mac_t *mac)
-{
- int __result;
- struct enumerate_aux_param temp;
- struct enumerate_aux_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- (((uint64_t)idx << 32) |
- GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));
- *name = params->name;
- *mac = params->mac;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_mpipe_info_enumerate_aux);
-
-struct get_mmio_base_param {
- HV_PTE base;
-};
-
-int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
- HV_PTE *base)
-{
- int __result;
- struct get_mmio_base_param temp;
- struct get_mmio_base_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_MPIPE_INFO_OP_GET_MMIO_BASE);
- *base = params->base;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_mpipe_info_get_mmio_base);
-
-struct check_mmio_offset_param {
- unsigned long offset;
- unsigned long size;
-};
-
-int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
- unsigned long offset, unsigned long size)
-{
- struct check_mmio_offset_param temp;
- struct check_mmio_offset_param *params = &temp;
-
- params->offset = offset;
- params->size = size;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_info_check_mmio_offset);
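
One wrinkle in this file: gxio_mpipe_info_enumerate_aux() has no field for the index in its parameter struct, so it packs the 32-bit index into the upper half of the 64-bit opcode word. A sketch of that encoding; the opcode constant is a made-up value:

#include <assert.h>
#include <stdint.h>

#define OP_ENUMERATE_AUX 0x9e00		/* made-up opcode value */

/* Pack a 32-bit index into the upper half of the 64-bit opcode word. */
static uint64_t pack(uint32_t idx, uint32_t op)
{
	return ((uint64_t)idx << 32) | op;
}

int main(void)
{
	uint64_t word = pack(7, OP_ENUMERATE_AUX);

	assert((uint32_t)(word >> 32) == 7);		/* index */
	assert((uint32_t)word == OP_ENUMERATE_AUX);	/* opcode */
	return 0;
}
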
diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c
deleted file mode 100644
index 1d3cedb9aeb4..000000000000
--- a/arch/tile/gxio/iorpc_trio.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#include "gxio/iorpc_trio.h"
-
-struct alloc_asids_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
- unsigned int first, unsigned int flags)
-{
- struct alloc_asids_param temp;
- struct alloc_asids_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_ALLOC_ASIDS);
-}
-
-EXPORT_SYMBOL(gxio_trio_alloc_asids);
-
-
-struct alloc_memory_maps_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_memory_maps_param temp;
- struct alloc_memory_maps_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_ALLOC_MEMORY_MAPS);
-}
-
-EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);
-
-struct alloc_scatter_queues_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_scatter_queues_param temp;
- struct alloc_scatter_queues_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES);
-}
-
-EXPORT_SYMBOL(gxio_trio_alloc_scatter_queues);
-
-struct alloc_pio_regions_param {
- unsigned int count;
- unsigned int first;
- unsigned int flags;
-};
-
-int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags)
-{
- struct alloc_pio_regions_param temp;
- struct alloc_pio_regions_param *params = &temp;
-
- params->count = count;
- params->first = first;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_ALLOC_PIO_REGIONS);
-}
-
-EXPORT_SYMBOL(gxio_trio_alloc_pio_regions);
-
-struct init_pio_region_aux_param {
- unsigned int pio_region;
- unsigned int mac;
- uint32_t bus_address_hi;
- unsigned int flags;
-};
-
-int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
- unsigned int pio_region, unsigned int mac,
- uint32_t bus_address_hi, unsigned int flags)
-{
- struct init_pio_region_aux_param temp;
- struct init_pio_region_aux_param *params = &temp;
-
- params->pio_region = pio_region;
- params->mac = mac;
- params->bus_address_hi = bus_address_hi;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_INIT_PIO_REGION_AUX);
-}
-
-EXPORT_SYMBOL(gxio_trio_init_pio_region_aux);
-
-
-struct init_memory_map_mmu_aux_param {
- unsigned int map;
- unsigned long va;
- uint64_t size;
- unsigned int asid;
- unsigned int mac;
- uint64_t bus_address;
- unsigned int node;
- unsigned int order_mode;
-};
-
-int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
- unsigned int map, unsigned long va,
- uint64_t size, unsigned int asid,
- unsigned int mac, uint64_t bus_address,
- unsigned int node,
- unsigned int order_mode)
-{
- struct init_memory_map_mmu_aux_param temp;
- struct init_memory_map_mmu_aux_param *params = &temp;
-
- params->map = map;
- params->va = va;
- params->size = size;
- params->asid = asid;
- params->mac = mac;
- params->bus_address = bus_address;
- params->node = node;
- params->order_mode = order_mode;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX);
-}
-
-EXPORT_SYMBOL(gxio_trio_init_memory_map_mmu_aux);
-
-struct get_port_property_param {
- struct pcie_trio_ports_property trio_ports;
-};
-
-int gxio_trio_get_port_property(gxio_trio_context_t *context,
- struct pcie_trio_ports_property *trio_ports)
-{
- int __result;
- struct get_port_property_param temp;
- struct get_port_property_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_TRIO_OP_GET_PORT_PROPERTY);
- *trio_ports = params->trio_ports;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_trio_get_port_property);
-
-struct config_legacy_intr_param {
- union iorpc_interrupt interrupt;
- unsigned int mac;
- unsigned int intx;
-};
-
-int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event,
- unsigned int mac, unsigned int intx)
-{
- struct config_legacy_intr_param temp;
- struct config_legacy_intr_param *params = &temp;
-
- params->interrupt.kernel.x = inter_x;
- params->interrupt.kernel.y = inter_y;
- params->interrupt.kernel.ipi = inter_ipi;
- params->interrupt.kernel.event = inter_event;
- params->mac = mac;
- params->intx = intx;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_CONFIG_LEGACY_INTR);
-}
-
-EXPORT_SYMBOL(gxio_trio_config_legacy_intr);
-
-struct config_msi_intr_param {
- union iorpc_interrupt interrupt;
- unsigned int mac;
- unsigned int mem_map;
- uint64_t mem_map_base;
- uint64_t mem_map_limit;
- unsigned int asid;
-};
-
-int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event,
- unsigned int mac, unsigned int mem_map,
- uint64_t mem_map_base, uint64_t mem_map_limit,
- unsigned int asid)
-{
- struct config_msi_intr_param temp;
- struct config_msi_intr_param *params = &temp;
-
- params->interrupt.kernel.x = inter_x;
- params->interrupt.kernel.y = inter_y;
- params->interrupt.kernel.ipi = inter_ipi;
- params->interrupt.kernel.event = inter_event;
- params->mac = mac;
- params->mem_map = mem_map;
- params->mem_map_base = mem_map_base;
- params->mem_map_limit = mem_map_limit;
- params->asid = asid;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_CONFIG_MSI_INTR);
-}
-
-EXPORT_SYMBOL(gxio_trio_config_msi_intr);
-
-
-struct set_mps_mrs_param {
- uint16_t mps;
- uint16_t mrs;
- unsigned int mac;
-};
-
-int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
- uint16_t mrs, unsigned int mac)
-{
- struct set_mps_mrs_param temp;
- struct set_mps_mrs_param *params = &temp;
-
- params->mps = mps;
- params->mrs = mrs;
- params->mac = mac;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_SET_MPS_MRS);
-}
-
-EXPORT_SYMBOL(gxio_trio_set_mps_mrs);
-
-struct force_rc_link_up_param {
- unsigned int mac;
-};
-
-int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac)
-{
- struct force_rc_link_up_param temp;
- struct force_rc_link_up_param *params = &temp;
-
- params->mac = mac;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_FORCE_RC_LINK_UP);
-}
-
-EXPORT_SYMBOL(gxio_trio_force_rc_link_up);
-
-struct force_ep_link_up_param {
- unsigned int mac;
-};
-
-int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac)
-{
- struct force_ep_link_up_param temp;
- struct force_ep_link_up_param *params = &temp;
-
- params->mac = mac;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_FORCE_EP_LINK_UP);
-}
-
-EXPORT_SYMBOL(gxio_trio_force_ep_link_up);
-
-struct get_mmio_base_param {
- HV_PTE base;
-};
-
-int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base)
-{
- int __result;
- struct get_mmio_base_param temp;
- struct get_mmio_base_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_TRIO_OP_GET_MMIO_BASE);
- *base = params->base;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_trio_get_mmio_base);
-
-struct check_mmio_offset_param {
- unsigned long offset;
- unsigned long size;
-};
-
-int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
- unsigned long offset, unsigned long size)
-{
- struct check_mmio_offset_param temp;
- struct check_mmio_offset_param *params = &temp;
-
- params->offset = offset;
- params->size = size;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_TRIO_OP_CHECK_MMIO_OFFSET);
-}
-
-EXPORT_SYMBOL(gxio_trio_check_mmio_offset);
diff --git a/arch/tile/gxio/iorpc_uart.c b/arch/tile/gxio/iorpc_uart.c
deleted file mode 100644
index b9a6d6193d73..000000000000
--- a/arch/tile/gxio/iorpc_uart.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#include "gxio/iorpc_uart.h"
-
-struct cfg_interrupt_param {
- union iorpc_interrupt interrupt;
-};
-
-int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event)
-{
- struct cfg_interrupt_param temp;
- struct cfg_interrupt_param *params = &temp;
-
- params->interrupt.kernel.x = inter_x;
- params->interrupt.kernel.y = inter_y;
- params->interrupt.kernel.ipi = inter_ipi;
- params->interrupt.kernel.event = inter_event;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_UART_OP_CFG_INTERRUPT);
-}
-
-EXPORT_SYMBOL(gxio_uart_cfg_interrupt);
-
-struct get_mmio_base_param {
- HV_PTE base;
-};
-
-int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base)
-{
- int __result;
- struct get_mmio_base_param temp;
- struct get_mmio_base_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_UART_OP_GET_MMIO_BASE);
- *base = params->base;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_uart_get_mmio_base);
-
-struct check_mmio_offset_param {
- unsigned long offset;
- unsigned long size;
-};
-
-int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
- unsigned long offset, unsigned long size)
-{
- struct check_mmio_offset_param temp;
- struct check_mmio_offset_param *params = &temp;
-
- params->offset = offset;
- params->size = size;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_UART_OP_CHECK_MMIO_OFFSET);
-}
-
-EXPORT_SYMBOL(gxio_uart_check_mmio_offset);
diff --git a/arch/tile/gxio/iorpc_usb_host.c b/arch/tile/gxio/iorpc_usb_host.c
deleted file mode 100644
index 9c820073bfc0..000000000000
--- a/arch/tile/gxio/iorpc_usb_host.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#include "gxio/iorpc_usb_host.h"
-
-struct cfg_interrupt_param {
- union iorpc_interrupt interrupt;
-};
-
-int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event)
-{
- struct cfg_interrupt_param temp;
- struct cfg_interrupt_param *params = &temp;
-
- params->interrupt.kernel.x = inter_x;
- params->interrupt.kernel.y = inter_y;
- params->interrupt.kernel.ipi = inter_ipi;
- params->interrupt.kernel.event = inter_event;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params), GXIO_USB_HOST_OP_CFG_INTERRUPT);
-}
-
-EXPORT_SYMBOL(gxio_usb_host_cfg_interrupt);
-
-struct register_client_memory_param {
- HV_PTE pte;
- unsigned int flags;
-};
-
-int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
- HV_PTE pte, unsigned int flags)
-{
- struct register_client_memory_param temp;
- struct register_client_memory_param *params = &temp;
-
- params->pte = pte;
- params->flags = flags;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY);
-}
-
-EXPORT_SYMBOL(gxio_usb_host_register_client_memory);
-
-struct get_mmio_base_param {
- HV_PTE base;
-};
-
-int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base)
-{
- int __result;
- struct get_mmio_base_param temp;
- struct get_mmio_base_param *params = &temp;
-
- __result =
- hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- GXIO_USB_HOST_OP_GET_MMIO_BASE);
- *base = params->base;
-
- return __result;
-}
-
-EXPORT_SYMBOL(gxio_usb_host_get_mmio_base);
-
-struct check_mmio_offset_param {
- unsigned long offset;
- unsigned long size;
-};
-
-int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
- unsigned long offset, unsigned long size)
-{
- struct check_mmio_offset_param temp;
- struct check_mmio_offset_param *params = &temp;
-
- params->offset = offset;
- params->size = size;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET);
-}
-
-EXPORT_SYMBOL(gxio_usb_host_check_mmio_offset);
diff --git a/arch/tile/gxio/kiorpc.c b/arch/tile/gxio/kiorpc.c
deleted file mode 100644
index c8096aa5a3fc..000000000000
--- a/arch/tile/gxio/kiorpc.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx IORPC support for kernel I/O drivers.
- */
-
-#include <linux/mmzone.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <gxio/iorpc_globals.h>
-#include <gxio/kiorpc.h>
-
-#ifdef DEBUG_IORPC
-#define TRACE(FMT, ...) pr_info(SIMPLE_MSG_LINE FMT, ## __VA_ARGS__)
-#else
-#define TRACE(...)
-#endif
-
-/* Create kernel-VA-space MMIO mapping for an on-chip IO device. */
-void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
- unsigned long size)
-{
- pgprot_t mmio_base, prot = { 0 };
- unsigned long pfn;
- int err;
-
- /* Look up the shim's lotar and base PA. */
- err = __iorpc_get_mmio_base(hv_fd, &mmio_base);
- if (err) {
- TRACE("get_mmio_base() failure: %d\n", err);
- return NULL;
- }
-
- /* Make sure the HV driver approves of our offset and size. */
- err = __iorpc_check_mmio_offset(hv_fd, offset, size);
- if (err) {
- TRACE("check_mmio_offset() failure: %d\n", err);
- return NULL;
- }
-
- /*
- * mmio_base contains a base pfn and homing coordinates. Turn
- * it into an MMIO pgprot and offset pfn.
- */
- prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base));
- pfn = pte_pfn(mmio_base) + PFN_DOWN(offset);
-
- return ioremap_prot(PFN_PHYS(pfn), size, prot);
-}
-
-EXPORT_SYMBOL(iorpc_ioremap);
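
The interesting step in iorpc_ioremap() is the address math: the hypervisor hands back a base PTE, and the driver adds the page-granular device offset to its frame number before mapping. A sketch of that arithmetic, assuming 4 KiB pages and a page-aligned offset purely for illustration; the real page size is configuration-dependent:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* illustrative page size */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

int main(void)
{
	uint64_t base_pfn = 0x40000;	/* stand-in for pte_pfn(mmio_base) */
	uint64_t offset = 0x21000;	/* page-aligned device offset */

	/* pfn = base pfn + whole pages of offset, as iorpc_ioremap() does. */
	uint64_t pfn = base_pfn + PFN_DOWN(offset);

	printf("map physical 0x%llx\n", (unsigned long long)PFN_PHYS(pfn));
	return 0;			/* prints 0x40021000 */
}
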
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
deleted file mode 100644
index 34de300ab320..000000000000
--- a/arch/tile/gxio/mpipe.c
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Implementation of mpipe gxio calls.
- */
-
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <gxio/iorpc_globals.h>
-#include <gxio/iorpc_mpipe.h>
-#include <gxio/iorpc_mpipe_info.h>
-#include <gxio/kiorpc.h>
-#include <gxio/mpipe.h>
-
-/* HACK: Avoid pointless "shadow" warnings. */
-#define link link_shadow
-
-int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
-{
- char file[32];
-
- int fd;
- int i;
-
- if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
- return -EINVAL;
-
- snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
- fd = hv_dev_open((HV_VirtAddr) file, 0);
-
- context->fd = fd;
-
- if (fd < 0) {
- if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
- return fd;
- else
- return -ENODEV;
- }
-
- /* Map in the MMIO space. */
- context->mmio_cfg_base = (void __force *)
- iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
- HV_MPIPE_CONFIG_MMIO_SIZE);
- if (context->mmio_cfg_base == NULL)
- goto cfg_failed;
-
- context->mmio_fast_base = (void __force *)
- iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
- HV_MPIPE_FAST_MMIO_SIZE);
- if (context->mmio_fast_base == NULL)
- goto fast_failed;
-
- /* Initialize the stacks. */
- for (i = 0; i < 8; i++)
- context->__stacks.stacks[i] = 255;
-
- context->instance = mpipe_index;
-
- return 0;
-
- fast_failed:
- iounmap((void __force __iomem *)(context->mmio_cfg_base));
- cfg_failed:
- hv_dev_close(context->fd);
- context->fd = -1;
- return -ENODEV;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_init);
-
-int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
-{
- iounmap((void __force __iomem *)(context->mmio_cfg_base));
- iounmap((void __force __iomem *)(context->mmio_fast_base));
- return hv_dev_close(context->fd);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
-
-static int16_t gxio_mpipe_buffer_sizes[8] =
- { 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
-
-gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
- size)
-{
- int i;
- for (i = 0; i < 7; i++)
- if (size <= gxio_mpipe_buffer_sizes[i])
- break;
- return i;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
-
-size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
- buffer_size_enum)
-{
- if (buffer_size_enum > 7)
- buffer_size_enum = 7;
-
- return gxio_mpipe_buffer_sizes[buffer_size_enum];
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
-
-size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
-{
- const int BUFFERS_PER_LINE = 12;
-
- /* Count the number of cachelines. */
- unsigned long lines =
- (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;
-
- /* Convert to bytes. */
- return lines * CHIP_L2_LINE_SIZE();
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
-
-int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
- unsigned int stack,
- gxio_mpipe_buffer_size_enum_t
- buffer_size_enum, void *mem, size_t mem_size,
- unsigned int mem_flags)
-{
- int result;
-
- memset(mem, 0, mem_size);
-
- result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
- mem_flags, stack,
- buffer_size_enum);
- if (result < 0)
- return result;
-
- /* Save the stack. */
- context->__stacks.stacks[buffer_size_enum] = stack;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
-
-int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
- unsigned int ring,
- void *mem, size_t mem_size,
- unsigned int mem_flags)
-{
- return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
- mem_flags, ring);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
-
-int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
- unsigned int group,
- unsigned int ring,
- unsigned int num_rings,
- unsigned int bucket,
- unsigned int num_buckets,
- gxio_mpipe_bucket_mode_t mode)
-{
- int i;
- int result;
-
- gxio_mpipe_bucket_info_t bucket_info = { {
- .group = group,
- .mode = mode,
- }
- };
-
- gxio_mpipe_notif_group_bits_t bits = { {0} };
-
- for (i = 0; i < num_rings; i++)
- gxio_mpipe_notif_group_add_ring(&bits, ring + i);
-
- result = gxio_mpipe_init_notif_group(context, group, bits);
- if (result != 0)
- return result;
-
- for (i = 0; i < num_buckets; i++) {
- bucket_info.notifring = ring + (i % num_rings);
-
- result = gxio_mpipe_init_bucket(context, bucket + i,
- bucket_info);
- if (result != 0)
- return result;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
-
-int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
- unsigned int ring, unsigned int channel,
- void *mem, size_t mem_size,
- unsigned int mem_flags)
-{
- memset(mem, 0, mem_size);
-
- return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
- ring, channel);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
-
-void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
- gxio_mpipe_context_t *context)
-{
- rules->context = context;
- memset(&rules->list, 0, sizeof(rules->list));
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
-
-int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
- unsigned int bucket, unsigned int num_buckets,
- gxio_mpipe_rules_stacks_t *stacks)
-{
- int i;
- int stack = 255;
-
- gxio_mpipe_rules_list_t *list = &rules->list;
-
- /* Current rule. */
- gxio_mpipe_rules_rule_t *rule =
- (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
-
- unsigned int head = list->tail;
-
- /*
- * Align next rule properly.
- * Note that "dmacs_and_vlans" will also be aligned.
- */
- unsigned int pad = 0;
- while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
- pad++;
-
- /*
- * Verify room.
- * ISSUE: Mark rules as broken on error?
- */
- if (head + pad + sizeof(*rule) >= sizeof(list->rules))
- return GXIO_MPIPE_ERR_RULES_FULL;
-
- /* Verify num_buckets is a power of 2. */
- if (__builtin_popcount(num_buckets) != 1)
- return GXIO_MPIPE_ERR_RULES_INVALID;
-
- /* Add padding to previous rule. */
- rule->size += pad;
-
- /* Start a new rule. */
- list->head = head + pad;
-
- rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
-
- /* Default some values. */
- rule->headroom = 2;
- rule->tailroom = 0;
- rule->capacity = 16384;
-
- /* Save the bucket info. */
- rule->bucket_mask = num_buckets - 1;
- rule->bucket_first = bucket;
-
- for (i = 8 - 1; i >= 0; i--) {
- int maybe =
- stacks ? stacks->stacks[i] : rules->context->__stacks.
- stacks[i];
- if (maybe != 255)
- stack = maybe;
- rule->stacks.stacks[i] = stack;
- }
-
- if (stack == 255)
- return GXIO_MPIPE_ERR_RULES_INVALID;
-
- /* NOTE: Only entries at the end of the array can be 255. */
- for (i = 8 - 1; i > 0; i--) {
- if (rule->stacks.stacks[i] == 255) {
- rule->stacks.stacks[i] = stack;
- rule->capacity =
- gxio_mpipe_buffer_size_enum_to_buffer_size(i -
- 1);
- }
- }
-
- rule->size = sizeof(*rule);
- list->tail = list->head + rule->size;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
-
-int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
- unsigned int channel)
-{
- gxio_mpipe_rules_list_t *list = &rules->list;
-
- gxio_mpipe_rules_rule_t *rule =
- (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
-
- /* Verify channel. */
- if (channel >= 32)
- return GXIO_MPIPE_ERR_RULES_INVALID;
-
- /* Verify begun. */
- if (list->tail == 0)
- return GXIO_MPIPE_ERR_RULES_EMPTY;
-
- rule->channel_bits |= (1UL << channel);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
-
-int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
-{
- gxio_mpipe_rules_list_t *list = &rules->list;
-
- gxio_mpipe_rules_rule_t *rule =
- (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
-
- /* Verify begun. */
- if (list->tail == 0)
- return GXIO_MPIPE_ERR_RULES_EMPTY;
-
- rule->headroom = headroom;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
-
-int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
-{
- gxio_mpipe_rules_list_t *list = &rules->list;
- unsigned int size =
- offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
- return gxio_mpipe_commit_rules(rules->context, list, size);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
-
-int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
- gxio_mpipe_context_t *context,
- unsigned int ring,
- void *mem, size_t mem_size, unsigned int mem_flags)
-{
- /* The init call below will verify that "mem_size" is legal. */
- unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);
-
- iqueue->context = context;
- iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
- iqueue->ring = ring;
- iqueue->num_entries = num_entries;
- iqueue->mask_num_entries = num_entries - 1;
- iqueue->log2_num_entries = __builtin_ctz(num_entries);
- iqueue->head = 1;
-#ifdef __BIG_ENDIAN__
- iqueue->swapped = 0;
-#endif
-
- /* Initialize the "tail". */
- __gxio_mmio_write(mem, iqueue->head);
-
- return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
- mem_flags);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
-
-int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
- gxio_mpipe_context_t *context,
- unsigned int ering,
- unsigned int channel,
- void *mem, unsigned int mem_size,
- unsigned int mem_flags)
-{
- /* The init call below will verify that "mem_size" is legal. */
- unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);
-
- /* Offset used to read number of completed commands. */
- MPIPE_EDMA_POST_REGION_ADDR_t offset;
-
- int result = gxio_mpipe_init_edma_ring(context, ering, channel,
- mem, mem_size, mem_flags);
- if (result < 0)
- return result;
-
- memset(equeue, 0, sizeof(*equeue));
-
- offset.word = 0;
- offset.region =
- MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
- MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- offset.ring = ering;
-
- __gxio_dma_queue_init(&equeue->dma_queue,
- context->mmio_fast_base + offset.word,
- num_entries);
- equeue->edescs = mem;
- equeue->mask_num_entries = num_entries - 1;
- equeue->log2_num_entries = __builtin_ctz(num_entries);
- equeue->context = context;
- equeue->ering = ering;
- equeue->channel = channel;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
-
-int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
- const struct timespec64 *ts)
-{
- cycles_t cycles = get_cycles();
- return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
- (uint64_t)ts->tv_nsec,
- (uint64_t)cycles);
-}
-EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
-
-int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
- struct timespec64 *ts)
-{
- int ret;
- cycles_t cycles_prev, cycles_now, clock_rate;
- cycles_prev = get_cycles();
- ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
- (uint64_t *)&ts->tv_nsec,
- (uint64_t *)&cycles_now);
- if (ret < 0) {
- return ret;
- }
-
- clock_rate = get_clock_rate();
- ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
- if (ts->tv_nsec < 0) {
- ts->tv_nsec += 1000000000LL;
- ts->tv_sec -= 1;
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);
-
-int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
-{
- return gxio_mpipe_adjust_timestamp_aux(context, delta);
-}
-EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);
-
-/* Get our internal context used for link name access. This context is
- * special in that it is not associated with an mPIPE service domain.
- */
-static gxio_mpipe_context_t *_gxio_get_link_context(void)
-{
- static gxio_mpipe_context_t context;
- static gxio_mpipe_context_t *contextp;
- static int tried_open = 0;
- static DEFINE_MUTEX(mutex);
-
- mutex_lock(&mutex);
-
- if (!tried_open) {
- int i = 0;
- tried_open = 1;
-
- /*
- * "4" here is the maximum possible number of mPIPE shims; it's
- * an exaggeration but we shouldn't ever go beyond 2 anyway.
- */
- for (i = 0; i < 4; i++) {
- char file[80];
-
- snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
- context.fd = hv_dev_open((HV_VirtAddr) file, 0);
- if (context.fd < 0)
- continue;
-
- contextp = &context;
- break;
- }
- }
-
- mutex_unlock(&mutex);
-
- return contextp;
-}
-
-int gxio_mpipe_link_instance(const char *link_name)
-{
- _gxio_mpipe_link_name_t name;
- gxio_mpipe_context_t *context = _gxio_get_link_context();
-
- if (!context)
- return GXIO_ERR_NO_DEVICE;
-
- if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
- return GXIO_ERR_NO_DEVICE;
-
- return gxio_mpipe_info_instance_aux(context, name);
-}
-EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);
-
-int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
-{
- int rv;
- _gxio_mpipe_link_name_t name;
- _gxio_mpipe_link_mac_t mac;
-
- gxio_mpipe_context_t *context = _gxio_get_link_context();
- if (!context)
- return GXIO_ERR_NO_DEVICE;
-
- rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
- if (rv >= 0) {
- if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
- return GXIO_ERR_INVAL_MEMORY_SIZE;
- memcpy(link_mac, mac.mac, sizeof(mac.mac));
- }
-
- return rv;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
-
-int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
- gxio_mpipe_context_t *context, const char *link_name,
- unsigned int flags)
-{
- _gxio_mpipe_link_name_t name;
- int rv;
-
- if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
- return GXIO_ERR_NO_DEVICE;
-
- rv = gxio_mpipe_link_open_aux(context, name, flags);
- if (rv < 0)
- return rv;
-
- link->context = context;
- link->channel = rv >> 8;
- link->mac = rv & 0xFF;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
-
-int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
-{
- return gxio_mpipe_link_close_aux(link->context, link->mac);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
-
-int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
- int64_t val)
-{
- return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
- val);
-}
-
-EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
diff --git a/arch/tile/gxio/trio.c b/arch/tile/gxio/trio.c
deleted file mode 100644
index 69f0b8df3ce3..000000000000
--- a/arch/tile/gxio/trio.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Implementation of trio gxio calls.
- */
-
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/module.h>
-
-#include <gxio/trio.h>
-#include <gxio/iorpc_globals.h>
-#include <gxio/iorpc_trio.h>
-#include <gxio/kiorpc.h>
-
-int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index)
-{
- char file[32];
- int fd;
-
- snprintf(file, sizeof(file), "trio/%d/iorpc", trio_index);
- fd = hv_dev_open((HV_VirtAddr) file, 0);
- if (fd < 0) {
- context->fd = -1;
-
- if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
- return fd;
- else
- return -ENODEV;
- }
-
- context->fd = fd;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_trio_init);
diff --git a/arch/tile/gxio/uart.c b/arch/tile/gxio/uart.c
deleted file mode 100644
index ba585175ef88..000000000000
--- a/arch/tile/gxio/uart.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Implementation of UART gxio calls.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <gxio/uart.h>
-#include <gxio/iorpc_globals.h>
-#include <gxio/iorpc_uart.h>
-#include <gxio/kiorpc.h>
-
-int gxio_uart_init(gxio_uart_context_t *context, int uart_index)
-{
- char file[32];
- int fd;
-
- snprintf(file, sizeof(file), "uart/%d/iorpc", uart_index);
- fd = hv_dev_open((HV_VirtAddr) file, 0);
- if (fd < 0) {
- if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
- return fd;
- else
- return -ENODEV;
- }
-
- context->fd = fd;
-
- /* Map in the MMIO space. */
- context->mmio_base = (void __force *)
- iorpc_ioremap(fd, HV_UART_MMIO_OFFSET, HV_UART_MMIO_SIZE);
-
- if (context->mmio_base == NULL) {
- hv_dev_close(context->fd);
- context->fd = -1;
- return -ENODEV;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_uart_init);
-
-int gxio_uart_destroy(gxio_uart_context_t *context)
-{
- iounmap((void __force __iomem *)(context->mmio_base));
- hv_dev_close(context->fd);
-
- context->mmio_base = NULL;
- context->fd = -1;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_uart_destroy);
-
-/* UART register write wrapper. */
-void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
- uint64_t word)
-{
- __gxio_mmio_write(context->mmio_base + offset, word);
-}
-
-EXPORT_SYMBOL_GPL(gxio_uart_write);
-
-/* UART register read wrapper. */
-uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset)
-{
- return __gxio_mmio_read(context->mmio_base + offset);
-}
-
-EXPORT_SYMBOL_GPL(gxio_uart_read);
diff --git a/arch/tile/gxio/usb_host.c b/arch/tile/gxio/usb_host.c
deleted file mode 100644
index 785afad7922e..000000000000
--- a/arch/tile/gxio/usb_host.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Implementation of USB gxio calls.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <gxio/iorpc_globals.h>
-#include <gxio/iorpc_usb_host.h>
-#include <gxio/kiorpc.h>
-#include <gxio/usb_host.h>
-
-int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
- int is_ehci)
-{
- char file[32];
- int fd;
-
- if (is_ehci)
- snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci",
- usb_index);
- else
- snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci",
- usb_index);
-
- fd = hv_dev_open((HV_VirtAddr) file, 0);
- if (fd < 0) {
- if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
- return fd;
- else
- return -ENODEV;
- }
-
- context->fd = fd;
-
-	/* Map in the MMIO space. */
- context->mmio_base =
- (void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE);
-
-	if (context->mmio_base == NULL) {
-		hv_dev_close(context->fd);
-		context->fd = -1;
-		return -ENODEV;
-	}
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_usb_host_init);
-
-int gxio_usb_host_destroy(gxio_usb_host_context_t *context)
-{
- iounmap((void __force __iomem *)(context->mmio_base));
- hv_dev_close(context->fd);
-
- context->mmio_base = NULL;
- context->fd = -1;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);
-
-void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context)
-{
- return context->mmio_base;
-}
-
-EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);
-
-size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context)
-{
- return HV_USB_HOST_MMIO_SIZE;
-}
-
-EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len);
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h
deleted file mode 100644
index 904538e754d8..000000000000
--- a/arch/tile/include/arch/mpipe.h
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_MPIPE_H__
-#define __ARCH_MPIPE_H__
-
-#include <arch/abi.h>
-#include <arch/mpipe_def.h>
-
-#ifndef __ASSEMBLER__
-
-/*
- * MMIO Ingress DMA Release Region Address.
- * This is a description of the physical addresses used to manipulate ingress
- * credit counters. Accesses to this address space should use an address of
- * this form and a value like that specified in IDMA_RELEASE_REGION_VAL.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Reserved. */
- uint_reg_t __reserved_0 : 3;
- /* NotifRing to be released */
- uint_reg_t ring : 8;
- /* Bucket to be released */
- uint_reg_t bucket : 13;
- /* Enable NotifRing release */
- uint_reg_t ring_enable : 1;
- /* Enable Bucket release */
- uint_reg_t bucket_enable : 1;
- /*
- * This field of the address selects the region (address space) to be
- * accessed. For the iDMA release region, this field must be 4.
- */
- uint_reg_t region : 3;
- /* Reserved. */
- uint_reg_t __reserved_1 : 6;
-    /* This field of the address indexes the 32-entry service domain table. */
- uint_reg_t svc_dom : 5;
- /* Reserved. */
- uint_reg_t __reserved_2 : 24;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_2 : 24;
- uint_reg_t svc_dom : 5;
- uint_reg_t __reserved_1 : 6;
- uint_reg_t region : 3;
- uint_reg_t bucket_enable : 1;
- uint_reg_t ring_enable : 1;
- uint_reg_t bucket : 13;
- uint_reg_t ring : 8;
- uint_reg_t __reserved_0 : 3;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_IDMA_RELEASE_REGION_ADDR_t;
-
-/*
- * MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket.
- * Provides release of the associated NotifRing. The address of the MMIO
- * operation is described in IDMA_RELEASE_REGION_ADDR.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * Number of packets being released. The load balancer's count of
- * inflight packets will be decremented by this amount for the associated
- * Bucket and/or NotifRing
- */
- uint_reg_t count : 16;
- /* Reserved. */
- uint_reg_t __reserved : 48;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved : 48;
- uint_reg_t count : 16;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_IDMA_RELEASE_REGION_VAL_t;
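-/*
- * Illustrative sketch (not machine-generated): to return one ingress
- * credit to NotifRing 5, software composes an ADDR with the iDMA region
- * selected and ring_enable set, then stores a VAL with count = 1.  The
- * "mmio_base" name below is an assumed service-domain MMIO mapping:
- *
- *	MPIPE_IDMA_RELEASE_REGION_ADDR_t addr = { .word = 0 };
- *	MPIPE_IDMA_RELEASE_REGION_VAL_t val = { .word = 0 };
- *
- *	addr.region = MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- *	addr.ring = 5;
- *	addr.ring_enable = 1;
- *	val.count = 1;
- *	__gxio_mmio_write(mmio_base + addr.word, val.word);
- */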
-
-/*
- * MMIO Buffer Stack Manager Region Address.
- * This MMIO region is used for posting or fetching buffers to/from the
- * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
- * the top of stack if one is available. On an MMIO store, this pushes a
- * buffer to the stack. The value read or written is described in
- * BSM_REGION_VAL.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Reserved. */
- uint_reg_t __reserved_0 : 3;
- /* BufferStack being accessed. */
- uint_reg_t stack : 5;
- /* Reserved. */
- uint_reg_t __reserved_1 : 18;
- /*
- * This field of the address selects the region (address space) to be
- * accessed. For the buffer stack manager region, this field must be 6.
- */
- uint_reg_t region : 3;
- /* Reserved. */
- uint_reg_t __reserved_2 : 6;
-    /* This field of the address indexes the 32-entry service domain table. */
- uint_reg_t svc_dom : 5;
- /* Reserved. */
- uint_reg_t __reserved_3 : 24;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_3 : 24;
- uint_reg_t svc_dom : 5;
- uint_reg_t __reserved_2 : 6;
- uint_reg_t region : 3;
- uint_reg_t __reserved_1 : 18;
- uint_reg_t stack : 5;
- uint_reg_t __reserved_0 : 3;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_BSM_REGION_ADDR_t;
-
-/*
- * MMIO Buffer Stack Manager Region Value.
- * This MMIO region is used for posting or fetching buffers to/from the
- * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
- * the top of stack if one is available. On an MMIO store, this pushes a
- * buffer to the stack. The address of the MMIO operation is described in
- * BSM_REGION_ADDR.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Reserved. */
- uint_reg_t __reserved_0 : 7;
- /*
- * Base virtual address of the buffer. Must be sign extended by consumer.
- */
- int_reg_t va : 35;
- /* Reserved. */
- uint_reg_t __reserved_1 : 6;
- /*
- * Index of the buffer stack to which this buffer belongs. Ignored on
- * writes since the offset bits specify the stack being accessed.
- */
- uint_reg_t stack_idx : 5;
- /* Reserved. */
- uint_reg_t __reserved_2 : 3;
- /*
- * Instance ID. For devices that support automatic buffer return between
- * mPIPE instances, this field indicates the buffer owner. If the INST
- * field does not match the mPIPE's instance number when a packet is
- * egressed, buffers with HWB set will be returned to the other mPIPE
- * instance. Note that not all devices support multi-mPIPE buffer
- * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
- * whether the INST field in the buffer descriptor is populated by iDMA
- * hardware. This field is ignored on writes.
- */
- uint_reg_t inst : 2;
- /*
- * Reads as one to indicate that this is a hardware managed buffer.
- * Ignored on writes since all buffers on a given stack are the same size.
- */
- uint_reg_t hwb : 1;
- /*
- * Encoded size of buffer (ignored on writes):
- * 0 = 128 bytes
- * 1 = 256 bytes
- * 2 = 512 bytes
- * 3 = 1024 bytes
- * 4 = 1664 bytes
- * 5 = 4096 bytes
- * 6 = 10368 bytes
- * 7 = 16384 bytes
- */
- uint_reg_t size : 3;
- /*
- * Valid indication for the buffer. Ignored on writes.
- * 0 : Valid buffer descriptor popped from stack.
- * 3 : Could not pop a buffer from the stack. Either the stack is empty,
- * or the hardware's prefetch buffer is empty for this stack.
- */
- uint_reg_t c : 2;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t c : 2;
- uint_reg_t size : 3;
- uint_reg_t hwb : 1;
- uint_reg_t inst : 2;
- uint_reg_t __reserved_2 : 3;
- uint_reg_t stack_idx : 5;
- uint_reg_t __reserved_1 : 6;
- int_reg_t va : 35;
- uint_reg_t __reserved_0 : 7;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_BSM_REGION_VAL_t;
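-/*
- * Illustrative sketch (not machine-generated): popping one buffer is a
- * single MMIO load through the BSM region; the .c field says whether a
- * valid descriptor came back, and the signed .va bit-field plus the VA
- * shift perform the required sign-extension.  "mmio_base", "stack_idx"
- * and "buf" are assumed names:
- *
- *	MPIPE_BSM_REGION_ADDR_t addr = { .word = 0 };
- *	MPIPE_BSM_REGION_VAL_t val;
- *
- *	addr.region = MPIPE_MMIO_ADDR__REGION_VAL_BSM;
- *	addr.stack = stack_idx;
- *	val.word = __gxio_mmio_read(mmio_base + addr.word);
- *	if (val.c == 0)
- *		buf = (void *)((unsigned long)val.va <<
- *			       MPIPE_BSM_REGION_VAL__VA_SHIFT);
- */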
-
-/*
- * MMIO Egress DMA Post Region Address.
- * Used to post descriptor locations to the eDMA descriptor engine. The
- * value to be written is described in EDMA_POST_REGION_VAL
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Reserved. */
- uint_reg_t __reserved_0 : 3;
- /* eDMA ring being accessed */
- uint_reg_t ring : 6;
- /* Reserved. */
- uint_reg_t __reserved_1 : 17;
- /*
- * This field of the address selects the region (address space) to be
- * accessed. For the egress DMA post region, this field must be 5.
- */
- uint_reg_t region : 3;
- /* Reserved. */
- uint_reg_t __reserved_2 : 6;
-    /* This field of the address indexes the 32-entry service domain table. */
- uint_reg_t svc_dom : 5;
- /* Reserved. */
- uint_reg_t __reserved_3 : 24;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_3 : 24;
- uint_reg_t svc_dom : 5;
- uint_reg_t __reserved_2 : 6;
- uint_reg_t region : 3;
- uint_reg_t __reserved_1 : 17;
- uint_reg_t ring : 6;
- uint_reg_t __reserved_0 : 3;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_EDMA_POST_REGION_ADDR_t;
-
-/*
- * MMIO Egress DMA Post Region Value.
- * Used to post descriptor locations to the eDMA descriptor engine. The
- * address is described in EDMA_POST_REGION_ADDR.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * For writes, this specifies the current ring tail pointer prior to any
- * post. For example, to post 1 or more descriptors starting at location
- * 23, this would contain 23 (not 24). On writes, this index must be
- * masked based on the ring size. The new tail pointer after this post
- * is COUNT+RING_IDX (masked by the ring size).
- *
- * For reads, this provides the hardware descriptor fetcher's head
- * pointer. The descriptors prior to the head pointer, however, may not
- * yet have been processed so this indicator is only used to determine
- * how full the ring is and if software may post more descriptors.
- */
- uint_reg_t ring_idx : 16;
- /*
-     * For writes, this specifies the number of contiguous descriptors
-     * that are being posted. Software may post up to RingSize descriptors
-     * with a single MMIO store. A zero in this field on a write will
-     * "wake up" an eDMA ring and cause it to fetch descriptors regardless
-     * of the hardware's current view of the state of the tail pointer.
- *
- * For reads, this field provides a rolling count of the number of
- * descriptors that have been completely processed. This may be used by
- * software to determine when buffers associated with a descriptor may be
- * returned or reused. When the ring's flush bit is cleared by software
- * (after having been set by HW or SW), the COUNT will be cleared.
- */
- uint_reg_t count : 16;
- /*
- * For writes, this specifies the generation number of the tail being
- * posted. Note that if tail+cnt wraps to the beginning of the ring, the
- * eDMA hardware assumes that the descriptors posted at the beginning of
- * the ring are also valid so it is okay to post around the wrap point.
- *
- * For reads, this is the current generation number. Valid descriptors
- * will have the inverse of this generation number.
- */
- uint_reg_t gen : 1;
- /* Reserved. */
- uint_reg_t __reserved : 31;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved : 31;
- uint_reg_t gen : 1;
- uint_reg_t count : 16;
- uint_reg_t ring_idx : 16;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_EDMA_POST_REGION_VAL_t;
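-/*
- * Illustrative sketch (not machine-generated): to post two descriptors
- * whose first slot is ring index 23, write the tail index prior to the
- * post, the count, and the current generation.  "ring_size", "cur_gen"
- * (toggled each time the tail wraps) and "post_mmio" are assumed names:
- *
- *	MPIPE_EDMA_POST_REGION_VAL_t val = { .word = 0 };
- *
- *	val.ring_idx = 23 & (ring_size - 1);
- *	val.count = 2;
- *	val.gen = cur_gen;
- *	__gxio_mmio_write(post_mmio, val.word);
- */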
-
-/*
- * Load Balancer Bucket Status Data.
- * Read/Write data for load balancer Bucket-Status Table. 4160 entries
- * indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* NotifRing currently assigned to this bucket. */
- uint_reg_t notifring : 8;
- /* Current reference count. */
- uint_reg_t count : 16;
- /* Group associated with this bucket. */
- uint_reg_t group : 5;
- /* Mode select for this bucket. */
- uint_reg_t mode : 3;
- /* Reserved. */
- uint_reg_t __reserved : 32;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved : 32;
- uint_reg_t mode : 3;
- uint_reg_t group : 5;
- uint_reg_t count : 16;
- uint_reg_t notifring : 8;
-#endif
- };
-
- uint_reg_t word;
-} MPIPE_LBL_INIT_DAT_BSTS_TBL_t;
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_MPIPE_H__) */
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h
deleted file mode 100644
index 84022ac5fe82..000000000000
--- a/arch/tile/include/arch/mpipe_constants.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ARCH_MPIPE_CONSTANTS_H__
-#define __ARCH_MPIPE_CONSTANTS_H__
-
-#define MPIPE_NUM_CLASSIFIERS 16
-#define MPIPE_CLS_MHZ 1200
-
-#define MPIPE_NUM_EDMA_RINGS 64
-
-#define MPIPE_NUM_SGMII_MACS 16
-#define MPIPE_NUM_XAUI_MACS 16
-#define MPIPE_NUM_LOOPBACK_CHANNELS 4
-#define MPIPE_NUM_NON_LB_CHANNELS 28
-
-#define MPIPE_NUM_IPKT_BLOCKS 1536
-
-#define MPIPE_NUM_BUCKETS 4160
-
-#define MPIPE_NUM_NOTIF_RINGS 256
-
-#define MPIPE_NUM_NOTIF_GROUPS 32
-
-#define MPIPE_NUM_TLBS_PER_ASID 16
-#define MPIPE_TLB_IDX_WIDTH 4
-
-#define MPIPE_MMIO_NUM_SVC_DOM 32
-
-#endif /* __ARCH_MPIPE_CONSTANTS_H__ */
diff --git a/arch/tile/include/arch/mpipe_def.h b/arch/tile/include/arch/mpipe_def.h
deleted file mode 100644
index c3d30217fc66..000000000000
--- a/arch/tile/include/arch/mpipe_def.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_MPIPE_DEF_H__
-#define __ARCH_MPIPE_DEF_H__
-#define MPIPE_MMIO_ADDR__REGION_SHIFT 26
-#define MPIPE_MMIO_ADDR__REGION_VAL_CFG 0x0
-#define MPIPE_MMIO_ADDR__REGION_VAL_IDMA 0x4
-#define MPIPE_MMIO_ADDR__REGION_VAL_EDMA 0x5
-#define MPIPE_MMIO_ADDR__REGION_VAL_BSM 0x6
-#define MPIPE_BSM_REGION_VAL__VA_SHIFT 7
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128 0x0
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256 0x1
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512 0x2
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024 0x3
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664 0x4
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096 0x5
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368 0x6
-#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 0x7
-#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA 0x0
-#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED 0x1
-#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK 0x2
-#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY 0x3
-#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND 0x7
-#define MPIPE_LBL_NR_STATE__FIRST_WORD 0x2138
-#endif /* !defined(__ARCH_MPIPE_DEF_H__) */
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h
deleted file mode 100644
index 13b3c4300e50..000000000000
--- a/arch/tile/include/arch/mpipe_shm.h
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_MPIPE_SHM_H__
-#define __ARCH_MPIPE_SHM_H__
-
-#include <arch/abi.h>
-#include <arch/mpipe_shm_def.h>
-
-#ifndef __ASSEMBLER__
-/**
- * MPIPE eDMA Descriptor.
- * The eDMA descriptor is written by software and consumed by hardware. It
- * is used to specify the location of egress packet data to be sent out of
- * the chip via one of the packet interfaces.
- */
-
-__extension__
-typedef union
-{
- struct
- {
- /* Word 0 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Generation number. Used to indicate a valid descriptor in ring. When
- * a new descriptor is written into the ring, software must toggle this
- * bit. The net effect is that the GEN bit being written into new
- * descriptors toggles each time the ring tail pointer wraps.
- */
- uint_reg_t gen : 1;
- /**
- * For devices with EDMA reorder support, this field allows the
- * descriptor to select the egress FIFO. The associated DMA ring must
- * have ALLOW_EFIFO_SEL enabled.
- */
- uint_reg_t efifo_sel : 6;
- /** Reserved. Must be zero. */
- uint_reg_t r0 : 1;
- /** Checksum generation enabled for this transfer. */
- uint_reg_t csum : 1;
- /**
- * Nothing to be sent. Used, for example, when software has dropped a
- * packet but still wishes to return all of the associated buffers.
- */
- uint_reg_t ns : 1;
- /**
- * Notification interrupt will be delivered when packet has been egressed.
- */
- uint_reg_t notif : 1;
- /**
- * Boundary indicator. When 1, this transfer includes the EOP for this
- * command. Must be clear on all but the last descriptor for an egress
- * packet.
- */
- uint_reg_t bound : 1;
- /** Reserved. Must be zero. */
- uint_reg_t r1 : 4;
- /**
- * Number of bytes to be sent for this descriptor. When zero, no data
- * will be moved and the buffer descriptor will be ignored. If the
- * buffer descriptor indicates that it is chained, the low 7 bits of the
- * VA indicate the offset within the first buffer (e.g. 127 bytes is the
- * maximum offset into the first buffer). If the size exceeds a single
- * buffer, subsequent buffer descriptors will be fetched prior to
- * processing the next eDMA descriptor in the ring.
- */
- uint_reg_t xfer_size : 14;
- /** Reserved. Must be zero. */
- uint_reg_t r2 : 2;
- /**
- * Destination of checksum relative to CSUM_START relative to the first
- * byte moved by this descriptor. Must be zero if CSUM=0 in this
- * descriptor. Must be less than XFER_SIZE (e.g. the first byte of the
- * CSUM_DEST must be within the span of this descriptor).
- */
- uint_reg_t csum_dest : 8;
- /**
- * Start byte of checksum relative to the first byte moved by this
- * descriptor. If this is not the first descriptor for the egress
- * packet, CSUM_START is still relative to the first byte in this
- * descriptor. Must be zero if CSUM=0 in this descriptor.
- */
- uint_reg_t csum_start : 8;
- /**
-     * Initial value for 16-bit 1's complement checksum if enabled via CSUM.
-     * Specified in network order. That is, bits[7:0] will be added to the
-     * byte pointed to by CSUM_START and bits[15:8] will be added to the byte
-     * pointed to by CSUM_START+1 (with appropriate 1's complement carries).
- * Must be zero if CSUM=0 in this descriptor.
- */
- uint_reg_t csum_seed : 16;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t csum_seed : 16;
- uint_reg_t csum_start : 8;
- uint_reg_t csum_dest : 8;
- uint_reg_t r2 : 2;
- uint_reg_t xfer_size : 14;
- uint_reg_t r1 : 4;
- uint_reg_t bound : 1;
- uint_reg_t notif : 1;
- uint_reg_t ns : 1;
- uint_reg_t csum : 1;
- uint_reg_t r0 : 1;
- uint_reg_t efifo_sel : 6;
- uint_reg_t gen : 1;
-#endif
-
- /* Word 1 */
-
-#ifndef __BIG_ENDIAN__
- /** Virtual address. Must be sign extended by consumer. */
- int_reg_t va : 42;
- /** Reserved. */
- uint_reg_t __reserved_0 : 6;
- /** Index of the buffer stack to which this buffer belongs. */
- uint_reg_t stack_idx : 5;
- /** Reserved. */
- uint_reg_t __reserved_1 : 3;
- /**
- * Instance ID. For devices that support automatic buffer return between
- * mPIPE instances, this field indicates the buffer owner. If the INST
- * field does not match the mPIPE's instance number when a packet is
- * egressed, buffers with HWB set will be returned to the other mPIPE
- * instance. Note that not all devices support multi-mPIPE buffer
- * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
- * whether the INST field in the buffer descriptor is populated by iDMA
- * hardware.
- */
- uint_reg_t inst : 2;
- /**
- * Always set to one by hardware in iDMA packet descriptors. For eDMA,
- * indicates whether the buffer will be released to the buffer stack
- * manager. When 0, software is responsible for releasing the buffer.
- */
- uint_reg_t hwb : 1;
- /**
- * Encoded size of buffer. Set by the ingress hardware for iDMA packet
- * descriptors. For eDMA descriptors, indicates the buffer size if .c
- * indicates a chained packet. If an eDMA descriptor is not chained and
- * the .hwb bit is not set, this field is ignored and the size is
- * specified by the .xfer_size field.
- * 0 = 128 bytes
- * 1 = 256 bytes
- * 2 = 512 bytes
- * 3 = 1024 bytes
- * 4 = 1664 bytes
- * 5 = 4096 bytes
- * 6 = 10368 bytes
- * 7 = 16384 bytes
- */
- uint_reg_t size : 3;
- /**
- * Chaining configuration for the buffer. Indicates that an ingress
- * packet or egress command is chained across multiple buffers, with each
- * buffer's size indicated by the .size field.
- */
- uint_reg_t c : 2;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t c : 2;
- uint_reg_t size : 3;
- uint_reg_t hwb : 1;
- uint_reg_t inst : 2;
- uint_reg_t __reserved_1 : 3;
- uint_reg_t stack_idx : 5;
- uint_reg_t __reserved_0 : 6;
- int_reg_t va : 42;
-#endif
-
- };
-
- /** Word access */
- uint_reg_t words[2];
-} MPIPE_EDMA_DESC_t;
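-/**
- * Illustrative sketch (not machine-generated): the minimal fields for a
- * single, unchained, no-checksum egress descriptor of "len" bytes at
- * virtual address "buf" ("len", "buf" and "cur_gen" are assumed names):
- *
- *	MPIPE_EDMA_DESC_t desc = {{ 0 }};
- *
- *	desc.gen = cur_gen;
- *	desc.bound = 1;
- *	desc.xfer_size = len;
- *	desc.va = (long)buf;
- *	desc.c = MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED;
- */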
-
-/**
- * MPIPE Packet Descriptor.
- * The packet descriptor is filled by the mPIPE's classification,
- * load-balancing, and buffer management services. Some fields are consumed
- * by mPIPE hardware, and others are consumed by Tile software.
- */
-
-__extension__
-typedef union
-{
- struct
- {
- /* Word 0 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Notification ring into which this packet descriptor is written.
- * Typically written by load balancer, but can be overridden by
- * classification program if NR is asserted.
- */
- uint_reg_t notif_ring : 8;
- /** Source channel for this packet. Written by mPIPE DMA hardware. */
- uint_reg_t channel : 5;
- /** Reserved. */
- uint_reg_t __reserved_0 : 1;
- /**
- * MAC Error.
- * Generated by the MAC interface. Asserted if there was an overrun of
- * the MAC's receive FIFO. This condition generally only occurs if the
- * mPIPE clock is running too slowly.
- */
- uint_reg_t me : 1;
- /**
- * Truncation Error.
-     * Written by the iDMA hardware. Asserted if packet was truncated due to
-     * insufficient space in the iPkt buffer.
- */
- uint_reg_t tr : 1;
- /**
- * Written by the iDMA hardware. Indicates the number of bytes written
- * to Tile memory. In general, this is the actual size of the packet as
- * received from the MAC. But if the packet is truncated due to running
- * out of buffers or due to the iPkt buffer filling up, then the L2_SIZE
- * will be reduced to reflect the actual number of valid bytes written to
- * Tile memory.
- */
- uint_reg_t l2_size : 14;
- /**
- * CRC Error.
- * Generated by the MAC. Asserted if MAC indicated an L2 CRC error or
- * other L2 error (bad length etc.) on the packet.
- */
- uint_reg_t ce : 1;
- /**
- * Cut Through.
- * Written by the iDMA hardware. Asserted if packet was not completely
- * received before being sent to classifier. L2_Size will indicate
- * number of bytes received so far.
- */
- uint_reg_t ct : 1;
- /**
- * Written by the classification program. Used by the load balancer to
- * select the ring into which this packet descriptor is written.
- */
- uint_reg_t bucket_id : 13;
- /** Reserved. */
- uint_reg_t __reserved_1 : 3;
- /**
- * Checksum.
- * Written by classification program. When 1, the checksum engine will
- * perform checksum based on the CSUM_SEED, CSUM_START, and CSUM_BYTES
- * fields. The result will be placed in CSUM_VAL.
- */
- uint_reg_t cs : 1;
- /**
- * Notification Ring Select.
- * Written by the classification program. When 1, the NotifRingIDX is
- * set by classification program rather than being set by load balancer.
- */
- uint_reg_t nr : 1;
- /**
- * Written by classification program. Indicates whether packet and
- * descriptor should both be dropped, both be delivered, or only the
- * descriptor should be delivered.
- */
- uint_reg_t dest : 2;
- /**
- * General Purpose Sequence Number Enable.
- * Written by the classification program. When 1, the GP_SQN_SEL field
- * contains the sequence number selector and the GP_SQN field will be
- * replaced with the associated sequence number. When clear, the GP_SQN
-     * field is left intact and may be used as "Custom" bytes.
- */
- uint_reg_t sq : 1;
- /**
- * TimeStamp Enable.
-     * Enable TimeStamp insertion. When clear, the timestamp field may be
-     * filled with custom data by classifier. When set, hardware inserts the
- * with custom data by classifier. When set, hardware inserts the
- * timestamp when the start of packet is received from the MAC.
- */
- uint_reg_t ts : 1;
- /**
- * Packet Sequence Number Enable.
-     * Enable PacketSQN insertion. When clear, the PacketSQN field may be
-     * filled with custom data by classifier. When set, hardware inserts the
- * with custom data by classifier. When set, hardware inserts the packet
- * sequence number when the packet descriptor is written to a
- * notification ring.
- */
- uint_reg_t ps : 1;
- /**
- * Buffer Error.
- * Written by the iDMA hardware. Asserted if iDMA ran out of buffers
- * while writing the packet. Software must still return any buffer
- * descriptors whose C field indicates a valid descriptor was consumed.
- */
- uint_reg_t be : 1;
- /**
- * Written by the classification program. The associated counter is
- * incremented when the packet is sent.
- */
- uint_reg_t ctr0 : 5;
- /** Reserved. */
- uint_reg_t __reserved_2 : 3;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_2 : 3;
- uint_reg_t ctr0 : 5;
- uint_reg_t be : 1;
- uint_reg_t ps : 1;
- uint_reg_t ts : 1;
- uint_reg_t sq : 1;
- uint_reg_t dest : 2;
- uint_reg_t nr : 1;
- uint_reg_t cs : 1;
- uint_reg_t __reserved_1 : 3;
- uint_reg_t bucket_id : 13;
- uint_reg_t ct : 1;
- uint_reg_t ce : 1;
- uint_reg_t l2_size : 14;
- uint_reg_t tr : 1;
- uint_reg_t me : 1;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t channel : 5;
- uint_reg_t notif_ring : 8;
-#endif
-
- /* Word 1 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Written by the classification program. The associated counter is
- * incremented when the packet is sent.
- */
- uint_reg_t ctr1 : 5;
- /** Reserved. */
- uint_reg_t __reserved_3 : 3;
- /**
- * Written by classification program. Indicates the start byte for
- * checksum. Relative to 1st byte received from MAC.
- */
- uint_reg_t csum_start : 8;
- /**
- * Checksum seed written by classification program. Overwritten with
- * resultant checksum if CS bit is asserted. The endianness of the CSUM
-     * value bits when viewed by Tile software matches the packet byte order.
-     * That is, bits[7:0] of the resulting checksum value correspond to
-     * earlier (more significant) bytes in the packet. To keep classifier
-     * software from having to byte swap the CSUM_SEED, the iDMA checksum
- * engine byte swaps the classifier's result before seeding the checksum
- * calculation. Thus, the CSUM_START byte of packet data is added to
- * bits[15:8] of the CSUM_SEED field generated by the classifier. This
- * byte swap will be visible to Tile software if the CS bit is clear.
- */
- uint_reg_t csum_seed_val : 16;
- /**
- * Written by the classification program. Not interpreted by mPIPE
- * hardware.
- */
- uint_reg_t custom0 : 32;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t custom0 : 32;
- uint_reg_t csum_seed_val : 16;
- uint_reg_t csum_start : 8;
- uint_reg_t __reserved_3 : 3;
- uint_reg_t ctr1 : 5;
-#endif
-
- /* Word 2 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Written by the classification program. Not interpreted by mPIPE
- * hardware.
- */
- uint_reg_t custom1 : 64;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t custom1 : 64;
-#endif
-
- /* Word 3 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Written by the classification program. Not interpreted by mPIPE
- * hardware.
- */
- uint_reg_t custom2 : 64;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t custom2 : 64;
-#endif
-
- /* Word 4 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Written by the classification program. Not interpreted by mPIPE
- * hardware.
- */
- uint_reg_t custom3 : 64;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t custom3 : 64;
-#endif
-
- /* Word 5 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Sequence number applied when packet is distributed. Classifier
- * selects which sequence number is to be applied by writing the 13-bit
- * SQN-selector into this field. For devices that support EXT_SQN (as
- * indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to
-     * 32 bits via the IDMA_CTL.EXT_SQN register. In this case the
- * PACKET_SQN will be reduced to 32 bits.
- */
- uint_reg_t gp_sqn : 16;
- /**
- * Written by notification hardware. The packet sequence number is
- * incremented for each packet that wasn't dropped.
- */
- uint_reg_t packet_sqn : 48;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t packet_sqn : 48;
- uint_reg_t gp_sqn : 16;
-#endif
-
- /* Word 6 */
-
-#ifndef __BIG_ENDIAN__
- /**
- * Written by hardware when the start-of-packet is received by the mPIPE
- * from the MAC. This is the nanoseconds part of the packet timestamp.
- */
- uint_reg_t time_stamp_ns : 32;
- /**
- * Written by hardware when the start-of-packet is received by the mPIPE
- * from the MAC. This is the seconds part of the packet timestamp.
- */
- uint_reg_t time_stamp_sec : 32;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t time_stamp_sec : 32;
- uint_reg_t time_stamp_ns : 32;
-#endif
-
- /* Word 7 */
-
-#ifndef __BIG_ENDIAN__
- /** Virtual address. Must be sign extended by consumer. */
- int_reg_t va : 42;
- /** Reserved. */
- uint_reg_t __reserved_4 : 6;
- /** Index of the buffer stack to which this buffer belongs. */
- uint_reg_t stack_idx : 5;
- /** Reserved. */
- uint_reg_t __reserved_5 : 3;
- /**
- * Instance ID. For devices that support automatic buffer return between
- * mPIPE instances, this field indicates the buffer owner. If the INST
- * field does not match the mPIPE's instance number when a packet is
- * egressed, buffers with HWB set will be returned to the other mPIPE
- * instance. Note that not all devices support multi-mPIPE buffer
- * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
- * whether the INST field in the buffer descriptor is populated by iDMA
- * hardware.
- */
- uint_reg_t inst : 2;
- /**
- * Always set to one by hardware in iDMA packet descriptors. For eDMA,
- * indicates whether the buffer will be released to the buffer stack
- * manager. When 0, software is responsible for releasing the buffer.
- */
- uint_reg_t hwb : 1;
- /**
- * Encoded size of buffer. Set by the ingress hardware for iDMA packet
- * descriptors. For eDMA descriptors, indicates the buffer size if .c
- * indicates a chained packet. If an eDMA descriptor is not chained and
- * the .hwb bit is not set, this field is ignored and the size is
- * specified by the .xfer_size field.
- * 0 = 128 bytes
- * 1 = 256 bytes
- * 2 = 512 bytes
- * 3 = 1024 bytes
- * 4 = 1664 bytes
- * 5 = 4096 bytes
- * 6 = 10368 bytes
- * 7 = 16384 bytes
- */
- uint_reg_t size : 3;
- /**
- * Chaining configuration for the buffer. Indicates that an ingress
- * packet or egress command is chained across multiple buffers, with each
- * buffer's size indicated by the .size field.
- */
- uint_reg_t c : 2;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t c : 2;
- uint_reg_t size : 3;
- uint_reg_t hwb : 1;
- uint_reg_t inst : 2;
- uint_reg_t __reserved_5 : 3;
- uint_reg_t stack_idx : 5;
- uint_reg_t __reserved_4 : 6;
- int_reg_t va : 42;
-#endif
-
- };
-
- /** Word access */
- uint_reg_t words[8];
-} MPIPE_PDESC_t;
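-/**
- * Illustrative sketch (not machine-generated): a receive path usually
- * checks the error bits and then recovers the payload address and
- * length.  "idesc", "data", "len" and the "handle_error" helper are
- * assumed names:
- *
- *	if (idesc->be || idesc->me || idesc->tr || idesc->ce)
- *		handle_error(idesc);
- *	data = (void *)(long)idesc->va;
- *	len = idesc->l2_size;
- */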
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_MPIPE_SHM_H__) */
diff --git a/arch/tile/include/arch/mpipe_shm_def.h b/arch/tile/include/arch/mpipe_shm_def.h
deleted file mode 100644
index 6124d39c8318..000000000000
--- a/arch/tile/include/arch/mpipe_shm_def.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_MPIPE_SHM_DEF_H__
-#define __ARCH_MPIPE_SHM_DEF_H__
-#define MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED 0x0
-#define MPIPE_EDMA_DESC_WORD1__C_VAL_CHAINED 0x1
-#define MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY 0x2
-#define MPIPE_EDMA_DESC_WORD1__C_VAL_INVALID 0x3
-#endif /* !defined(__ARCH_MPIPE_SHM_DEF_H__) */
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
deleted file mode 100644
index 2de83e7aff3e..000000000000
--- a/arch/tile/include/arch/spr_def.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef __ARCH_SPR_DEF_H__
-#define __ARCH_SPR_DEF_H__
-
-#include <uapi/arch/spr_def.h>
-
-/*
- * In addition to including the proper base SPR definition file, depending
- * on machine architecture, this file defines several macros which allow
- * kernel code to use protection-level dependent SPRs without worrying
- * about which PL it's running at. In these macros, the PL that the SPR
- * or interrupt number applies to is replaced by K.
- */
-
-#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
-#error CONFIG_KERNEL_PL must be 1 or 2
-#endif
-
-/* Concatenate 4 strings. */
-#define __concat4(a, b, c, d) a ## b ## c ## d
-#define _concat4(a, b, c, d) __concat4(a, b, c, d)
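-/*
- * For example (illustrative): with CONFIG_KERNEL_PL == 2, the
- * definitions below expand SPR_SYSTEM_SAVE_K_0 to SPR_SYSTEM_SAVE_2_0
- * and INT_INTCTRL_K to INT_INTCTRL_2.
- */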
-
-#ifdef __tilegx__
-
-/* TILE-Gx dependent, protection-level dependent SPRs. */
-
-#define SPR_INTERRUPT_MASK_K \
- _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
-#define SPR_INTERRUPT_MASK_SET_K \
- _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
-#define SPR_INTERRUPT_MASK_RESET_K \
- _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_INTERRUPT_VECTOR_BASE_K \
- _concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
-
-#define SPR_IPI_MASK_K \
- _concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_RESET_K \
- _concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
- _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_EVENT_K \
- _concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_EVENT_RESET_K \
- _concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_EVENT_SET_K \
- _concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
-#define INT_IPI_K \
- _concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
-
-#define SPR_SINGLE_STEP_CONTROL_K \
- _concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
-#define SPR_SINGLE_STEP_EN_K_K \
- _concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
-#define INT_SINGLE_STEP_K \
- _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
-
-#else
-
-/* TILEPro dependent, protection-level dependent SPRs. */
-
-#define SPR_INTERRUPT_MASK_K_0 \
- _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
-#define SPR_INTERRUPT_MASK_K_1 \
- _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
-#define SPR_INTERRUPT_MASK_SET_K_0 \
- _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
-#define SPR_INTERRUPT_MASK_SET_K_1 \
- _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
-#define SPR_INTERRUPT_MASK_RESET_K_0 \
- _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
-#define SPR_INTERRUPT_MASK_RESET_K_1 \
- _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
-
-#endif
-
-/* Generic protection-level dependent SPRs. */
-
-#define SPR_SYSTEM_SAVE_K_0 \
- _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
-#define SPR_SYSTEM_SAVE_K_1 \
- _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
-#define SPR_SYSTEM_SAVE_K_2 \
- _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
-#define SPR_SYSTEM_SAVE_K_3 \
- _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
-#define SPR_EX_CONTEXT_K_0 \
- _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
-#define SPR_EX_CONTEXT_K_1 \
- _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
-#define SPR_INTCTRL_K_STATUS \
- _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
-#define INT_INTCTRL_K \
- _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
-
-#endif /* __ARCH_SPR_DEF_H__ */
diff --git a/arch/tile/include/arch/trio.h b/arch/tile/include/arch/trio.h
deleted file mode 100644
index c0ddedcae085..000000000000
--- a/arch/tile/include/arch/trio.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_H__
-#define __ARCH_TRIO_H__
-
-#include <arch/abi.h>
-#include <arch/trio_def.h>
-
-#ifndef __ASSEMBLER__
-
-/*
- * Map SQ Doorbell Format.
- * This describes the format of the write-only doorbell register that exists
- * in the last 8-bytes of the MAP_SQ_BASE/LIM range. This register is only
- * in the last 8 bytes of the MAP_SQ_BASE/LIM range. This register is only
- * Tile memory space and thus no IO VA translation is required if the last
- * page of the BASE/LIM range is not otherwise written.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * When written with a 1, the associated MAP_SQ region's doorbell
- * interrupt will be triggered once all previous writes are visible to
- * Tile software.
- */
- uint_reg_t doorbell : 1;
- /*
- * When written with a 1, the descriptor at the head of the associated
- * MAP_SQ's FIFO will be dequeued.
- */
- uint_reg_t pop : 1;
- /* Reserved. */
- uint_reg_t __reserved : 62;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved : 62;
- uint_reg_t pop : 1;
- uint_reg_t doorbell : 1;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_MAP_SQ_DOORBELL_FMT_t;
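-/*
- * Illustrative sketch (not machine-generated): from the PCIe side a
- * host driver rings the doorbell with one 8-byte write of this format
- * to the last 8 bytes of the MAP_SQ window ("sq_base" and "sq_len" are
- * assumed names for the mapped window and its length):
- *
- *	TRIO_MAP_SQ_DOORBELL_FMT_t db = { .word = 0 };
- *
- *	db.doorbell = 1;
- *	writeq(db.word, sq_base + sq_len - 8);
- */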
-
-/*
- * Tile PIO Region Configuration - CFG Address Format.
- * This register describes the address format for PIO accesses when the
- * associated region is setup with TYPE=CFG.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Register Address (full byte address). */
- uint_reg_t reg_addr : 12;
- /* Function Number */
- uint_reg_t fn : 3;
- /* Device Number */
- uint_reg_t dev : 5;
- /* BUS Number */
- uint_reg_t bus : 8;
- /* Config Type: 0 for access to directly-attached device. 1 otherwise. */
- uint_reg_t type : 1;
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /*
- * MAC select. This must match the configuration in
- * TILE_PIO_REGION_SETUP.MAC.
- */
- uint_reg_t mac : 2;
- /* Reserved. */
- uint_reg_t __reserved_1 : 32;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_1 : 32;
- uint_reg_t mac : 2;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t type : 1;
- uint_reg_t bus : 8;
- uint_reg_t dev : 5;
- uint_reg_t fn : 3;
- uint_reg_t reg_addr : 12;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t;
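-/*
- * Illustrative sketch (not machine-generated): a config read of the
- * directly-attached device (bus 0, dev 0, fn 0), register 0x10 (BAR0),
- * through a PIO region bound to MAC 0, composes the offset as below
- * ("pio_base" is an assumed mapping of the PIO region):
- *
- *	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg = { .word = 0 };
- *
- *	cfg.reg_addr = 0x10;
- *	cfg.fn = 0;
- *	cfg.dev = 0;
- *	cfg.bus = 0;
- *	cfg.type = 0;
- *	cfg.mac = 0;
- *	val = readl(pio_base + cfg.word);
- */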
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_TRIO_H__) */
diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h
deleted file mode 100644
index 85647e91a458..000000000000
--- a/arch/tile/include/arch/trio_constants.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ARCH_TRIO_CONSTANTS_H__
-#define __ARCH_TRIO_CONSTANTS_H__
-
-#define TRIO_NUM_ASIDS 32
-#define TRIO_NUM_TLBS_PER_ASID 16
-
-#define TRIO_NUM_TPIO_REGIONS 8
-#define TRIO_LOG2_NUM_TPIO_REGIONS 3
-
-#define TRIO_NUM_MAP_MEM_REGIONS 32
-#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5
-#define TRIO_NUM_MAP_SQ_REGIONS 8
-#define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3
-
-#define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6
-
-#define TRIO_NUM_PUSH_DMA_RINGS 64
-
-#define TRIO_NUM_PULL_DMA_RINGS 64
-
-#endif /* __ARCH_TRIO_CONSTANTS_H__ */
diff --git a/arch/tile/include/arch/trio_def.h b/arch/tile/include/arch/trio_def.h
deleted file mode 100644
index e80500317dc4..000000000000
--- a/arch/tile/include/arch/trio_def.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_DEF_H__
-#define __ARCH_TRIO_DEF_H__
-#define TRIO_CFG_REGION_ADDR__REG_SHIFT 0
-#define TRIO_CFG_REGION_ADDR__INTFC_SHIFT 16
-#define TRIO_CFG_REGION_ADDR__INTFC_VAL_TRIO 0x0
-#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE 0x1
-#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD 0x2
-#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED 0x3
-#define TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT 18
-#define TRIO_CFG_REGION_ADDR__PROT_SHIFT 20
-#define TRIO_PIO_REGIONS_ADDR__REGION_SHIFT 32
-#define TRIO_MAP_MEM_REG_INT0 0x1000000000
-#define TRIO_MAP_MEM_REG_INT1 0x1000000008
-#define TRIO_MAP_MEM_REG_INT2 0x1000000010
-#define TRIO_MAP_MEM_REG_INT3 0x1000000018
-#define TRIO_MAP_MEM_REG_INT4 0x1000000020
-#define TRIO_MAP_MEM_REG_INT5 0x1000000028
-#define TRIO_MAP_MEM_REG_INT6 0x1000000030
-#define TRIO_MAP_MEM_REG_INT7 0x1000000038
-#define TRIO_MAP_MEM_LIM__ADDR_SHIFT 12
-#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED 0x0
-#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT 0x1
-#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD 0x2
-#define TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT 30
-#endif /* !defined(__ARCH_TRIO_DEF_H__) */
diff --git a/arch/tile/include/arch/trio_pcie_intfc.h b/arch/tile/include/arch/trio_pcie_intfc.h
deleted file mode 100644
index 0487fdb9d581..000000000000
--- a/arch/tile/include/arch/trio_pcie_intfc.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_PCIE_INTFC_H__
-#define __ARCH_TRIO_PCIE_INTFC_H__
-
-#include <arch/abi.h>
-#include <arch/trio_pcie_intfc_def.h>
-
-#ifndef __ASSEMBLER__
-
-/*
- * Port Configuration.
- * Configuration of the PCIe Port
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Provides the state of the strapping pins for this port. */
- uint_reg_t strap_state : 3;
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /*
- * When 1, the device type will be overridden using OVD_DEV_TYPE_VAL.
- * When 0, the device type is determined based on the STRAP_STATE.
- */
- uint_reg_t ovd_dev_type : 1;
- /* Provides the device type when OVD_DEV_TYPE is 1. */
- uint_reg_t ovd_dev_type_val : 4;
- /* Determines how link is trained. */
- uint_reg_t train_mode : 2;
- /* Reserved. */
- uint_reg_t __reserved_1 : 1;
- /*
- * For PCIe, used to flip physical RX lanes that were not properly wired.
- * This is not the same as lane reversal which is handled automatically
- * during link training. When 0, RX Lane0 must be wired to the link
-     * partner (either to its Lane0 or its LaneN). When RX_LANE_FLIP is 1,
- * the highest numbered lane for this port becomes Lane0 and Lane0 does
- * NOT have to be wired to the link partner.
- */
- uint_reg_t rx_lane_flip : 1;
- /*
- * For PCIe, used to flip physical TX lanes that were not properly wired.
- * This is not the same as lane reversal which is handled automatically
- * during link training. When 0, TX Lane0 must be wired to the link
-     * partner (either to its Lane0 or its LaneN). When TX_LANE_FLIP is 1,
- * the highest numbered lane for this port becomes Lane0 and Lane0 does
- * NOT have to be wired to the link partner.
- */
- uint_reg_t tx_lane_flip : 1;
- /*
- * For StreamIO port, configures the width of the port when TRAIN_MODE is
- * not STRAP.
- */
- uint_reg_t stream_width : 2;
- /*
- * For StreamIO port, configures the rate of the port when TRAIN_MODE is
- * not STRAP.
- */
- uint_reg_t stream_rate : 2;
- /* Reserved. */
- uint_reg_t __reserved_2 : 46;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_2 : 46;
- uint_reg_t stream_rate : 2;
- uint_reg_t stream_width : 2;
- uint_reg_t tx_lane_flip : 1;
- uint_reg_t rx_lane_flip : 1;
- uint_reg_t __reserved_1 : 1;
- uint_reg_t train_mode : 2;
- uint_reg_t ovd_dev_type_val : 4;
- uint_reg_t ovd_dev_type : 1;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t strap_state : 3;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_PCIE_INTFC_PORT_CONFIG_t;
-
-/*
- * Port Status.
- * Status of the PCIe Port. This register applies to the StreamIO port when
- * StreamIO is enabled.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * Indicates the DL state of the port. When 1, the port is up and ready
- * to receive traffic.
- */
- uint_reg_t dl_up : 1;
- /*
- * Indicates the number of times the link has gone down. Clears on read.
- */
- uint_reg_t dl_down_cnt : 7;
- /* Indicates the SERDES PLL has spun up and is providing a valid clock. */
- uint_reg_t clock_ready : 1;
- /* Reserved. */
- uint_reg_t __reserved_0 : 7;
- /* Device revision ID. */
- uint_reg_t device_rev : 8;
- /* Link state (PCIe). */
- uint_reg_t ltssm_state : 6;
- /* Link power management state (PCIe). */
- uint_reg_t pm_state : 3;
- /* Reserved. */
- uint_reg_t __reserved_1 : 31;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_1 : 31;
- uint_reg_t pm_state : 3;
- uint_reg_t ltssm_state : 6;
- uint_reg_t device_rev : 8;
- uint_reg_t __reserved_0 : 7;
- uint_reg_t clock_ready : 1;
- uint_reg_t dl_down_cnt : 7;
- uint_reg_t dl_up : 1;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_PCIE_INTFC_PORT_STATUS_t;
-
-/*
- * Transmit FIFO Control.
- * Contains TX FIFO thresholds. These registers are for diagnostics purposes
- * only. Changing these values causes undefined behavior.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * Almost-Empty level for TX0 data. Typically set to at least
- * roundup(38.0*M/N) where N=tclk frequency and M=MAC symbol rate in MHz
- * for a x4 port (250MHz).
- */
- uint_reg_t tx0_data_ae_lvl : 7;
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /* Almost-Empty level for TX1 data. */
- uint_reg_t tx1_data_ae_lvl : 7;
- /* Reserved. */
- uint_reg_t __reserved_1 : 1;
- /* Almost-Full level for TX0 data. */
- uint_reg_t tx0_data_af_lvl : 7;
- /* Reserved. */
- uint_reg_t __reserved_2 : 1;
- /* Almost-Full level for TX1 data. */
- uint_reg_t tx1_data_af_lvl : 7;
- /* Reserved. */
- uint_reg_t __reserved_3 : 1;
- /* Almost-Full level for TX0 info. */
- uint_reg_t tx0_info_af_lvl : 5;
- /* Reserved. */
- uint_reg_t __reserved_4 : 3;
- /* Almost-Full level for TX1 info. */
- uint_reg_t tx1_info_af_lvl : 5;
- /* Reserved. */
- uint_reg_t __reserved_5 : 3;
- /*
- * This register provides performance adjustment for high bandwidth
- * flows. The MAC will assert almost-full to TRIO if non-posted credits
- * fall below this level. Note that setting this larger than the initial
- * PORT_CREDIT.NPH value will cause READS to never be sent. If the
- * initial credit value from the link partner is smaller than this value
- * when the link comes up, the value will be reset to the initial credit
- * value to prevent lockup.
- */
- uint_reg_t min_np_credits : 8;
- /*
- * This register provides performance adjustment for high bandwidth
- * flows. The MAC will assert almost-full to TRIO if posted credits fall
- * below this level. Note that setting this larger than the initial
- * PORT_CREDIT.PH value will cause WRITES to never be sent. If the
- * initial credit value from the link partner is smaller than this value
- * when the link comes up, the value will be reset to the initial credit
- * value to prevent lockup.
- */
- uint_reg_t min_p_credits : 8;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t min_p_credits : 8;
- uint_reg_t min_np_credits : 8;
- uint_reg_t __reserved_5 : 3;
- uint_reg_t tx1_info_af_lvl : 5;
- uint_reg_t __reserved_4 : 3;
- uint_reg_t tx0_info_af_lvl : 5;
- uint_reg_t __reserved_3 : 1;
- uint_reg_t tx1_data_af_lvl : 7;
- uint_reg_t __reserved_2 : 1;
- uint_reg_t tx0_data_af_lvl : 7;
- uint_reg_t __reserved_1 : 1;
- uint_reg_t tx1_data_ae_lvl : 7;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t tx0_data_ae_lvl : 7;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_PCIE_INTFC_TX_FIFO_CTL_t;
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_TRIO_PCIE_INTFC_H__) */
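Every register in these generated headers follows the same pattern: a union of named bit-fields over one raw 64-bit word, read once from MMIO and then picked apart by field name. A minimal user-space sketch of that pattern against the Port Status layout above; the stripped-down union, the sample raw value, and the little-endian assumption are ours, for illustration only.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t uint_reg_t;            /* stand-in for <arch/abi.h> */

typedef union {                         /* little-endian Port Status layout */
        struct {
                uint_reg_t dl_up        : 1;
                uint_reg_t dl_down_cnt  : 7;   /* clears on read */
                uint_reg_t clock_ready  : 1;
                uint_reg_t __reserved_0 : 7;
                uint_reg_t device_rev   : 8;
                uint_reg_t ltssm_state  : 6;
                uint_reg_t pm_state     : 3;
                uint_reg_t __reserved_1 : 31;
        };
        uint_reg_t word;
} port_status_t;

int main(void)
{
        port_status_t st = { .word = 0x101 };  /* sample: dl_up=1, clock_ready=1 */

        printf("link up %u, down-count %u, clock ready %u\n",
               (unsigned)st.dl_up, (unsigned)st.dl_down_cnt,
               (unsigned)st.clock_ready);
        return 0;
}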
diff --git a/arch/tile/include/arch/trio_pcie_intfc_def.h b/arch/tile/include/arch/trio_pcie_intfc_def.h
deleted file mode 100644
index d3fd6781fb24..000000000000
--- a/arch/tile/include/arch/trio_pcie_intfc_def.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_PCIE_INTFC_DEF_H__
-#define __ARCH_TRIO_PCIE_INTFC_DEF_H__
-#define TRIO_PCIE_INTFC_MAC_INT_STS 0x0000
-#define TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK 0xf000
-#define TRIO_PCIE_INTFC_PORT_CONFIG 0x0018
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_DISABLED 0x0
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT 0x1
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC 0x2
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 0x3
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 0x4
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_XLINK 0x5
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X1 0x6
-#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X4 0x7
-#define TRIO_PCIE_INTFC_PORT_STATUS 0x0020
-#define TRIO_PCIE_INTFC_TX_FIFO_CTL 0x0050
-#endif /* !defined(__ARCH_TRIO_PCIE_INTFC_DEF_H__) */
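The *_VAL_* constants above name the possible STRAP_STATE encodings; per the union in the companion header, STRAP_STATE occupies the low three bits of PORT_CONFIG. A hedged decode sketch with the macro names abbreviated locally and only a subset of states shown:

#include <stdint.h>

#define STRAP_STATE_VAL_DISABLED             0x0
#define STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT 0x1
#define STRAP_STATE_VAL_AUTO_CONFIG_RC       0x2

static const char *strap_state_name(uint64_t port_config_word)
{
        switch (port_config_word & 0x7) {      /* STRAP_STATE: bits [2:0] */
        case STRAP_STATE_VAL_DISABLED:             return "disabled";
        case STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT: return "endpoint";
        case STRAP_STATE_VAL_AUTO_CONFIG_RC:       return "root complex";
        default:                                   return "other/stream";
        }
}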
diff --git a/arch/tile/include/arch/trio_pcie_rc.h b/arch/tile/include/arch/trio_pcie_rc.h
deleted file mode 100644
index 6a25d0aca857..000000000000
--- a/arch/tile/include/arch/trio_pcie_rc.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_PCIE_RC_H__
-#define __ARCH_TRIO_PCIE_RC_H__
-
-#include <arch/abi.h>
-#include <arch/trio_pcie_rc_def.h>
-
-#ifndef __ASSEMBLER__
-
-/* Device Capabilities Register. */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * Max_Payload_Size Supported, writable through the MAC_STANDARD interface.
- */
- uint_reg_t mps_sup : 3;
- /*
- * This field is writable through the MAC_STANDARD interface. However,
- * Phantom Function is not supported. Therefore, the application must
- * not write any value other than 0x0 to this field.
- */
- uint_reg_t phantom_function_supported : 2;
- /* This bit is writable through the MAC_STANDARD interface. */
- uint_reg_t ext_tag_field_supported : 1;
- /* Reserved. */
- uint_reg_t __reserved_0 : 3;
- /* Endpoint L1 Acceptable Latency. Must be 0x0 for non-Endpoint devices. */
- uint_reg_t l1_lat : 3;
- /*
- * Undefined since PCI Express 1.1 (Was Attention Button Present for PCI
- * Express 1.0a)
- */
- uint_reg_t r1 : 1;
- /*
- * Undefined since PCI Express 1.1 (Was Attention Indicator Present for
- * PCI Express 1.0a)
- */
- uint_reg_t r2 : 1;
- /*
- * Undefined since PCI Express 1.1 (Was Power Indicator Present for PCI
- * Express 1.0a)
- */
- uint_reg_t r3 : 1;
- /*
- * Role-Based Error Reporting, writable through the MAC_STANDARD
- * interface. Required to be set for device compliant to 1.1 spec and
- * later.
- */
- uint_reg_t rer : 1;
- /* Reserved. */
- uint_reg_t __reserved_1 : 2;
- /* Captured Slot Power Limit Value Upstream port only. */
- uint_reg_t slot_pwr_lim : 8;
- /* Captured Slot Power Limit Scale Upstream port only. */
- uint_reg_t slot_pwr_scale : 2;
- /* Reserved. */
- uint_reg_t __reserved_2 : 4;
- /* Endpoint L0s Acceptable Latency. Must be 0x0 for non-Endpoint devices. */
- uint_reg_t l0s_lat : 1;
- /* Reserved. */
- uint_reg_t __reserved_3 : 31;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_3 : 31;
- uint_reg_t l0s_lat : 1;
- uint_reg_t __reserved_2 : 4;
- uint_reg_t slot_pwr_scale : 2;
- uint_reg_t slot_pwr_lim : 8;
- uint_reg_t __reserved_1 : 2;
- uint_reg_t rer : 1;
- uint_reg_t r3 : 1;
- uint_reg_t r2 : 1;
- uint_reg_t r1 : 1;
- uint_reg_t l1_lat : 3;
- uint_reg_t __reserved_0 : 3;
- uint_reg_t ext_tag_field_supported : 1;
- uint_reg_t phantom_function_supported : 2;
- uint_reg_t mps_sup : 3;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_PCIE_RC_DEVICE_CAP_t;
-
-/* Device Control Register. */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Correctable Error Reporting Enable */
- uint_reg_t cor_err_ena : 1;
- /* Non-Fatal Error Reporting Enable */
- uint_reg_t nf_err_ena : 1;
- /* Fatal Error Reporting Enable */
- uint_reg_t fatal_err_ena : 1;
- /* Unsupported Request Reporting Enable */
- uint_reg_t ur_ena : 1;
- /* Relaxed ordering enable */
- uint_reg_t ro_ena : 1;
- /* Max Payload Size */
- uint_reg_t max_payload_size : 3;
- /* Extended Tag Field Enable */
- uint_reg_t ext_tag : 1;
- /* Phantom Function Enable */
- uint_reg_t ph_fn_ena : 1;
- /* AUX Power PM Enable */
- uint_reg_t aux_pm_ena : 1;
- /* Enable NoSnoop */
- uint_reg_t no_snoop : 1;
- /* Max read request size */
- uint_reg_t max_read_req_sz : 3;
- /* Reserved. */
- uint_reg_t __reserved : 49;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved : 49;
- uint_reg_t max_read_req_sz : 3;
- uint_reg_t no_snoop : 1;
- uint_reg_t aux_pm_ena : 1;
- uint_reg_t ph_fn_ena : 1;
- uint_reg_t ext_tag : 1;
- uint_reg_t max_payload_size : 3;
- uint_reg_t ro_ena : 1;
- uint_reg_t ur_ena : 1;
- uint_reg_t fatal_err_ena : 1;
- uint_reg_t nf_err_ena : 1;
- uint_reg_t cor_err_ena : 1;
-#endif
- };
-
- uint_reg_t word;
-} TRIO_PCIE_RC_DEVICE_CONTROL_t;
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_TRIO_PCIE_RC_H__) */
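For illustration, one plausible use of the Device Control union above: bumping Max_Payload_Size to 256 bytes via read-modify-write. The standalone type and helper are ours; the encoding payload_bytes = 128 << max_payload_size comes from the PCIe spec rather than this header.

#include <stdint.h>

typedef uint64_t uint_reg_t;

typedef union {                          /* little-endian layout as above */
        struct {
                uint_reg_t cor_err_ena      : 1;
                uint_reg_t nf_err_ena       : 1;
                uint_reg_t fatal_err_ena    : 1;
                uint_reg_t ur_ena           : 1;
                uint_reg_t ro_ena           : 1;
                uint_reg_t max_payload_size : 3;
                uint_reg_t ext_tag          : 1;
                uint_reg_t ph_fn_ena        : 1;
                uint_reg_t aux_pm_ena       : 1;
                uint_reg_t no_snoop         : 1;
                uint_reg_t max_read_req_sz  : 3;
                uint_reg_t __reserved       : 49;
        };
        uint_reg_t word;
} device_control_t;

/* Set MPS to 256 bytes (128 << 1) without disturbing other fields. */
static uint_reg_t device_control_set_mps_256(uint_reg_t old)
{
        device_control_t dc = { .word = old };

        dc.max_payload_size = 1;
        return dc.word;
}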
diff --git a/arch/tile/include/arch/trio_pcie_rc_def.h b/arch/tile/include/arch/trio_pcie_rc_def.h
deleted file mode 100644
index 74081a65b6f2..000000000000
--- a/arch/tile/include/arch/trio_pcie_rc_def.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_PCIE_RC_DEF_H__
-#define __ARCH_TRIO_PCIE_RC_DEF_H__
-#define TRIO_PCIE_RC_DEVICE_CAP 0x0074
-#define TRIO_PCIE_RC_DEVICE_CONTROL 0x0078
-#define TRIO_PCIE_RC_DEVICE_ID_VEN_ID 0x0000
-#define TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT 16
-#define TRIO_PCIE_RC_REVISION_ID 0x0008
-#endif /* !defined(__ARCH_TRIO_PCIE_RC_DEF_H__) */
diff --git a/arch/tile/include/arch/trio_shm.h b/arch/tile/include/arch/trio_shm.h
deleted file mode 100644
index 3382e38245af..000000000000
--- a/arch/tile/include/arch/trio_shm.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-
-#ifndef __ARCH_TRIO_SHM_H__
-#define __ARCH_TRIO_SHM_H__
-
-#include <arch/abi.h>
-#include <arch/trio_shm_def.h>
-
-#ifndef __ASSEMBLER__
-/**
- * TRIO DMA Descriptor.
- * The TRIO DMA descriptor is written by software and consumed by hardware.
- * It is used to specify the location of transaction data in the IO and Tile
- * domains.
- */
-
-__extension__
-typedef union
-{
- struct
- {
- /* Word 0 */
-
-#ifndef __BIG_ENDIAN__
- /** Tile side virtual address. */
- int_reg_t va : 42;
- /**
- * Encoded size of buffer used on push DMA when C=1:
- * 0 = 128 bytes
- * 1 = 256 bytes
- * 2 = 512 bytes
- * 3 = 1024 bytes
- * 4 = 1664 bytes
- * 5 = 4096 bytes
- * 6 = 10368 bytes
- * 7 = 16384 bytes
- */
- uint_reg_t bsz : 3;
- /**
- * Chaining designation. Always zero for pull DMA
- * 0 : Unchained buffer pointer
- * 1 : Chained buffer pointer. Next buffer descriptor (e.g. VA) stored
- * in 1st 8-bytes in buffer. For chained buffers, first 8-bytes of each
- * buffer contain the next buffer descriptor formatted exactly like a PDE
- * buffer descriptor. This allows a chained PDE buffer to be sent using
- * push DMA.
- */
- uint_reg_t c : 1;
- /**
- * Notification interrupt will be delivered when the transaction has
- * completed (all data has been read from or written to the Tile-side
- * buffer).
- */
- uint_reg_t notif : 1;
- /**
- * When 0, the XSIZE field specifies the total byte count for the
- * transaction. When 1, the XSIZE field is encoded as 2^(N+14) for N in
- * {0..6}:
- * 0 = 16KB
- * 1 = 32KB
- * 2 = 64KB
- * 3 = 128KB
- * 4 = 256KB
- * 5 = 512KB
- * 6 = 1MB
- * All other encodings of the XSIZE field are reserved when SMOD=1
- */
- uint_reg_t smod : 1;
- /**
- * Total number of bytes to move for this transaction. When SMOD=1,
- * this field is encoded - see SMOD description.
- */
- uint_reg_t xsize : 14;
- /** Reserved. */
- uint_reg_t __reserved_0 : 1;
- /**
- * Generation number. Used to indicate a valid descriptor in ring. When
- * a new descriptor is written into the ring, software must toggle this
- * bit. The net effect is that the GEN bit being written into new
- * descriptors toggles each time the ring tail pointer wraps.
- */
- uint_reg_t gen : 1;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t gen : 1;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t xsize : 14;
- uint_reg_t smod : 1;
- uint_reg_t notif : 1;
- uint_reg_t c : 1;
- uint_reg_t bsz : 3;
- int_reg_t va : 42;
-#endif
-
- /* Word 1 */
-
-#ifndef __BIG_ENDIAN__
- /** IO-side address */
- uint_reg_t io_address : 64;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t io_address : 64;
-#endif
-
- };
-
- /** Word access */
- uint_reg_t words[2];
-} TRIO_DMA_DESC_t;
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_TRIO_SHM_H__) */
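To make the word-0 fields concrete, here is a sketch of building a push-DMA descriptor for a single unchained 1024-byte buffer. The helper is hypothetical; the GEN-bit bookkeeping against the ring tail pointer stays with the caller, as the field comment above describes.

#include <stdint.h>
#include <string.h>

typedef uint64_t uint_reg_t;
typedef int64_t  int_reg_t;

typedef union {                 /* word 0 of TRIO_DMA_DESC_t, little-endian */
        struct {
                int_reg_t  va           : 42;  /* Tile-side virtual address */
                uint_reg_t bsz          : 3;   /* 3 -> 1024-byte buffer */
                uint_reg_t c            : 1;   /* 0 -> unchained */
                uint_reg_t notif        : 1;   /* interrupt on completion */
                uint_reg_t smod         : 1;   /* 0 -> XSIZE is a byte count */
                uint_reg_t xsize        : 14;  /* total bytes to move */
                uint_reg_t __reserved_0 : 1;
                uint_reg_t gen          : 1;   /* toggles when the tail wraps */
        };
        uint_reg_t word;
} dma_desc_w0_t;

static dma_desc_w0_t make_push_desc(void *buf, unsigned int len, unsigned int gen)
{
        dma_desc_w0_t d = { .word = 0 };

        d.va    = (intptr_t)buf;        /* assumes the VA fits the 42-bit field */
        d.bsz   = 3;                    /* encoded 1024-byte buffer size */
        d.c     = 0;
        d.smod  = 0;
        d.xsize = len;                  /* must fit in 14 bits when SMOD=0 */
        d.notif = 1;
        d.gen   = gen & 1;
        return d;
}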
diff --git a/arch/tile/include/arch/trio_shm_def.h b/arch/tile/include/arch/trio_shm_def.h
deleted file mode 100644
index 72a59c88b06a..000000000000
--- a/arch/tile/include/arch/trio_shm_def.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_TRIO_SHM_DEF_H__
-#define __ARCH_TRIO_SHM_DEF_H__
-#endif /* !defined(__ARCH_TRIO_SHM_DEF_H__) */
diff --git a/arch/tile/include/arch/uart.h b/arch/tile/include/arch/uart.h
deleted file mode 100644
index 07966970adad..000000000000
--- a/arch/tile/include/arch/uart.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_UART_H__
-#define __ARCH_UART_H__
-
-#include <arch/abi.h>
-#include <arch/uart_def.h>
-
-#ifndef __ASSEMBLER__
-
-/* Divisor. */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * Baud Rate Divisor: divisor = REF_CLK frequency / (baud rate * 16).
- * Note: REF_CLK is always 125 MHz, so with the default
- * divisor of 68, baud rate = 125M/(68*16) ~= 115200 baud.
- */
- uint_reg_t divisor : 12;
- /* Reserved. */
- uint_reg_t __reserved : 52;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved : 52;
- uint_reg_t divisor : 12;
-#endif
- };
-
- uint_reg_t word;
-} UART_DIVISOR_t;
-
-/* FIFO Count. */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /*
- * n: n active entries in the receive FIFO (max is 2**8). Each entry has
- * 8 bits.
- * 0: no active entry in the receive FIFO (that is, the FIFO is empty).
- */
- uint_reg_t rfifo_count : 9;
- /* Reserved. */
- uint_reg_t __reserved_0 : 7;
- /*
- * n: n active entries in the transmit FIFO (max is 2**8). Each entry has
- * 8 bits.
- * 0: no active entry in the transmit FIFO (that is, the FIFO is empty).
- */
- uint_reg_t tfifo_count : 9;
- /* Reserved. */
- uint_reg_t __reserved_1 : 7;
- /*
- * n: n active entries in the write FIFO (max is 2**2). Each entry has 8
- * bits.
- * 0: no active entry in the write FIFO (that is, the FIFO is empty).
- */
- uint_reg_t wfifo_count : 3;
- /* Reserved. */
- uint_reg_t __reserved_2 : 29;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_2 : 29;
- uint_reg_t wfifo_count : 3;
- uint_reg_t __reserved_1 : 7;
- uint_reg_t tfifo_count : 9;
- uint_reg_t __reserved_0 : 7;
- uint_reg_t rfifo_count : 9;
-#endif
- };
-
- uint_reg_t word;
-} UART_FIFO_COUNT_t;
-
-/* FLAG. */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /* 1: receive FIFO is empty */
- uint_reg_t rfifo_empty : 1;
- /* 1: write FIFO is empty. */
- uint_reg_t wfifo_empty : 1;
- /* 1: transmit FIFO is empty. */
- uint_reg_t tfifo_empty : 1;
- /* 1: receive FIFO is full. */
- uint_reg_t rfifo_full : 1;
- /* 1: write FIFO is full. */
- uint_reg_t wfifo_full : 1;
- /* 1: transmit FIFO is full. */
- uint_reg_t tfifo_full : 1;
- /* Reserved. */
- uint_reg_t __reserved_1 : 57;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_1 : 57;
- uint_reg_t tfifo_full : 1;
- uint_reg_t wfifo_full : 1;
- uint_reg_t rfifo_full : 1;
- uint_reg_t tfifo_empty : 1;
- uint_reg_t wfifo_empty : 1;
- uint_reg_t rfifo_empty : 1;
- uint_reg_t __reserved_0 : 1;
-#endif
- };
-
- uint_reg_t word;
-} UART_FLAG_t;
-
-/*
- * Interrupt Vector Mask.
- * Each bit in this register corresponds to a specific interrupt. When set,
- * the associated interrupt will not be dispatched.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Read data FIFO read and no data available */
- uint_reg_t rdat_err : 1;
- /* Write FIFO was written but it was full */
- uint_reg_t wdat_err : 1;
- /* Stop bit not found when current data was received */
- uint_reg_t frame_err : 1;
- /* Parity error was detected when current data was received */
- uint_reg_t parity_err : 1;
- /* Data was received but the receive FIFO was full */
- uint_reg_t rfifo_overflow : 1;
- /*
- * An almost-full event is raised when data is about to be written to
- * the receive FIFO and the receive FIFO already holds at least
- * BUFFER_THRESHOLD.RFIFO_AFULL bytes.
- */
- uint_reg_t rfifo_afull : 1;
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /* An entry in the transmit FIFO was popped */
- uint_reg_t tfifo_re : 1;
- /* An entry has been pushed into the receive FIFO */
- uint_reg_t rfifo_we : 1;
- /* An entry of the write FIFO has been popped */
- uint_reg_t wfifo_re : 1;
- /* Rshim read receive FIFO in protocol mode */
- uint_reg_t rfifo_err : 1;
- /*
- * An almost-empty event is raised when data is about to be read from
- * the transmit FIFO and the transmit FIFO holds no more than
- * BUFFER_THRESHOLD.TFIFO_AEMPTY bytes.
- */
- uint_reg_t tfifo_aempty : 1;
- /* Reserved. */
- uint_reg_t __reserved_1 : 52;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_1 : 52;
- uint_reg_t tfifo_aempty : 1;
- uint_reg_t rfifo_err : 1;
- uint_reg_t wfifo_re : 1;
- uint_reg_t rfifo_we : 1;
- uint_reg_t tfifo_re : 1;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t rfifo_afull : 1;
- uint_reg_t rfifo_overflow : 1;
- uint_reg_t parity_err : 1;
- uint_reg_t frame_err : 1;
- uint_reg_t wdat_err : 1;
- uint_reg_t rdat_err : 1;
-#endif
- };
-
- uint_reg_t word;
-} UART_INTERRUPT_MASK_t;
-
-/*
- * Interrupt vector, write-one-to-clear.
- * Each bit in this register corresponds to a specific interrupt. Hardware
- * sets the bit when the associated condition has occurred. Writing a 1
- * clears the status bit.
- */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Read data FIFO read and no data available */
- uint_reg_t rdat_err : 1;
- /* Write FIFO was written but it was full */
- uint_reg_t wdat_err : 1;
- /* Stop bit not found when current data was received */
- uint_reg_t frame_err : 1;
- /* Parity error was detected when current data was received */
- uint_reg_t parity_err : 1;
- /* Data was received but the receive FIFO was full */
- uint_reg_t rfifo_overflow : 1;
- /*
- * Data was received and the receive FIFO is now almost full (more than
- * BUFFER_THRESHOLD.RFIFO_AFULL bytes in it)
- */
- uint_reg_t rfifo_afull : 1;
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /* An entry in the transmit FIFO was popped */
- uint_reg_t tfifo_re : 1;
- /* An entry has been pushed into the receive FIFO */
- uint_reg_t rfifo_we : 1;
- /* An entry of the write FIFO has been popped */
- uint_reg_t wfifo_re : 1;
- /* Rshim read receive FIFO in protocol mode */
- uint_reg_t rfifo_err : 1;
- /*
- * Data was read from the transmit FIFO and now it is almost empty (less
- * than or equal to BUFFER_THRESHOLD.TFIFO_AEMPTY bytes in it).
- */
- uint_reg_t tfifo_aempty : 1;
- /* Reserved. */
- uint_reg_t __reserved_1 : 52;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_1 : 52;
- uint_reg_t tfifo_aempty : 1;
- uint_reg_t rfifo_err : 1;
- uint_reg_t wfifo_re : 1;
- uint_reg_t rfifo_we : 1;
- uint_reg_t tfifo_re : 1;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t rfifo_afull : 1;
- uint_reg_t rfifo_overflow : 1;
- uint_reg_t parity_err : 1;
- uint_reg_t frame_err : 1;
- uint_reg_t wdat_err : 1;
- uint_reg_t rdat_err : 1;
-#endif
- };
-
- uint_reg_t word;
-} UART_INTERRUPT_STATUS_t;
-
-/* Type. */
-
-__extension__
-typedef union
-{
- struct
- {
-#ifndef __BIG_ENDIAN__
- /* Number of stop bits, rx and tx */
- uint_reg_t sbits : 1;
- /* Reserved. */
- uint_reg_t __reserved_0 : 1;
- /* Data word size, rx and tx */
- uint_reg_t dbits : 1;
- /* Reserved. */
- uint_reg_t __reserved_1 : 1;
- /* Parity selection, rx and tx */
- uint_reg_t ptype : 3;
- /* Reserved. */
- uint_reg_t __reserved_2 : 57;
-#else /* __BIG_ENDIAN__ */
- uint_reg_t __reserved_2 : 57;
- uint_reg_t ptype : 3;
- uint_reg_t __reserved_1 : 1;
- uint_reg_t dbits : 1;
- uint_reg_t __reserved_0 : 1;
- uint_reg_t sbits : 1;
-#endif
- };
-
- uint_reg_t word;
-} UART_TYPE_t;
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_UART_H__) */
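The Divisor comment fixes the whole clocking scheme: a 125 MHz reference divided by 16x oversampling and the programmed divisor. The register value for a target baud rate is therefore a single rounded division; a small sketch:

#include <stdint.h>

#define UART_REF_CLK_HZ 125000000UL

/* divisor = REF_CLK / (baud * 16), rounded to nearest */
static uint16_t uart_divisor_for(unsigned long baud)
{
        return (uint16_t)((UART_REF_CLK_HZ + 8 * baud) / (16 * baud));
}

/* uart_divisor_for(115200) == 68, matching the default noted above. */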
diff --git a/arch/tile/include/arch/uart_def.h b/arch/tile/include/arch/uart_def.h
deleted file mode 100644
index 42bcaf535379..000000000000
--- a/arch/tile/include/arch/uart_def.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_UART_DEF_H__
-#define __ARCH_UART_DEF_H__
-#define UART_DIVISOR 0x0158
-#define UART_FIFO_COUNT 0x0110
-#define UART_FLAG 0x0108
-#define UART_INTERRUPT_MASK 0x0208
-#define UART_INTERRUPT_MASK__RDAT_ERR_SHIFT 0
-#define UART_INTERRUPT_MASK__RDAT_ERR_WIDTH 1
-#define UART_INTERRUPT_MASK__RDAT_ERR_RESET_VAL 1
-#define UART_INTERRUPT_MASK__RDAT_ERR_RMASK 0x1
-#define UART_INTERRUPT_MASK__RDAT_ERR_MASK 0x1
-#define UART_INTERRUPT_MASK__RDAT_ERR_FIELD 0,0
-#define UART_INTERRUPT_MASK__WDAT_ERR_SHIFT 1
-#define UART_INTERRUPT_MASK__WDAT_ERR_WIDTH 1
-#define UART_INTERRUPT_MASK__WDAT_ERR_RESET_VAL 1
-#define UART_INTERRUPT_MASK__WDAT_ERR_RMASK 0x1
-#define UART_INTERRUPT_MASK__WDAT_ERR_MASK 0x2
-#define UART_INTERRUPT_MASK__WDAT_ERR_FIELD 1,1
-#define UART_INTERRUPT_MASK__FRAME_ERR_SHIFT 2
-#define UART_INTERRUPT_MASK__FRAME_ERR_WIDTH 1
-#define UART_INTERRUPT_MASK__FRAME_ERR_RESET_VAL 1
-#define UART_INTERRUPT_MASK__FRAME_ERR_RMASK 0x1
-#define UART_INTERRUPT_MASK__FRAME_ERR_MASK 0x4
-#define UART_INTERRUPT_MASK__FRAME_ERR_FIELD 2,2
-#define UART_INTERRUPT_MASK__PARITY_ERR_SHIFT 3
-#define UART_INTERRUPT_MASK__PARITY_ERR_WIDTH 1
-#define UART_INTERRUPT_MASK__PARITY_ERR_RESET_VAL 1
-#define UART_INTERRUPT_MASK__PARITY_ERR_RMASK 0x1
-#define UART_INTERRUPT_MASK__PARITY_ERR_MASK 0x8
-#define UART_INTERRUPT_MASK__PARITY_ERR_FIELD 3,3
-#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_SHIFT 4
-#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_WIDTH 1
-#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RESET_VAL 1
-#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RMASK 0x1
-#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK 0x10
-#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_FIELD 4,4
-#define UART_INTERRUPT_MASK__RFIFO_AFULL_SHIFT 5
-#define UART_INTERRUPT_MASK__RFIFO_AFULL_WIDTH 1
-#define UART_INTERRUPT_MASK__RFIFO_AFULL_RESET_VAL 1
-#define UART_INTERRUPT_MASK__RFIFO_AFULL_RMASK 0x1
-#define UART_INTERRUPT_MASK__RFIFO_AFULL_MASK 0x20
-#define UART_INTERRUPT_MASK__RFIFO_AFULL_FIELD 5,5
-#define UART_INTERRUPT_MASK__TFIFO_RE_SHIFT 7
-#define UART_INTERRUPT_MASK__TFIFO_RE_WIDTH 1
-#define UART_INTERRUPT_MASK__TFIFO_RE_RESET_VAL 1
-#define UART_INTERRUPT_MASK__TFIFO_RE_RMASK 0x1
-#define UART_INTERRUPT_MASK__TFIFO_RE_MASK 0x80
-#define UART_INTERRUPT_MASK__TFIFO_RE_FIELD 7,7
-#define UART_INTERRUPT_MASK__RFIFO_WE_SHIFT 8
-#define UART_INTERRUPT_MASK__RFIFO_WE_WIDTH 1
-#define UART_INTERRUPT_MASK__RFIFO_WE_RESET_VAL 1
-#define UART_INTERRUPT_MASK__RFIFO_WE_RMASK 0x1
-#define UART_INTERRUPT_MASK__RFIFO_WE_MASK 0x100
-#define UART_INTERRUPT_MASK__RFIFO_WE_FIELD 8,8
-#define UART_INTERRUPT_MASK__WFIFO_RE_SHIFT 9
-#define UART_INTERRUPT_MASK__WFIFO_RE_WIDTH 1
-#define UART_INTERRUPT_MASK__WFIFO_RE_RESET_VAL 1
-#define UART_INTERRUPT_MASK__WFIFO_RE_RMASK 0x1
-#define UART_INTERRUPT_MASK__WFIFO_RE_MASK 0x200
-#define UART_INTERRUPT_MASK__WFIFO_RE_FIELD 9,9
-#define UART_INTERRUPT_MASK__RFIFO_ERR_SHIFT 10
-#define UART_INTERRUPT_MASK__RFIFO_ERR_WIDTH 1
-#define UART_INTERRUPT_MASK__RFIFO_ERR_RESET_VAL 1
-#define UART_INTERRUPT_MASK__RFIFO_ERR_RMASK 0x1
-#define UART_INTERRUPT_MASK__RFIFO_ERR_MASK 0x400
-#define UART_INTERRUPT_MASK__RFIFO_ERR_FIELD 10,10
-#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_SHIFT 11
-#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_WIDTH 1
-#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RESET_VAL 1
-#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RMASK 0x1
-#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_MASK 0x800
-#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_FIELD 11,11
-#define UART_INTERRUPT_STATUS 0x0200
-#define UART_RECEIVE_DATA 0x0148
-#define UART_TRANSMIT_DATA 0x0140
-#define UART_TYPE 0x0160
-#define UART_TYPE__SBITS_SHIFT 0
-#define UART_TYPE__SBITS_WIDTH 1
-#define UART_TYPE__SBITS_RESET_VAL 1
-#define UART_TYPE__SBITS_RMASK 0x1
-#define UART_TYPE__SBITS_MASK 0x1
-#define UART_TYPE__SBITS_FIELD 0,0
-#define UART_TYPE__SBITS_VAL_ONE_SBITS 0x0
-#define UART_TYPE__SBITS_VAL_TWO_SBITS 0x1
-#define UART_TYPE__DBITS_SHIFT 2
-#define UART_TYPE__DBITS_WIDTH 1
-#define UART_TYPE__DBITS_RESET_VAL 0
-#define UART_TYPE__DBITS_RMASK 0x1
-#define UART_TYPE__DBITS_MASK 0x4
-#define UART_TYPE__DBITS_FIELD 2,2
-#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
-#define UART_TYPE__DBITS_VAL_SEVEN_DBITS 0x1
-#define UART_TYPE__PTYPE_SHIFT 4
-#define UART_TYPE__PTYPE_WIDTH 3
-#define UART_TYPE__PTYPE_RESET_VAL 3
-#define UART_TYPE__PTYPE_RMASK 0x7
-#define UART_TYPE__PTYPE_MASK 0x70
-#define UART_TYPE__PTYPE_FIELD 4,6
-#define UART_TYPE__PTYPE_VAL_NONE 0x0
-#define UART_TYPE__PTYPE_VAL_MARK 0x1
-#define UART_TYPE__PTYPE_VAL_SPACE 0x2
-#define UART_TYPE__PTYPE_VAL_EVEN 0x3
-#define UART_TYPE__PTYPE_VAL_ODD 0x4
-#endif /* !defined(__ARCH_UART_DEF_H__) */
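These SHIFT/VAL pairs compose in the usual way. As a hedged example, the line type word for 8 data bits, even parity, one stop bit; the macros are repeated locally so the snippet stands alone:

#include <stdint.h>

#define UART_TYPE__SBITS_SHIFT           0
#define UART_TYPE__SBITS_VAL_ONE_SBITS   0x0
#define UART_TYPE__DBITS_SHIFT           2
#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
#define UART_TYPE__PTYPE_SHIFT           4
#define UART_TYPE__PTYPE_VAL_EVEN        0x3

static inline uint64_t uart_type_8e1(void)
{
        return ((uint64_t)UART_TYPE__SBITS_VAL_ONE_SBITS << UART_TYPE__SBITS_SHIFT) |
               ((uint64_t)UART_TYPE__DBITS_VAL_EIGHT_DBITS << UART_TYPE__DBITS_SHIFT) |
               ((uint64_t)UART_TYPE__PTYPE_VAL_EVEN << UART_TYPE__PTYPE_SHIFT);  /* 0x30 */
}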
diff --git a/arch/tile/include/arch/usb_host.h b/arch/tile/include/arch/usb_host.h
deleted file mode 100644
index d09f32683962..000000000000
--- a/arch/tile/include/arch/usb_host.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_USB_HOST_H__
-#define __ARCH_USB_HOST_H__
-
-#include <arch/abi.h>
-#include <arch/usb_host_def.h>
-
-#ifndef __ASSEMBLER__
-#endif /* !defined(__ASSEMBLER__) */
-
-#endif /* !defined(__ARCH_USB_HOST_H__) */
diff --git a/arch/tile/include/arch/usb_host_def.h b/arch/tile/include/arch/usb_host_def.h
deleted file mode 100644
index aeed7753e8e1..000000000000
--- a/arch/tile/include/arch/usb_host_def.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Machine-generated file; do not edit. */
-
-#ifndef __ARCH_USB_HOST_DEF_H__
-#define __ARCH_USB_HOST_DEF_H__
-#endif /* !defined(__ARCH_USB_HOST_DEF_H__) */
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
deleted file mode 100644
index 414dfc3a1808..000000000000
--- a/arch/tile/include/asm/Kbuild
+++ /dev/null
@@ -1,18 +0,0 @@
-generic-y += bug.h
-generic-y += bugs.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += extable.h
-generic-y += fb.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += parport.h
-generic-y += preempt.h
-generic-y += seccomp.h
-generic-y += serial.h
-generic-y += trace_clock.h
-generic-y += xor.h
diff --git a/arch/tile/include/asm/asm-offsets.h b/arch/tile/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/tile/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
deleted file mode 100644
index 8dda3c8ff5ab..000000000000
--- a/arch/tile/include/asm/atomic.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Atomic primitives.
- */
-
-#ifndef _ASM_TILE_ATOMIC_H
-#define _ASM_TILE_ATOMIC_H
-
-#include <asm/cmpxchg.h>
-
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-static inline int atomic_read(const atomic_t *v)
-{
- return READ_ONCE(v->counter);
-}
-
-/**
- * atomic_sub_return - subtract integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to subtract
- *
- * Atomically subtracts @i from @v and returns @v - @i
- */
-#define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
-
-#define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v))
-
-/**
- * atomic_sub - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-#define atomic_sub(i, v) atomic_add((int)(-(i)), (v))
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns true if the result is
- * zero, or false for all other cases.
- */
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-
-/**
- * atomic_inc_return - increment memory and return
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1 and returns the new value.
- */
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-/**
- * atomic_dec_return - decrement memory and return
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and returns the new value.
- */
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1, (v))
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1, (v))
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and returns true if the result is 0.
- */
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1 and returns true if the result is 0.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-/**
- * atomic_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline int atomic_xchg(atomic_t *v, int n)
-{
- return xchg(&v->counter, n);
-}
-
-/**
- * atomic_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
-{
- return cmpxchg(&v->counter, o, n);
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true if the result is
- * negative, or false when result is greater than or equal to zero.
- */
-#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
-
-#endif /* __ASSEMBLY__ */
-
-#ifndef __tilegx__
-#include <asm/atomic_32.h>
-#else
-#include <asm/atomic_64.h>
-#endif
-
-#ifndef __ASSEMBLY__
-
-/**
- * atomic64_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic64_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline long long atomic64_xchg(atomic64_t *v, long long n)
-{
- return xchg64(&v->counter, n);
-}
-
-/**
- * atomic64_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic64_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
- long long n)
-{
- return cmpxchg64(&v->counter, o, n);
-}
-
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
-{
- long long c, old, dec;
-
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_TILE_ATOMIC_H */
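The loop in atomic64_dec_if_positive() above is the canonical compare-exchange retry pattern: recompute the candidate from the latest observed value until the CAS lands. For comparison, the same logic in portable C11 — ours, not kernel code:

#include <stdatomic.h>

/* Decrement *v only if the result stays non-negative; return the new
 * value, or the negative candidate if no decrement happened.
 */
static long long dec_if_positive(_Atomic long long *v)
{
        long long c = atomic_load(v);

        for (;;) {
                long long dec = c - 1;
                if (dec < 0)
                        return dec;             /* *v was already <= 0 */
                /* On failure, c is refreshed with the current value. */
                if (atomic_compare_exchange_weak(v, &c, dec))
                        return dec;
        }
}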
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
deleted file mode 100644
index 53a423e7cb92..000000000000
--- a/arch/tile/include/asm/atomic_32.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Do not include directly; use <linux/atomic.h>.
- */
-
-#ifndef _ASM_TILE_ATOMIC_32_H
-#define _ASM_TILE_ATOMIC_32_H
-
-#include <asm/barrier.h>
-#include <arch/chip.h>
-
-#ifndef __ASSEMBLY__
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
- _atomic_xchg_add(&v->counter, i);
-}
-
-#define ATOMIC_OPS(op) \
-unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- _atomic_fetch_##op((unsigned long *)&v->counter, i); \
-} \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- smp_mb(); \
- return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
-}
-
-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-ATOMIC_OPS(xor)
-
-#undef ATOMIC_OPS
-
-static inline int atomic_fetch_add(int i, atomic_t *v)
-{
- smp_mb();
- return _atomic_xchg_add(&v->counter, i);
-}
-
-/**
- * atomic_add_return - add integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic_xchg_add(&v->counter, i) + i;
-}
-
-/**
- * __atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic_xchg_add_unless(&v->counter, a, u);
-}
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- *
- * atomic_set() can't be just a raw store, since it would be lost if it
- * fell between the load and store of one of the other atomic ops.
- */
-static inline void atomic_set(atomic_t *v, int n)
-{
- _atomic_xchg(&v->counter, n);
-}
-
-#define atomic_set_release(v, i) atomic_set((v), (i))
-
-/* A 64bit atomic type */
-
-typedef struct {
- long long counter;
-} atomic64_t;
-
-#define ATOMIC64_INIT(val) { (val) }
-
-/**
- * atomic64_read - read atomic variable
- * @v: pointer of type atomic64_t
- *
- * Atomically reads the value of @v.
- */
-static inline long long atomic64_read(const atomic64_t *v)
-{
- /*
- * Requires an atomic op to read both 32-bit parts consistently.
- * Casting away const is safe since the atomic support routines
- * do not write to memory if the value has not been modified.
- */
- return _atomic64_xchg_add((long long *)&v->counter, 0);
-}
-
-/**
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
- _atomic64_xchg_add(&v->counter, i);
-}
-
-#define ATOMIC64_OPS(op) \
-long long _atomic64_fetch_##op(long long *v, long long n); \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
-{ \
- _atomic64_fetch_##op(&v->counter, i); \
-} \
-static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
-{ \
- smp_mb(); \
- return _atomic64_fetch_##op(&v->counter, i); \
-}
-
-ATOMIC64_OPS(and)
-ATOMIC64_OPS(or)
-ATOMIC64_OPS(xor)
-
-#undef ATOMIC64_OPS
-
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
-{
- smp_mb();
- return _atomic64_xchg_add(&v->counter, i);
-}
-
-/**
- * atomic64_add_return - add integer and return
- * @v: pointer of type atomic64_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic64_xchg_add(&v->counter, i) + i;
-}
-
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline long long atomic64_add_unless(atomic64_t *v, long long a,
- long long u)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
-}
-
-/**
- * atomic64_set - set atomic variable
- * @v: pointer of type atomic64_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- *
- * atomic64_set() can't be just a raw store, since it would be lost if it
- * fell between the load and store of one of the other atomic ops.
- */
-static inline void atomic64_set(atomic64_t *v, long long n)
-{
- _atomic64_xchg(&v->counter, n);
-}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
-#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_sub(i, v) atomic64_add(-(i), (v))
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * Internal definitions only beyond this point.
- */
-
-/*
- * Number of atomic locks in atomic_locks[]. Must be a power of two.
- * There is no reason for more than PAGE_SIZE / 8 entries, since that
- * is the maximum number of pointer bits we can use to index this.
- * And we cannot have more than PAGE_SIZE / 4, since this has to
- * fit on a single page and each entry takes 4 bytes.
- */
-#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
-#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
-
-#ifndef __ASSEMBLY__
-extern int atomic_locks[];
-#endif
-
-/*
- * All the code that may fault while holding an atomic lock must
- * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
- * can correctly release and reacquire the lock. Note that we
- * mention the register number in a comment in "lib/atomic_asm.S" to keep
- * assembly coders from using this register by mistake, so if it
- * is changed here, change that comment as well.
- */
-#define ATOMIC_LOCK_REG 20
-#define ATOMIC_LOCK_REG_NAME r20
-
-#ifndef __ASSEMBLY__
-/* Called from setup to initialize a hash table to point to per_cpu locks. */
-void __init_atomic_per_cpu(void);
-
-#ifdef CONFIG_SMP
-/* Support releasing the atomic lock in do_page_fault_ics(). */
-void __atomic_fault_unlock(int *lock_ptr);
-#endif
-
-/* Return a pointer to the lock for the given address. */
-int *__atomic_hashed_lock(volatile void *v);
-
-/* Private helper routines in lib/atomic_asm_32.S */
-struct __get_user {
- unsigned long val;
- int err;
-};
-extern struct __get_user __atomic32_cmpxchg(volatile int *p,
- int *lock, int o, int n);
-extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
- int *lock, int o, int n);
-extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
-extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
- long long o, long long n);
-extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
- long long n);
-extern long long __atomic64_xchg_add_unless(volatile long long *p,
- int *lock, long long o, long long n);
-extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);
-
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_ATOMIC_32_H */
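Context for the ATOMIC_HASH_* machinery: 32-bit tilepro has no native fetch-and-op instructions, so each atomic word is guarded by one of a page-sized table of locks selected by hashing the word's address. A toy user-space illustration of address-hashed locking — plain C11 spinlocks, not the kernel's tns-based code, and it assumes a zero-initialized atomic_flag is clear, as on mainstream ABIs:

#include <stdatomic.h>
#include <stdint.h>

#define ATOMIC_HASH_SHIFT 9                    /* e.g. PAGE_SHIFT(12) - 3 */
#define ATOMIC_HASH_SIZE  (1 << ATOMIC_HASH_SHIFT)

static atomic_flag atomic_locks[ATOMIC_HASH_SIZE];

/* Drop the two always-zero low bits of an aligned int pointer, then
 * index the lock table with the next ATOMIC_HASH_SHIFT bits.
 */
static atomic_flag *hashed_lock(volatile void *v)
{
        uintptr_t p = (uintptr_t)v >> 2;

        return &atomic_locks[p & (ATOMIC_HASH_SIZE - 1)];
}

static int locked_fetch_add(volatile int *p, int i)
{
        atomic_flag *lock = hashed_lock(p);
        int old;

        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
                ;                               /* spin */
        old = *p;
        *p = old + i;
        atomic_flag_clear_explicit(lock, memory_order_release);
        return old;
}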
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
deleted file mode 100644
index 4cefa0c9fd81..000000000000
--- a/arch/tile/include/asm/atomic_64.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Do not include directly; use <linux/atomic.h>.
- */
-
-#ifndef _ASM_TILE_ATOMIC_64_H
-#define _ASM_TILE_ATOMIC_64_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/barrier.h>
-#include <arch/spr_def.h>
-
-/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
-
-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-/*
- * The smp_mb() operations throughout are to support the fact that
- * Linux requires memory barriers before and after the operation,
- * on any routine which updates memory and returns a value.
- */
-
-/*
- * Note a subtlety of the locking here. We are required to provide a
- * full memory barrier before and after the operation. However, we
- * only provide an explicit mb before the operation. After the
- * operation, we use barrier() to get a full mb for free, because:
- *
- * (1) The barrier directive to the compiler prohibits any instructions
- * being statically hoisted before the barrier;
- * (2) the microarchitecture will not issue any further instructions
- * until the fetchadd result is available for the "+ i" add instruction;
- * (3) the smp_mb before the fetchadd ensures that no other memory
- * operations are in flight at this point.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int val;
- smp_mb(); /* barrier for proper semantics */
- val = __insn_fetchadd4((void *)&v->counter, i) + i;
- barrier(); /* equivalent to smp_mb(); see block comment above */
- return val;
-}
-
-#define ATOMIC_OPS(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- int val; \
- smp_mb(); \
- val = __insn_fetch##op##4((void *)&v->counter, i); \
- smp_mb(); \
- return val; \
-} \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- __insn_fetch##op##4((void *)&v->counter, i); \
-}
-
-ATOMIC_OPS(add)
-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-
-#undef ATOMIC_OPS
-
-static inline int atomic_fetch_xor(int i, atomic_t *v)
-{
- int guess, oldval = v->counter;
- smp_mb();
- do {
- guess = oldval;
- __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
- oldval = __insn_cmpexch4(&v->counter, guess ^ i);
- } while (guess != oldval);
- smp_mb();
- return oldval;
-}
-
-static inline void atomic_xor(int i, atomic_t *v)
-{
- int guess, oldval = v->counter;
- do {
- guess = oldval;
- __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
- oldval = __insn_cmpexch4(&v->counter, guess ^ i);
- } while (guess != oldval);
-}
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int guess, oldval = v->counter;
- do {
- if (oldval == u)
- break;
- guess = oldval;
- oldval = cmpxchg(&v->counter, guess, guess + a);
- } while (guess != oldval);
- return oldval;
-}
-
-/* Now the true 64-bit operations. */
-
-#define ATOMIC64_INIT(i) { (i) }
-
-#define atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
- long val;
- smp_mb(); /* barrier for proper semantics */
- val = __insn_fetchadd((void *)&v->counter, i) + i;
- barrier(); /* equivalent to smp_mb; see atomic_add_return() */
- return val;
-}
-
-#define ATOMIC64_OPS(op) \
-static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
-{ \
- long val; \
- smp_mb(); \
- val = __insn_fetch##op((void *)&v->counter, i); \
- smp_mb(); \
- return val; \
-} \
-static inline void atomic64_##op(long i, atomic64_t *v) \
-{ \
- __insn_fetch##op((void *)&v->counter, i); \
-}
-
-ATOMIC64_OPS(add)
-ATOMIC64_OPS(and)
-ATOMIC64_OPS(or)
-
-#undef ATOMIC64_OPS
-
-static inline long atomic64_fetch_xor(long i, atomic64_t *v)
-{
- long guess, oldval = v->counter;
- smp_mb();
- do {
- guess = oldval;
- __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
- oldval = __insn_cmpexch(&v->counter, guess ^ i);
- } while (guess != oldval);
- smp_mb();
- return oldval;
-}
-
-static inline void atomic64_xor(long i, atomic64_t *v)
-{
- long guess, oldval = v->counter;
- do {
- guess = oldval;
- __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
- oldval = __insn_cmpexch(&v->counter, guess ^ i);
- } while (guess != oldval);
-}
-
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long guess, oldval = v->counter;
- do {
- if (oldval == u)
- break;
- guess = oldval;
- oldval = cmpxchg(&v->counter, guess, guess + a);
- } while (guess != oldval);
- return oldval != u;
-}
-
-#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
-#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
-#define atomic64_sub(i, v) atomic64_add(-(i), (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
-
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_ATOMIC_64_H */
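The xor loops above seed SPR_CMPEXCH_VALUE with the current guess and retry the cmpexch instruction until the value it returns matches that guess. The portable C11 rendering of the same fetch-xor loop, for reference:

#include <stdatomic.h>

static int fetch_xor_c11(_Atomic int *v, int i)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        /* On failure, old is refreshed and the new old ^ i is retried. */
        while (!atomic_compare_exchange_weak(v, &old, old ^ i))
                ;
        return old;
}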
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
deleted file mode 100644
index bd5399a69edf..000000000000
--- a/arch/tile/include/asm/backtrace.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BACKTRACE_H
-#define _ASM_TILE_BACKTRACE_H
-
-#include <linux/types.h>
-
-/* Reads 'size' bytes from 'address' and writes the data to 'result'.
- * Returns true if successful, else false (e.g. memory not readable).
- */
-typedef bool (*BacktraceMemoryReader)(void *result,
- unsigned long address,
- unsigned int size,
- void *extra);
-
-typedef struct {
- /* Current PC. */
- unsigned long pc;
-
- /* Current stack pointer value. */
- unsigned long sp;
-
- /* Current frame pointer value (i.e. caller's stack pointer) */
- unsigned long fp;
-
- /* Internal use only: caller's PC for first frame. */
- unsigned long initial_frame_caller_pc;
-
- /* Internal use only: callback to read memory. */
- BacktraceMemoryReader read_memory_func;
-
- /* Internal use only: arbitrary argument to read_memory_func. */
- void *read_memory_func_extra;
-
-} BacktraceIterator;
-
-
-typedef enum {
-
- /* We have no idea what the caller's pc is. */
- PC_LOC_UNKNOWN,
-
- /* The caller's pc is currently in lr. */
- PC_LOC_IN_LR,
-
- /* The caller's pc can be found by dereferencing the caller's sp. */
- PC_LOC_ON_STACK
-
-} CallerPCLocation;
-
-
-typedef enum {
-
- /* We have no idea what the caller's sp is. */
- SP_LOC_UNKNOWN,
-
- /* The caller's sp is currently in r52. */
- SP_LOC_IN_R52,
-
- /* The caller's sp can be found by adding a certain constant
- * to the current value of sp.
- */
- SP_LOC_OFFSET
-
-} CallerSPLocation;
-
-
-/* Bit values ORed into CALLER_* values for info ops. */
-enum {
- /* Setting the low bit on any of these values means the info op
- * applies only to one bundle ago.
- */
- ONE_BUNDLE_AGO_FLAG = 1,
-
- /* Setting this bit on a CALLER_SP_* value means the PC is in LR.
- * If not set, PC is on the stack.
- */
- PC_IN_LR_FLAG = 2,
-
- /* This many of the low bits of a CALLER_SP_* value are for the
- * flag bits above.
- */
- NUM_INFO_OP_FLAGS = 2,
-
- /* We cannot have one in the memory pipe so this is the maximum. */
- MAX_INFO_OPS_PER_BUNDLE = 2
-};
-
-
-/* Internal constants used to define 'info' operands. */
-enum {
- /* 0 and 1 are reserved, as are all negative numbers. */
-
- CALLER_UNKNOWN_BASE = 2,
-
- CALLER_SP_IN_R52_BASE = 4,
-
- CALLER_SP_OFFSET_BASE = 8,
-};
-
-
-/* Current backtracer state describing where it thinks the caller is. */
-typedef struct {
- /*
- * Public fields
- */
-
- /* How do we find the caller's PC? */
- CallerPCLocation pc_location : 8;
-
- /* How do we find the caller's SP? */
- CallerSPLocation sp_location : 8;
-
- /* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
- * loc->sp_offset. Else this field is undefined.
- */
- uint16_t sp_offset;
-
- /* Is the most recently visited bundle a terminating bundle? */
- bool at_terminating_bundle;
-
- /*
- * Private fields
- */
-
- /* Will the forward scanner see someone clobbering sp
- * (i.e. changing it with something other than addi sp, sp, N)?
- */
- bool sp_clobber_follows;
-
- /* Operand to next "visible" info op (no more than one bundle past
- * the next terminating bundle), or -32768 if none.
- */
- int16_t next_info_operand;
-
- /* Is the info op in next_info_operand in the very next bundle? */
- bool is_next_info_operand_adjacent;
-
-} CallerLocation;
-
-extern void backtrace_init(BacktraceIterator *state,
- BacktraceMemoryReader read_memory_func,
- void *read_memory_func_extra,
- unsigned long pc, unsigned long lr,
- unsigned long sp, unsigned long r52);
-
-
-extern bool backtrace_next(BacktraceIterator *state);
-
-#endif /* _ASM_TILE_BACKTRACE_H */
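The intended calling pattern for this iterator: seed it with a register snapshot, then walk frames until backtrace_next() reports no caller. A hedged sketch — read_mem, the printk format, and the surrounding function are ours; the kernel's real users wrapped this in KBacktraceIterator:

#include <linux/printk.h>
#include <linux/string.h>
#include <asm/backtrace.h>

/* Trusts that the address is a mapped kernel VA; real callers validated it. */
static bool read_mem(void *result, unsigned long address,
                     unsigned int size, void *extra)
{
        memcpy(result, (void *)address, size);
        return true;
}

static void dump_frames(unsigned long pc, unsigned long lr,
                        unsigned long sp, unsigned long r52)
{
        BacktraceIterator it;

        backtrace_init(&it, read_mem, NULL, pc, lr, sp, r52);
        do {
                printk(" frame pc %#lx sp %#lx fp %#lx\n", it.pc, it.sp, it.fp);
        } while (backtrace_next(&it));
}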
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
deleted file mode 100644
index 4c419ab95ab7..000000000000
--- a/arch/tile/include/asm/barrier.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BARRIER_H
-#define _ASM_TILE_BARRIER_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <arch/chip.h>
-#include <arch/spr_def.h>
-#include <asm/timex.h>
-
-#define __sync() __insn_mf()
-
-#include <hv/syscall_public.h>
-/*
- * Issue an uncacheable load to each memory controller, then
- * wait until those loads have completed.
- */
-static inline void __mb_incoherent(void)
-{
- long clobber_r10;
- asm volatile("swint2"
- : "=R10" (clobber_r10)
- : "R10" (HV_SYS_fence_incoherent)
- : "r0", "r1", "r2", "r3", "r4",
- "r5", "r6", "r7", "r8", "r9",
- "r11", "r12", "r13", "r14",
- "r15", "r16", "r17", "r18", "r19",
- "r20", "r21", "r22", "r23", "r24",
- "r25", "r26", "r27", "r28", "r29");
-}
-
-/* Fence to guarantee visibility of stores to incoherent memory. */
-static inline void
-mb_incoherent(void)
-{
- __insn_mf();
-
- {
-#if CHIP_HAS_TILE_WRITE_PENDING()
- const unsigned long WRITE_TIMEOUT_CYCLES = 400;
- unsigned long start = get_cycles_low();
- do {
- if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
- return;
- } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
-#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
- (void) __mb_incoherent();
- }
-}
-
-#define fast_wmb() __sync()
-#define fast_rmb() __sync()
-#define fast_mb() __sync()
-#define fast_iob() mb_incoherent()
-
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-#define mb() fast_mb()
-#define iob() fast_iob()
-
-#ifndef __tilegx__ /* 32 bit */
-/*
- * We need to barrier before modifying the word, since the _atomic_xxx()
- * routines just tns the lock and then read/modify/write of the word.
- * But after the word is updated, the routine issues an "mf" before returning,
- * and since it's a function call, we don't even need a compiler barrier.
- */
-#define __smp_mb__before_atomic() __smp_mb()
-#define __smp_mb__after_atomic() do { } while (0)
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
-#else /* 64 bit */
-#define __smp_mb__before_atomic() __smp_mb()
-#define __smp_mb__after_atomic() __smp_mb()
-#endif
-
-/*
- * The TILE architecture does not do speculative reads, so a control
- * dependency already orders against later loads as well as stores
- * (LOAD->{LOAD,STORE}), letting us forgo the additional RMB.
- */
-#define smp_acquire__after_ctrl_dep() barrier()
-
-#include <asm-generic/barrier.h>
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_TILE_BARRIER_H */
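mb_incoherent() above is a bounded-poll-then-fallback construction: spin briefly waiting for pending writes to drain, and pay for the expensive fence only if they have not. A portable restatement of that pattern follows; cycles_now(), writes_pending() and expensive_fence() are hypothetical stand-ins, not Tile APIs.

```c
#include <stdbool.h>

extern unsigned long cycles_now(void);   /* hypothetical cycle counter */
extern bool writes_pending(void);        /* hypothetical status probe */
extern void expensive_fence(void);       /* hypothetical slow fallback */

static void fence_with_timeout(void)
{
    const unsigned long TIMEOUT_CYCLES = 400;
    unsigned long start = cycles_now();

    do {
        if (!writes_pending())
            return;             /* fast path: writes drained on their own */
    } while (cycles_now() - start < TIMEOUT_CYCLES);

    expensive_fence();          /* slow path, as __mb_incoherent() above */
}
```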
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
deleted file mode 100644
index 20caa346ac06..000000000000
--- a/arch/tile/include/asm/bitops.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 1992, Linus Torvalds.
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BITOPS_H
-#define _ASM_TILE_BITOPS_H
-
-#include <linux/types.h>
-#include <asm/barrier.h>
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#ifdef __tilegx__
-#include <asm/bitops_64.h>
-#else
-#include <asm/bitops_32.h>
-#endif
-
-/**
- * ffz - find first zero bit in word
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- return __builtin_ctzl(~word);
-}
-
-static inline int fls64(__u64 w)
-{
- return (sizeof(__u64) * 8) - __builtin_clzll(w);
-}
-
-/**
- * fls - find last set bit in word
- * @x: the word to search
- *
- * This is defined in a similar way as the libc and compiler builtin
- * ffs, but returns the position of the most significant set bit.
- *
- * fls(value) returns 0 if value is 0 or the position of the last
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 32.
- */
-static inline int fls(int x)
-{
- return fls64((unsigned int) x);
-}
-
-static inline unsigned int __arch_hweight32(unsigned int w)
-{
- return __builtin_popcount(w);
-}
-
-static inline unsigned int __arch_hweight16(unsigned int w)
-{
- return __builtin_popcount(w & 0xffff);
-}
-
-static inline unsigned int __arch_hweight8(unsigned int w)
-{
- return __builtin_popcount(w & 0xff);
-}
-
-static inline unsigned long __arch_hweight64(__u64 w)
-{
- return __builtin_popcountll(w);
-}
-
-#include <asm-generic/bitops/builtin-__ffs.h>
-#include <asm-generic/bitops/builtin-__fls.h>
-#include <asm-generic/bitops/builtin-ffs.h>
-#include <asm-generic/bitops/const_hweight.h>
-#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/non-atomic.h>
-#include <asm-generic/bitops/le.h>
-
-#endif /* _ASM_TILE_BITOPS_H */
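The search and weight helpers above are thin wrappers over compiler builtins, so their contracts can be checked with a standalone host program. Note the header's fls64() presumably relies on the Tile clz instruction being defined for a zero input; on other targets __builtin_clzll(0) is undefined, hence the explicit guard in this sketch.

```c
#include <assert.h>

static int my_fls64(unsigned long long w)
{
    return w ? (int)(sizeof(w) * 8) - __builtin_clzll(w) : 0;
}

static unsigned long my_ffz(unsigned long word)
{
    return __builtin_ctzl(~word);   /* undefined if word == ~0UL */
}

int main(void)
{
    assert(my_fls64(0) == 0);                 /* documented: fls(0) == 0 */
    assert(my_fls64(1) == 1);                 /* last set bit at position 1 */
    assert(my_fls64(0x80000000ULL) == 32);    /* bit 31 is position 32 */
    assert(my_ffz(0x3UL) == 2);               /* first zero bit is bit 2 */
    assert(__builtin_popcount(0xffu) == 8);   /* __arch_hweight8 analogue */
    return 0;
}
```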
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
deleted file mode 100644
index d1406a95f6b7..000000000000
--- a/arch/tile/include/asm/bitops_32.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BITOPS_32_H
-#define _ASM_TILE_BITOPS_32_H
-
-#include <linux/compiler.h>
-#include <asm/barrier.h>
-
-/* Tile-specific routines to support <asm/bitops.h>. */
-unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.
- * See __set_bit() if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(unsigned nr, volatile unsigned long *addr)
-{
- _atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.
- * See __clear_bit() if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- *
- * clear_bit() does not contain a memory barrier, so if it is used for
- * locking purposes, you should call smp_mb__before_atomic() and/or
- * smp_mb__after_atomic() to ensure changes are visible on other cpus.
- */
-static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
-{
- _atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * See __change_bit() if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(unsigned nr, volatile unsigned long *addr)
-{
- _atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- addr += BIT_WORD(nr);
- smp_mb(); /* barrier for proper semantics */
- return (_atomic_fetch_or(addr, mask) & mask) != 0;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- addr += BIT_WORD(nr);
- smp_mb(); /* barrier for proper semantics */
- return (_atomic_fetch_andn(addr, mask) & mask) != 0;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(unsigned nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- addr += BIT_WORD(nr);
- smp_mb(); /* barrier for proper semantics */
- return (_atomic_fetch_xor(addr, mask) & mask) != 0;
-}
-
-#include <asm-generic/bitops/ext2-atomic.h>
-
-#endif /* _ASM_TILE_BITOPS_32_H */
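A classic consumer of test_and_set_bit() is a one-bit trylock. Here is a portable sketch of that use, with GCC's generic __atomic_fetch_or() standing in for the Tile-specific _atomic_fetch_or() helper, which is not available off-target.

```c
#include <stdbool.h>

#define BITS_PER_LONG_ (8 * sizeof(unsigned long))
#define BIT_MASK_(nr)  (1UL << ((nr) % BITS_PER_LONG_))
#define BIT_WORD_(nr)  ((nr) / BITS_PER_LONG_)

static bool trylock_bit(unsigned nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK_(nr);
    unsigned long old  = __atomic_fetch_or(
            (unsigned long *)(addr + BIT_WORD_(nr)),
            mask, __ATOMIC_SEQ_CST);

    return (old & mask) == 0;   /* true iff we were the one to set it */
}
```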
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
deleted file mode 100644
index bb1a29221fcd..000000000000
--- a/arch/tile/include/asm/bitops_64.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BITOPS_64_H
-#define _ASM_TILE_BITOPS_64_H
-
-#include <linux/compiler.h>
-#include <asm/cmpxchg.h>
-
-/* See <asm/bitops.h> for API comments. */
-
-static inline void set_bit(unsigned nr, volatile unsigned long *addr)
-{
- unsigned long mask = (1UL << (nr % BITS_PER_LONG));
- __insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
-}
-
-static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
-{
- unsigned long mask = (1UL << (nr % BITS_PER_LONG));
- __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
-}
-
-static inline void change_bit(unsigned nr, volatile unsigned long *addr)
-{
- unsigned long mask = (1UL << (nr % BITS_PER_LONG));
- unsigned long guess, oldval;
- addr += nr / BITS_PER_LONG;
- oldval = *addr;
- do {
- guess = oldval;
- oldval = cmpxchg(addr, guess, guess ^ mask);
- } while (guess != oldval);
-}
-
-
-/*
- * The test_and_xxx_bit() routines require a memory fence before we
- * start the operation, and after the operation completes. We use
- * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
- * barrier(), to block until the atomic op is complete.
- */
-
-static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
-{
- int val;
- unsigned long mask = (1UL << (nr % BITS_PER_LONG));
- smp_mb(); /* barrier for proper semantics */
- val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
- & mask) != 0;
- barrier();
- return val;
-}
-
-
-static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
-{
- int val;
- unsigned long mask = (1UL << (nr % BITS_PER_LONG));
- smp_mb(); /* barrier for proper semantics */
- val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
- & mask) != 0;
- barrier();
- return val;
-}
-
-
-static inline int test_and_change_bit(unsigned nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = (1UL << (nr % BITS_PER_LONG));
- unsigned long guess, oldval;
- addr += nr / BITS_PER_LONG;
- oldval = *addr;
- do {
- guess = oldval;
- oldval = cmpxchg(addr, guess, guess ^ mask);
- } while (guess != oldval);
- return (oldval & mask) != 0;
-}
-
-#include <asm-generic/bitops/ext2-atomic-setbit.h>
-
-#endif /* _ASM_TILE_BITOPS_64_H */
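change_bit() and test_and_change_bit() above fall back to a compare-and-swap loop, evidently because tilegx has fetch-or and fetch-and instructions but no fetch-xor. The same loop in portable C, with __atomic_compare_exchange_n() standing in for cmpxchg():

```c
static int toggle_bit_return_old(unsigned long *word, unsigned long mask)
{
    unsigned long oldval = *word;

    /* On failure the builtin reloads oldval with the current value,
     * exactly like the oldval = cmpxchg(...) retry loop above. */
    while (!__atomic_compare_exchange_n(word, &oldval, oldval ^ mask,
                                        /* weak = */ 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        ;

    return (oldval & mask) != 0;
}
```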
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
deleted file mode 100644
index 7d6aaa128e8b..000000000000
--- a/arch/tile/include/asm/cache.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_CACHE_H
-#define _ASM_TILE_CACHE_H
-
-#include <arch/chip.h>
-
-/* bytes per L1 data cache line */
-#define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-/* bytes per L2 cache line */
-#define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
-#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
-#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
-
-/*
- * TILEPro I/O is not always coherent (networking typically uses coherent
- * I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the
- * L2 cacheline size helps ensure that kernel heap allocations are aligned.
- * TILE-Gx I/O is always coherent when used on hash-for-home pages.
- *
- * However, it's possible at runtime to request not to use hash-for-home
- * for the kernel heap, in which case the kernel will use flush-and-inval
- * to manage coherence. As a result, we use L2_CACHE_BYTES for the
- * DMA minimum alignment to avoid false sharing in the kernel heap.
- */
-#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
-
-/* use the cache line size for the L2, which is where it counts */
-#define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT
-#define SMP_CACHE_BYTES L2_CACHE_BYTES
-#define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT
-#define INTERNODE_CACHE_BYTES L2_CACHE_BYTES
-
-/* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-/*
- * Originally we used small TLB pages for kernel data and grouped some
- * things together as ro-after-init, enforcing the property at the end
- * of initialization by making those pages read-only and non-coherent.
- * This allowed better cache utilization since cache inclusion did not
- * need to be maintained. However, to do this requires an extra TLB
- * entry, which on balance is more of a performance hit than the
- * non-coherence is a performance gain, so we now just make "read
- * mostly" and "ro-after-init" be synonyms. We keep the attribute
- * separate in case we change our minds at a future date.
- */
-#define __ro_after_init __read_mostly
-
-#endif /* _ASM_TILE_CACHE_H */
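L2_CACHE_ALIGN() is the usual round-up-to-a-power-of-two idiom: add line-size-minus-one, then mask down with the negated line size. A quick self-contained check with a 64-byte stand-in line size:

```c
#include <assert.h>

#define LINE 64UL
#define ALIGN_UP(x) (((x) + (LINE - 1)) & -LINE)   /* -LINE == ~(LINE - 1) */

int main(void)
{
    assert(ALIGN_UP(0)   == 0);
    assert(ALIGN_UP(1)   == 64);
    assert(ALIGN_UP(64)  == 64);     /* already aligned: unchanged */
    assert(ALIGN_UP(100) == 128);    /* (100 + 63) & ~63 */
    return 0;
}
```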
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
deleted file mode 100644
index 92ee4c8a4f76..000000000000
--- a/arch/tile/include/asm/cacheflush.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_CACHEFLUSH_H
-#define _ASM_TILE_CACHEFLUSH_H
-
-#include <arch/chip.h>
-
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-#include <linux/cache.h>
-#include <arch/icache.h>
-
-/* Caches are physically-indexed and so don't need special treatment */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
-
-/* Flush the icache just on this cpu */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-
-/* Flush the entire icache on this cpu. */
-#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())
-
-#ifdef CONFIG_SMP
-/*
- * When the kernel writes to its own text we need to do an SMP
- * broadcast to make the L1I coherent everywhere. This includes
- * module load and single step.
- */
-extern void flush_icache_range(unsigned long start, unsigned long end);
-#else
-#define flush_icache_range __flush_icache_range
-#endif
-
-/*
- * An update to an executable user page requires icache flushing.
- * We could carefully update only tiles that are running this process,
- * and rely on the fact that we flush the icache on every context
- * switch to avoid doing extra work here. But for now, I'll be
- * conservative and just do a global icache flush.
- */
-static inline void copy_to_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, void *src, int len)
-{
- memcpy(dst, src, len);
- if (vma->vm_flags & VM_EXEC) {
- flush_icache_range((unsigned long) dst,
- (unsigned long) dst + len);
- }
-}
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy((dst), (src), (len))
-
-/* Flush a VA range; pads to L2 cacheline boundaries. */
-static inline void __flush_buffer(void *buffer, size_t size)
-{
- char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
- char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
- while (next < finish) {
- __insn_flush(next);
- next += CHIP_FLUSH_STRIDE();
- }
-}
-
-/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
-static inline void __finv_buffer(void *buffer, size_t size)
-{
- char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
- char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
- while (next < finish) {
- __insn_finv(next);
- next += CHIP_FINV_STRIDE();
- }
-}
-
-
-/*
- * Flush a locally-homecached VA range and wait for the evicted
- * cachelines to hit memory.
- */
-static inline void flush_buffer_local(void *buffer, size_t size)
-{
- __flush_buffer(buffer, size);
- mb_incoherent();
-}
-
-/*
- * Flush and invalidate a locally-homecached VA range and wait for the
- * evicted cachelines to hit memory.
- */
-static inline void finv_buffer_local(void *buffer, size_t size)
-{
- __finv_buffer(buffer, size);
- mb_incoherent();
-}
-
-#ifdef __tilepro__
-/* Invalidate a VA range; pads to L2 cacheline boundaries. */
-static inline void __inv_buffer(void *buffer, size_t size)
-{
- char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
- char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
- while (next < finish) {
- __insn_inv(next);
- next += CHIP_INV_STRIDE();
- }
-}
-
-/* Invalidate a VA range and wait for it to be complete. */
-static inline void inv_buffer(void *buffer, size_t size)
-{
- __inv_buffer(buffer, size);
- mb();
-}
-#endif
-
-/*
- * Flush and invalidate a VA range that is homed remotely, waiting
- * until the memory controller holds the flushed values. If "hfh" is
- * true, we will do a more expensive flush involving additional loads
- * to make sure we have touched all the possible home cpus of a buffer
- * that is homed with "hash for home".
- */
-void finv_buffer_remote(void *buffer, size_t size, int hfh);
-
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-#endif /* _ASM_TILE_CACHEFLUSH_H */
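The padding arithmetic in __flush_buffer()/__finv_buffer() rounds the start down and the end up to line boundaries, so a buffer straddling lines is covered completely. A host-side demo with stand-in numbers, showing which line addresses the loop would visit:

```c
#include <stdio.h>

#define LINE 64L

int main(void)
{
    long buffer = 1000, size = 100;           /* spans bytes 1000..1099 */
    long next   = buffer & -LINE;             /* 960: rounded down */
    long finish = (buffer + size + LINE - 1) & -LINE;  /* 1152: rounded up */

    for (; next < finish; next += LINE)
        printf("would flush line at %ld\n", next);  /* 960, 1024, 1088 */
    return 0;
}
```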
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h
deleted file mode 100644
index b21a2fdec9f7..000000000000
--- a/arch/tile/include/asm/checksum.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_CHECKSUM_H
-#define _ASM_TILE_CHECKSUM_H
-
-#include <asm-generic/checksum.h>
-
-/* Allow us to provide a more optimized do_csum(). */
-__wsum do_csum(const unsigned char *buff, int len);
-#define do_csum do_csum
-
-/*
- * Return the sum of all the 16-bit subwords in a long.
- * This sums two subwords on a 32-bit machine, and four on 64 bits.
- * The implementation does two vector adds to capture any overflow.
- */
-static inline unsigned int csum_long(unsigned long x)
-{
- unsigned long ret;
-#ifdef __tilegx__
- ret = __insn_v2sadu(x, 0);
- ret = __insn_v2sadu(ret, 0);
-#else
- ret = __insn_sadh_u(x, 0);
- ret = __insn_sadh_u(ret, 0);
-#endif
- return ret;
-}
-
-#endif /* _ASM_TILE_CHECKSUM_H */
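For reference, a portable restatement of csum_long(): sum every 16-bit subword of a long. The Tile version gets this done in two SAD instructions; plain C needs a loop, but the result is the same.

```c
static unsigned int csum_long_portable(unsigned long x)
{
    unsigned int sum = 0;
    unsigned int i;

    for (i = 0; i < sizeof(x) / 2; i++) {
        sum += (unsigned int)(x & 0xffff);   /* add one 16-bit lane */
        x >>= 16;
    }
    return sum;   /* e.g. 0x00010002UL -> 3 on both 32- and 64-bit */
}
```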
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
deleted file mode 100644
index 25d5899497be..000000000000
--- a/arch/tile/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * cmpxchg.h -- forked from asm/atomic.h with this copyright:
- *
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef _ASM_TILE_CMPXCHG_H
-#define _ASM_TILE_CMPXCHG_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/barrier.h>
-
-/* Nonexistent functions intended to cause compile errors. */
-extern void __xchg_called_with_bad_pointer(void)
- __compiletime_error("Bad argument size for xchg");
-extern void __cmpxchg_called_with_bad_pointer(void)
- __compiletime_error("Bad argument size for cmpxchg");
-
-#ifndef __tilegx__
-
-/* Note the _atomic_xxx() routines include a final mb(). */
-int _atomic_xchg(int *ptr, int n);
-int _atomic_xchg_add(int *v, int i);
-int _atomic_xchg_add_unless(int *v, int a, int u);
-int _atomic_cmpxchg(int *ptr, int o, int n);
-long long _atomic64_xchg(long long *v, long long n);
-long long _atomic64_xchg_add(long long *v, long long i);
-long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
-long long _atomic64_cmpxchg(long long *v, long long o, long long n);
-
-#define xchg(ptr, n) \
- ({ \
- if (sizeof(*(ptr)) != 4) \
- __xchg_called_with_bad_pointer(); \
- smp_mb(); \
- (typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
- })
-
-#define cmpxchg(ptr, o, n) \
- ({ \
- if (sizeof(*(ptr)) != 4) \
- __cmpxchg_called_with_bad_pointer(); \
- smp_mb(); \
- (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
- (int)n); \
- })
-
-#define xchg64(ptr, n) \
- ({ \
- if (sizeof(*(ptr)) != 8) \
- __xchg_called_with_bad_pointer(); \
- smp_mb(); \
- (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
- (long long)(n)); \
- })
-
-#define cmpxchg64(ptr, o, n) \
- ({ \
- if (sizeof(*(ptr)) != 8) \
- __cmpxchg_called_with_bad_pointer(); \
- smp_mb(); \
- (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
- (long long)o, (long long)n); \
- })
-
-#else
-
-#define xchg(ptr, n) \
- ({ \
- typeof(*(ptr)) __x; \
- smp_mb(); \
- switch (sizeof(*(ptr))) { \
- case 4: \
- __x = (typeof(__x))(unsigned long) \
- __insn_exch4((ptr), \
- (u32)(unsigned long)(n)); \
- break; \
- case 8: \
- __x = (typeof(__x)) \
- __insn_exch((ptr), (unsigned long)(n)); \
- break; \
- default: \
- __xchg_called_with_bad_pointer(); \
- break; \
- } \
- smp_mb(); \
- __x; \
- })
-
-#define cmpxchg(ptr, o, n) \
- ({ \
- typeof(*(ptr)) __x; \
- __insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
- smp_mb(); \
- switch (sizeof(*(ptr))) { \
- case 4: \
- __x = (typeof(__x))(unsigned long) \
- __insn_cmpexch4((ptr), \
- (u32)(unsigned long)(n)); \
- break; \
- case 8: \
- __x = (typeof(__x))__insn_cmpexch((ptr), \
- (long long)(n)); \
- break; \
- default: \
- __cmpxchg_called_with_bad_pointer(); \
- break; \
- } \
- smp_mb(); \
- __x; \
- })
-
-#define xchg64 xchg
-#define cmpxchg64 cmpxchg
-
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_TILE_CMPXCHG_H */
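The canonical consumer of the cmpxchg() defined above is a read-modify-write retry loop. A minimal sketch (illustrative, not from the tree): cmpxchg() returns the value it actually found, so success means "found what we expected".

```c
static int atomic_inc_sketch(int *ctr)
{
    int old, seen;

    do {
        old  = *ctr;                        /* snapshot */
        seen = cmpxchg(ctr, old, old + 1);  /* propose old + 1 */
    } while (seen != old);                  /* raced with another cpu: retry */

    return old + 1;
}
```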
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
deleted file mode 100644
index 769ff6ac0bf5..000000000000
--- a/arch/tile/include/asm/compat.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_COMPAT_H
-#define _ASM_TILE_COMPAT_H
-
-/*
- * Architecture specific compatibility types
- */
-#include <linux/types.h>
-#include <linux/sched.h>
-
-#define COMPAT_USER_HZ 100
-
-/* "long" and pointer-based types are different. */
-typedef s32 compat_long_t;
-typedef u32 compat_ulong_t;
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_off_t;
-typedef s32 compat_time_t;
-typedef s32 compat_clock_t;
-typedef u32 compat_ino_t;
-typedef u32 compat_caddr_t;
-typedef u32 compat_uptr_t;
-
-/* Many types are "int" or otherwise the same. */
-typedef __kernel_pid_t compat_pid_t;
-typedef __kernel_uid_t __compat_uid_t;
-typedef __kernel_gid_t __compat_gid_t;
-typedef __kernel_uid32_t __compat_uid32_t;
-typedef __kernel_gid32_t __compat_gid32_t;
-typedef __kernel_mode_t compat_mode_t;
-typedef __kernel_dev_t compat_dev_t;
-typedef __kernel_loff_t compat_loff_t;
-typedef __kernel_ipc_pid_t compat_ipc_pid_t;
-typedef __kernel_daddr_t compat_daddr_t;
-typedef __kernel_fsid_t compat_fsid_t;
-typedef __kernel_timer_t compat_timer_t;
-typedef __kernel_key_t compat_key_t;
-typedef int compat_int_t;
-typedef s64 compat_s64;
-typedef uint compat_uint_t;
-typedef u64 compat_u64;
-
-/* We use the same register dump format in 32-bit images. */
-typedef unsigned long compat_elf_greg_t;
-#define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t))
-typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
-
-struct compat_timespec {
- compat_time_t tv_sec;
- s32 tv_nsec;
-};
-
-struct compat_timeval {
- compat_time_t tv_sec;
- s32 tv_usec;
-};
-
-#define compat_stat stat
-#define compat_statfs statfs
-
-struct compat_sysctl {
- unsigned int name;
- int nlen;
- unsigned int oldval;
- unsigned int oldlenp;
- unsigned int newval;
- unsigned int newlen;
- unsigned int __unused[4];
-};
-
-
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define _COMPAT_NSIG 64
-#define _COMPAT_NSIG_BPW 32
-
-typedef u32 compat_sigset_word;
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
-struct compat_ipc64_perm {
- compat_key_t key;
- __compat_uid32_t uid;
- __compat_gid32_t gid;
- __compat_uid32_t cuid;
- __compat_gid32_t cgid;
- unsigned short mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- compat_ulong_t unused1;
- compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
- struct compat_ipc64_perm sem_perm;
- compat_time_t sem_otime;
- compat_ulong_t __unused1;
- compat_time_t sem_ctime;
- compat_ulong_t __unused2;
- compat_ulong_t sem_nsems;
- compat_ulong_t __unused3;
- compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
- struct compat_ipc64_perm msg_perm;
- compat_time_t msg_stime;
- compat_ulong_t __unused1;
- compat_time_t msg_rtime;
- compat_ulong_t __unused2;
- compat_time_t msg_ctime;
- compat_ulong_t __unused3;
- compat_ulong_t msg_cbytes;
- compat_ulong_t msg_qnum;
- compat_ulong_t msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
- struct compat_ipc64_perm shm_perm;
- compat_size_t shm_segsz;
- compat_time_t shm_atime;
- compat_ulong_t __unused1;
- compat_time_t shm_dtime;
- compat_ulong_t __unused2;
- compat_time_t shm_ctime;
- compat_ulong_t __unused3;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- compat_ulong_t shm_nattch;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-/*
- * A pointer passed in from user mode.  This should not be
- * used for syscall parameters: just declare those as pointers,
- * since the syscall entry code will already have converted
- * them appropriately.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- return (void __user *)(long)(s32)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
- return (u32)(unsigned long)uptr;
-}
-
-/* Sign-extend when storing a kernel pointer to a user's ptregs. */
-static inline unsigned long ptr_to_compat_reg(void __user *uptr)
-{
- return (long)(int)(long __force)uptr;
-}
-
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = task_pt_regs(current);
- return (void __user *)regs->sp - len;
-}
-
-static inline int is_compat_task(void)
-{
- return current_thread_info()->status & TS_COMPAT;
-}
-
-extern int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs);
-
-/* Compat syscalls. */
-struct compat_siginfo;
-struct compat_sigaltstack;
-long compat_sys_rt_sigreturn(void);
-long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
-long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
-long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
- u32 dummy, u32 low, u32 high);
-long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
- u32 dummy, u32 low, u32 high);
-long compat_sys_sync_file_range2(int fd, unsigned int flags,
- u32 offset_lo, u32 offset_hi,
- u32 nbytes_lo, u32 nbytes_hi);
-long compat_sys_fallocate(int fd, int mode,
- u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi);
-long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
- unsigned int offset_low, loff_t __user * result,
- unsigned int origin);
-
-/* Assembly trampoline to avoid clobbering r0. */
-long _compat_sys_rt_sigreturn(void);
-
-#endif /* _ASM_TILE_COMPAT_H */
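compat_ptr() sign-extends (so a 32-bit user pointer with the top bit set maps to the top of the 64-bit space), while ptr_to_compat() simply truncates. A tiny program showing the bit-level effect, assuming an LP64 host where long is 64 bits:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t uptr = 0x80001000u;
    void *p = (void *)(long)(int32_t)uptr;          /* compat_ptr() body */

    assert((uintptr_t)p == 0xffffffff80001000ull);  /* sign-extended */
    assert((uint32_t)(uintptr_t)p == uptr);         /* ptr_to_compat() body */
    return 0;
}
```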
diff --git a/arch/tile/include/asm/current.h b/arch/tile/include/asm/current.h
deleted file mode 100644
index da21acf020d3..000000000000
--- a/arch/tile/include/asm/current.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_CURRENT_H
-#define _ASM_TILE_CURRENT_H
-
-#include <linux/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct *get_current(void)
-{
- return current_thread_info()->task;
-}
-#define current get_current()
-
-/* Return a usable "task_struct" pointer even if the real one is corrupt. */
-struct task_struct *validate_current(void);
-
-#endif /* _ASM_TILE_CURRENT_H */
diff --git a/arch/tile/include/asm/delay.h b/arch/tile/include/asm/delay.h
deleted file mode 100644
index 97b0e69e704e..000000000000
--- a/arch/tile/include/asm/delay.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_DELAY_H
-#define _ASM_TILE_DELAY_H
-
-/* Undefined functions to get compile-time errors. */
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \
- __ndelay(n))
-
-#endif /* _ASM_TILE_DELAY_H */
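The udelay()/ndelay() macros above turn an out-of-range *constant* argument into a build failure: __builtin_constant_p() folds to 1 for literals, so the bad branch becomes a call to a deliberately undefined function and the link breaks. A standalone reduction of the trick, with hypothetical names:

```c
extern void __bad_wait(void);            /* deliberately never defined */
extern void slow_wait(unsigned long n);  /* runtime-checked path */
extern void fast_wait(unsigned long n);  /* constant, in-range path */

#define wait_units(n)                                        \
    (__builtin_constant_p(n)                                 \
         ? ((n) > 20000 ? __bad_wait() : fast_wait(n))       \
         : slow_wait(n))

/* wait_units(10);     -> fast_wait(10), folded at compile time
 * wait_units(30000);  -> link error via __bad_wait()
 * wait_units(var);    -> slow_wait(var), checked at run time */
```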
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h
deleted file mode 100644
index 1cf45422a0df..000000000000
--- a/arch/tile/include/asm/device.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- * Arch specific extensions to struct device
- */
-
-#ifndef _ASM_TILE_DEVICE_H
-#define _ASM_TILE_DEVICE_H
-
-struct dev_archdata {
- /* Offset of the DMA address from the PA. */
- dma_addr_t dma_offset;
-
- /*
- * Highest DMA address that can be generated by devices that
- * have limited DMA capability, i.e. are not 64-bit capable.
- */
- dma_addr_t max_direct_dma_addr;
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_TILE_DEVICE_H */
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h
deleted file mode 100644
index a0a798344d5f..000000000000
--- a/arch/tile/include/asm/div64.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_TILE_DIV64_H
-#define _ASM_TILE_DIV64_H
-
-#include <linux/types.h>
-
-#ifdef __tilegx__
-static inline u64 mul_u32_u32(u32 a, u32 b)
-{
- return __insn_mul_lu_lu(a, b);
-}
-#define mul_u32_u32 mul_u32_u32
-#endif
-
-#include <asm-generic/div64.h>
-
-#endif /* _ASM_TILE_DIV64_H */
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
deleted file mode 100644
index d25fce101fc0..000000000000
--- a/arch/tile/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_DMA_MAPPING_H
-#define _ASM_TILE_DMA_MAPPING_H
-
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/cache.h>
-#include <linux/io.h>
-
-#ifdef __tilegx__
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#endif
-
-extern const struct dma_map_ops *tile_dma_map_ops;
-extern const struct dma_map_ops *gx_pci_dma_map_ops;
-extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return tile_dma_map_ops;
-}
-
-static inline dma_addr_t get_dma_offset(struct device *dev)
-{
- return dev->archdata.dma_offset;
-}
-
-static inline void set_dma_offset(struct device *dev, dma_addr_t off)
-{
- dev->archdata.dma_offset = off;
-}
-
-#define HAVE_ARCH_DMA_SET_MASK 1
-int dma_set_mask(struct device *dev, u64 mask);
-
-#endif /* _ASM_TILE_DMA_MAPPING_H */
diff --git a/arch/tile/include/asm/dma.h b/arch/tile/include/asm/dma.h
deleted file mode 100644
index 12a7ca16d164..000000000000
--- a/arch/tile/include/asm/dma.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_DMA_H
-#define _ASM_TILE_DMA_H
-
-#include <asm-generic/dma.h>
-
-/* Needed by drivers/pci/quirks.c */
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#endif
-
-#endif /* _ASM_TILE_DMA_H */
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
deleted file mode 100644
index e9d54a06736f..000000000000
--- a/arch/tile/include/asm/elf.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_ELF_H
-#define _ASM_TILE_ELF_H
-
-/*
- * ELF register definitions.
- */
-
-#include <arch/chip.h>
-
-#include <linux/ptrace.h>
-#include <linux/elf-em.h>
-#include <asm/byteorder.h>
-#include <asm/page.h>
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-/* Provide a nominal data structure. */
-#define ELF_NFPREG 0
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-#ifdef __tilegx__
-#define ELF_CLASS ELFCLASS64
-#else
-#define ELF_CLASS ELFCLASS32
-#endif
-#ifdef __BIG_ENDIAN__
-#define ELF_DATA ELFDATA2MSB
-#else
-#define ELF_DATA ELFDATA2LSB
-#endif
-
-/*
- * There seems to be a bug in how compat_binfmt_elf.c works: it
- * #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info().
- * Hack around this by providing an enum value of ELF_ARCH.
- */
-enum { ELF_ARCH = CHIP_ELF_TYPE() };
-#define ELF_ARCH ELF_ARCH
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
- ((x)->e_ident[EI_CLASS] == ELF_CLASS && \
- (x)->e_ident[EI_DATA] == ELF_DATA && \
- (x)->e_machine == CHIP_ELF_TYPE())
-
-/* The module loader only handles a few relocation types. */
-#ifndef __tilegx__
-#define R_TILE_32 1
-#define R_TILE_JOFFLONG_X1 15
-#define R_TILE_IMM16_X0_LO 25
-#define R_TILE_IMM16_X1_LO 26
-#define R_TILE_IMM16_X0_HA 29
-#define R_TILE_IMM16_X1_HA 30
-#else
-#define R_TILEGX_64 1
-#define R_TILEGX_JUMPOFF_X1 21
-#define R_TILEGX_IMM16_X0_HW0 36
-#define R_TILEGX_IMM16_X1_HW0 37
-#define R_TILEGX_IMM16_X0_HW1 38
-#define R_TILEGX_IMM16_X1_HW1 39
-#define R_TILEGX_IMM16_X0_HW2_LAST 48
-#define R_TILEGX_IMM16_X1_HW2_LAST 49
-#endif
-
-/* Use standard page size for core dumps. */
-#define ELF_EXEC_PAGESIZE PAGE_SIZE
-
-/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
- */
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-
-#define ELF_CORE_COPY_REGS(_dest, _regs) \
- memcpy((char *) &_dest, (char *) _regs, \
- sizeof(struct pt_regs));
-
-/* No additional FP registers to copy. */
-#define ELF_CORE_COPY_FPREGS(t, fpu) 0
-
-/*
- * This yields a mask that user programs can use to figure out what
- * instruction set this CPU supports. This could be done in user space,
- * but it's not easy, and we've already done it here.
- */
-#define ELF_HWCAP (0)
-
-/*
- * This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
- * intent than poking at uname or /proc/cpuinfo.
- */
-#define ELF_PLATFORM (NULL)
-
-extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr);
-
-#define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr)
-
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
-
-/* Tilera Linux has no personalities currently, so no need to do anything. */
-#define SET_PERSONALITY(ex) do { } while (0)
-
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-/* Support auto-mapping of the user interrupt vectors. */
-struct linux_binprm;
-extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack);
-/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
-#define ARCH_DLINFO \
-do { \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
-} while (0)
-
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
-#ifdef CONFIG_COMPAT
-
-#define COMPAT_ELF_PLATFORM "tilegx-m32"
-
-/*
- * "Compat" binaries have the same machine type, but 32-bit class,
- * since they're not a separate machine type, but just a 32-bit
- * variant of the standard 64-bit architecture.
- */
-#define compat_elf_check_arch(x) \
- ((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
- (x)->e_machine == CHIP_ELF_TYPE())
-
-#define compat_start_thread(regs, ip, usp) do { \
- regs->pc = ptr_to_compat_reg((void *)(ip)); \
- regs->sp = ptr_to_compat_reg((void *)(usp)); \
- single_step_execve(); \
- } while (0)
-
-/*
- * Use SET_PERSONALITY to indicate compatibility via TS_COMPAT.
- */
-#undef SET_PERSONALITY
-#define SET_PERSONALITY(ex) \
-do { \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
- current_thread_info()->status &= ~TS_COMPAT; \
-} while (0)
-#define COMPAT_SET_PERSONALITY(ex) \
-do { \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
- current_thread_info()->status |= TS_COMPAT; \
-} while (0)
-
-#define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2)
-
-#endif /* CONFIG_COMPAT */
-
-#define CORE_DUMP_USE_REGSET
-
-#endif /* _ASM_TILE_ELF_H */
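elf_check_arch() above boils down to three field comparisons on the ELF header. A user-space sketch of the same shape; the machine value 191 (EM_TILEGX) is used here for illustration, whereas the kernel defers to CHIP_ELF_TYPE():

```c
#include <elf.h>
#include <stdbool.h>

static bool check_arch_sketch(const Elf64_Ehdr *h)
{
    return h->e_ident[EI_CLASS] == ELFCLASS64 &&    /* right word size */
           h->e_ident[EI_DATA]  == ELFDATA2LSB &&   /* right endianness */
           h->e_machine         == 191;             /* EM_TILEGX (assumed) */
}
```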
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h
deleted file mode 100644
index ffe2637aeb31..000000000000
--- a/arch/tile/include/asm/fixmap.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 1998 Ingo Molnar
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_FIXMAP_H
-#define _ASM_TILE_FIXMAP_H
-
-#include <asm/page.h>
-
-#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of supervisor virtual memory backwards.
- * This also lets us do fail-safe vmalloc(): we can
- * guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * These 'compile-time allocated' memory buffers are fixed-size
- * pages (or larger if used with an increment higher than 1).
- * Use set_fixmap(idx, phys) to associate physical memory with
- * fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-enum fixed_addresses {
-#ifdef __tilegx__
- /*
- * TILEPro has unmapped memory above, so the hole isn't needed there;
- * it would also push TILEPro over a single 16MB pmd. Hence tilegx only.
- */
- FIX_HOLE,
-#endif
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
-#ifdef __tilegx__ /* see homecache.c */
- FIX_HOMECACHE_BEGIN,
- FIX_HOMECACHE_END = FIX_HOMECACHE_BEGIN+(NR_CPUS)-1,
-#endif
- __end_of_permanent_fixed_addresses,
-
- /*
- * Temporary boot-time mappings, used before ioremap() is functional.
- * Not currently needed by the Tile architecture.
- */
-#define NR_FIX_BTMAPS 0
-#if NR_FIX_BTMAPS
- FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
- FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
- __end_of_fixed_addresses
-#else
- __end_of_fixed_addresses = __end_of_permanent_fixed_addresses
-#endif
-};
-
-#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
-#define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)
-
-#include <asm-generic/fixmap.h>
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_FIXMAP_H */
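Given these constants, asm-generic/fixmap.h computes fix_to_virt(idx) as FIXADDR_TOP - (idx << PAGE_SHIFT), so slots grow downward from the top of the address space. A worked example with stand-in numbers (Tile's actual page size is larger):

```c
#include <assert.h>

#define PAGE_SHIFT_  12                  /* stand-in; 4 KB pages */
#define FIXADDR_TOP_ 0xfffff000UL        /* stand-in top-of-fixmap address */

static unsigned long fix_to_virt_sketch(unsigned int idx)
{
    return FIXADDR_TOP_ - ((unsigned long)idx << PAGE_SHIFT_);
}

int main(void)
{
    /* Slot 0 sits at the top; each further slot is one page lower. */
    assert(fix_to_virt_sketch(0) == 0xfffff000UL);
    assert(fix_to_virt_sketch(1) == 0xffffe000UL);
    return 0;
}
```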
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h
deleted file mode 100644
index 738d239b792f..000000000000
--- a/arch/tile/include/asm/ftrace.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_FTRACE_H
-#define _ASM_TILE_FTRACE_H
-
-#ifdef CONFIG_FUNCTION_TRACER
-
-#define MCOUNT_ADDR ((unsigned long)(__mcount))
-#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
-
-#ifndef __ASSEMBLY__
-extern void __mcount(void);
-
-#define ARCH_SUPPORTS_FTRACE_OPS 1
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
- return addr;
-}
-
-struct dyn_arch_ftrace {
-};
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#endif /* _ASM_TILE_FTRACE_H */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
deleted file mode 100644
index 83c1e639b411..000000000000
--- a/arch/tile/include/asm/futex.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * These routines make two important assumptions:
- *
- * 1. atomic_t is really an int and can be freely cast back and forth
- * (validated in __init_atomic_per_cpu).
- *
- * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
- * the same locking convention that all the kernel atomic routines use.
- */
-
-#ifndef _ASM_TILE_FUTEX_H
-#define _ASM_TILE_FUTEX_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/futex.h>
-#include <linux/uaccess.h>
-#include <linux/errno.h>
-#include <asm/atomic.h>
-
-/*
- * Support macros for futex operations. Do not use these macros directly.
- * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
- * __futex_cmpxchg() additionally assumes "oldval".
- */
-
-#ifdef __tilegx__
-
-#define __futex_asm(OP) \
- asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "0: { movei %0, %5; j 9f }\n" \
- ".section __ex_table,\"a\"\n" \
- ".align 8\n" \
- ".quad 1b, 0b\n" \
- ".popsection\n" \
- "9:" \
- : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
- : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
-
-#define __futex_set() __futex_asm(exch4)
-#define __futex_add() __futex_asm(fetchadd4)
-#define __futex_or() __futex_asm(fetchor4)
-#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
-#define __futex_cmpxchg() \
- ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
-
-#define __futex_xor() \
- ({ \
- u32 oldval, n = oparg; \
- if ((ret = __get_user(oldval, uaddr)) == 0) { \
- do { \
- oparg = oldval ^ n; \
- __futex_cmpxchg(); \
- } while (ret == 0 && oldval != val); \
- } \
- })
-
-/* No need to prefetch, since the atomic ops go to the home cache anyway. */
-#define __futex_prolog()
-
-#else
-
-#define __futex_call(FN) \
- { \
- struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
- val = gu.val; \
- ret = gu.err; \
- }
-
-#define __futex_set() __futex_call(__atomic32_xchg)
-#define __futex_add() __futex_call(__atomic32_xchg_add)
-#define __futex_or() __futex_call(__atomic32_fetch_or)
-#define __futex_andn() __futex_call(__atomic32_fetch_andn)
-#define __futex_xor() __futex_call(__atomic32_fetch_xor)
-
-#define __futex_cmpxchg() \
- { \
- struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
- lock, oldval, oparg); \
- val = gu.val; \
- ret = gu.err; \
- }
-
-/*
- * Find the lock pointer for the atomic calls to use, and issue a
- * prefetch to the user address to bring it into cache. Similar to
- * __atomic_setup(), but we can't do a read into the L1 since it might
- * fault; instead we do a prefetch into the L2.
- */
-#define __futex_prolog() \
- int *lock; \
- __insn_prefetch(uaddr); \
- lock = __atomic_hashed_lock((int __force *)uaddr)
-#endif
-
-static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
- u32 __user *uaddr)
-{
- int uninitialized_var(val), ret;
-
- __futex_prolog();
-
- /* The 32-bit futex code makes this assumption, so validate it here. */
- BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
-
- pagefault_disable();
- switch (op) {
- case FUTEX_OP_SET:
- __futex_set();
- break;
- case FUTEX_OP_ADD:
- __futex_add();
- break;
- case FUTEX_OP_OR:
- __futex_or();
- break;
- case FUTEX_OP_ANDN:
- __futex_andn();
- break;
- case FUTEX_OP_XOR:
- __futex_xor();
- break;
- default:
- ret = -ENOSYS;
- break;
- }
- pagefault_enable();
-
- if (!ret)
- *oval = val;
-
- return ret;
-}
-
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 oparg)
-{
- int ret, val;
-
- __futex_prolog();
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- __futex_cmpxchg();
-
- *uval = val;
- return ret;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_FUTEX_H */
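The tilegx __futex_xor() above emulates an atomic XOR with a cmpxchg retry loop, since the other ops map directly onto fetch-style instructions but XOR evidently does not. The same shape in portable user-space C (no fault handling, purely illustrative):

```c
#include <stdint.h>

static uint32_t atomic_xor_via_cas(uint32_t *addr, uint32_t operand)
{
    uint32_t old = *addr;

    /* 'old' is reloaded with the current value on failure; retry. */
    while (!__atomic_compare_exchange_n(addr, &old, old ^ operand, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        ;

    return old;   /* value before the XOR, as the futex op wants */
}
```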
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h
deleted file mode 100644
index 54110af23985..000000000000
--- a/arch/tile/include/asm/hardirq.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_HARDIRQ_H
-#define _ASM_TILE_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-#include <asm/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
- long idle_timestamp;
-
- /* Hard interrupt statistics. */
- unsigned int irq_timer_count;
- unsigned int irq_syscall_count;
- unsigned int irq_resched_count;
- unsigned int irq_hv_flush_count;
- unsigned int irq_call_count;
- unsigned int irq_hv_msg_count;
- unsigned int irq_dev_intr_count;
-
-} ____cacheline_aligned irq_cpustat_t;
-
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
-
-#define __ARCH_IRQ_STAT
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#endif /* _ASM_TILE_HARDIRQ_H */
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
deleted file mode 100644
index 44d2765bde2b..000000000000
--- a/arch/tile/include/asm/hardwall.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Provide methods for access control of per-cpu resources like
- * UDN, IDN, or IPI.
- */
-#ifndef _ASM_TILE_HARDWALL_H
-#define _ASM_TILE_HARDWALL_H
-
-#include <uapi/asm/hardwall.h>
-
-/* /proc hooks for hardwall. */
-struct proc_dir_entry;
-#ifdef CONFIG_HARDWALL
-void proc_tile_hardwall_init(struct proc_dir_entry *root);
-int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task);
-#else
-static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
-#endif
-#endif /* _ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
deleted file mode 100644
index 979579b38e57..000000000000
--- a/arch/tile/include/asm/highmem.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- */
-
-#ifndef _ASM_TILE_HIGHMEM_H
-#define _ASM_TILE_HIGHMEM_H
-
-#include <linux/interrupt.h>
-#include <linux/threads.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *pkmap_page_table;
-
-/*
- * Ordering is:
- *
- * FIXADDR_TOP
- * fixed_addresses
- * FIXADDR_START
- * temp fixed addresses
- * FIXADDR_BOOT_START
- * Persistent kmap area
- * PKMAP_BASE
- * VMALLOC_END
- * Vmalloc area
- * VMALLOC_START
- * high_memory
- */
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
-void *kmap(struct page *page);
-void kunmap(struct page *page);
-void *kmap_fix_kpte(struct page *page, int finished);
-
-/* This macro is used only in map_new_virtual() to map "page". */
-#define kmap_prot page_to_kpgprot(page)
-
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
-void *kmap_atomic_pfn(unsigned long pfn);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
-void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void kmap_atomic_fix_kpte(struct page *page, int finished);
-
-#define flush_cache_kmaps() do { } while (0)
-
-#endif /* _ASM_TILE_HIGHMEM_H */
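
For reference, a sketch of the atomic-kmap pattern these declarations support: copying one byte out of a possibly-highmem page. The helper name is illustrative, not from the tree:

#include <linux/highmem.h>

static u8 peek_page_byte(struct page *page, unsigned int offset)
{
	/* kmap_atomic() maps the page into a per-cpu fixmap slot. */
	u8 *va = kmap_atomic(page);
	u8 val = va[offset];

	__kunmap_atomic(va);
	return val;
}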
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h
deleted file mode 100644
index 7ddd1b8d6910..000000000000
--- a/arch/tile/include/asm/homecache.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Handle issues around the Tile "home cache" model of coherence.
- */
-
-#ifndef _ASM_TILE_HOMECACHE_H
-#define _ASM_TILE_HOMECACHE_H
-
-#include <asm/page.h>
-#include <linux/cpumask.h>
-
-struct page;
-struct task_struct;
-struct vm_area_struct;
-struct zone;
-
-/*
- * Coherence point for the page is its memory controller.
- * It is not present in any cache (L1 or L2).
- */
-#define PAGE_HOME_UNCACHED -1
-
-/*
- * Is this page immutable (unwritable) and thus able to be cached more
- * widely than would otherwise be possible? This means we have "nc" set.
- */
-#define PAGE_HOME_IMMUTABLE -2
-
-/*
- * Each cpu considers its own cache to be the home for the page,
- * which makes it incoherent.
- */
-#define PAGE_HOME_INCOHERENT -3
-
-/* Home for the page is distributed via hash-for-home. */
-#define PAGE_HOME_HASH -4
-
-/* Support wrapper to use instead of explicit hv_flush_remote(). */
-extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
- const struct cpumask *cache_cpumask,
- HV_VirtAddr tlb_va, unsigned long tlb_length,
- unsigned long tlb_pgsize,
- const struct cpumask *tlb_cpumask,
- HV_Remote_ASID *asids, int asidcount);
-
-/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
-extern pte_t pte_set_home(pte_t pte, int home);
-
-/* Do a cache eviction on the specified cpus. */
-extern void homecache_evict(const struct cpumask *mask);
-
-/*
- * Change a kernel page's homecache. It must not be mapped in user space.
- * If !CONFIG_HOMECACHE, this is only usable on LOWMEM, may only be called
- * when no other cpu can reference the page, and causes a full-chip
- * cache/TLB flush.
- */
-extern void homecache_change_page_home(struct page *, int order, int home);
-
-/*
- * Flush a page out of whatever cache(s) it is in.
- * This is more than just finv, since it properly handles waiting
- * for the data to reach memory, but it can be quite
- * heavyweight, particularly on incoherent or immutable memory.
- */
-extern void homecache_finv_page(struct page *);
-
-/*
- * Flush a page out of the specified home cache.
- * Note that the specified home need not be the actual home of the page,
- * as for example might be the case when coordinating with I/O devices.
- */
-extern void homecache_finv_map_page(struct page *, int home);
-
-/*
- * Allocate a page with the given GFP flags, home, and optionally
- * node. These routines are actually just wrappers around the normal
- * alloc_pages() / alloc_pages_node() functions, which set and clear
- * a per-cpu variable to communicate with homecache_new_kernel_page().
- * If !CONFIG_HOMECACHE, uses homecache_change_page_home().
- */
-extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
- unsigned int order, int home);
-extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order, int home);
-#define homecache_alloc_page(gfp_mask, home) \
- homecache_alloc_pages(gfp_mask, 0, home)
-
-/*
- * These routines are just pass-throughs to free_pages() when
- * we support full homecaching. If !CONFIG_HOMECACHE, then these
- * routines use homecache_change_page_home() to reset the home
- * back to the default before returning the page to the allocator.
- */
-void __homecache_free_pages(struct page *, unsigned int order);
-void homecache_free_pages(unsigned long addr, unsigned int order);
-#define __homecache_free_page(page) __homecache_free_pages((page), 0)
-#define homecache_free_page(page) homecache_free_pages((page), 0)
-
-
-/*
- * Report the page home for LOWMEM pages by examining their kernel PTE,
- * or for highmem pages as the default home.
- */
-extern int page_home(struct page *);
-
-#define homecache_migrate_kthread() do {} while (0)
-
-#define homecache_kpte_lock() 0
-#define homecache_kpte_unlock(flags) do {} while (0)
-
-
-#endif /* _ASM_TILE_HOMECACHE_H */
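
For reference, an illustrative use of the allocation wrappers above: allocating a kernel page whose home is distributed hash-for-home and freeing it again. The helper names are invented for this sketch:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_hashed_page(void)
{
	/* PAGE_HOME_HASH: cache lines are distributed across the chip. */
	struct page *page = homecache_alloc_page(GFP_KERNEL, PAGE_HOME_HASH);

	return page ? page_address(page) : NULL;
}

static void free_hashed_page(void *va)
{
	/* Resets the home to the default before returning to the allocator. */
	homecache_free_pages((unsigned long)va, 0);
}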
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
deleted file mode 100644
index 2fac5be4de26..000000000000
--- a/arch/tile/include/asm/hugetlb.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_HUGETLB_H
-#define _ASM_TILE_HUGETLB_H
-
-#include <asm/page.h>
-#include <asm-generic/hugetlb.h>
-
-
-static inline int is_hugepage_only_range(struct mm_struct *mm,
- unsigned long addr,
- unsigned long len) {
- return 0;
-}
-
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- struct hstate *h = hstate_file(file);
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- return 0;
-}
-
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor,
- unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-
-static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- set_pte(ptep, pte);
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- return ptep_get_and_clear(mm, addr, ptep);
-}
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- ptep_clear_flush(vma, addr, ptep);
-}
-
-static inline int huge_pte_none(pte_t pte)
-{
- return pte_none(pte);
-}
-
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
- return pte_wrprotect(pte);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- ptep_set_wrprotect(mm, addr, ptep);
-}
-
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
-
-static inline pte_t huge_ptep_get(pte_t *ptep)
-{
- return *ptep;
-}
-
-static inline void arch_clear_hugepage_flags(struct page *page)
-{
-}
-
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
-{
- size_t pagesize = huge_page_size(hstate_vma(vma));
- if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
- entry = pte_mksuper(entry);
- return entry;
-}
-#define arch_make_huge_pte arch_make_huge_pte
-
-/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
-enum {
- HUGE_SHIFT_PGDIR = 0,
- HUGE_SHIFT_PMD = 1,
- HUGE_SHIFT_PAGE = 2,
- HUGE_SHIFT_ENTRIES
-};
-extern int huge_shift[HUGE_SHIFT_ENTRIES];
-#endif
-
-#endif /* _ASM_TILE_HUGETLB_H */
diff --git a/arch/tile/include/asm/hv_driver.h b/arch/tile/include/asm/hv_driver.h
deleted file mode 100644
index ad614de899b3..000000000000
--- a/arch/tile/include/asm/hv_driver.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This header defines a wrapper interface for managing hypervisor
- * device calls that will result in an interrupt at some later time.
- * In particular, this provides wrappers for hv_dev_preada() and
- * hv_dev_pwritea().
- */
-
-#ifndef _ASM_TILE_HV_DRIVER_H
-#define _ASM_TILE_HV_DRIVER_H
-
-#include <hv/hypervisor.h>
-
-struct hv_driver_cb;
-
-/* A callback to be invoked when an operation completes. */
-typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result);
-
-/*
- * A structure to hold information about an outstanding call.
- * The driver must allocate a separate structure for each call.
- */
-struct hv_driver_cb {
- hv_driver_callback_t *callback; /* Function to call on interrupt. */
- void *dev; /* Driver-specific state variable. */
-};
-
-/* Wrapper for invoking hv_dev_preada(). */
-static inline int
-tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len,
- HV_SGL sgl[/* sgl_len */], __hv64 offset,
- struct hv_driver_cb *callback)
-{
- return hv_dev_preada(devhdl, flags, sgl_len, sgl,
- offset, (HV_IntArg)callback);
-}
-
-/* Wrapper for invoking hv_dev_pwritea(). */
-static inline int
-tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len,
- HV_SGL sgl[/* sgl_len */], __hv64 offset,
- struct hv_driver_cb *callback)
-{
- return hv_dev_pwritea(devhdl, flags, sgl_len, sgl,
- offset, (HV_IntArg)callback);
-}
-
-
-#endif /* _ASM_TILE_HV_DRIVER_H */
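
For reference, a sketch of the intended callback flow: the driver embeds an hv_driver_cb in its own state, starts an asynchronous read, and is invoked at interrupt time with the result. The struct and function names are illustrative only:

struct my_dev {
	struct hv_driver_cb cb;
	/* ... other driver state ... */
};

static void my_read_done(struct hv_driver_cb *cb, __hv32 result)
{
	struct my_dev *dev = cb->dev;

	/* ... complete the request for 'dev' using 'result' ... */
}

static int my_start_read(struct my_dev *dev, int devhdl,
			 HV_SGL sgl[], __hv32 sgl_len, __hv64 offset)
{
	dev->cb.callback = my_read_done;
	dev->cb.dev = dev;
	return tile_hv_dev_preada(devhdl, 0, sgl_len, sgl, offset, &dev->cb);
}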
diff --git a/arch/tile/include/asm/ide.h b/arch/tile/include/asm/ide.h
deleted file mode 100644
index 3c6f2ed894ce..000000000000
--- a/arch/tile/include/asm/ide.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_IDE_H
-#define _ASM_TILE_IDE_H
-
-/* For IDE on PCI */
-#define MAX_HWIFS 10
-
-#define ide_default_io_ctl(base) (0)
-
-#include <asm-generic/ide_iops.h>
-
-#endif /* _ASM_TILE_IDE_H */
diff --git a/arch/tile/include/asm/insn.h b/arch/tile/include/asm/insn.h
deleted file mode 100644
index f78ba5c16722..000000000000
--- a/arch/tile/include/asm/insn.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2015 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef __ASM_TILE_INSN_H
-#define __ASM_TILE_INSN_H
-
-#include <arch/opcode.h>
-
-static inline tilegx_bundle_bits NOP(void)
-{
- return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
- create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0) |
- create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
- create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
- create_Opcode_X1(RRR_0_OPCODE_X1);
-}
-
-static inline tilegx_bundle_bits tilegx_gen_branch(unsigned long pc,
- unsigned long addr,
- bool link)
-{
- tilegx_bundle_bits opcode_x0, opcode_x1;
- long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
-
- if (link) {
- /* opcode: jal addr */
- opcode_x1 =
- create_Opcode_X1(JUMP_OPCODE_X1) |
- create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
- create_JumpOff_X1(pcrel_by_instr);
- } else {
- /* opcode: j addr */
- opcode_x1 =
- create_Opcode_X1(JUMP_OPCODE_X1) |
- create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
- create_JumpOff_X1(pcrel_by_instr);
- }
-
- /* opcode: fnop */
- opcode_x0 =
- create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
- create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0);
-
- return opcode_x1 | opcode_x0;
-}
-
-#endif /* __ASM_TILE_INSN_H */
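
For reference, how a runtime-patching client (e.g. ftrace or kprobes on tilegx) could combine the two helpers above, swapping a bundle between a NOP and an unconditional jump; the wrapper name is illustrative:

#include <linux/types.h>

static tilegx_bundle_bits patch_bundle(unsigned long pc,
				       unsigned long target, bool enable)
{
	/* "false" selects plain "j" rather than "jal" (no link register). */
	return enable ? tilegx_gen_branch(pc, target, false) : NOP();
}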
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
deleted file mode 100644
index 30f4a210d148..000000000000
--- a/arch/tile/include/asm/io.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_IO_H
-#define _ASM_TILE_IO_H
-
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <asm/page.h>
-
-/* Maximum PCI I/O space address supported. */
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access.
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer.
- */
-#define xlate_dev_kmem_ptr(p) p
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
-/*
- * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
- * long before casting it to a pointer to avoid compiler warnings.
- */
-#if CHIP_HAS_MMIO()
-extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
- pgprot_t pgprot);
-extern void iounmap(volatile void __iomem *addr);
-#else
-#define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
-#define iounmap(addr) ((void)0)
-#endif
-
-#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
-#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
-#define ioremap_wt(physaddr, size) ioremap(physaddr, size)
-#define ioremap_uc(physaddr, size) ioremap(physaddr, size)
-#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
-
-#define mmiowb()
-
-/* Conversion between virtual and physical mappings. */
-#define mm_ptov(addr) ((void *)phys_to_virt(addr))
-#define mm_vtop(addr) ((unsigned long)virt_to_phys(addr))
-
-#if CHIP_HAS_MMIO()
-
-/*
- * We use inline assembly to guarantee that the compiler does not
- * split an access into multiple byte-sized accesses as it might
- * sometimes do if a register data structure is marked "packed".
- * Obviously on tile we can't tolerate such an access being
- * actually unaligned, but we want to avoid the case where the
- * compiler conservatively would generate multiple accesses even
- * for an aligned read or write.
- */
-
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
- return *(const volatile u8 __force *)addr;
-}
-
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
- u16 ret;
- asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr));
- barrier();
- return le16_to_cpu(ret);
-}
-
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
- u32 ret;
- /* Sign-extend to conform to u32 ABI sign-extension convention. */
- asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr));
- barrier();
- return le32_to_cpu(ret);
-}
-
-static inline u64 __raw_readq(const volatile void __iomem *addr)
-{
- u64 ret;
- asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr));
- barrier();
- return le64_to_cpu(ret);
-}
-
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
-{
- *(volatile u8 __force *)addr = val;
-}
-
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
-{
- asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val)));
-}
-
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
-{
- asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val)));
-}
-
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
-{
- asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val)));
-}
-
-/*
- * The on-chip I/O hardware on tilegx is configured with VA=PA for the
- * kernel's PA range. The low-level APIs and field names use "va" and
- * "void *" nomenclature, to be consistent with the general notion
- * that the addresses in question are virtualizable, but in the kernel
- * context we are actually manipulating PA values. (In other contexts,
- * e.g. access from user space, we do in fact use real virtual addresses
- * in the va fields.) To allow readers of the code to understand what's
- * happening, we direct their attention to this comment by using the
- * following two functions that just duplicate __va() and __pa().
- */
-typedef unsigned long tile_io_addr_t;
-static inline tile_io_addr_t va_to_tile_io_addr(void *va)
-{
- BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t));
- return __pa(va);
-}
-static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr)
-{
- return __va(tile_io_addr);
-}
-
-#else /* CHIP_HAS_MMIO() */
-
-#ifdef CONFIG_PCI
-
-extern u8 _tile_readb(unsigned long addr);
-extern u16 _tile_readw(unsigned long addr);
-extern u32 _tile_readl(unsigned long addr);
-extern u64 _tile_readq(unsigned long addr);
-extern void _tile_writeb(u8 val, unsigned long addr);
-extern void _tile_writew(u16 val, unsigned long addr);
-extern void _tile_writel(u32 val, unsigned long addr);
-extern void _tile_writeq(u64 val, unsigned long addr);
-
-#define __raw_readb(addr) _tile_readb((unsigned long)(addr))
-#define __raw_readw(addr) _tile_readw((unsigned long)(addr))
-#define __raw_readl(addr) _tile_readl((unsigned long)(addr))
-#define __raw_readq(addr) _tile_readq((unsigned long)(addr))
-#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)(addr))
-#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)(addr))
-#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)(addr))
-#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)(addr))
-
-#else /* CONFIG_PCI */
-
-/*
- * The tilepro architecture does not support IOMEM unless PCI is enabled.
- * Unfortunately we can't yet simply not declare these methods,
- * since some generic code that is compiled into the kernel but
- * never run uses them unconditionally.
- */
-
-static inline int iomem_panic(void)
-{
- panic("readb/writeb and friends do not exist on tile without PCI");
- return 0;
-}
-
-static inline u8 readb(unsigned long addr)
-{
- return iomem_panic();
-}
-
-static inline u16 _readw(unsigned long addr)
-{
- return iomem_panic();
-}
-
-static inline u32 readl(unsigned long addr)
-{
- return iomem_panic();
-}
-
-static inline u64 readq(unsigned long addr)
-{
- return iomem_panic();
-}
-
-static inline void writeb(u8 val, unsigned long addr)
-{
- iomem_panic();
-}
-
-static inline void writew(u16 val, unsigned long addr)
-{
- iomem_panic();
-}
-
-static inline void writel(u32 val, unsigned long addr)
-{
- iomem_panic();
-}
-
-static inline void writeq(u64 val, unsigned long addr)
-{
- iomem_panic();
-}
-
-#endif /* CONFIG_PCI */
-
-#endif /* CHIP_HAS_MMIO() */
-
-#define readb __raw_readb
-#define readw __raw_readw
-#define readl __raw_readl
-#define readq __raw_readq
-#define writeb __raw_writeb
-#define writew __raw_writew
-#define writel __raw_writel
-#define writeq __raw_writeq
-
-#define readb_relaxed readb
-#define readw_relaxed readw
-#define readl_relaxed readl
-#define readq_relaxed readq
-#define writeb_relaxed writeb
-#define writew_relaxed writew
-#define writel_relaxed writel
-#define writeq_relaxed writeq
-
-#define ioread8 readb
-#define ioread16 readw
-#define ioread32 readl
-#define ioread64 readq
-#define iowrite8 writeb
-#define iowrite16 writew
-#define iowrite32 writel
-#define iowrite64 writeq
-
-#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
-
-static inline void memset_io(volatile void *dst, int val, size_t len)
-{
- size_t x;
- BUG_ON((unsigned long)dst & 0x3);
- val = (val & 0xff) * 0x01010101;
- for (x = 0; x < len; x += 4)
- writel(val, dst + x);
-}
-
-static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
- size_t len)
-{
- size_t x;
- BUG_ON((unsigned long)src & 0x3);
- for (x = 0; x < len; x += 4)
- *(u32 *)(dst + x) = readl(src + x);
-}
-
-static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
- size_t len)
-{
- size_t x;
- BUG_ON((unsigned long)dst & 0x3);
- for (x = 0; x < len; x += 4)
- writel(*(u32 *)(src + x), dst + x);
-}
-
-#endif
-
-#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)
-
-static inline u8 inb(unsigned long addr)
-{
- return readb((volatile void __iomem *) addr);
-}
-
-static inline u16 inw(unsigned long addr)
-{
- return readw((volatile void __iomem *) addr);
-}
-
-static inline u32 inl(unsigned long addr)
-{
- return readl((volatile void __iomem *) addr);
-}
-
-static inline void outb(u8 b, unsigned long addr)
-{
- writeb(b, (volatile void __iomem *) addr);
-}
-
-static inline void outw(u16 b, unsigned long addr)
-{
- writew(b, (volatile void __iomem *) addr);
-}
-
-static inline void outl(u32 b, unsigned long addr)
-{
- writel(b, (volatile void __iomem *) addr);
-}
-
-static inline void insb(unsigned long addr, void *buffer, int count)
-{
- if (count) {
- u8 *buf = buffer;
- do {
- u8 x = inb(addr);
- *buf++ = x;
- } while (--count);
- }
-}
-
-static inline void insw(unsigned long addr, void *buffer, int count)
-{
- if (count) {
- u16 *buf = buffer;
- do {
- u16 x = inw(addr);
- *buf++ = x;
- } while (--count);
- }
-}
-
-static inline void insl(unsigned long addr, void *buffer, int count)
-{
- if (count) {
- u32 *buf = buffer;
- do {
- u32 x = inl(addr);
- *buf++ = x;
- } while (--count);
- }
-}
-
-static inline void outsb(unsigned long addr, const void *buffer, int count)
-{
- if (count) {
- const u8 *buf = buffer;
- do {
- outb(*buf++, addr);
- } while (--count);
- }
-}
-
-static inline void outsw(unsigned long addr, const void *buffer, int count)
-{
- if (count) {
- const u16 *buf = buffer;
- do {
- outw(*buf++, addr);
- } while (--count);
- }
-}
-
-static inline void outsl(unsigned long addr, const void *buffer, int count)
-{
- if (count) {
- const u32 *buf = buffer;
- do {
- outl(*buf++, addr);
- } while (--count);
- }
-}
-
-extern void __iomem *ioport_map(unsigned long port, unsigned int len);
-extern void ioport_unmap(void __iomem *addr);
-
-#else
-
-/*
- * The TilePro architecture does not support IOPORT, even with PCI.
- * Unfortunately we can't yet simply not declare these methods,
- * since some generic code that is compiled into the kernel but
- * never run uses them unconditionally.
- */
-
-static inline long ioport_panic(void)
-{
-#ifdef __tilegx__
- panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it");
-#else
- panic("inb/outb and friends do not exist on tile");
-#endif
- return 0;
-}
-
-static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
- pr_info("ioport_map: mapping IO resources is unsupported on tile\n");
- return NULL;
-}
-
-static inline void ioport_unmap(void __iomem *addr)
-{
- ioport_panic();
-}
-
-static inline u8 inb(unsigned long addr)
-{
- return ioport_panic();
-}
-
-static inline u16 inw(unsigned long addr)
-{
- return ioport_panic();
-}
-
-static inline u32 inl(unsigned long addr)
-{
- return ioport_panic();
-}
-
-static inline void outb(u8 b, unsigned long addr)
-{
- ioport_panic();
-}
-
-static inline void outw(u16 b, unsigned long addr)
-{
- ioport_panic();
-}
-
-static inline void outl(u32 b, unsigned long addr)
-{
- ioport_panic();
-}
-
-static inline void insb(unsigned long addr, void *buffer, int count)
-{
- ioport_panic();
-}
-
-static inline void insw(unsigned long addr, void *buffer, int count)
-{
- ioport_panic();
-}
-
-static inline void insl(unsigned long addr, void *buffer, int count)
-{
- ioport_panic();
-}
-
-static inline void outsb(unsigned long addr, const void *buffer, int count)
-{
- ioport_panic();
-}
-
-static inline void outsw(unsigned long addr, const void *buffer, int count)
-{
- ioport_panic();
-}
-
-static inline void outsl(unsigned long addr, const void *buffer, int count)
-{
- ioport_panic();
-}
-
-#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */
-
-#define inb_p(addr) inb(addr)
-#define inw_p(addr) inw(addr)
-#define inl_p(addr) inl(addr)
-#define outb_p(x, addr) outb((x), (addr))
-#define outw_p(x, addr) outw((x), (addr))
-#define outl_p(x, addr) outl((x), (addr))
-
-#define ioread16be(addr) be16_to_cpu(ioread16(addr))
-#define ioread32be(addr) be32_to_cpu(ioread32(addr))
-#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
-#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
-
-#define ioread8_rep(p, dst, count) \
- insb((unsigned long) (p), (dst), (count))
-#define ioread16_rep(p, dst, count) \
- insw((unsigned long) (p), (dst), (count))
-#define ioread32_rep(p, dst, count) \
- insl((unsigned long) (p), (dst), (count))
-
-#define iowrite8_rep(p, src, count) \
- outsb((unsigned long) (p), (src), (count))
-#define iowrite16_rep(p, src, count) \
- outsw((unsigned long) (p), (src), (count))
-#define iowrite32_rep(p, src, count) \
- outsl((unsigned long) (p), (src), (count))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-#endif /* _ASM_TILE_IO_H */
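
For reference, the usual MMIO access pattern under this API on a tilegx (CHIP_HAS_MMIO()) kernel; the base address, mapping size, and register offset below are made-up example values:

static u32 toggle_enable_bit(resource_size_t base)
{
	void __iomem *regs = ioremap(base, 0x1000);
	u32 v;

	if (!regs)
		return 0;
	v = readl(regs + 0x10);		/* hypothetical control register */
	writel(v | 0x1, regs + 0x10);
	iounmap(regs);
	return v;
}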
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
deleted file mode 100644
index 1fa1f2544ff9..000000000000
--- a/arch/tile/include/asm/irq.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_IRQ_H
-#define _ASM_TILE_IRQ_H
-
-#include <linux/hardirq.h>
-
-/* The hypervisor interface provides 32 IRQs. */
-#define NR_IRQS 32
-
-/* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 0
-/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */
-#define NR_IRQS_LEGACY 1
-
-#define irq_canonicalize(irq) (irq)
-
-void ack_bad_irq(unsigned int irq);
-
-/*
- * Different ways of handling interrupts. Tile interrupts are always
- * per-cpu; there is no global interrupt controller to implement
- * enable/disable. Most onboard devices can send their interrupts to
- * many tiles at the same time, and Tile-specific drivers know how to
- * deal with this.
- *
- * However, generic devices (usually PCIE based, sometimes GPIO)
- * expect that interrupts will fire on a single core at a time and
- * that the irq can be enabled or disabled from any core at any time.
- * We implement this by directing such interrupts to a single core.
- *
- * One added wrinkle is that PCI interrupts can be either
- * hardware-cleared (legacy interrupts) or software cleared (MSI).
- * Other generic device systems (GPIO) are always software-cleared.
- *
- * The enums below are used by drivers for onboard devices, including
- * the internals of PCI root complex and GPIO. They allow the driver
- * to tell the generic irq code what kind of interrupt is mapped to a
- * particular IRQ number.
- */
-enum {
- /* per-cpu interrupt; use enable/disable_percpu_irq() to mask */
- TILE_IRQ_PERCPU,
- /* global interrupt, hardware responsible for clearing. */
- TILE_IRQ_HW_CLEAR,
- /* global interrupt, software responsible for clearing. */
- TILE_IRQ_SW_CLEAR,
-};
-
-
-/*
- * Paravirtualized drivers should call this when they dynamically
- * allocate a new IRQ or discover an IRQ that was pre-allocated by the
- * hypervisor for use with their particular device. This gives the
- * IRQ subsystem an opportunity to do interrupt-type-specific
- * initialization.
- *
- * ISSUE: We should modify this API so that registering anything
- * except percpu interrupts also requires providing callback methods
- * for enabling and disabling the interrupt. This would allow the
- * generic IRQ code to proxy enable/disable_irq() calls back into the
- * PCI subsystem, which in turn could enable or disable the interrupt
- * at the PCI shim.
- */
-void tile_irq_activate(unsigned int irq, int tile_irq_type);
-
-void setup_irq_regs(void);
-
-#ifdef __tilegx__
-void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
- bool exclude_self);
-#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
-#endif
-
-#endif /* _ASM_TILE_IRQ_H */
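
For reference, a sketch of the registration flow described above for a software-cleared device interrupt; the irq number, handler, and device name are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	/* ... clear the interrupt in software here (TILE_IRQ_SW_CLEAR) ... */
	return IRQ_HANDLED;
}

static int setup_my_dev_irq(int irq)
{
	/* Tell the tile irq code how this interrupt is cleared... */
	tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);
	/* ...then register the handler through the generic IRQ layer. */
	return request_irq(irq, my_dev_isr, 0, "my_dev", NULL);
}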
diff --git a/arch/tile/include/asm/irq_work.h b/arch/tile/include/asm/irq_work.h
deleted file mode 100644
index 78d3b6a7b27a..000000000000
--- a/arch/tile/include/asm/irq_work.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_IRQ_WORK_H
-#define __ASM_IRQ_WORK_H
-
-static inline bool arch_irq_work_has_interrupt(void)
-{
-#ifdef CONFIG_SMP
- extern bool self_interrupt_ok;
- return self_interrupt_ok;
-#else
- return false;
-#endif
-}
-
-#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
deleted file mode 100644
index 60d62a292fce..000000000000
--- a/arch/tile/include/asm/irqflags.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_IRQFLAGS_H
-#define _ASM_TILE_IRQFLAGS_H
-
-#include <arch/interrupts.h>
-#include <arch/chip.h>
-
-/*
- * The set of interrupts we want to allow when interrupts are nominally
- * disabled. The remainder are effectively "NMI" interrupts from
- * the point of view of the generic Linux code. Note that synchronous
- * interrupts (aka "non-queued") are not blocked by the mask in any case.
- */
-#define LINUX_MASKABLE_INTERRUPTS \
- (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
-
-#if CHIP_HAS_SPLIT_INTR_MASK()
-/* The same macro, but for the two 32-bit SPRs separately. */
-#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
-#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
-#endif
-
-#ifndef __ASSEMBLY__
-
-/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
-#include <asm/percpu.h>
-#include <arch/spr_def.h>
-
-/*
- * Set and clear kernel interrupt masks.
- *
- * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
- * clobber. We rely on it being equivalent to a compiler barrier in
- * this code since arch_local_irq_save() and friends must act as
- * compiler barriers. This compiler semantic is baked into enough
- * places that the compiler will maintain it going forward.
- */
-#if CHIP_HAS_SPLIT_INTR_MASK()
-#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
-# error Fix assumptions about which word various interrupts are in
-#endif
-#define interrupt_mask_set(n) do { \
- int __n = (n); \
- int __mask = 1 << (__n & 0x1f); \
- if (__n < 32) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
- else \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
-} while (0)
-#define interrupt_mask_reset(n) do { \
- int __n = (n); \
- int __mask = 1 << (__n & 0x1f); \
- if (__n < 32) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
- else \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
-} while (0)
-#define interrupt_mask_check(n) ({ \
- int __n = (n); \
- (((__n < 32) ? \
- __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
- __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
- >> (__n & 0x1f)) & 1; \
-})
-#define interrupt_mask_set_mask(mask) do { \
- unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
-} while (0)
-#define interrupt_mask_reset_mask(mask) do { \
- unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
-} while (0)
-#define interrupt_mask_save_mask() \
- (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
- (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
-#define interrupt_mask_restore_mask(mask) do { \
- unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
-} while (0)
-#else
-#define interrupt_mask_set(n) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
-#define interrupt_mask_reset(n) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
-#define interrupt_mask_check(n) \
- ((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
-#define interrupt_mask_set_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
-#define interrupt_mask_reset_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
-#define interrupt_mask_save_mask() \
- __insn_mfspr(SPR_INTERRUPT_MASK_K)
-#define interrupt_mask_restore_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
-#endif
-
-/*
- * The set of interrupts we want active if irqs are enabled.
- * Note that in particular, the tile timer interrupt comes and goes
- * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_K is removed and re-added during device
- * interrupts, as is the hardwall UDN_FIREWALL interrupt.
- * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
- * is always claimed as an "active interrupt" so we can query that bit
- * to know our current state.
- */
-DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
-
-#ifdef CONFIG_DEBUG_PREEMPT
-/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
-extern unsigned int debug_smp_processor_id(void);
-# define smp_processor_id() debug_smp_processor_id()
-#endif
-
-/* Disable interrupts. */
-#define arch_local_irq_disable() \
- interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
-
-/* Disable all interrupts, including NMIs. */
-#define arch_local_irq_disable_all() \
- interrupt_mask_set_mask(-1ULL)
-
-/*
- * Read the set of maskable interrupts.
- * We avoid the preemption warning here via raw_cpu_ptr since even
- * if irqs are already enabled, it's harmless to read the wrong cpu's
- * enabled mask.
- */
-#define arch_local_irqs_enabled() \
- (*raw_cpu_ptr(&interrupts_enabled_mask))
-
-/* Re-enable all maskable interrupts. */
-#define arch_local_irq_enable() \
- interrupt_mask_reset_mask(arch_local_irqs_enabled())
-
-/* Disable or enable interrupts based on flag argument. */
-#define arch_local_irq_restore(disabled) do { \
- if (disabled) \
- arch_local_irq_disable(); \
- else \
- arch_local_irq_enable(); \
-} while (0)
-
-/* Return true if "flags" argument means interrupts are disabled. */
-#define arch_irqs_disabled_flags(flags) ((flags) != 0)
-
-/* Return true if interrupts are currently disabled. */
-#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
-
-/* Save whether interrupts are currently disabled. */
-#define arch_local_save_flags() arch_irqs_disabled()
-
-/* Save whether interrupts are currently disabled, then disable them. */
-#define arch_local_irq_save() ({ \
- unsigned long __flags = arch_local_save_flags(); \
- arch_local_irq_disable(); \
- __flags; })
-
-/* Prevent the given interrupt from being enabled next time we enable irqs. */
-#define arch_local_irq_mask(interrupt) \
- this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
-
-/* Prevent the given interrupt from being enabled immediately. */
-#define arch_local_irq_mask_now(interrupt) do { \
- arch_local_irq_mask(interrupt); \
- interrupt_mask_set(interrupt); \
-} while (0)
-
-/* Allow the given interrupt to be enabled next time we enable irqs. */
-#define arch_local_irq_unmask(interrupt) \
- this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
-
-/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
-#define arch_local_irq_unmask_now(interrupt) do { \
- arch_local_irq_unmask(interrupt); \
- if (!irqs_disabled()) \
- interrupt_mask_reset(interrupt); \
-} while (0)
-
-#else /* __ASSEMBLY__ */
-
-/* We provide a somewhat more restricted set for assembly. */
-
-#ifdef __tilegx__
-
-#if INT_MEM_ERROR != 0
-# error Fix IRQS_DISABLED() macro
-#endif
-
-/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
-#define IRQS_DISABLED(tmp) \
- mfspr tmp, SPR_INTERRUPT_MASK_K; \
- andi tmp, tmp, 1
-
-/* Load up a pointer to &interrupts_enabled_mask. */
-#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
- moveli reg, hw2_last(interrupts_enabled_mask); \
- shl16insli reg, reg, hw1(interrupts_enabled_mask); \
- shl16insli reg, reg, hw0(interrupts_enabled_mask); \
- add reg, reg, tp
-
-/* Disable interrupts. */
-#define IRQ_DISABLE(tmp0, tmp1) \
- moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
- shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
- shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
- mtspr SPR_INTERRUPT_MASK_SET_K, tmp0
-
-/* Disable ALL synchronous interrupts (used by NMI entry). */
-#define IRQ_DISABLE_ALL(tmp) \
- movei tmp, -1; \
- mtspr SPR_INTERRUPT_MASK_SET_K, tmp
-
-/* Enable interrupts. */
-#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
- GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
- ld tmp0, tmp0
-#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
- mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
-
-#else /* !__tilegx__ */
-
-/*
- * Return 0 or 1 to indicate whether interrupts are currently disabled.
- * Note that it's important that we use a bit from the "low" mask word,
- * since when we are enabling, that is the word we write first, so if we
- * are interrupted after only writing half of the mask, the interrupt
- * handler will correctly observe that we have interrupts enabled, and
- * will enable interrupts itself on return from the interrupt handler
- * (making the original code's write of the "high" mask word idempotent).
- */
-#define IRQS_DISABLED(tmp) \
- mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
- shri tmp, tmp, INT_MEM_ERROR; \
- andi tmp, tmp, 1
-
-/* Load up a pointer to &interrupts_enabled_mask. */
-#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
- moveli reg, lo16(interrupts_enabled_mask); \
- auli reg, reg, ha16(interrupts_enabled_mask); \
- add reg, reg, tp
-
-/* Disable interrupts. */
-#define IRQ_DISABLE(tmp0, tmp1) \
- { \
- movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
- moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
- }; \
- { \
- mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
- auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
- }; \
- mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
-
-/* Disable ALL synchronous interrupts (used by NMI entry). */
-#define IRQ_DISABLE_ALL(tmp) \
- movei tmp, -1; \
- mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
- mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
-
-/* Enable interrupts. */
-#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
- GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
- { \
- lw tmp0, tmp0; \
- addi tmp1, tmp0, 4 \
- }; \
- lw tmp1, tmp1
-#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
- mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
- mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
-#endif
-
-#define IRQ_ENABLE(tmp0, tmp1) \
- IRQ_ENABLE_LOAD(tmp0, tmp1); \
- IRQ_ENABLE_APPLY(tmp0, tmp1)
-
-/*
- * Do the CPU's IRQ-state tracing from assembly code. We call a
- * C function, but almost everywhere we do, we don't mind clobbering
- * all the caller-saved registers.
- */
-#ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON jal trace_hardirqs_on
-# define TRACE_IRQS_OFF jal trace_hardirqs_off
-#else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_TILE_IRQFLAGS_H */
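
For reference, the save/disable/restore pattern the C-level macros above implement; this is roughly what the generic local_irq_save()/local_irq_restore() pair maps onto for tile:

static void critical_section(void)
{
	/* Returns nonzero if interrupts were already disabled. */
	unsigned long flags = arch_local_irq_save();

	/* ... code that must run with maskable interrupts disabled ... */

	arch_local_irq_restore(flags);
}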
diff --git a/arch/tile/include/asm/jump_label.h b/arch/tile/include/asm/jump_label.h
deleted file mode 100644
index cde7573f397b..000000000000
--- a/arch/tile/include/asm/jump_label.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright 2015 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_JUMP_LABEL_H
-#define _ASM_TILE_JUMP_LABEL_H
-
-#include <arch/opcode.h>
-
-#define JUMP_LABEL_NOP_SIZE TILE_BUNDLE_SIZE_IN_BYTES
-
-static __always_inline bool arch_static_branch(struct static_key *key,
- bool branch)
-{
- asm_volatile_goto("1:\n\t"
- "nop" "\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
- ".quad 1b, %l[l_yes], %0 + %1 \n\t"
- ".popsection\n\t"
- : : "i" (key), "i" (branch) : : l_yes);
- return false;
-l_yes:
- return true;
-}
-
-static __always_inline bool arch_static_branch_jump(struct static_key *key,
- bool branch)
-{
- asm_volatile_goto("1:\n\t"
- "j %l[l_yes]" "\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
- ".quad 1b, %l[l_yes], %0 + %1 \n\t"
- ".popsection\n\t"
- : : "i" (key), "i" (branch) : : l_yes);
- return false;
-l_yes:
- return true;
-}
-
-typedef u64 jump_label_t;
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
-#endif /* _ASM_TILE_JUMP_LABEL_H */
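
For reference, a generic static-key usage sketch: the arch helpers above determine whether the test below compiles to a NOP bundle or to a patched jump. Nothing in this snippet is tile-specific, and the key name is illustrative:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_feature);

static void fast_path(void)
{
	if (static_branch_unlikely(&my_feature)) {
		/* rarely-taken path, enabled by runtime code patching */
	}
}

/* elsewhere, e.g. from a sysfs handler: static_branch_enable(&my_feature); */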
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h
deleted file mode 100644
index 5bbbfa904c2d..000000000000
--- a/arch/tile/include/asm/kdebug.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_KDEBUG_H
-#define _ASM_TILE_KDEBUG_H
-
-#include <linux/notifier.h>
-
-enum die_val {
- DIE_OOPS = 1,
- DIE_BREAK,
- DIE_SSTEPBP,
- DIE_PAGE_FAULT,
- DIE_COMPILED_BPT
-};
-
-#endif /* _ASM_TILE_KDEBUG_H */
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h
deleted file mode 100644
index fc98ccfc98ac..000000000000
--- a/arch/tile/include/asm/kexec.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * based on kexec.h from other architectures in linux-2.6.18
- */
-
-#ifndef _ASM_TILE_KEXEC_H
-#define _ASM_TILE_KEXEC_H
-
-#include <asm/page.h>
-
-#ifndef __tilegx__
-/* Maximum physical address we can use pages from. */
-#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
-/* Maximum address we can reach in physical address mode. */
-#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
-/* Maximum address we can use for the control code buffer. */
-#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-#else
-/* We need to limit the memory below PGDIR_SIZE since
- * we only setup page table for [0, PGDIR_SIZE) before final kexec.
- */
-/* Maximum physical address we can use pages from. */
-#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
-/* Maximum address we can reach in physical address mode. */
-#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
-/* Maximum address we can use for the control code buffer. */
-#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
-#endif
-
-#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
-
-/*
- * We don't bother to provide a unique identifier, since we can only
- * reboot with a single type of kernel image anyway.
- */
-#define KEXEC_ARCH KEXEC_ARCH_DEFAULT
-
-/* Use the tile override for the page allocator. */
-struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
-#define kimage_alloc_pages_arch kimage_alloc_pages_arch
-
-#define MAX_NOTE_BYTES 1024
-
-/* Defined in arch/tile/kernel/relocate_kernel.S */
-extern const unsigned char relocate_new_kernel[];
-extern const unsigned long relocate_new_kernel_size;
-extern void relocate_new_kernel_end(void);
-
-/* Provide a dummy definition to avoid build failures. */
-static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o)
-{
-}
-
-#endif /* _ASM_TILE_KEXEC_H */
diff --git a/arch/tile/include/asm/kgdb.h b/arch/tile/include/asm/kgdb.h
deleted file mode 100644
index 280c181cf0db..000000000000
--- a/arch/tile/include/asm/kgdb.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx KGDB support.
- */
-
-#ifndef __TILE_KGDB_H__
-#define __TILE_KGDB_H__
-
-#include <linux/kdebug.h>
-#include <arch/opcode.h>
-
-#define GDB_SIZEOF_REG sizeof(unsigned long)
-
-/*
- * TILE-Gx gdb expects the following register layout:
- * 56 GPRs (R0 - R52, TP, SP, LR), 8 special GPRs (networks and ZERO),
- * plus the PC and the faultnum.
- *
- * Even though the kernel does not use the 8 special GPRs, they need to
- * be present in the register packet for correct processing by the
- * host-side gdb.
- */
-#define DBG_MAX_REG_NUM (56+8+2)
-#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
-
-/*
- * BUFMAX defines the maximum number of characters in the inbound/outbound
- * buffers. At least NUMREGBYTES*2 are needed for register packets; a
- * longer buffer is needed to list all threads.
- */
-#define BUFMAX 2048
-
-#define BREAK_INSTR_SIZE TILEGX_BUNDLE_SIZE_IN_BYTES
-
-/*
- * Require cache flush for set/clear a software breakpoint or write memory.
- */
-#define CACHE_FLUSH_IS_SAFE 1
-
-/*
- * The compiled-in breakpoint instruction can be used to "break" into
- * the debugger via magic system request key (sysrq-G).
- */
-static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;
-
-enum tilegx_regnum {
- TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
- TILEGX_FAULTNUM_REGNUM,
-};
-
-/*
- * Generate a breakpoint exception to "break" into the debugger.
- */
-static inline void arch_kgdb_breakpoint(void)
-{
- asm volatile (".quad %0\n\t"
- ::""(compiled_bpt));
-}
-
-#endif /* __TILE_KGDB_H__ */
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
deleted file mode 100644
index 92b28e3e9972..000000000000
--- a/arch/tile/include/asm/kmap_types.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_KMAP_TYPES_H
-#define _ASM_TILE_KMAP_TYPES_H
-
-/*
- * In 32-bit TILE Linux we have to balance the desire to have a lot of
- * nested atomic mappings with the fact that large page sizes and many
- * processors chew up address space quickly. In a typical
- * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
- * adds 4MB of required address-space. For now we leave KM_TYPE_NR
- * set to depth 8.
- */
-#define KM_TYPE_NR 8
-
-#endif /* _ASM_TILE_KMAP_TYPES_H */
diff --git a/arch/tile/include/asm/kprobes.h b/arch/tile/include/asm/kprobes.h
deleted file mode 100644
index 4a8b1cadca24..000000000000
--- a/arch/tile/include/asm/kprobes.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * arch/tile/include/asm/kprobes.h
- *
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_KPROBES_H
-#define _ASM_TILE_KPROBES_H
-
-#include <asm-generic/kprobes.h>
-
-#ifdef CONFIG_KPROBES
-
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/percpu.h>
-#include <arch/opcode.h>
-
-#define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE 2
-
-#define kretprobe_blacklist_size 0
-
-typedef tile_bundle_bits kprobe_opcode_t;
-
-#define flush_insn_slot(p) \
- flush_icache_range((unsigned long)p->addr, \
- (unsigned long)p->addr + \
- (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
-
-struct kprobe;
-
-/* Architecture specific copy of original instruction. */
-struct arch_specific_insn {
- kprobe_opcode_t *insn;
-};
-
-struct prev_kprobe {
- struct kprobe *kp;
- unsigned long status;
- unsigned long saved_pc;
-};
-
-#define MAX_JPROBES_STACK_SIZE 128
-#define MAX_JPROBES_STACK_ADDR \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
- - sizeof(struct pt_regs))
-
-#define MIN_JPROBES_STACK_SIZE(ADDR) \
- ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
- ? MAX_JPROBES_STACK_ADDR - (ADDR) \
- : MAX_JPROBES_STACK_SIZE)
-
-/* per-cpu kprobe control block. */
-struct kprobe_ctlblk {
- unsigned long kprobe_status;
- unsigned long kprobe_saved_pc;
- unsigned long jprobe_saved_sp;
- struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_JPROBES_STACK_SIZE];
-};
-
-extern tile_bundle_bits breakpoint2_insn;
-extern tile_bundle_bits breakpoint_insn;
-
-void arch_remove_kprobe(struct kprobe *);
-
-extern int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
-
-#endif /* CONFIG_KPROBES */
-#endif /* _ASM_TILE_KPROBES_H */
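
For reference, a minimal generic kprobe registration sketch built on the definitions above; the probed symbol is an arbitrary example and the names are illustrative:

#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* continue normal execution */
}

static struct kprobe my_probe = {
	.symbol_name = "do_sys_open",	/* example target */
	.pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_probe);
}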
diff --git a/arch/tile/include/asm/linkage.h b/arch/tile/include/asm/linkage.h
deleted file mode 100644
index e121c39751a7..000000000000
--- a/arch/tile/include/asm/linkage.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_LINKAGE_H
-#define _ASM_TILE_LINKAGE_H
-
-#include <feedback.h>
-
-#define __ALIGN .align 8
-
-/*
- * The STD_ENTRY and STD_ENDPROC macros put the function in a
- * self-named .text.foo section, and if linker feedback collection
- * is enabled, add a suitable call to the feedback collection code.
- * STD_ENTRY_SECTION lets you specify a non-standard section name.
- */
-
-#define STD_ENTRY(name) \
- .pushsection .text.##name, "ax"; \
- ENTRY(name); \
- FEEDBACK_ENTER(name)
-
-#define STD_ENTRY_SECTION(name, section) \
- .pushsection section, "ax"; \
- ENTRY(name); \
- FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name)
-
-#define STD_ENDPROC(name) \
- ENDPROC(name); \
- .Lend_##name:; \
- .popsection
-
-/* Create a file-static function entry set up for feedback gathering. */
-#define STD_ENTRY_LOCAL(name) \
- .pushsection .text.##name, "ax"; \
- ALIGN; \
- name:; \
- FEEDBACK_ENTER(name)
-
-#endif /* _ASM_TILE_LINKAGE_H */
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
deleted file mode 100644
index 0cab1182bde1..000000000000
--- a/arch/tile/include/asm/mmu.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_MMU_H
-#define _ASM_TILE_MMU_H
-
-/* Capture any arch- and mm-specific information. */
-struct mm_context {
- /*
-	 * Written under the mmap_sem semaphore; read atomically
-	 * without the semaphore, which is safe because the value is
-	 * set conservatively.
- */
- unsigned long priority_cached;
- unsigned long vdso_base;
-};
-
-typedef struct mm_context mm_context_t;
-
-void leave_mm(int cpu);
-
-#endif /* _ASM_TILE_MMU_H */
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
deleted file mode 100644
index 45a4b4c424cf..000000000000
--- a/arch/tile/include/asm/mmu_context.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_MMU_CONTEXT_H
-#define _ASM_TILE_MMU_CONTEXT_H
-
-#include <linux/smp.h>
-#include <linux/mm_types.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-#include <asm-generic/mm_hooks.h>
-
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- return 0;
-}
-
-/*
- * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
- * also call hv_install_context().
- */
-static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
-{
- /* FIXME: DIRECTIO should not always be set. FIXME. */
- int rc = hv_install_context(__pa(pgdir), prot, asid,
- HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
- if (rc < 0)
- panic("hv_install_context failed: %d", rc);
-}
-
-static inline void install_page_table(pgd_t *pgdir, int asid)
-{
- pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
- __install_page_table(pgdir, asid, *ptep);
-}
-
-/*
- * "Lazy" TLB mode is entered when we are switching to a kernel task,
- * which borrows the mm of the previous task. The goal of this
- * optimization is to avoid having to install a new page table. On
- * early x86 machines (where the concept originated) you couldn't do
- * anything short of a full page table install for invalidation, so
- * handling a remote TLB invalidate required doing a page table
- * re-install. Someone clearly decided that it was silly to keep
- * doing this while in "lazy" TLB mode, so the optimization involves
- * installing the swapper page table instead, the first time one
- * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
- * the kernel task doesn't need to take any more interrupts. At that
- * point it's then necessary to explicitly reinstall it when context
- * switching back to the original mm.
- *
- * On Tile, we have to do a page-table install whenever DMA is enabled,
- * so in that case lazy mode doesn't help anyway. And more generally,
- * we have efficient per-page TLB shootdown, and don't expect to spend
- * that much time in kernel tasks in general, so just leaving the
- * kernel task borrowing the old page table, but handling TLB
- * shootdowns, is a reasonable thing to do. And importantly, this
- * lets us use the hypervisor's internal APIs for TLB shootdown, which
- * means we don't have to worry about having TLB shootdowns blocked
- * when Linux is disabling interrupts; see the page migration code for
- * an example of where it's important for TLB shootdowns to complete
- * even when interrupts are disabled at the Linux level.
- */
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
-{
-#if CHIP_HAS_TILE_DMA()
- /*
- * We have to do an "identity" page table switch in order to
- * clear any pending DMA interrupts.
- */
- if (current->thread.tile_dma_state.enabled)
- install_page_table(mm->pgd, __this_cpu_read(current_asid));
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
- if (likely(prev != next)) {
-
- int cpu = smp_processor_id();
-
- /* Pick new ASID. */
- int asid = __this_cpu_read(current_asid) + 1;
- if (asid > max_asid) {
- asid = min_asid;
- local_flush_tlb();
- }
- __this_cpu_write(current_asid, asid);
-
- /* Clear cpu from the old mm, and set it in the new one. */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
-
- /* Re-load page tables */
- install_page_table(next->pgd, asid);
-
- /* See how we should set the red/black cache info */
- check_mm_caching(prev, next);
-
- /*
- * Since we're changing to a new mm, we have to flush
- * the icache in case some physical page now being mapped
- * has subsequently been repurposed and has new code.
- */
- __flush_icache();
-
- }
-}
-
-static inline void activate_mm(struct mm_struct *prev_mm,
- struct mm_struct *next_mm)
-{
- switch_mm(prev_mm, next_mm, NULL);
-}
-
-#define destroy_context(mm) do { } while (0)
-#define deactivate_mm(tsk, mm) do { } while (0)
-
-#endif /* _ASM_TILE_MMU_CONTEXT_H */
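
The ASID recycling in switch_mm() above can be exercised in isolation. A minimal userspace sketch, where min_asid/max_asid are illustrative stand-ins for the bounds the hypervisor reported, and the TLB flush is reduced to a printout:

    #include <stdio.h>

    static const int min_asid = 1, max_asid = 255;  /* illustrative bounds */
    static int current_asid = 1;

    /* Pick the next ASID; wrap and "flush the TLB" when the space runs out. */
    static int next_asid(void)
    {
            int asid = current_asid + 1;
            if (asid > max_asid) {
                    asid = min_asid;
                    printf("local_flush_tlb(): ASID space wrapped\n");
            }
            current_asid = asid;
            return asid;
    }

    int main(void)
    {
            for (int i = 0; i < 300; i++)
                    next_asid();
            printf("final ASID: %d\n", current_asid);
            return 0;
    }

The wrap-and-flush invalidates every live translation at once, and it can be a purely local flush because no other CPU shares this CPU's ASID counter.
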
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h
deleted file mode 100644
index 804f1098b6cd..000000000000
--- a/arch/tile/include/asm/mmzone.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_MMZONE_H
-#define _ASM_TILE_MMZONE_H
-
-extern struct pglist_data node_data[];
-#define NODE_DATA(nid) (&node_data[nid])
-
-extern void get_memcfg_numa(void);
-
-#ifdef CONFIG_DISCONTIGMEM
-
-#include <asm/page.h>
-
-/*
- * Memory ranges are generally doled out by the hypervisor in
- * fixed-size, power-of-two increments. That would make computing the node
- * very easy. We could just take a couple high bits of the PA, which
- * denote the memory shim, and we'd be done. However, when we're doing
- * memory striping, this may not be true; PAs with different high bit
- * values might be in the same node. Thus, we keep a lookup table to
- * translate the high bits of the PFN to the node number.
- */
-extern int highbits_to_node[];
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- return highbits_to_node[__pfn_to_highbits(pfn)];
-}
-
-#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
-
-static inline int pfn_valid(unsigned long pfn)
-{
- int nid = pfn_to_nid(pfn);
-
- if (nid >= 0)
- return (pfn < node_end_pfn(nid));
- return 0;
-}
-
-/* Information on the NUMA nodes that we compute early */
-extern unsigned long node_start_pfn[];
-extern unsigned long node_end_pfn[];
-extern unsigned long node_memmap_pfn[];
-extern unsigned long node_percpu_pfn[];
-extern unsigned long node_free_pfn[];
-#ifdef CONFIG_HIGHMEM
-extern unsigned long node_lowmem_end_pfn[];
-#endif
-#ifdef CONFIG_PCI
-extern unsigned long pci_reserve_start_pfn;
-extern unsigned long pci_reserve_end_pfn;
-#endif
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif /* _ASM_TILE_MMZONE_H */
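
The lookup above reduces to a shift and a table read. A minimal userspace sketch, assuming an illustrative geometry of 64KB pages, a 40-bit PA space, and four memory shims (none of these values come from this tree):

    #include <stdio.h>

    #define PAGE_SHIFT              16              /* 64KB pages */
    #define NR_PA_HIGHBIT_SHIFT     (40 - 2)        /* MAX_PA_WIDTH - log2(shims) */

    /* One entry per high-bits value; -1 would mark an unpopulated shim. */
    static int highbits_to_node[4] = { 0, 0, 1, -1 };

    static int pfn_to_nid(unsigned long pfn)
    {
            return highbits_to_node[pfn >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)];
    }

    int main(void)
    {
            unsigned long pa = 0x8000000000UL;      /* start of the third quarter */
            printf("node = %d\n", pfn_to_nid(pa >> PAGE_SHIFT));
            return 0;                               /* prints "node = 1" */
    }
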
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
deleted file mode 100644
index 44ed07ccd3d2..000000000000
--- a/arch/tile/include/asm/module.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_MODULE_H
-#define _ASM_TILE_MODULE_H
-
-#include <arch/chip.h>
-
-#include <asm-generic/module.h>
-
-/* We can't use modules built with different page sizes. */
-#if defined(CONFIG_PAGE_SIZE_16KB)
-# define MODULE_PGSZ " 16KB"
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-# define MODULE_PGSZ " 64KB"
-#else
-# define MODULE_PGSZ ""
-#endif
-
-/* We don't really support non-SMP builds, so tag the module if someone tries. */
-#ifdef CONFIG_SMP
-#define MODULE_NOSMP ""
-#else
-#define MODULE_NOSMP " nosmp"
-#endif
-
-#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
-
-#endif /* _ASM_TILE_MODULE_H */
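
The vermagic above is plain string pasting. A sketch of the 64KB-page SMP case, where the "tilegx" chip-name string is an assumption standing in for CHIP_ARCH_NAME from <arch/chip.h>:

    #include <stdio.h>

    #define CHIP_ARCH_NAME  "tilegx"        /* assumed value */
    #define MODULE_PGSZ     " 64KB"         /* CONFIG_PAGE_SIZE_64KB */
    #define MODULE_NOSMP    ""              /* CONFIG_SMP */

    int main(void)
    {
            /* Adjacent string literals concatenate: "tilegx 64KB". */
            puts(CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP);
            return 0;
    }

A module built with a different page size (or without SMP) produces a different string, so modprobe refuses to mix the two builds.
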
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
deleted file mode 100644
index 498a5f71245d..000000000000
--- a/arch/tile/include/asm/page.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PAGE_H
-#define _ASM_TILE_PAGE_H
-
-#include <linux/const.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-
-/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#if defined(CONFIG_PAGE_SIZE_4KB) /* tilepro only */
-#define PAGE_SHIFT 12
-#define CTX_PAGE_FLAG HV_CTX_PG_SM_4K
-#elif defined(CONFIG_PAGE_SIZE_16KB)
-#define PAGE_SHIFT 14
-#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-#define PAGE_SHIFT 16
-#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
-#else
-#error Page size not specified in Kconfig
-#endif
-#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
-
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
-
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-
-/*
- * If the Kconfig doesn't specify, set a maximum zone order that
- * is enough so that we can create huge pages from small pages given
- * the respective sizes of the two page types. See <linux/mmzone.h>.
- */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
-#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
-#endif
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/string.h>
-
-struct page;
-
-static inline void clear_page(void *page)
-{
- memset(page, 0, PAGE_SIZE);
-}
-
-static inline void copy_page(void *to, void *from)
-{
- memcpy(to, from, PAGE_SIZE);
-}
-
-static inline void clear_user_page(void *page, unsigned long vaddr,
- struct page *pg)
-{
- clear_page(page);
-}
-
-static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
- struct page *topage)
-{
- copy_page(to, from);
-}
-
-/*
- * Hypervisor page tables all share the same basic structure.
- */
-
-typedef HV_PTE pte_t;
-typedef HV_PTE pgd_t;
-typedef HV_PTE pgprot_t;
-
-/*
- * User L2 page tables are managed as one L2 page table per page,
- * because we use the page allocator for them. This keeps the allocation
- * simple, but it's also inefficient, since L2 page tables are much smaller
- * than pages (currently 2KB vs 64KB). So we should revisit this.
- */
-typedef struct page *pgtable_t;
-
-/* Must be a macro since it is used to create constants. */
-#define __pgprot(val) hv_pte(val)
-
-/* Rarely-used initializers, typically with a "zero" value. */
-#define __pte(x) hv_pte(x)
-#define __pgd(x) hv_pte(x)
-
-static inline u64 pgprot_val(pgprot_t pgprot)
-{
- return hv_pte_val(pgprot);
-}
-
-static inline u64 pte_val(pte_t pte)
-{
- return hv_pte_val(pte);
-}
-
-static inline u64 pgd_val(pgd_t pgd)
-{
- return hv_pte_val(pgd);
-}
-
-#ifdef __tilegx__
-
-typedef HV_PTE pmd_t;
-
-#define __pmd(x) hv_pte(x)
-
-static inline u64 pmd_val(pmd_t pmd)
-{
- return hv_pte_val(pmd);
-}
-
-#endif
-
-static inline __attribute_const__ int get_order(unsigned long size)
-{
- return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-
-#define HUGE_MAX_HSTATE 6
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
-/* Allow overriding how much VA or PA the kernel will use. */
-#define MAX_PA_WIDTH CHIP_PA_WIDTH()
-#define MAX_VA_WIDTH CHIP_VA_WIDTH()
-
-/* Each memory controller has PAs distinct in their high bits. */
-#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
-#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
-#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
-#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
-
-#ifdef __tilegx__
-
-/*
- * We reserve the lower half of memory for user-space programs, and the
- * upper half for system code. We re-map all of physical memory in the
- * upper half, which takes a quarter of our VA space. Then we have
- * the vmalloc regions. The supervisor code lives at the highest address,
- * with the hypervisor above that.
- *
- * Loadable kernel modules are placed immediately after the static
- * supervisor code, with each being allocated a 256MB region of
- * address space, so we don't have to worry about the range of "jal"
- * and other branch instructions.
- *
- * For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
- * Similarly, for now we don't play any struct page mapping games.
- */
-
-#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
-# error Too much PA to map with the VA available!
-#endif
-
-#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
-#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
-#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
-#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
-#define _VMALLOC_START FIXADDR_TOP
-#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
-#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
-#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
-
-#else /* !__tilegx__ */
-
-/*
- * A PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 768MB.
- * If you want more physical memory than this then see the CONFIG_HIGHMEM
- * option in the kernel configuration.
- *
- * The top 16MB chunk in the table below is unavailable to Linux. Since
- * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
- * (depending on whether the kernel is at PL2 or PL1), we map all of the
- * bottom of RAM at this address with a huge page table entry to minimize
- * its ITLB footprint (as well as at PAGE_OFFSET). The last architected
- * requirement is that user interrupt vectors live at 0xfc000000, so we
- * make that range of memory available to user processes. The remaining
- * regions are sized as shown; the first four addresses use the PL1
- * values, and after that, we show "typical" values, since the actual
- * addresses depend on kernel #defines.
- *
- * MEM_HV_START 0xfe000000
- * MEM_SV_START (kernel code) 0xfd000000
- * MEM_USER_INTRPT (user vector) 0xfc000000
- * FIX_KMAP_xxx 0xfa000000 (via NR_CPUS * KM_TYPE_NR)
- * PKMAP_BASE 0xf9000000 (via LAST_PKMAP)
- * VMALLOC_START 0xf7000000 (via VMALLOC_RESERVE)
- * mapped LOWMEM 0xc0000000
- */
-
-#define MEM_USER_INTRPT _AC(0xfc000000, UL)
-#define MEM_SV_START _AC(0xfd000000, UL)
-#define MEM_HV_START _AC(0xfe000000, UL)
-
-#define INTRPT_SIZE 0x4000
-
-/* Tolerate page size larger than the architecture interrupt region size. */
-#if PAGE_SIZE > INTRPT_SIZE
-#undef INTRPT_SIZE
-#define INTRPT_SIZE PAGE_SIZE
-#endif
-
-#define KERNEL_HIGH_VADDR MEM_USER_INTRPT
-#define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE)
-
-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-
-/* On 32-bit architectures we mix kernel modules in with other vmaps. */
-#define MEM_MODULE_START VMALLOC_START
-#define MEM_MODULE_END VMALLOC_END
-
-#endif /* __tilegx__ */
-
-#if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)
-
-#ifdef CONFIG_HIGHMEM
-
-/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
-extern unsigned long pbase_map[];
-extern void *vbase_map[];
-
-static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
-{
- unsigned long kaddr = (unsigned long)_kaddr;
- return pbase_map[kaddr >> HPAGE_SHIFT] +
- ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
-}
-
-static inline void *pfn_to_kaddr(unsigned long pfn)
-{
- return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
-}
-
-static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
-{
- unsigned long pfn = kaddr_to_pfn(kaddr);
- return ((phys_addr_t)pfn << PAGE_SHIFT) +
- ((unsigned long)kaddr & (PAGE_SIZE-1));
-}
-
-static inline void *phys_to_virt(phys_addr_t paddr)
-{
- return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
-}
-
-/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
-static inline int virt_addr_valid(const volatile void *kaddr)
-{
- extern void *high_memory; /* copied from <linux/mm.h> */
- return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
-}
-
-#else /* !CONFIG_HIGHMEM */
-
-static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
-{
- return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
-}
-
-static inline void *pfn_to_kaddr(unsigned long pfn)
-{
- return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
-}
-
-static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
-{
- return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
-}
-
-static inline void *phys_to_virt(phys_addr_t paddr)
-{
- return (void *)((unsigned long)paddr + PAGE_OFFSET);
-}
-
-/* Check that the given address is within some mapped range of PAs. */
-#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))
-
-#endif /* !CONFIG_HIGHMEM */
-
-/* Not all callers are consistent in how they call these functions. */
-#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
-#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))
-
-extern int devmem_is_allowed(unsigned long pagenr);
-
-#ifdef CONFIG_FLATMEM
-static inline int pfn_valid(unsigned long pfn)
-{
- return pfn < max_mapnr;
-}
-#endif
-
-/* Provide as macros since these require some other headers included. */
-#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
-#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
-#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
-
-/*
- * The kernel text is mapped at MEM_SV_START as read-only. To allow
- * modifying kernel text, it is also mapped at PAGE_OFFSET as read-write.
- * This macro converts a kernel address to its writable kernel text mapping,
- * which is used to modify the text code on a running kernel by kgdb,
- * ftrace, kprobe, jump label, etc.
- */
-#define ktext_writable_addr(kaddr) \
- ((unsigned long)(kaddr) - MEM_SV_START + PAGE_OFFSET)
-
-struct mm_struct;
-extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-extern pte_t *virt_to_kpte(unsigned long kaddr);
-
-#endif /* !__ASSEMBLY__ */
-
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#include <asm-generic/memory_model.h>
-
-#endif /* _ASM_TILE_PAGE_H */
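
get_order() above leans on tile's clz instruction returning the word size for a zero input, so a single-page allocation comes out as order 0 with no branch. A standalone sketch with an explicit guard, since __builtin_clzl(0) is undefined on most other targets (PAGE_SHIFT here is the 64KB configuration):

    #include <stdio.h>

    #define PAGE_SHIFT      16              /* CONFIG_PAGE_SIZE_64KB */
    #define BITS_PER_LONG   64

    static int get_order(unsigned long size)
    {
            unsigned long frames = (size - 1) >> PAGE_SHIFT;

            /* On tile, clz(0) == 64, so the zero case needs no guard. */
            return frames ? BITS_PER_LONG - __builtin_clzl(frames) : 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   get_order(1UL << 16),    /* one page          -> 0 */
                   get_order(1UL << 17),    /* two pages         -> 1 */
                   get_order(1UL << 24));   /* one 16MB hugepage -> 8 */
            return 0;
    }
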
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
deleted file mode 100644
index fe3de505b024..000000000000
--- a/arch/tile/include/asm/pci.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PCI_H
-#define _ASM_TILE_PCI_H
-
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <asm-generic/pci_iomap.h>
-
-#ifndef __tilegx__
-
-/*
- * Structure of a PCI controller (host bridge)
- */
-struct pci_controller {
- int index; /* PCI domain number */
- struct pci_bus *root_bus;
-
- int last_busno;
-
- int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
- int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
-
- struct pci_ops *ops;
-
- int irq_base; /* Base IRQ from the Hypervisor */
- int plx_gen1; /* flag for PLX Gen 1 configuration */
-
- /* Address ranges that are routed to this controller/bridge. */
- struct resource mem_resources[3];
-};
-
-/*
- * This flag tells if the platform is TILEmpower that needs
- * special configuration for the PLX switch chip.
- */
-extern int tile_plx_gen1;
-
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
-
-#define TILE_NUM_PCIE 2
-
-/*
- * The hypervisor maps the entirety of CPA-space as bus addresses, so
- * bus addresses are physical addresses. The networking and block
- * device layers use this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS 1
-
-/* generic pci stuff */
-#include <asm-generic/pci.h>
-
-#else
-
-#include <asm/page.h>
-#include <gxio/trio.h>
-
-/**
- * We reserve the hugepage-size address range at the top of the 64-bit address
- * space to serve as the PCI window, emulating the BAR0 space of an endpoint
- * device. This window is used by the chip-to-chip applications running on
- * the RC node. The reason for carving out this window is that Mem-Maps that
- * back up this window will not overlap with those that map the real physical
- * memory.
- */
-#define PCIE_HOST_BAR0_SIZE HPAGE_SIZE
-#define PCIE_HOST_BAR0_START HPAGE_MASK
-
-/**
- * The first PAGE_SIZE of the above "BAR" window is mapped to the
- * gxpci_host_regs structure.
- */
-#define PCIE_HOST_REGS_SIZE PAGE_SIZE
-
-/*
- * This is the PCI address where the Mem-Map interrupt regions start.
- * We use the 2nd to the last huge page of the 64-bit address space.
- * The last huge page is used for the rootcomplex "bar", for C2C purpose.
- */
-#define MEM_MAP_INTR_REGIONS_BASE (HPAGE_MASK - HPAGE_SIZE)
-
-/*
- * Each Mem-Map interrupt region occupies 4KB.
- */
-#define MEM_MAP_INTR_REGION_SIZE (1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT)
-
-/*
- * Allocate the PCI BAR window right below 4GB.
- */
-#define TILE_PCI_BAR_WINDOW_TOP (1ULL << 32)
-
-/*
- * Allocate 1GB for the PCI BAR window.
- */
-#define TILE_PCI_BAR_WINDOW_SIZE (1 << 30)
-
-/*
- * This is the highest bus address targeting the host memory that
- * can be generated by legacy PCI devices with 32-bit or less
- * DMA capability, dictated by the BAR window size and location.
- */
-#define TILE_PCI_MAX_DIRECT_DMA_ADDRESS \
- (TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1)
-
-/*
- * We shift the PCI bus range for all the physical memory up by the whole PA
- * range. The corresponding CPA of an incoming PCI request will be the PCI
- * address minus TILE_PCI_MEM_MAP_BASE_OFFSET. This also implies
- * that the 64-bit capable devices will be given DMA addresses as
- * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
- * devices, we create a separate map region that handles the low
- * 4GB.
- *
- * This design lets us avoid the "PCI hole" problem where the host bridge
- * won't pass DMA traffic with target addresses that happen to fall within the
- * BAR space. This enables us to use all the physical memory for DMA, instead
- * of wasting the same amount of physical memory as the BAR window size.
- */
-#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())
-
-/*
- * Start of the PCI memory resource, which starts at the end of the
- * maximum system physical RAM address.
- */
-#define TILE_PCI_MEM_START (1ULL << CHIP_PA_WIDTH())
-
-/*
- * Structure of a PCI controller (host bridge) on Gx.
- */
-struct pci_controller {
-
- /* Pointer back to the TRIO that this PCIe port is connected to. */
- gxio_trio_context_t *trio;
- int mac; /* PCIe mac index on the TRIO shim */
- int trio_index; /* Index of TRIO shim that contains the MAC. */
-
- int pio_mem_index; /* PIO region index for memory access */
-
-#ifdef CONFIG_TILE_PCI_IO
- int pio_io_index; /* PIO region index for I/O space access */
-#endif
-
- /*
- * Mem-Map regions for all the memory controllers so that Linux can
- * map all of its physical memory space to the PCI bus.
- */
- int mem_maps[MAX_NUMNODES];
-
- int index; /* PCI domain number */
- struct pci_bus *root_bus;
-
- /* PCI I/O space resource for this controller. */
- struct resource io_space;
- char io_space_name[32];
-
- /* PCI memory space resource for this controller. */
- struct resource mem_space;
- char mem_space_name[32];
-
- uint64_t mem_offset; /* cpu->bus memory mapping offset. */
-
- int first_busno;
-
- struct pci_ops *ops;
-
- /* Table that maps the INTx numbers to Linux irq numbers. */
- int irq_intx_table[4];
-};
-
-extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
-extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
-extern int num_trio_shims;
-
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-
-/*
- * The PCI address space does not equal the physical memory address
- * space (we have an IOMMU). The IDE and SCSI device layers use this
- * boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS 0
-
-#endif /* __tilegx__ */
-
-int __init tile_pci_init(void);
-int __init pcibios_init(void);
-
-void pcibios_fixup_bus(struct pci_bus *bus);
-
-#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)
-
-/*
- * This decides whether to display the domain number in /proc.
- */
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return 1;
-}
-
-/*
- * pcibios_assign_all_busses() tells whether or not the bus numbers
- * should be reassigned, in case the BIOS didn't do it correctly, or
- * in case we don't have a BIOS and we want to let Linux do it.
- */
-static inline int pcibios_assign_all_busses(void)
-{
- return 1;
-}
-
-#define PCIBIOS_MIN_MEM 0
-/* Minimum PCI I/O address, starting at the page boundary. */
-#define PCIBIOS_MIN_IO PAGE_SIZE
-
-/* Use any cpu for PCI. */
-#define cpumask_of_pcibus(bus) cpu_online_mask
-
-#endif /* _ASM_TILE_PCI_H */
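
The BAR-window constants above fix the highest address a 32-bit-only device can reach with direct DMA. A minimal sketch that just reproduces the arithmetic:

    #include <stdio.h>

    #define TILE_PCI_BAR_WINDOW_TOP         (1ULL << 32)    /* just below 4GB */
    #define TILE_PCI_BAR_WINDOW_SIZE        (1ULL << 30)    /* 1GB window */
    #define TILE_PCI_MAX_DIRECT_DMA_ADDRESS \
            (TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1)

    int main(void)
    {
            /* The highest bus address a 32-bit device can target directly. */
            printf("direct DMA limit: %#llx\n", TILE_PCI_MAX_DIRECT_DMA_ADDRESS);
            return 0;                       /* prints 0xbfffffff */
    }

Traffic from 32-bit devices is steered through the separate low-4GB Mem-Map region described in the comments above.
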
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
deleted file mode 100644
index 4f7ae39fa202..000000000000
--- a/arch/tile/include/asm/percpu.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PERCPU_H
-#define _ASM_TILE_PERCPU_H
-
-register unsigned long my_cpu_offset_reg asm("tp");
-
-#ifdef CONFIG_PREEMPT
-/*
- * For full preemption, we can't just use the register variable
- * directly, since we need barrier() to hazard against it, causing the
- * compiler to reload anything computed from a previous "tp" value.
- * But we also don't want to use volatile asm, since we'd like the
- * compiler to be able to cache the value across multiple percpu reads.
- * So we use a fake stack read as a hazard against barrier().
- * The 'U' constraint is like 'm' but disallows postincrement.
- */
-static inline unsigned long __my_cpu_offset(void)
-{
- unsigned long tp;
- register unsigned long *sp asm("sp");
- asm("move %0, tp" : "=r" (tp) : "U" (*sp));
- return tp;
-}
-#define __my_cpu_offset __my_cpu_offset()
-#else
-/*
- * We don't need to hazard against barrier() since "tp" doesn't ever
- * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
- * changes at function call points, at which we are already re-reading
- * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
- */
-#define __my_cpu_offset my_cpu_offset_reg
-#endif
-
-#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
-
-#include <asm-generic/percpu.h>
-
-#endif /* _ASM_TILE_PERCPU_H */
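
The fake-stack-read trick above is a general GCC idiom, not tile-specific. A generic sketch of the same idea: an empty, non-volatile asm with a memory input, so barrier()'s memory clobber forces a recompute while repeated reads between barriers can still be cached:

    #include <stdio.h>

    #define barrier()       asm volatile("" ::: "memory")

    static unsigned long fake_tp = 0x1000;  /* stand-in for the "tp" register */

    static unsigned long my_cpu_offset(void)
    {
            unsigned long tp;
            /* Non-volatile asm: the "m" input hazards against memory clobbers. */
            asm("" : "=r" (tp) : "0" (fake_tp), "m" (fake_tp));
            return tp;
    }

    int main(void)
    {
            unsigned long a = my_cpu_offset();
            barrier();                              /* e.g. a preemption point */
            unsigned long b = my_cpu_offset();      /* recomputed, not cached */
            printf("%#lx %#lx\n", a, b);
            return 0;
    }
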
diff --git a/arch/tile/include/asm/perf_event.h b/arch/tile/include/asm/perf_event.h
deleted file mode 100644
index 59c5b164e5b6..000000000000
--- a/arch/tile/include/asm/perf_event.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2014 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PERF_EVENT_H
-#define _ASM_TILE_PERF_EVENT_H
-
-#include <linux/percpu.h>
-DECLARE_PER_CPU(u64, perf_irqs);
-
-unsigned long handle_syscall_link_address(void);
-#endif /* _ASM_TILE_PERF_EVENT_H */
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
deleted file mode 100644
index 1b902508b664..000000000000
--- a/arch/tile/include/asm/pgalloc.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PGALLOC_H
-#define _ASM_TILE_PGALLOC_H
-
-#include <linux/threads.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <asm/fixmap.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-/* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
-
-/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
-#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
-#else
-#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
-#endif
-
-/* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
-#ifdef CONFIG_64BIT
- set_pte(pmdp, pmd);
-#else
- set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
-#endif
-}
-
-static inline void pmd_populate_kernel(struct mm_struct *mm,
- pmd_t *pmd, pte_t *ptep)
-{
- set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
- __pgprot(_PAGE_PRESENT)));
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t page)
-{
- set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
- __pgprot(_PAGE_PRESENT)));
-}
-
-/*
- * Allocate and free page tables.
- */
-
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
- int order);
-extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
- pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
-}
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
- pte_free(mm, virt_to_page(pte));
-}
-
-extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address, int order);
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address)
-{
- __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
-}
-
-#define check_pgt_cache() do { } while (0)
-
-/*
- * Get the small-page pte_t lowmem entry for a given pfn.
- * This may or may not be in use, depending on whether the initial
- * huge-page entry for the page has already been shattered.
- */
-pte_t *get_prealloc_pte(unsigned long pfn);
-
-/* During init, we can shatter kernel huge pages if needed. */
-void shatter_pmd(pmd_t *pmd);
-
-/* After init, a more complex technique is required. */
-void shatter_huge_page(unsigned long addr);
-
-#ifdef __tilegx__
-
-#define pud_populate(mm, pud, pmd) \
- pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-
-/* Bits for the size of the L1 (intermediate) page table. */
-#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
-
-/* How big is a kernel L2 page table? */
-#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
-
-/* We currently allocate L1 page tables by page. */
-#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
-#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
-#else
-#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
-#endif
-
-/* How many pages do we need, as an "order", for an L1 page table? */
-#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
- return (pmd_t *)page_to_virt(p);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
-{
- pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
-}
-
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long address)
-{
- __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
- L1_USER_PGTABLE_ORDER);
-}
-
-#endif /* __tilegx__ */
-
-#endif /* _ASM_TILE_PGALLOC_H */
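
Reducing _HV_LOG2_L2_SIZE() to its arithmetic -- assuming, as the surrounding code suggests, one entry per small page within a huge page and 8-byte PTEs -- shows where the 2KB-vs-64KB mismatch noted in <asm/page.h> comes from. A minimal sketch with the tilegx 64KB/16MB geometry:

    #include <stdio.h>

    #define PAGE_SHIFT      16      /* 64KB pages */
    #define HPAGE_SHIFT     24      /* 16MB huge pages */
    #define LOG2_PTE_SIZE   3       /* 8-byte PTEs (assumed) */

    /* log2(bytes) of an L2 table: entries * sizeof(pte_t). */
    #define L2_KERNEL_PGTABLE_SHIFT (HPAGE_SHIFT - PAGE_SHIFT + LOG2_PTE_SIZE)

    int main(void)
    {
            printf("L2 table: %lu bytes, page: %lu bytes\n",
                   1UL << L2_KERNEL_PGTABLE_SHIFT, 1UL << PAGE_SHIFT);
            return 0;               /* 2048 vs 65536 */
    }

Since 2KB is well under 64KB, L2_USER_PGTABLE_SHIFT is rounded up to PAGE_SHIFT and each user L2 table wastes most of its page, exactly as the comments note.
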
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
deleted file mode 100644
index adfa21b18488..000000000000
--- a/arch/tile/include/asm/pgtable.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This file contains the functions and defines necessary to modify and use
- * the TILE page table tree.
- */
-
-#ifndef _ASM_TILE_PGTABLE_H
-#define _ASM_TILE_PGTABLE_H
-
-#include <hv/hypervisor.h>
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
-#include <linux/threads.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/pfn.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <asm/page.h>
-
-struct mm_struct;
-struct vm_area_struct;
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
-extern pgd_t swapper_pg_dir[];
-extern pgprot_t swapper_pgprot;
-extern struct kmem_cache *pgd_cache;
-extern spinlock_t pgd_lock;
-extern struct list_head pgd_list;
-
-/*
- * The very last slots in the pgd_t are for addresses unusable by Linux
- * (pgd_addr_invalid() returns true). So we use them for the list structure.
- * The x86 code we are modelled on uses the page->private/index fields
- * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
- * our pgds are so much smaller than a page, it seems a waste to
- * spend a whole page on each pgd.
- */
-#define PGD_LIST_OFFSET \
- ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
-#define pgd_to_list(pgd) \
- ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
-#define list_to_pgd(list) \
- ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
-
-extern void pgtable_cache_init(void);
-extern void paging_init(void);
-extern void set_page_homes(void);
-
-#define FIRST_USER_ADDRESS 0UL
-
-#define _PAGE_PRESENT HV_PTE_PRESENT
-#define _PAGE_HUGE_PAGE HV_PTE_PAGE
-#define _PAGE_SUPER_PAGE HV_PTE_SUPER
-#define _PAGE_READABLE HV_PTE_READABLE
-#define _PAGE_WRITABLE HV_PTE_WRITABLE
-#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
-#define _PAGE_ACCESSED HV_PTE_ACCESSED
-#define _PAGE_DIRTY HV_PTE_DIRTY
-#define _PAGE_GLOBAL HV_PTE_GLOBAL
-#define _PAGE_USER HV_PTE_USER
-
-/*
- * All the "standard" bits. Cache-control bits are managed elsewhere.
- * This is used to test for valid level-2 page table pointers by checking
- * all the bits, and to mask away the cache control bits for mprotect.
- */
-#define _PAGE_ALL (\
- _PAGE_PRESENT | \
- _PAGE_HUGE_PAGE | \
- _PAGE_SUPER_PAGE | \
- _PAGE_READABLE | \
- _PAGE_WRITABLE | \
- _PAGE_EXECUTABLE | \
- _PAGE_ACCESSED | \
- _PAGE_DIRTY | \
- _PAGE_GLOBAL | \
- _PAGE_USER \
-)
-
-#define PAGE_NONE \
- __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-#define PAGE_SHARED \
- __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
- _PAGE_USER | _PAGE_ACCESSED)
-
-#define PAGE_SHARED_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
- _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
-#define PAGE_COPY_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
- _PAGE_READABLE | _PAGE_EXECUTABLE)
-#define PAGE_COPY \
- PAGE_COPY_NOEXEC
-#define PAGE_READONLY \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
-#define PAGE_READONLY_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
- _PAGE_READABLE | _PAGE_EXECUTABLE)
-
-#define _PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
-#define _PAGE_KERNEL \
- (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
-#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)
-
-#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
-
-#define page_to_kpgprot(p) PAGE_KERNEL
-
-/*
- * We could tighten these up, but for now writable or executable
- * implies readable.
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY /* this is write-only, which we won't support */
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
- * and PAGE_HUGE_PAGE, which must be one and zero, respectively.
- * We set the ignored bits to zero.
- */
-#define _PAGE_TABLE _PAGE_PRESENT
-
-/* Inherit the caching flags from the old protection bits. */
-#define pgprot_modify(oldprot, newprot) \
- (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
-
-/* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pa((x), 0)
-
-/*
- * For PTEs and PDEs, we must clear the Present bit first when
- * clearing a page table entry, so clear the bottom half first and
- * enforce ordering with a barrier.
- */
-static inline void __pte_clear(pte_t *ptep)
-{
-#ifdef __tilegx__
- ptep->val = 0;
-#else
- u32 *tmp = (u32 *)ptep;
- tmp[0] = 0;
- barrier();
- tmp[1] = 0;
-#endif
-}
-#define pte_clear(mm, addr, ptep) __pte_clear(ptep)
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not.
- */
-#define pte_present hv_pte_get_present
-#define pte_mknotpresent hv_pte_clear_present
-#define pte_user hv_pte_get_user
-#define pte_read hv_pte_get_readable
-#define pte_dirty hv_pte_get_dirty
-#define pte_young hv_pte_get_accessed
-#define pte_write hv_pte_get_writable
-#define pte_exec hv_pte_get_executable
-#define pte_huge hv_pte_get_page
-#define pte_super hv_pte_get_super
-#define pte_rdprotect hv_pte_clear_readable
-#define pte_exprotect hv_pte_clear_executable
-#define pte_mkclean hv_pte_clear_dirty
-#define pte_mkold hv_pte_clear_accessed
-#define pte_wrprotect hv_pte_clear_writable
-#define pte_mksmall hv_pte_clear_page
-#define pte_mkread hv_pte_set_readable
-#define pte_mkexec hv_pte_set_executable
-#define pte_mkdirty hv_pte_set_dirty
-#define pte_mkyoung hv_pte_set_accessed
-#define pte_mkwrite hv_pte_set_writable
-#define pte_mkhuge hv_pte_set_page
-#define pte_mksuper hv_pte_set_super
-
-#define pte_special(pte) 0
-#define pte_mkspecial(pte) (pte)
-
-/*
- * Use some spare bits in the PTE for user-caching tags.
- */
-#define pte_set_forcecache hv_pte_set_client0
-#define pte_get_forcecache hv_pte_get_client0
-#define pte_clear_forcecache hv_pte_clear_client0
-#define pte_set_anyhome hv_pte_set_client1
-#define pte_get_anyhome hv_pte_get_client1
-#define pte_clear_anyhome hv_pte_clear_client1
-
-/*
- * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
- */
-#define pte_migrating hv_pte_get_migrating
-#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
-#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
-
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e))
-#define pgd_ERROR(e) \
- pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e))
-
-/* Return PA and protection info for a given kernel VA. */
-int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
-
-/*
- * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
- * the right order on 32-bit platforms and also allows us to write
- * hooks to check valid PTEs, etc., if we want.
- */
-void __set_pte(pte_t *ptep, pte_t pte);
-
-/*
- * set_pte() sets the given PTE and also sanity-checks the
- * requested PTE against the page homecaching. Unspecified parts
- * of the PTE are filled in when it is written to memory, i.e. all
- * caching attributes if "!forcecache", or the home cpu if "anyhome".
- */
-extern void set_pte(pte_t *ptep, pte_t pte);
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
-#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
-
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-static inline int pte_none(pte_t pte)
-{
- return !pte.val;
-}
-
-static inline unsigned long pte_pfn(pte_t pte)
-{
- return PFN_DOWN(hv_pte_get_pa(pte));
-}
-
-/* Set or get the remote cache cpu in a pgprot with remote caching. */
-extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
-extern int get_remote_cache_cpu(pgprot_t prot);
-
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
-{
- return hv_pte_set_pa(prot, PFN_PHYS(pfn));
-}
-
-/* Support for priority mappings. */
-extern void start_mm_caching(struct mm_struct *mm);
-extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
-
-/*
- * Encode and de-code a swap entry (see <linux/swapops.h>).
- * We put the swap file type+offset in the 32 high bits;
- * I believe we can just leave the low bits clear.
- */
-#define __swp_type(swp) ((swp).val & 0x1f)
-#define __swp_offset(swp) ((swp).val >> 5)
-#define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
-#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-/*
- * If we are doing an mprotect(), just accept the new vma->vm_page_prot
- * value and combine it with the PFN from the old PTE to get a new PTE.
- */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- return pfn_pte(pte_pfn(pte), newprot);
-}
-
-/*
- * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
- *
- * This macro returns the index of the entry in the pgd page which would
- * control the given virtual address.
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
- */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/*
- * A shortcut which implies the use of the kernel's pgd, instead
- * of a process's.
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_unmap(pte) do { } while (0)
-
-/* Clear a non-executable kernel PTE and flush it from the TLB. */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, (vaddr), (ptep)); \
- local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
-} while (0)
-
-/*
- * The kernel page tables contain what we need, and we flush when we
- * change specific page table entries.
- */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
-
-#ifdef CONFIG_FLATMEM
-#define kern_addr_valid(addr) (1)
-#endif /* CONFIG_FLATMEM */
-
-extern void vmalloc_sync_all(void);
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef __tilegx__
-#include <asm/pgtable_64.h>
-#else
-#include <asm/pgtable_32.h>
-#endif
-
-#ifndef __ASSEMBLY__
-
-static inline int pmd_none(pmd_t pmd)
-{
- /*
- * Only check low word on 32-bit platforms, since it might be
- * out of sync with upper half.
- */
- return (unsigned long)pmd_val(pmd) == 0;
-}
-
-static inline int pmd_present(pmd_t pmd)
-{
- return pmd_val(pmd) & _PAGE_PRESENT;
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
- return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
-}
-
-static inline unsigned long pages_to_mb(unsigned long npg)
-{
- return npg >> (20 - PAGE_SHIFT);
-}
-
-/*
- * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
- *
- * This function returns the index of the entry in the pmd which would
- * control the given virtual address.
- */
-static inline unsigned long pmd_index(unsigned long address)
-{
- return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-}
-
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
-}
-
-#define __HAVE_ARCH_PMDP_SET_WRPROTECT
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
-}
-
-
-#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
-{
- return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
-}
-
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
-}
-
-#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
-
-/* Create a pmd from a PTFN. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
- return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
-
-/*
- * A given kernel pmd_t maps to a specific virtual address (either a
- * kernel huge page or a kernel pte_t table). Since kernel pte_t
- * tables can be aligned at sub-page granularity, this function can
- * return non-page-aligned pointers, despite its name.
- */
-static inline unsigned long pmd_page_vaddr(pmd_t pmd)
-{
- phys_addr_t pa =
- (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
- return (unsigned long)__va(pa);
-}
-
-/*
- * A pmd_t points to the base of a huge page or to a pte_t array.
- * If a pte_t array, since we can have multiple per page, we don't
- * have a one-to-one mapping of pmd_t's to pages. However, this is
- * OK for pte_lockptr(), since we just end up with potentially one
- * lock being used for several pte_t arrays.
- */
-#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- __pte_clear(pmdp_ptep(pmdp));
-}
-
-#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
-#define pmd_young(pmd) pte_young(pmd_pte(pmd))
-#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-#define pmd_write(pmd) pte_write(pmd_pte(pmd))
-#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
-#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
-#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-
-#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
-#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
-
-static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
- return pfn_pmd(pmd_pfn(pmd), newprot);
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_trans_huge pmd_huge_page
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-/*
- * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
- *
- * This macro returns the index of the entry in the pte page which would
- * control the given virtual address.
- */
-static inline unsigned long pte_index(unsigned long address)
-{
- return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-}
-
-static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
-{
- return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
-}
-
-#include <asm-generic/pgtable.h>
-
-/* Support /proc/NN/pgtable API. */
-struct seq_file;
-int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
- unsigned long vaddr, unsigned long pagesize,
- pte_t *ptep, void **datap);
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_PGTABLE_H */
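
The swap encoding above is easy to sanity-check with a worked example. A minimal userspace sketch that reuses the same macros:

    #include <stdio.h>

    typedef struct { unsigned long val; } swp_entry_t;

    /* Type in bits 0-4, offset above, as defined in pgtable.h. */
    #define __swp_type(swp)         ((swp).val & 0x1f)
    #define __swp_offset(swp)       ((swp).val >> 5)
    #define __swp_entry(type, off)  ((swp_entry_t) { (type) | ((off) << 5) })

    int main(void)
    {
            swp_entry_t e = __swp_entry(3UL, 0x1234UL);

            /* The PTE form just parks the value in the high 32 bits. */
            unsigned long long pte = (unsigned long long)e.val << 32;

            printf("type=%lu offset=%#lx pte=%#llx\n",
                   __swp_type(e), __swp_offset(e), pte);
            return 0;       /* type=3 offset=0x1234 pte=0x2468300000000 */
    }
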
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
deleted file mode 100644
index 5f8c615cb5e9..000000000000
--- a/arch/tile/include/asm/pgtable_32.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef _ASM_TILE_PGTABLE_32_H
-#define _ASM_TILE_PGTABLE_32_H
-
-/*
- * The level-1 index is defined by the huge page size. A PGD is composed
- * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
- */
-#define PGDIR_SHIFT HPAGE_SHIFT
-#define PGDIR_SIZE HPAGE_SIZE
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
-#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
-#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)
-
-/*
- * The level-2 index is defined by the difference between the huge
- * page size and the normal page size. A PTE is composed of
- * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
- * Note that the hypervisor docs use PTE for what we call pte_t, so
- * this nomenclature is somewhat confusing.
- */
-#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
-#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
-#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
-
-#ifndef __ASSEMBLY__
-
-/*
- * Right now we initialize only a single pte table. It can be extended
- * easily; subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- *
- * HOWEVER, if we are using an allocation scheme with slop after the
- * end of the page table (e.g. where our L2 page tables are 2KB but
- * our pages are 64KB and we are allocating via the page allocator)
- * we can't extend it easily.
- */
-#define LAST_PKMAP PTRS_PER_PTE
-
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)
-
-#ifdef CONFIG_HIGHMEM
-# define _VMALLOC_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
-#else
-# define _VMALLOC_END (FIXADDR_START & ~(HPAGE_SIZE-1))
-#endif
-
-/*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end. The vmalloc code also puts in an internal
- * guard page between each allocation.
- */
-#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
-extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
-#define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE)
-#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
-
-/* This is the maximum possible amount of lowmem. */
-#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
-
-/* We have no pmd or pud since we are strictly a two-level page table */
-#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
-
-static inline int pud_huge_page(pud_t pud) { return 0; }
-
-/* We don't define any pgds for these addresses. */
-static inline int pgd_addr_invalid(unsigned long addr)
-{
- return addr >= MEM_HV_START;
-}
-
-/*
- * Provide versions of these routines that can be used safely when
- * the hypervisor may be asynchronously modifying dirty/accessed bits.
- * ptep_get_and_clear() matches the generic one but we provide it to
- * be parallel with the 64-bit code.
- */
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-
-extern int ptep_test_and_clear_young(struct vm_area_struct *,
- unsigned long addr, pte_t *);
-extern void ptep_set_wrprotect(struct mm_struct *,
- unsigned long addr, pte_t *);
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte = *ptep;
- pte_clear(mm, addr, ptep);
- return pte;
-}
-
-/*
- * pmds are wrappers around pgds, which are the same as ptes.
- * It's often convenient to "cast" back and forth and use the pte methods,
- * which are the methods supplied by the hypervisor.
- */
-#define pmd_pte(pmd) ((pmd).pud.pgd)
-#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
-#define pte_pmd(pte) ((pmd_t){ { (pte) } })
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_TILE_PGTABLE_32_H */
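
With this two-level split, the L1 index is just the huge-page number and the L2 index the small-page number within it. A minimal sketch of the index arithmetic under the 64KB/16MB geometry (matching _HV_L1_INDEX/_HV_L2_INDEX in spirit, not taken from the hypervisor headers):

    #include <stdio.h>

    #define PAGE_SHIFT      16      /* 64KB pages */
    #define HPAGE_SHIFT     24      /* 16MB huge pages */

    static unsigned long pgd_index(unsigned long va)
    {
            return va >> HPAGE_SHIFT;
    }

    static unsigned long pte_index(unsigned long va)
    {
            return (va >> PAGE_SHIFT) &
                   ((1UL << (HPAGE_SHIFT - PAGE_SHIFT)) - 1);
    }

    int main(void)
    {
            unsigned long va = 0xc1230000UL;        /* a lowmem address */
            printf("pgd=%lu pte=%lu\n", pgd_index(va), pte_index(va));
            return 0;                               /* pgd=193 pte=35 */
    }
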
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
deleted file mode 100644
index 96fe58b45118..000000000000
--- a/arch/tile/include/asm/pgtable_64.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef _ASM_TILE_PGTABLE_64_H
-#define _ASM_TILE_PGTABLE_64_H
-
-/* The level-0 page table breaks the address space into 32-bit chunks. */
-#define PGDIR_SHIFT HV_LOG2_L1_SPAN
-#define PGDIR_SIZE HV_L1_SPAN
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD HV_L0_ENTRIES
-#define PGD_INDEX(va) HV_L0_INDEX(va)
-#define SIZEOF_PGD HV_L0_SIZE
-
-/*
- * The level-1 index is defined by the huge page size. A PMD is composed
- * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
- */
-#define PMD_SHIFT HPAGE_SHIFT
-#define PMD_SIZE HPAGE_SIZE
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
-#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
-#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
-
-/*
- * The level-2 index is defined by the difference between the huge
- * page size and the normal page size. A PTE is composed of
- * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
- * Note that the hypervisor docs use PTE for what we call pte_t, so
- * this nomenclature is somewhat confusing.
- */
-#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
-#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
-#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
-
-/*
- * Align the vmalloc area to an L2 page table. Omit guard pages at
- * the beginning and end for simplicity (particularly in the per-cpu
- * memory allocation code). The vmalloc code puts in an internal
- * guard page between each allocation.
- */
-#define _VMALLOC_END MEM_SV_START
-#define VMALLOC_END _VMALLOC_END
-#define VMALLOC_START _VMALLOC_START
-
-#ifndef __ASSEMBLY__
-
-/* We have no pud since we are a three-level page table. */
-#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopud.h>
-
-/*
- * pmds are the same as pgds and ptes, so converting is a no-op.
- */
-#define pmd_pte(pmd) (pmd)
-#define pmdp_ptep(pmdp) (pmdp)
-#define pte_pmd(pte) (pte)
-
-#define pud_pte(pud) ((pud).pgd)
-
-static inline int pud_none(pud_t pud)
-{
- return pud_val(pud) == 0;
-}
-
-static inline int pud_present(pud_t pud)
-{
- return pud_val(pud) & _PAGE_PRESENT;
-}
-
-static inline int pud_huge_page(pud_t pud)
-{
- return pud_val(pud) & _PAGE_HUGE_PAGE;
-}
-
-#define pmd_ERROR(e) \
- pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
-
-static inline void pud_clear(pud_t *pudp)
-{
- __pte_clear(&pudp->pgd);
-}
-
-static inline int pud_bad(pud_t pud)
-{
- return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
-}
-
-/* Return the page-table frame number (ptfn) that a pud_t points at. */
-#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
-
-/* Return the page frame number (pfn) that a pud_t points at. */
-#define pud_pfn(pud) pte_pfn(pud_pte(pud))
-
-/*
- * A given kernel pud_t maps to a kernel pmd_t table at a specific
- * virtual address. Since kernel pmd_t tables can be aligned at
- * sub-page granularity, this macro can return non-page-aligned
- * pointers, despite its name.
- */
-#define pud_page_vaddr(pud) \
- (__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
-
-/*
- * A pud_t points to a pmd_t array. Since we can have multiple per
- * page, we don't have a one-to-one mapping of pud_t's to pages.
- */
-#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
-
-static inline unsigned long pud_index(unsigned long address)
-{
- return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
-}
-
-#define pmd_offset(pud, address) \
- ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
-
-/* Normalize an address to having the correct high bits set. */
-#define pgd_addr_normalize pgd_addr_normalize
-static inline unsigned long pgd_addr_normalize(unsigned long addr)
-{
- return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
- (CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
-}
-
-/* We don't define any pgds for these addresses. */
-static inline int pgd_addr_invalid(unsigned long addr)
-{
- return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
-}
-
-/*
- * Use atomic instructions to provide atomicity against the hypervisor.
- */
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
- HV_PTE_INDEX_ACCESSED) & 0x1;
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- __insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
-}
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- return hv_pte(__insn_exch(&ptep->val, 0UL));
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_TILE_PGTABLE_64_H */
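
The fetchand/exch helpers at the end of this header reduce to single atomic read-modify-write operations on the PTE word, which is what makes them safe against the hypervisor concurrently updating the dirty/accessed bits: an update between a plain load and store could be lost, but a fetchand cannot race that way. A rough userspace analogue using C11 atomics, with made-up bit positions standing in for the HV_PTE_* constants:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_ACCESSED_BIT  8                     /* hypothetical bit index */
#define PTE_ACCESSED      (1ULL << PTE_ACCESSED_BIT)
#define PTE_WRITABLE      (1ULL << 9)           /* hypothetical bit index */

/* Analogue of ptep_test_and_clear_young(): atomically clear the
 * accessed bit and report whether it was set. */
static int test_and_clear_young(_Atomic uint64_t *pte)
{
	uint64_t old = atomic_fetch_and(pte, ~PTE_ACCESSED);
	return (old >> PTE_ACCESSED_BIT) & 0x1;
}

/* Analogue of ptep_set_wrprotect(): atomically drop the writable bit. */
static void set_wrprotect(_Atomic uint64_t *pte)
{
	atomic_fetch_and(pte, ~PTE_WRITABLE);
}

int main(void)
{
	_Atomic uint64_t pte = PTE_ACCESSED | PTE_WRITABLE;

	printf("young? %d\n", test_and_clear_young(&pte));    /* 1 */
	set_wrprotect(&pte);
	printf("pte now %#llx\n", (unsigned long long)pte);   /* 0 */
	return 0;
}
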
diff --git a/arch/tile/include/asm/pmc.h b/arch/tile/include/asm/pmc.h
deleted file mode 100644
index 7ae3956d9008..000000000000
--- a/arch/tile/include/asm/pmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2014 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PMC_H
-#define _ASM_TILE_PMC_H
-
-#include <linux/ptrace.h>
-
-#define TILE_BASE_COUNTERS 2
-
-/* Bitfields below are derived from SPR PERF_COUNT_CTL */
-#ifndef __tilegx__
-/* PERF_COUNT_CTL on TILEPro */
-#define TILE_CTL_EXCL_USER (1 << 7) /* exclude user level */
-#define TILE_CTL_EXCL_KERNEL (1 << 8) /* exclude kernel level */
-#define TILE_CTL_EXCL_HV (1 << 9) /* exclude hypervisor level */
-
-#define TILE_SEL_MASK 0x7f /* 7 bits for event SEL,
- COUNT_0_SEL */
-#define TILE_PLM_MASK		0x780	/* 4 bits priv level masks,
-					   COUNT_0_MASK */
-#define TILE_EVENT_MASK (TILE_SEL_MASK | TILE_PLM_MASK)
-
-#else /* __tilegx__ */
-/* PERF_COUNT_CTL on TILEGx */
-#define TILE_CTL_EXCL_USER (1 << 10) /* exclude user level */
-#define TILE_CTL_EXCL_KERNEL (1 << 11) /* exclude kernel level */
-#define TILE_CTL_EXCL_HV (1 << 12) /* exclude hypervisor level */
-
-#define TILE_SEL_MASK		0x3f	/* 6 bits for event SEL,
-					   COUNT_0_SEL */
-#define TILE_BOX_MASK		0x1c0	/* 3 bits box masks,
-					   COUNT_0_BOX */
-#define TILE_PLM_MASK		0x3c00	/* 4 bits priv level masks,
-					   COUNT_0_MASK */
-#define TILE_EVENT_MASK (TILE_SEL_MASK | TILE_BOX_MASK | TILE_PLM_MASK)
-#endif /* __tilegx__ */
-
-/* Takes register and fault number. Returns error to disable the interrupt. */
-typedef int (*perf_irq_t)(struct pt_regs *, int);
-
-int userspace_perf_handler(struct pt_regs *regs, int fault);
-
-perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq);
-void release_pmc_hardware(void);
-
-unsigned long pmc_get_overflow(void);
-void pmc_ack_overflow(unsigned long status);
-
-void unmask_pmc_interrupts(void);
-void mask_pmc_interrupts(void);
-
-#endif /* _ASM_TILE_PMC_H */
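
The masks above carve one SPR control word into an event-select field plus privilege-level exclude bits, so programming a counter is just ORing fields together. A sketch composing a TILE-Gx-style control value, with the layout constants copied from the header; the event number itself is arbitrary:

#include <stdio.h>

/* TILE-Gx layout, as in the header above. */
#define TILE_CTL_EXCL_USER    (1 << 10)
#define TILE_CTL_EXCL_KERNEL  (1 << 11)
#define TILE_CTL_EXCL_HV      (1 << 12)
#define TILE_SEL_MASK         0x3f

int main(void)
{
	unsigned int event = 0x2a & TILE_SEL_MASK;   /* arbitrary event SEL */

	/* Count in kernel mode only: exclude user and hypervisor levels. */
	unsigned int ctl = event | TILE_CTL_EXCL_USER | TILE_CTL_EXCL_HV;

	printf("PERF_COUNT_CTL = %#x\n", ctl);       /* 0x142a here */
	return 0;
}
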
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
deleted file mode 100644
index f71e5206650b..000000000000
--- a/arch/tile/include/asm/processor.h
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_PROCESSOR_H
-#define _ASM_TILE_PROCESSOR_H
-
-#include <arch/chip.h>
-
-#ifndef __ASSEMBLY__
-
-/*
- * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
- * normally would, due to #include dependencies.
- */
-#include <linux/types.h>
-#include <asm/ptrace.h>
-#include <asm/percpu.h>
-
-#include <arch/spr_def.h>
-
-struct task_struct;
-struct thread_struct;
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-void *current_text_addr(void);
-
-#if CHIP_HAS_TILE_DMA()
-/* Capture the state of a suspended DMA. */
-struct tile_dma_state {
- int enabled;
- unsigned long src;
- unsigned long dest;
- unsigned long strides;
- unsigned long chunk_size;
- unsigned long src_chunk;
- unsigned long dest_chunk;
- unsigned long byte;
- unsigned long status;
-};
-
-/*
- * A mask of the DMA status register for selecting only the 'running'
- * and 'done' bits.
- */
-#define DMA_STATUS_MASK \
- (SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
-#endif
-
-/*
- * Track asynchronous TLB events (faults and access violations)
- * that occur while we are in kernel mode from DMA or the SN processor.
- */
-struct async_tlb {
- short fault_num; /* original fault number; 0 if none */
- char is_fault; /* was it a fault (vs an access violation) */
- char is_write; /* for fault: was it caused by a write? */
- unsigned long address; /* what address faulted? */
-};
-
-#ifdef CONFIG_HARDWALL
-struct hardwall_info;
-struct hardwall_task {
- /* Which hardwall is this task tied to? (or NULL if none) */
- struct hardwall_info *info;
- /* Chains this task into the list at info->task_head. */
- struct list_head list;
-};
-#ifdef __tilepro__
-#define HARDWALL_TYPES 1 /* udn */
-#else
-#define HARDWALL_TYPES 3 /* udn, idn, and ipi */
-#endif
-#endif
-
-struct thread_struct {
- /* kernel stack pointer */
- unsigned long ksp;
- /* kernel PC */
- unsigned long pc;
- /* starting user stack pointer (for page migration) */
- unsigned long usp0;
- /* pid of process that created this one */
- pid_t creator_pid;
-#if CHIP_HAS_TILE_DMA()
- /* DMA info for suspended threads (byte == 0 means no DMA state) */
- struct tile_dma_state tile_dma_state;
-#endif
- /* User EX_CONTEXT registers */
- unsigned long ex_context[2];
- /* User SYSTEM_SAVE registers */
- unsigned long system_save[4];
- /* User interrupt mask */
- unsigned long long interrupt_mask;
- /* User interrupt-control 0 state */
- unsigned long intctrl_0;
- /* Any other miscellaneous processor state bits */
- unsigned long proc_status;
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- /* Interrupt base for PL0 interrupts */
- unsigned long interrupt_vector_base;
-#endif
- /* Tile cache retry fifo high-water mark */
- unsigned long tile_rtf_hwm;
-#if CHIP_HAS_DSTREAM_PF()
- /* Data stream prefetch control */
- unsigned long dstream_pf;
-#endif
-#ifdef CONFIG_HARDWALL
- /* Hardwall information for various resources. */
- struct hardwall_task hardwall[HARDWALL_TYPES];
-#endif
-#if CHIP_HAS_TILE_DMA()
- /* Async DMA TLB fault information */
- struct async_tlb dma_async_tlb;
-#endif
-};
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * Start with "sp" this many bytes below the top of the kernel stack.
- * This allows us to be cache-aware when handling the initial save
- * of the pt_regs value to the stack.
- */
-#define STACK_TOP_DELTA 64
-
-/*
- * When entering the kernel via a fault, start with the top of the
- * pt_regs structure this many bytes below the top of the page.
- * This aligns the pt_regs structure optimally for cache-line access.
- */
-#ifdef __tilegx__
-#define KSTK_PTREGS_GAP 48
-#else
-#define KSTK_PTREGS_GAP 56
-#endif
-
-#ifndef __ASSEMBLY__
-
-#ifdef __tilegx__
-#define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
-#else
-#define TASK_SIZE_MAX PAGE_OFFSET
-#endif
-
-/* TASK_SIZE and related variables are always checked in "current" context. */
-#ifdef CONFIG_COMPAT
-#define COMPAT_TASK_SIZE (1UL << 31)
-#define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\
- COMPAT_TASK_SIZE : TASK_SIZE_MAX)
-#else
-#define TASK_SIZE TASK_SIZE_MAX
-#endif
-
-#define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
-#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
-
-#define STACK_TOP TASK_SIZE
-
-/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
-#define STACK_TOP_MAX TASK_SIZE_MAX
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's, if it is using bottom-up mapping.
- */
-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
-#define INIT_THREAD { \
- .ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
- .interrupt_mask = -1ULL \
-}
-
-/* Kernel stack top for the task that first boots on this cpu. */
-DECLARE_PER_CPU(unsigned long, boot_sp);
-
-/* PC to boot from on this cpu. */
-DECLARE_PER_CPU(unsigned long, boot_pc);
-
-/* Do necessary setup to start up a newly executed thread. */
-static inline void start_thread(struct pt_regs *regs,
- unsigned long pc, unsigned long usp)
-{
- regs->pc = pc;
- regs->sp = usp;
- single_step_execve();
-}
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
- /* Nothing for now */
-}
-
-extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
-
-unsigned long get_wchan(struct task_struct *p);
-
-/* Return initial ksp value for given task. */
-#define task_ksp0(task) \
- ((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
-
-/* Return some info about the user process TASK. */
-#define task_pt_regs(task) \
- ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
-#define current_pt_regs() \
- ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
- STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
-#define task_sp(task) (task_pt_regs(task)->sp)
-#define task_pc(task) (task_pt_regs(task)->pc)
-/* Aliases for pc and sp (used in fs/proc/array.c) */
-#define KSTK_EIP(task) task_pc(task)
-#define KSTK_ESP(task) task_sp(task)
-
-/* Fine-grained unaligned JIT support */
-#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
-#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
-
-extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
-extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
-
-/* Standard format for printing registers and other word-size data. */
-#ifdef __tilegx__
-# define REGFMT "0x%016lx"
-#else
-# define REGFMT "0x%08lx"
-#endif
-
-/*
- * Do some slow action (e.g. read a slow SPR).
- * Note that this must also have compiler-barrier semantics since
- * it may be used in a busy loop reading memory.
- */
-static inline void cpu_relax(void)
-{
- __insn_mfspr(SPR_PASS);
- barrier();
-}
-
-/* Info on this processor (see fs/proc/cpuinfo.c) */
-struct seq_operations;
-extern const struct seq_operations cpuinfo_op;
-
-/* Provide information about the chip model. */
-extern char chip_model[64];
-
-/* Data on which physical memory controller corresponds to which NUMA node. */
-extern int node_controller[];
-
-/* Does the heap allocator return hash-for-home pages by default? */
-extern int hash_default;
-
-/* Should kernel stack pages be hash-for-home? */
-extern int kstack_hash;
-
-/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
-#define uheap_hash hash_default
-
-
-/* Are we using huge pages in the TLB for kernel data? */
-extern int kdata_huge;
-
-/* Support standard Linux prefetching. */
-#define ARCH_HAS_PREFETCH
-#define prefetch(x) __builtin_prefetch(x)
-#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
-
-/* Bring a value into the L1D, faulting the TLB if necessary. */
-#ifdef __tilegx__
-#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
-#else
-#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
-#endif
-
-#else /* __ASSEMBLY__ */
-
-/* Do some slow action (e.g. read a slow SPR). */
-#define CPU_RELAX mfspr zero, SPR_PASS
-
-#endif /* !__ASSEMBLY__ */
-
-/* Assembly code assumes that the PL is in the low bits. */
-#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
-# error Fix assembly assumptions about PL
-#endif
-
-/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
-#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
- SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
- SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
- SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
-# error Fix assumptions that EX1 macros work for both PL0 and PL1
-#endif
-
-/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
-#define EX1_PL(ex1) \
- (((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
-#define EX1_ICS(ex1) \
- (((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
-#define PL_ICS_EX1(pl, ics) \
- (((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
- ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))
-
-/*
- * Provide symbolic constants for PLs.
- */
-#define USER_PL 0
-#if CONFIG_KERNEL_PL == 2
-#define GUEST_PL 1
-#endif
-#define KERNEL_PL CONFIG_KERNEL_PL
-
-/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
-#ifdef __tilegx__
-#define CPU_SHIFT 48
-#if CHIP_VA_WIDTH() > CPU_SHIFT
-# error Too many VA bits!
-#endif
-#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
-#define raw_smp_processor_id() \
- ((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
-#define get_current_ksp0() \
- ((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
- (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
-#define next_current_ksp0(task) ({ \
- unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
- unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
- __ksp0 | __cpu; \
-})
-#else
-#define LOG2_NR_CPU_IDS 6
-#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
-#define raw_smp_processor_id() \
- ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
-#define get_current_ksp0() \
- (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
-#define next_current_ksp0(task) ({ \
- unsigned long __ksp0 = task_ksp0(task); \
- int __cpu = raw_smp_processor_id(); \
- BUG_ON(__ksp0 & MAX_CPU_ID); \
- __ksp0 | __cpu; \
-})
-#endif
-#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
-# error Too many cpus!
-#endif
-
-#endif /* _ASM_TILE_PROCESSOR_H */
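
On TILE-Gx, SYSTEM_SAVE_K_0 keeps the cpu number in the top 64 - CPU_SHIFT = 16 bits and a sign-extended ksp0 in the low 48, which is why get_current_ksp0() shifts left and then arithmetic-shifts right. The same pack/unpack arithmetic in a standalone sketch (assumes an LP64 target; the ksp0 value is made up):

#include <stdio.h>

#define CPU_SHIFT 48

static unsigned long pack(unsigned long ksp0, int cpu)
{
	/* Mirrors next_current_ksp0(): low 48 bits of ksp0, cpu on top. */
	return (ksp0 & ((1UL << CPU_SHIFT) - 1)) | ((long)cpu << CPU_SHIFT);
}

static int unpack_cpu(unsigned long val)
{
	return (int)(val >> CPU_SHIFT);              /* raw_smp_processor_id() */
}

static unsigned long unpack_ksp0(unsigned long val)
{
	/* Shift left, then arithmetic-shift right to sign-extend bit 47,
	 * exactly as get_current_ksp0() does. */
	return (unsigned long)(((long)val << (64 - CPU_SHIFT)) >>
			       (64 - CPU_SHIFT));
}

int main(void)
{
	unsigned long v = pack(0xffffc90000123456UL, 37);   /* made-up ksp0 */

	printf("cpu=%d ksp0=%#lx\n", unpack_cpu(v), unpack_ksp0(v));
	return 0;
}
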
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
deleted file mode 100644
index b9620c077abc..000000000000
--- a/arch/tile/include/asm/ptrace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef _ASM_TILE_PTRACE_H
-#define _ASM_TILE_PTRACE_H
-
-#include <linux/compiler.h>
-
-#ifndef __ASSEMBLY__
-/* Benefit from consistent use of "long" on all chips. */
-typedef unsigned long pt_reg_t;
-#endif
-
-#include <uapi/asm/ptrace.h>
-
-#define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE)
-#define PT_TRACE_MIGRATE PT_EVENT_FLAG(PTRACE_EVENT_MIGRATE)
-
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
-
-#ifndef __ASSEMBLY__
-
-#define regs_return_value(regs) ((regs)->regs[0])
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
-#define user_stack_pointer(regs) ((regs)->sp)
-
-/* Does the process account for user or for system time? */
-#define user_mode(regs) (EX1_PL((regs)->ex1) < KERNEL_PL)
-
-/* Fill in a struct pt_regs with the current kernel registers. */
-struct pt_regs *get_pt_regs(struct pt_regs *);
-
-/* Trace the current syscall. */
-extern int do_syscall_trace_enter(struct pt_regs *regs);
-extern void do_syscall_trace_exit(struct pt_regs *regs);
-
-#define arch_has_single_step() (1)
-
-/*
- * A structure for all single-stepper state.
- *
- * If this structure changes, also update the assembly-visible defines
- * at the end of this file.
- */
-struct single_step_state {
- /* the page to which we will write hacked-up bundles */
- void __user *buffer;
-
- union {
- int flags;
- struct {
- unsigned long is_enabled:1, update:1, update_reg:6;
- };
- };
-
- unsigned long orig_pc; /* the original PC */
- unsigned long next_pc; /* return PC if no branch (PC + 1) */
- unsigned long branch_next_pc; /* return PC if we did branch/jump */
- unsigned long update_value; /* value to restore to update_target */
-};
-
-/* Single-step the instruction at regs->pc */
-extern void single_step_once(struct pt_regs *regs);
-
-/* Clean up after execve(). */
-extern void single_step_execve(void);
-
-struct task_struct;
-
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);
-
-#ifdef __tilegx__
-/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
-#define __ARCH_WANT_COMPAT_SYS_PTRACE
-#endif
-
-#endif /* !__ASSEMBLY__ */
-
-#define SINGLESTEP_STATE_MASK_IS_ENABLED 0x1
-#define SINGLESTEP_STATE_MASK_UPDATE 0x2
-#define SINGLESTEP_STATE_TARGET_LB 2
-#define SINGLESTEP_STATE_TARGET_UB 7
-
-#endif /* _ASM_TILE_PTRACE_H */
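
The anonymous bitfield union in single_step_state and the SINGLESTEP_STATE_* defines at the bottom of the file are two views of the same word, which is why the comment above asks for them to be kept in sync. A sketch checking the correspondence; bitfield layout is compiler-defined, so this assumes the little-endian, low-bit-first allocation the masks imply, and needs C11 for the anonymous struct:

#include <assert.h>
#include <stdio.h>

#define MASK_IS_ENABLED 0x1   /* SINGLESTEP_STATE_MASK_IS_ENABLED */
#define MASK_UPDATE     0x2   /* SINGLESTEP_STATE_MASK_UPDATE */
#define TARGET_LB       2     /* SINGLESTEP_STATE_TARGET_LB */

union ss_flags {
	int flags;
	struct {
		unsigned long is_enabled:1, update:1, update_reg:6;
	};
};

int main(void)
{
	union ss_flags f = { .flags = 0 };

	f.is_enabled = 1;
	f.update = 1;
	f.update_reg = 5;                     /* arbitrary register number */

	/* The bitfields land exactly where the masks say they do. */
	assert(f.flags & MASK_IS_ENABLED);
	assert(f.flags & MASK_UPDATE);
	assert(((f.flags >> TARGET_LB) & 0x3f) == 5);

	printf("flags = %#x\n", f.flags);     /* 0x17 on such an ABI */
	return 0;
}
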
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
deleted file mode 100644
index 50343bfe7936..000000000000
--- a/arch/tile/include/asm/sections.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SECTIONS_H
-#define _ASM_TILE_SECTIONS_H
-
-#define arch_is_kernel_data arch_is_kernel_data
-
-#include <asm-generic/sections.h>
-
-extern char vdso_start[], vdso_end[];
-#ifdef CONFIG_COMPAT
-extern char vdso32_start[], vdso32_end[];
-#endif
-
-/* Not exactly sections, but PC comparison points in the code. */
-extern char __rt_sigreturn[], __rt_sigreturn_end[];
-#ifdef __tilegx__
-extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
-#else
-extern char sys_cmpxchg[], __sys_cmpxchg_end[];
-extern char __sys_cmpxchg_grab_lock[];
-extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
-#endif
-
-/* Handle the discontiguity between _sdata and _text. */
-static inline int arch_is_kernel_data(unsigned long addr)
-{
- return addr >= (unsigned long)_sdata &&
- addr < (unsigned long)_end;
-}
-
-#endif /* _ASM_TILE_SECTIONS_H */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
deleted file mode 100644
index 2a0347af0702..000000000000
--- a/arch/tile/include/asm/setup.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef _ASM_TILE_SETUP_H
-#define _ASM_TILE_SETUP_H
-
-
-#include <linux/pfn.h>
-#include <linux/init.h>
-#include <uapi/asm/setup.h>
-
-/*
- * Reserved space for vmalloc and iomap - defined in asm/page.h
- */
-#define MAXMEM_PFN PFN_DOWN(MAXMEM)
-
-int tile_console_write(const char *buf, int count);
-
-#ifdef CONFIG_EARLY_PRINTK
-void early_panic(const char *fmt, ...);
-#else
-#define early_panic panic
-#endif
-
-/* Init-time routine to do tile-specific per-cpu setup. */
-void setup_cpu(int boot);
-
-/* User-level DMA management functions */
-void grant_dma_mpls(void);
-void restrict_dma_mpls(void);
-
-#ifdef CONFIG_HARDWALL
-/* User-level network management functions */
-void reset_network_state(void);
-struct task_struct;
-void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
-void hardwall_deactivate_all(struct task_struct *task);
-int hardwall_ipi_valid(int cpu);
-
-/* Hook hardwall code into changes in affinity. */
-#define arch_set_cpus_allowed(p, new_mask) do { \
- if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
- hardwall_deactivate_all(p); \
-} while (0)
-#endif
-
-#endif /* _ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/asm/sigframe.h b/arch/tile/include/asm/sigframe.h
deleted file mode 100644
index 994d3d30205f..000000000000
--- a/arch/tile/include/asm/sigframe.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SIGFRAME_H
-#define _ASM_TILE_SIGFRAME_H
-
-/* Indicate that syscall return should not examine r0 */
-#define INT_SWINT_1_SIGRETURN (~0)
-
-#ifndef __ASSEMBLY__
-
-#include <arch/abi.h>
-
-struct rt_sigframe {
- unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
- struct siginfo info;
- struct ucontext uc;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_SIGFRAME_H */
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
deleted file mode 100644
index 10e183de96d3..000000000000
--- a/arch/tile/include/asm/signal.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef _ASM_TILE_SIGNAL_H
-#define _ASM_TILE_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-
-#if !defined(__ASSEMBLY__)
-struct pt_regs;
-int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
-int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
-void do_signal(struct pt_regs *regs);
-void signal_fault(const char *type, struct pt_regs *,
- void __user *frame, int sig);
-void trace_unhandled_signal(const char *type, struct pt_regs *regs,
- unsigned long address, int signo);
-#endif
-#endif /* _ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h
deleted file mode 100644
index 735e7f144733..000000000000
--- a/arch/tile/include/asm/smp.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SMP_H
-#define _ASM_TILE_SMP_H
-
-#ifdef CONFIG_SMP
-
-#include <asm/processor.h>
-#include <linux/cpumask.h>
-#include <linux/irqreturn.h>
-#include <hv/hypervisor.h>
-
-/* Set up this tile to support receiving hypervisor messages */
-void init_messaging(void);
-
-/* Set up this tile to support receiving device interrupts and IPIs. */
-void init_per_tile_IRQs(void);
-
-/* Send a message to processors specified in mask */
-void send_IPI_many(const struct cpumask *mask, int tag);
-
-/* Send a message to all but the sending processor */
-void send_IPI_allbutself(int tag);
-
-/* Send a message to a specific processor */
-void send_IPI_single(int dest, int tag);
-
-/* Process an IPI message */
-void evaluate_message(int tag);
-
-/* Boot a secondary cpu */
-void online_secondary(void);
-
-/* Topology of the supervisor tile grid, and coordinates of boot processor */
-extern HV_Topology smp_topology;
-
-/* Accessors for grid size */
-#define smp_height (smp_topology.height)
-#define smp_width (smp_topology.width)
-
-/* Convenience functions for converting cpu <-> coords. */
-static inline int cpu_x(int cpu)
-{
- return cpu % smp_width;
-}
-static inline int cpu_y(int cpu)
-{
- return cpu / smp_width;
-}
-static inline int xy_to_cpu(int x, int y)
-{
- return y * smp_width + x;
-}
-
-/* Hypervisor message tags sent via the tile send_IPI*() routines. */
-#define MSG_TAG_START_CPU 1
-#define MSG_TAG_STOP_CPU 2
-#define MSG_TAG_CALL_FUNCTION_MANY 3
-#define MSG_TAG_CALL_FUNCTION_SINGLE 4
-#define MSG_TAG_IRQ_WORK 5
-
-/* Hook for the generic smp_call_function_many() routine. */
-static inline void arch_send_call_function_ipi_mask(struct cpumask *mask)
-{
- send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY);
-}
-
-/* Hook for the generic smp_call_function_single() routine. */
-static inline void arch_send_call_function_single_ipi(int cpu)
-{
- send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE);
-}
-
-/* Print out the boot string describing which cpus were disabled. */
-void print_disabled_cpus(void);
-
-#else /* !CONFIG_SMP */
-
-#define smp_master_cpu 0
-#define smp_height 1
-#define smp_width 1
-#define cpu_x(cpu) 0
-#define cpu_y(cpu) 0
-#define xy_to_cpu(x, y) 0
-
-#endif /* !CONFIG_SMP */
-
-
-/* Which cpus may be used as the lotar in a page table entry. */
-extern struct cpumask cpu_lotar_map;
-#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)
-
-/* Which processors are used for hash-for-home mapping */
-extern struct cpumask hash_for_home_map;
-
-/* Which cpus can have their cache flushed by hv_flush_remote(). */
-extern struct cpumask cpu_cacheable_map;
-#define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map)
-
-/* Convert an HV_LOTAR value into a cpu. */
-static inline int hv_lotar_to_cpu(HV_LOTAR lotar)
-{
- return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width);
-}
-
-/*
- * Extension of <linux/cpumask.h> functionality when you just want
- * to express a mask or suppression or inclusion region without
- * being too concerned about exactly which cpus are valid in that region.
- */
-int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits);
-
-#define cpulist_parse_crop(buf, dst) \
- __cpulist_parse_crop((buf), (dst), NR_CPUS)
-static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp,
- int nbits)
-{
- return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits);
-}
-
-/* Initialize the IPI subsystem. */
-void ipi_init(void);
-
-/* Function for start-cpu message to cause us to jump to. */
-extern unsigned long start_cpu_function_addr;
-
-#endif /* _ASM_TILE_SMP_H */
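
The cpu_x()/cpu_y()/xy_to_cpu() helpers are plain row-major arithmetic over the smp_width x smp_height tile mesh. In isolation, with a made-up grid size:

#include <stdio.h>

static int smp_width = 8;                       /* hypothetical 8x8 mesh */

static int cpu_x(int cpu)          { return cpu % smp_width; }
static int cpu_y(int cpu)          { return cpu / smp_width; }
static int xy_to_cpu(int x, int y) { return y * smp_width + x; }

int main(void)
{
	int cpu = 27;

	printf("cpu %d sits at (%d,%d)\n", cpu, cpu_x(cpu), cpu_y(cpu));
	printf("(3,3) is cpu %d\n", xy_to_cpu(3, 3));    /* 27 again */
	return 0;
}
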
diff --git a/arch/tile/include/asm/spinlock.h b/arch/tile/include/asm/spinlock.h
deleted file mode 100644
index 1a8bd4740c28..000000000000
--- a/arch/tile/include/asm/spinlock.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SPINLOCK_H
-#define _ASM_TILE_SPINLOCK_H
-
-#ifdef __tilegx__
-#include <asm/spinlock_64.h>
-#else
-#include <asm/spinlock_32.h>
-#endif
-
-#endif /* _ASM_TILE_SPINLOCK_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
deleted file mode 100644
index fb5313d77315..000000000000
--- a/arch/tile/include/asm/spinlock_32.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * 32-bit SMP spinlocks.
- */
-
-#ifndef _ASM_TILE_SPINLOCK_32_H
-#define _ASM_TILE_SPINLOCK_32_H
-
-#include <linux/atomic.h>
-#include <asm/page.h>
-#include <linux/compiler.h>
-
-/*
- * We only use even ticket numbers so the '1' inserted by a tns is
- * an unambiguous "ticket is busy" flag.
- */
-#define TICKET_QUANTUM 2
-
-
-/*
- * SMP ticket spinlocks, allowing only a single CPU anywhere
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- /*
- * Note that even if a new ticket is in the process of being
- * acquired, so lock->next_ticket is 1, it's still reasonable
- * to claim the lock is held, since it will be momentarily
- * if not already. There's no need to wait for a "valid"
- * lock->next_ticket to become available.
- * Use READ_ONCE() to ensure that calling this in a loop is OK.
- */
- int curr = READ_ONCE(lock->current_ticket);
- int next = READ_ONCE(lock->next_ticket);
-
- return next != curr;
-}
-
-void arch_spin_lock(arch_spinlock_t *lock);
-
-int arch_spin_trylock(arch_spinlock_t *lock);
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- /* For efficiency, overlap fetching the old ticket with the wmb(). */
- int old_ticket = lock->current_ticket;
- wmb(); /* guarantee anything modified under the lock is visible */
- lock->current_ticket = old_ticket + TICKET_QUANTUM;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * We use a "tns/store-back" technique on a single word to manage
- * the lock state, looping around to retry if the tns returns 1.
- */
-
-/* Internal layout of the word; do not use. */
-#define _WR_NEXT_SHIFT 8
-#define _WR_CURR_SHIFT 16
-#define _WR_WIDTH 8
-#define _RD_COUNT_SHIFT 24
-#define _RD_COUNT_WIDTH 8
-
-/**
- * arch_read_lock() - acquire a read lock.
- */
-void arch_read_lock(arch_rwlock_t *rwlock);
-
-/**
- * arch_write_lock() - acquire a write lock.
- */
-void arch_write_lock(arch_rwlock_t *rwlock);
-
-/**
- * arch_read_trylock() - try to acquire a read lock.
- */
-int arch_read_trylock(arch_rwlock_t *rwlock);
-
-/**
- * arch_write_trylock() - try to acquire a write lock.
- */
-int arch_write_trylock(arch_rwlock_t *rwlock);
-
-/**
- * arch_read_unlock() - release a read lock.
- */
-void arch_read_unlock(arch_rwlock_t *rwlock);
-
-/**
- * arch_write_unlock() - release a write lock.
- */
-void arch_write_unlock(arch_rwlock_t *rwlock);
-
-#endif /* _ASM_TILE_SPINLOCK_32_H */
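
TICKET_QUANTUM is 2 because the tilepro tns instruction always writes the literal 1: if tickets are only ever even, an odd next_ticket unambiguously means a tns is mid-flight. A single-threaded sketch of just the ticket numbering (no atomics, purely to show the arithmetic; the real lock does the tns dance):

#include <stdio.h>

#define TICKET_QUANTUM 2    /* even tickets only; odd == tns in progress */

struct sketch_lock {
	int next_ticket;       /* next ticket to hand out */
	int current_ticket;    /* ticket that owns the lock */
};

static int is_locked(const struct sketch_lock *l)
{
	return l->next_ticket != l->current_ticket;
}

int main(void)
{
	struct sketch_lock l = { 0, 0 };

	l.next_ticket += TICKET_QUANTUM;        /* one waiter took ticket 0 */
	printf("locked? %d\n", is_locked(&l));  /* 1 */

	l.current_ticket += TICKET_QUANTUM;     /* unlock: hand off to ticket 2 */
	printf("locked? %d\n", is_locked(&l));  /* 0 */
	return 0;
}
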
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
deleted file mode 100644
index 5b616ef642a8..000000000000
--- a/arch/tile/include/asm/spinlock_64.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
- * (the type definitions are in asm/spinlock_types.h)
- */
-
-#ifndef _ASM_TILE_SPINLOCK_64_H
-#define _ASM_TILE_SPINLOCK_64_H
-
-#include <linux/compiler.h>
-
-/* Shifts and masks for the various fields in "lock". */
-#define __ARCH_SPIN_CURRENT_SHIFT 17
-#define __ARCH_SPIN_NEXT_MASK 0x7fff
-#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
-
-/*
- * Return the "current" portion of a ticket lock value,
- * i.e. the number that currently owns the lock.
- */
-static inline u32 arch_spin_current(u32 val)
-{
- return val >> __ARCH_SPIN_CURRENT_SHIFT;
-}
-
-/*
- * Return the "next" portion of a ticket lock value,
- * i.e. the number that the next task to try to acquire the lock will get.
- */
-static inline u32 arch_spin_next(u32 val)
-{
- return val & __ARCH_SPIN_NEXT_MASK;
-}
-
-/* The lock is locked if a task would have to wait to get it. */
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- /* Use READ_ONCE() to ensure that calling this in a loop is OK. */
- u32 val = READ_ONCE(lock->lock);
- return arch_spin_current(val) != arch_spin_next(val);
-}
-
-/* Bump the current ticket so the next task owns the lock. */
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- wmb(); /* guarantee anything modified under the lock is visible */
- __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
-}
-
-void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
-
-/* Grab the "next" ticket number and bump it atomically.
- * If the current ticket is not ours, go to the slow path.
- * We also take the slow path if the "next" value overflows.
- */
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- u32 val = __insn_fetchadd4(&lock->lock, 1);
- u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
- if (unlikely(arch_spin_current(val) != ticket))
- arch_spin_lock_slow(lock, ticket);
-}
-
-/* Try to get the lock, and return whether we succeeded. */
-int arch_spin_trylock(arch_spinlock_t *lock);
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * We use fetchadd() for readers, and fetchor() with the sign bit
- * for writers.
- */
-
-#define __WRITE_LOCK_BIT (1 << 31)
-
-static inline int arch_write_val_locked(int val)
-{
- return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
-}
-
-extern void __read_lock_failed(arch_rwlock_t *rw);
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- u32 val = __insn_fetchaddgez4(&rw->lock, 1);
- if (unlikely(arch_write_val_locked(val)))
- __read_lock_failed(rw);
-}
-
-extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
- if (unlikely(val != 0))
- __write_lock_failed(rw, val);
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- __insn_mf();
- __insn_fetchadd4(&rw->lock, -1);
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- __insn_mf();
- __insn_exch4(&rw->lock, 0); /* Avoid waiting in the write buffer. */
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
- if (likely(val == 0))
- return 1;
- if (!arch_write_val_locked(val))
- __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
- return 0;
-}
-
-#endif /* _ASM_TILE_SPINLOCK_64_H */
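
The 64-bit variant packs both counters into one 32-bit word precisely so that a single fetchadd can take a ticket and observe the current owner at the same time: bits 0-14 hold "next", bit 15 absorbs overflow, and bits 17 and up hold "current". A C11-atomics sketch of the uncontended path only (the real lock also handles the overflow bit and a spinning slow path):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CURRENT_SHIFT  17
#define NEXT_MASK      0x7fff

static uint32_t spin_current(uint32_t v) { return v >> CURRENT_SHIFT; }
static uint32_t spin_next(uint32_t v)    { return v & NEXT_MASK; }

int main(void)
{
	_Atomic uint32_t lock = 0;

	/* Take a ticket: bump "next" and inspect the value we saw. */
	uint32_t val = atomic_fetch_add(&lock, 1);
	if (spin_current(val) == spin_next(val))
		printf("got the lock with ticket %u\n", spin_next(val));

	/* Unlock: bump "current" so the next ticket owns the lock. */
	atomic_fetch_add(&lock, 1U << CURRENT_SHIFT);
	printf("lock word now %#x\n", (unsigned)lock);
	return 0;
}
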
diff --git a/arch/tile/include/asm/spinlock_types.h b/arch/tile/include/asm/spinlock_types.h
deleted file mode 100644
index a71f59b49c50..000000000000
--- a/arch/tile/include/asm/spinlock_types.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SPINLOCK_TYPES_H
-#define _ASM_TILE_SPINLOCK_TYPES_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-#ifdef __tilegx__
-
-/* Low 15 bits are "next"; high 15 bits are "current". */
-typedef struct arch_spinlock {
- unsigned int lock;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
-
-/* High bit is "writer owns"; low 31 bits are a count of readers. */
-typedef struct arch_rwlock {
- unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
-
-#else
-
-typedef struct arch_spinlock {
- /* Next ticket number to hand out. */
- int next_ticket;
- /* The ticket number that currently owns this lock. */
- int current_ticket;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0, 0 }
-
-/*
- * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next",
- * byte 2 for ticket-lock "current", byte 3 for reader count.
- */
-typedef struct arch_rwlock {
- unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
-
-#endif
-#endif /* _ASM_TILE_SPINLOCK_TYPES_H */
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
deleted file mode 100644
index 3573325e340b..000000000000
--- a/arch/tile/include/asm/stack.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_STACK_H
-#define _ASM_TILE_STACK_H
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-
-#include <asm/backtrace.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-/* Everything we need to keep track of a backtrace iteration */
-struct KBacktraceIterator {
- BacktraceIterator it;
- struct task_struct *task; /* task we are backtracing */
- int end; /* iteration complete. */
- int new_context; /* new context is starting */
- int profile; /* profiling, so stop on async intrpt */
- int verbose; /* printk extra info (don't want to
- * do this for profiling) */
- int is_current; /* backtracing current task */
-};
-
-/* Iteration methods for kernel backtraces */
-
-/*
- * Initialize a KBacktraceIterator from a task_struct, and optionally from
- * a set of registers. If the registers are omitted, the process is
- * assumed to be descheduled, and registers are read from the process's
- * thread_struct and stack. "verbose" means to printk some additional
- * information about fault handlers as we pass them on the stack.
- */
-extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
- struct task_struct *, struct pt_regs *);
-
-/* Initialize iterator based on current stack. */
-extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt);
-
-/* Helper method for above. */
-extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt,
- ulong pc, ulong lr, ulong sp, ulong r52);
-
-/* No more frames? */
-extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
-
-/* Advance to the next frame. */
-extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);
-
-/* Dump just the contents of the pt_regs structure. */
-extern void tile_show_regs(struct pt_regs *);
-
-/*
- * Dump stack given complete register info. Use only from the
- * architecture-specific code; show_stack()
- * and dump_stack() are architecture-independent entry points.
- */
-extern void tile_show_stack(struct KBacktraceIterator *);
-
-#endif /* _ASM_TILE_STACK_H */
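
Callers walk the iterator with the usual init / end / next shape. A sketch of the typical traversal, written against the declarations above; it is kernel-context code that compiles only inside the tile tree, and it assumes the underlying BacktraceIterator exposes the frame pc, as the tile backtracer did:

/* Illustrative only. */
static void dump_task_stack(struct task_struct *task)
{
	struct KBacktraceIterator kbt;

	/* NULL regs: treat the task as descheduled and read its
	 * registers from thread_struct and the stack. */
	KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		pr_info("  frame pc %#lx\n", kbt.it.pc);
}
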
diff --git a/arch/tile/include/asm/string.h b/arch/tile/include/asm/string.h
deleted file mode 100644
index 92b271bd9ebd..000000000000
--- a/arch/tile/include/asm/string.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_STRING_H
-#define _ASM_TILE_STRING_H
-
-#define __HAVE_ARCH_MEMCHR
-#define __HAVE_ARCH_MEMSET
-#define __HAVE_ARCH_MEMCPY
-#define __HAVE_ARCH_MEMMOVE
-#define __HAVE_ARCH_STRCHR
-#define __HAVE_ARCH_STRLEN
-#define __HAVE_ARCH_STRNLEN
-
-extern __kernel_size_t strlen(const char *);
-extern __kernel_size_t strnlen(const char *, __kernel_size_t);
-extern char *strchr(const char *s, int c);
-extern void *memchr(const void *s, int c, size_t n);
-extern void *memset(void *, int, __kernel_size_t);
-extern void *memcpy(void *, const void *, __kernel_size_t);
-extern void *memmove(void *, const void *, __kernel_size_t);
-
-#endif /* _ASM_TILE_STRING_H */
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
deleted file mode 100644
index 34ee72705521..000000000000
--- a/arch/tile/include/asm/switch_to.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SWITCH_TO_H
-#define _ASM_TILE_SWITCH_TO_H
-
-#include <arch/sim_def.h>
-
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking whether n is the current task, in which case it does nothing.
- * The number of callee-saved registers saved on the kernel stack
- * is defined here for use in copy_thread() and must agree with __switch_to().
- */
-#define CALLEE_SAVED_FIRST_REG 30
-#define CALLEE_SAVED_REGS_COUNT	24   /* r30 to r52, plus an empty slot to align */
-
-#ifndef __ASSEMBLY__
-
-struct task_struct;
-
-/*
- * Pause the DMA engine and static network before task switching.
- */
-#define prepare_arch_switch(next) _prepare_arch_switch(next)
-void _prepare_arch_switch(struct task_struct *next);
-
-struct task_struct;
-#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
-extern struct task_struct *_switch_to(struct task_struct *prev,
- struct task_struct *next);
-
-/* Helper function for _switch_to(). */
-extern struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next,
- unsigned long new_system_save_k_0);
-
-/* Address that switched-away from tasks are at. */
-extern unsigned long get_switch_to_pc(void);
-
-/*
- * Kernel threads can check to see if they need to migrate their
- * stack whenever they return from a context switch; for user
- * threads, we defer until they are returning to user-space.
- * We defer homecache migration until the runqueue lock is released.
- */
-#define finish_arch_post_lock_switch() do { \
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
- (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
- if (current->mm == NULL && !kstack_hash && \
- current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
- homecache_migrate_kthread(); \
-} while (0)
-
-/* Support function for forking a new task. */
-void ret_from_fork(void);
-
-/* Support function for forking a new kernel thread. */
-void ret_from_kernel_thread(void *fn, void *arg);
-
-/* Called from ret_from_xxx() when a new process starts up. */
-struct task_struct *sim_notify_fork(struct task_struct *prev);
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_SWITCH_TO_H */
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
deleted file mode 100644
index 373d73064ea1..000000000000
--- a/arch/tile/include/asm/syscall.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * See asm-generic/syscall.h for descriptions of what we must do here.
- */
-
-#ifndef _ASM_TILE_SYSCALL_H
-#define _ASM_TILE_SYSCALL_H
-
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/audit.h>
-#include <linux/compat.h>
-#include <arch/abi.h>
-
-/* The array of function pointers for syscalls. */
-extern void *sys_call_table[];
-#ifdef CONFIG_COMPAT
-extern void *compat_sys_call_table[];
-#endif
-
-/*
- * Only the low 32 bits of orig_r0 are meaningful, so we return int.
- * This importantly ignores the high bits on 64-bit, so comparisons
- * sign-extend the low 32 bits.
- */
-static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs)
-{
- return regs->regs[TREG_SYSCALL_NR];
-}
-
-static inline void syscall_rollback(struct task_struct *task,
- struct pt_regs *regs)
-{
- regs->regs[0] = regs->orig_r0;
-}
-
-static inline long syscall_get_error(struct task_struct *task,
- struct pt_regs *regs)
-{
- unsigned long error = regs->regs[0];
- return IS_ERR_VALUE(error) ? error : 0;
-}
-
-static inline long syscall_get_return_value(struct task_struct *task,
- struct pt_regs *regs)
-{
- return regs->regs[0];
-}
-
-static inline void syscall_set_return_value(struct task_struct *task,
- struct pt_regs *regs,
- int error, long val)
-{
- if (error) {
- /* R0 is the passed-in negative error, R1 is positive. */
- regs->regs[0] = error;
- regs->regs[1] = -error;
- } else {
- /* R1 set to zero to indicate no error. */
- regs->regs[0] = val;
- regs->regs[1] = 0;
- }
-}
-
-static inline void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs,
- unsigned int i, unsigned int n,
- unsigned long *args)
-{
- BUG_ON(i + n > 6);
-	memcpy(args, &regs->regs[i], n * sizeof(args[0]));
-}
-
-static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs,
- unsigned int i, unsigned int n,
- const unsigned long *args)
-{
- BUG_ON(i + n > 6);
-	memcpy(&regs->regs[i], args, n * sizeof(args[0]));
-}
-
-/*
- * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
- * tile has the same system calls on both little- and big-endian.
- */
-static inline int syscall_get_arch(void)
-{
- if (is_compat_task())
- return AUDIT_ARCH_TILEGX32;
-
-#ifdef CONFIG_TILEGX
- return AUDIT_ARCH_TILEGX;
-#else
- return AUDIT_ARCH_TILEPRO;
-#endif
-}
-
-#endif /* _ASM_TILE_SYSCALL_H */
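
The dual-register convention in syscall_set_return_value() above (negative errno in r0, its positive twin in r1, r1 == 0 on success) lets callers test for failure without knowing errno's range. A tiny standalone model of the encode side, with a made-up error value:

#include <stdio.h>

struct regs { long r0, r1; };

/* Mirrors syscall_set_return_value()'s convention. */
static void set_return(struct regs *r, int error, long val)
{
	if (error) {
		r->r0 = error;      /* negative errno */
		r->r1 = -error;     /* positive copy */
	} else {
		r->r0 = val;
		r->r1 = 0;          /* zero means success */
	}
}

int main(void)
{
	struct regs r;

	set_return(&r, -22, 0);     /* e.g. -EINVAL */
	printf("r0=%ld r1=%ld -> %s\n", r.r0, r.r1, r.r1 ? "error" : "ok");

	set_return(&r, 0, 42);
	printf("r0=%ld r1=%ld -> %s\n", r.r0, r.r1, r.r1 ? "error" : "ok");
	return 0;
}
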
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
deleted file mode 100644
index 07b298450ef2..000000000000
--- a/arch/tile/include/asm/syscalls.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * syscalls.h - Linux syscall interfaces (arch-specific)
- *
- * Copyright (c) 2008 Jaswinder Singh Rajput
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SYSCALLS_H
-#define _ASM_TILE_SYSCALLS_H
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/signal.h>
-#include <linux/types.h>
-#include <linux/compat.h>
-
-/*
- * Note that by convention, any syscall which requires the current
- * register set takes an additional "struct pt_regs *" pointer; a
- * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
- * jumps to sys_xxx().
- */
-
-/* kernel/sys.c */
-ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count);
-long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
- u32 len, int advice);
-int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi, int advice);
-long sys_cacheflush(unsigned long addr, unsigned long len,
- unsigned long flags);
-#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
-#define sys_mmap sys_mmap
-#endif
-
-#ifndef __tilegx__
-/* mm/fault.c */
-long sys_cmpxchg_badaddr(unsigned long address);
-#endif
-
-#ifdef CONFIG_COMPAT
-/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
-long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
-long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
-long sys_truncate64(const char __user *path, loff_t length);
-long sys_ftruncate64(unsigned int fd, loff_t length);
-#endif
-
-/* Provide versions of standard syscalls that use current_pt_regs(). */
-long sys_rt_sigreturn(void);
-#define sys_rt_sigreturn sys_rt_sigreturn
-
-/* These are the intvec*.S trampolines. */
-long _sys_rt_sigreturn(void);
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid);
-
-#include <asm-generic/syscalls.h>
-
-#endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
deleted file mode 100644
index 2adcacd85749..000000000000
--- a/arch/tile/include/asm/thread_info.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (C) 2002 David Howells (dhowells@redhat.com)
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_THREAD_INFO_H
-#define _ASM_TILE_THREAD_INFO_H
-
-#include <asm/processor.h>
-#include <asm/page.h>
-#ifndef __ASSEMBLY__
-
-/*
- * Low level task data that assembly code needs immediate access to.
- * The structure is placed at the bottom of the supervisor stack.
- */
-struct thread_info {
- struct task_struct *task; /* main task structure */
- unsigned long flags; /* low level flags */
- unsigned long status; /* thread-synchronous flags */
- __u32 homecache_cpu; /* CPU we are homecached on */
- __u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
-
- mm_segment_t addr_limit; /* thread address space
- (KERNEL_DS or USER_DS) */
- struct single_step_state *step_state; /* single step state
- (if non-zero) */
- int align_ctl; /* controls unaligned access */
-#ifdef __tilegx__
- unsigned long unalign_jit_tmp[4]; /* temp r0..r3 storage */
- void __user *unalign_jit_base; /* unalign fixup JIT base */
-#endif
- bool in_backtrace; /* currently doing backtrace? */
-};
-
-/*
- * macros/functions for gaining access to the thread information structure.
- */
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
- .step_state = NULL, \
- .align_ctl = 0, \
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#if PAGE_SIZE < 8192
-#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
-#else
-#define THREAD_SIZE_ORDER (0)
-#endif
-#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)
-
-#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)
-
-#define STACK_WARN (THREAD_SIZE/8)
-
-#ifndef __ASSEMBLY__
-
-void arch_release_thread_stack(unsigned long *stack);
-
-/* How to get the thread information struct from C. */
-register unsigned long stack_pointer __asm__("sp");
-
-#define current_thread_info() \
- ((struct thread_info *)(stack_pointer & -THREAD_SIZE))
-
-/* Sit on a nap instruction until interrupted. */
-extern void smp_nap(void);
-
-/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
-extern void _cpu_idle(void);
-
-#else /* __ASSEMBLY__ */
-
-/*
- * How to get the thread information struct from assembly.
- * Note that we use different macros since different architectures
- * have different semantics in their "mm" instruction and we would
- * like to guarantee that the macro expands to exactly one instruction.
- */
-#ifdef __tilegx__
-#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
-#else
-#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
-#endif
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * Thread information flags that various assembly files may need to access.
- * Keep flags accessed frequently in low bits, particularly since it makes
- * it easier to build constants in assembly.
- */
-#define TIF_SIGPENDING 0 /* signal pending */
-#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
-#define TIF_SINGLESTEP 2 /* restore singlestep on return to
- user mode */
-#define TIF_ASYNC_TLB 3 /* got an async TLB fault in kernel */
-#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
-#define TIF_SECCOMP 6 /* secure computing */
-#define TIF_MEMDIE 7 /* OOM killer at work */
-#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
-#define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
-#define TIF_POLLING_NRFLAG 10 /* idle is polling for TIF_NEED_RESCHED */
-#define TIF_NOHZ 11 /* in adaptive nohz mode */
-
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_ASYNC_TLB (1<<TIF_ASYNC_TLB)
-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_MEMDIE (1<<TIF_MEMDIE)
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_NOHZ (1<<TIF_NOHZ)
-
-/* Work to do as we loop to exit to user space. */
-#define _TIF_WORK_MASK \
- (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)
-
-/* Work to do on any return to user space. */
-#define _TIF_ALLWORK_MASK \
- (_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)
-
-/* Work to do at syscall entry. */
-#define _TIF_SYSCALL_ENTRY_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
-
-/* Work to do at syscall exit. */
-#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
-
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#ifdef __tilegx__
-#define TS_COMPAT 0x0001 /* 32-bit compatibility mode */
-#endif
-
-#endif /* _ASM_TILE_THREAD_INFO_H */
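
current_thread_info() above relies on kernel stacks being THREAD_SIZE-aligned,
so masking the stack pointer yields the stack base, where struct thread_info
sits. A worked example of the arithmetic with hypothetical values (assuming
PAGE_SIZE is 4 KB, so THREAD_SIZE_ORDER = 13 - 12 = 1 and THREAD_SIZE = 8 KB):

    static struct thread_info *sketch_current_thread_info(void)
    {
    	unsigned long sp = 0xfc123abcUL;	/* hypothetical SP value */
    	/* -8192UL == ~(8192UL - 1): clears the low 13 bits. */
    	unsigned long base = sp & -8192UL;	/* == 0xfc122000 */
    	return (struct thread_info *)base;
    }
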
diff --git a/arch/tile/include/asm/tile-desc.h b/arch/tile/include/asm/tile-desc.h
deleted file mode 100644
index 43849bf79dcb..000000000000
--- a/arch/tile/include/asm/tile-desc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __tilegx__
-#include <asm/tile-desc_32.h>
-#else
-#include <asm/tile-desc_64.h>
-#endif
diff --git a/arch/tile/include/asm/tile-desc_32.h b/arch/tile/include/asm/tile-desc_32.h
deleted file mode 100644
index f09c5c43b0b2..000000000000
--- a/arch/tile/include/asm/tile-desc_32.h
+++ /dev/null
@@ -1,553 +0,0 @@
-/* TILEPro opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- *
- *
- *
- */
-
-#ifndef opcode_tilepro_h
-#define opcode_tilepro_h
-
-#include <arch/opcode.h>
-
-
-enum
-{
- TILEPRO_MAX_OPERANDS = 5 /* mm */
-};
-
-typedef enum
-{
- TILEPRO_OPC_BPT,
- TILEPRO_OPC_INFO,
- TILEPRO_OPC_INFOL,
- TILEPRO_OPC_J,
- TILEPRO_OPC_JAL,
- TILEPRO_OPC_MOVE,
- TILEPRO_OPC_MOVE_SN,
- TILEPRO_OPC_MOVEI,
- TILEPRO_OPC_MOVEI_SN,
- TILEPRO_OPC_MOVELI,
- TILEPRO_OPC_MOVELI_SN,
- TILEPRO_OPC_MOVELIS,
- TILEPRO_OPC_PREFETCH,
- TILEPRO_OPC_RAISE,
- TILEPRO_OPC_ADD,
- TILEPRO_OPC_ADD_SN,
- TILEPRO_OPC_ADDB,
- TILEPRO_OPC_ADDB_SN,
- TILEPRO_OPC_ADDBS_U,
- TILEPRO_OPC_ADDBS_U_SN,
- TILEPRO_OPC_ADDH,
- TILEPRO_OPC_ADDH_SN,
- TILEPRO_OPC_ADDHS,
- TILEPRO_OPC_ADDHS_SN,
- TILEPRO_OPC_ADDI,
- TILEPRO_OPC_ADDI_SN,
- TILEPRO_OPC_ADDIB,
- TILEPRO_OPC_ADDIB_SN,
- TILEPRO_OPC_ADDIH,
- TILEPRO_OPC_ADDIH_SN,
- TILEPRO_OPC_ADDLI,
- TILEPRO_OPC_ADDLI_SN,
- TILEPRO_OPC_ADDLIS,
- TILEPRO_OPC_ADDS,
- TILEPRO_OPC_ADDS_SN,
- TILEPRO_OPC_ADIFFB_U,
- TILEPRO_OPC_ADIFFB_U_SN,
- TILEPRO_OPC_ADIFFH,
- TILEPRO_OPC_ADIFFH_SN,
- TILEPRO_OPC_AND,
- TILEPRO_OPC_AND_SN,
- TILEPRO_OPC_ANDI,
- TILEPRO_OPC_ANDI_SN,
- TILEPRO_OPC_AULI,
- TILEPRO_OPC_AVGB_U,
- TILEPRO_OPC_AVGB_U_SN,
- TILEPRO_OPC_AVGH,
- TILEPRO_OPC_AVGH_SN,
- TILEPRO_OPC_BBNS,
- TILEPRO_OPC_BBNS_SN,
- TILEPRO_OPC_BBNST,
- TILEPRO_OPC_BBNST_SN,
- TILEPRO_OPC_BBS,
- TILEPRO_OPC_BBS_SN,
- TILEPRO_OPC_BBST,
- TILEPRO_OPC_BBST_SN,
- TILEPRO_OPC_BGEZ,
- TILEPRO_OPC_BGEZ_SN,
- TILEPRO_OPC_BGEZT,
- TILEPRO_OPC_BGEZT_SN,
- TILEPRO_OPC_BGZ,
- TILEPRO_OPC_BGZ_SN,
- TILEPRO_OPC_BGZT,
- TILEPRO_OPC_BGZT_SN,
- TILEPRO_OPC_BITX,
- TILEPRO_OPC_BITX_SN,
- TILEPRO_OPC_BLEZ,
- TILEPRO_OPC_BLEZ_SN,
- TILEPRO_OPC_BLEZT,
- TILEPRO_OPC_BLEZT_SN,
- TILEPRO_OPC_BLZ,
- TILEPRO_OPC_BLZ_SN,
- TILEPRO_OPC_BLZT,
- TILEPRO_OPC_BLZT_SN,
- TILEPRO_OPC_BNZ,
- TILEPRO_OPC_BNZ_SN,
- TILEPRO_OPC_BNZT,
- TILEPRO_OPC_BNZT_SN,
- TILEPRO_OPC_BYTEX,
- TILEPRO_OPC_BYTEX_SN,
- TILEPRO_OPC_BZ,
- TILEPRO_OPC_BZ_SN,
- TILEPRO_OPC_BZT,
- TILEPRO_OPC_BZT_SN,
- TILEPRO_OPC_CLZ,
- TILEPRO_OPC_CLZ_SN,
- TILEPRO_OPC_CRC32_32,
- TILEPRO_OPC_CRC32_32_SN,
- TILEPRO_OPC_CRC32_8,
- TILEPRO_OPC_CRC32_8_SN,
- TILEPRO_OPC_CTZ,
- TILEPRO_OPC_CTZ_SN,
- TILEPRO_OPC_DRAIN,
- TILEPRO_OPC_DTLBPR,
- TILEPRO_OPC_DWORD_ALIGN,
- TILEPRO_OPC_DWORD_ALIGN_SN,
- TILEPRO_OPC_FINV,
- TILEPRO_OPC_FLUSH,
- TILEPRO_OPC_FNOP,
- TILEPRO_OPC_ICOH,
- TILEPRO_OPC_ILL,
- TILEPRO_OPC_INTHB,
- TILEPRO_OPC_INTHB_SN,
- TILEPRO_OPC_INTHH,
- TILEPRO_OPC_INTHH_SN,
- TILEPRO_OPC_INTLB,
- TILEPRO_OPC_INTLB_SN,
- TILEPRO_OPC_INTLH,
- TILEPRO_OPC_INTLH_SN,
- TILEPRO_OPC_INV,
- TILEPRO_OPC_IRET,
- TILEPRO_OPC_JALB,
- TILEPRO_OPC_JALF,
- TILEPRO_OPC_JALR,
- TILEPRO_OPC_JALRP,
- TILEPRO_OPC_JB,
- TILEPRO_OPC_JF,
- TILEPRO_OPC_JR,
- TILEPRO_OPC_JRP,
- TILEPRO_OPC_LB,
- TILEPRO_OPC_LB_SN,
- TILEPRO_OPC_LB_U,
- TILEPRO_OPC_LB_U_SN,
- TILEPRO_OPC_LBADD,
- TILEPRO_OPC_LBADD_SN,
- TILEPRO_OPC_LBADD_U,
- TILEPRO_OPC_LBADD_U_SN,
- TILEPRO_OPC_LH,
- TILEPRO_OPC_LH_SN,
- TILEPRO_OPC_LH_U,
- TILEPRO_OPC_LH_U_SN,
- TILEPRO_OPC_LHADD,
- TILEPRO_OPC_LHADD_SN,
- TILEPRO_OPC_LHADD_U,
- TILEPRO_OPC_LHADD_U_SN,
- TILEPRO_OPC_LNK,
- TILEPRO_OPC_LNK_SN,
- TILEPRO_OPC_LW,
- TILEPRO_OPC_LW_SN,
- TILEPRO_OPC_LW_NA,
- TILEPRO_OPC_LW_NA_SN,
- TILEPRO_OPC_LWADD,
- TILEPRO_OPC_LWADD_SN,
- TILEPRO_OPC_LWADD_NA,
- TILEPRO_OPC_LWADD_NA_SN,
- TILEPRO_OPC_MAXB_U,
- TILEPRO_OPC_MAXB_U_SN,
- TILEPRO_OPC_MAXH,
- TILEPRO_OPC_MAXH_SN,
- TILEPRO_OPC_MAXIB_U,
- TILEPRO_OPC_MAXIB_U_SN,
- TILEPRO_OPC_MAXIH,
- TILEPRO_OPC_MAXIH_SN,
- TILEPRO_OPC_MF,
- TILEPRO_OPC_MFSPR,
- TILEPRO_OPC_MINB_U,
- TILEPRO_OPC_MINB_U_SN,
- TILEPRO_OPC_MINH,
- TILEPRO_OPC_MINH_SN,
- TILEPRO_OPC_MINIB_U,
- TILEPRO_OPC_MINIB_U_SN,
- TILEPRO_OPC_MINIH,
- TILEPRO_OPC_MINIH_SN,
- TILEPRO_OPC_MM,
- TILEPRO_OPC_MNZ,
- TILEPRO_OPC_MNZ_SN,
- TILEPRO_OPC_MNZB,
- TILEPRO_OPC_MNZB_SN,
- TILEPRO_OPC_MNZH,
- TILEPRO_OPC_MNZH_SN,
- TILEPRO_OPC_MTSPR,
- TILEPRO_OPC_MULHH_SS,
- TILEPRO_OPC_MULHH_SS_SN,
- TILEPRO_OPC_MULHH_SU,
- TILEPRO_OPC_MULHH_SU_SN,
- TILEPRO_OPC_MULHH_UU,
- TILEPRO_OPC_MULHH_UU_SN,
- TILEPRO_OPC_MULHHA_SS,
- TILEPRO_OPC_MULHHA_SS_SN,
- TILEPRO_OPC_MULHHA_SU,
- TILEPRO_OPC_MULHHA_SU_SN,
- TILEPRO_OPC_MULHHA_UU,
- TILEPRO_OPC_MULHHA_UU_SN,
- TILEPRO_OPC_MULHHSA_UU,
- TILEPRO_OPC_MULHHSA_UU_SN,
- TILEPRO_OPC_MULHL_SS,
- TILEPRO_OPC_MULHL_SS_SN,
- TILEPRO_OPC_MULHL_SU,
- TILEPRO_OPC_MULHL_SU_SN,
- TILEPRO_OPC_MULHL_US,
- TILEPRO_OPC_MULHL_US_SN,
- TILEPRO_OPC_MULHL_UU,
- TILEPRO_OPC_MULHL_UU_SN,
- TILEPRO_OPC_MULHLA_SS,
- TILEPRO_OPC_MULHLA_SS_SN,
- TILEPRO_OPC_MULHLA_SU,
- TILEPRO_OPC_MULHLA_SU_SN,
- TILEPRO_OPC_MULHLA_US,
- TILEPRO_OPC_MULHLA_US_SN,
- TILEPRO_OPC_MULHLA_UU,
- TILEPRO_OPC_MULHLA_UU_SN,
- TILEPRO_OPC_MULHLSA_UU,
- TILEPRO_OPC_MULHLSA_UU_SN,
- TILEPRO_OPC_MULLL_SS,
- TILEPRO_OPC_MULLL_SS_SN,
- TILEPRO_OPC_MULLL_SU,
- TILEPRO_OPC_MULLL_SU_SN,
- TILEPRO_OPC_MULLL_UU,
- TILEPRO_OPC_MULLL_UU_SN,
- TILEPRO_OPC_MULLLA_SS,
- TILEPRO_OPC_MULLLA_SS_SN,
- TILEPRO_OPC_MULLLA_SU,
- TILEPRO_OPC_MULLLA_SU_SN,
- TILEPRO_OPC_MULLLA_UU,
- TILEPRO_OPC_MULLLA_UU_SN,
- TILEPRO_OPC_MULLLSA_UU,
- TILEPRO_OPC_MULLLSA_UU_SN,
- TILEPRO_OPC_MVNZ,
- TILEPRO_OPC_MVNZ_SN,
- TILEPRO_OPC_MVZ,
- TILEPRO_OPC_MVZ_SN,
- TILEPRO_OPC_MZ,
- TILEPRO_OPC_MZ_SN,
- TILEPRO_OPC_MZB,
- TILEPRO_OPC_MZB_SN,
- TILEPRO_OPC_MZH,
- TILEPRO_OPC_MZH_SN,
- TILEPRO_OPC_NAP,
- TILEPRO_OPC_NOP,
- TILEPRO_OPC_NOR,
- TILEPRO_OPC_NOR_SN,
- TILEPRO_OPC_OR,
- TILEPRO_OPC_OR_SN,
- TILEPRO_OPC_ORI,
- TILEPRO_OPC_ORI_SN,
- TILEPRO_OPC_PACKBS_U,
- TILEPRO_OPC_PACKBS_U_SN,
- TILEPRO_OPC_PACKHB,
- TILEPRO_OPC_PACKHB_SN,
- TILEPRO_OPC_PACKHS,
- TILEPRO_OPC_PACKHS_SN,
- TILEPRO_OPC_PACKLB,
- TILEPRO_OPC_PACKLB_SN,
- TILEPRO_OPC_PCNT,
- TILEPRO_OPC_PCNT_SN,
- TILEPRO_OPC_RL,
- TILEPRO_OPC_RL_SN,
- TILEPRO_OPC_RLI,
- TILEPRO_OPC_RLI_SN,
- TILEPRO_OPC_S1A,
- TILEPRO_OPC_S1A_SN,
- TILEPRO_OPC_S2A,
- TILEPRO_OPC_S2A_SN,
- TILEPRO_OPC_S3A,
- TILEPRO_OPC_S3A_SN,
- TILEPRO_OPC_SADAB_U,
- TILEPRO_OPC_SADAB_U_SN,
- TILEPRO_OPC_SADAH,
- TILEPRO_OPC_SADAH_SN,
- TILEPRO_OPC_SADAH_U,
- TILEPRO_OPC_SADAH_U_SN,
- TILEPRO_OPC_SADB_U,
- TILEPRO_OPC_SADB_U_SN,
- TILEPRO_OPC_SADH,
- TILEPRO_OPC_SADH_SN,
- TILEPRO_OPC_SADH_U,
- TILEPRO_OPC_SADH_U_SN,
- TILEPRO_OPC_SB,
- TILEPRO_OPC_SBADD,
- TILEPRO_OPC_SEQ,
- TILEPRO_OPC_SEQ_SN,
- TILEPRO_OPC_SEQB,
- TILEPRO_OPC_SEQB_SN,
- TILEPRO_OPC_SEQH,
- TILEPRO_OPC_SEQH_SN,
- TILEPRO_OPC_SEQI,
- TILEPRO_OPC_SEQI_SN,
- TILEPRO_OPC_SEQIB,
- TILEPRO_OPC_SEQIB_SN,
- TILEPRO_OPC_SEQIH,
- TILEPRO_OPC_SEQIH_SN,
- TILEPRO_OPC_SH,
- TILEPRO_OPC_SHADD,
- TILEPRO_OPC_SHL,
- TILEPRO_OPC_SHL_SN,
- TILEPRO_OPC_SHLB,
- TILEPRO_OPC_SHLB_SN,
- TILEPRO_OPC_SHLH,
- TILEPRO_OPC_SHLH_SN,
- TILEPRO_OPC_SHLI,
- TILEPRO_OPC_SHLI_SN,
- TILEPRO_OPC_SHLIB,
- TILEPRO_OPC_SHLIB_SN,
- TILEPRO_OPC_SHLIH,
- TILEPRO_OPC_SHLIH_SN,
- TILEPRO_OPC_SHR,
- TILEPRO_OPC_SHR_SN,
- TILEPRO_OPC_SHRB,
- TILEPRO_OPC_SHRB_SN,
- TILEPRO_OPC_SHRH,
- TILEPRO_OPC_SHRH_SN,
- TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SHRI_SN,
- TILEPRO_OPC_SHRIB,
- TILEPRO_OPC_SHRIB_SN,
- TILEPRO_OPC_SHRIH,
- TILEPRO_OPC_SHRIH_SN,
- TILEPRO_OPC_SLT,
- TILEPRO_OPC_SLT_SN,
- TILEPRO_OPC_SLT_U,
- TILEPRO_OPC_SLT_U_SN,
- TILEPRO_OPC_SLTB,
- TILEPRO_OPC_SLTB_SN,
- TILEPRO_OPC_SLTB_U,
- TILEPRO_OPC_SLTB_U_SN,
- TILEPRO_OPC_SLTE,
- TILEPRO_OPC_SLTE_SN,
- TILEPRO_OPC_SLTE_U,
- TILEPRO_OPC_SLTE_U_SN,
- TILEPRO_OPC_SLTEB,
- TILEPRO_OPC_SLTEB_SN,
- TILEPRO_OPC_SLTEB_U,
- TILEPRO_OPC_SLTEB_U_SN,
- TILEPRO_OPC_SLTEH,
- TILEPRO_OPC_SLTEH_SN,
- TILEPRO_OPC_SLTEH_U,
- TILEPRO_OPC_SLTEH_U_SN,
- TILEPRO_OPC_SLTH,
- TILEPRO_OPC_SLTH_SN,
- TILEPRO_OPC_SLTH_U,
- TILEPRO_OPC_SLTH_U_SN,
- TILEPRO_OPC_SLTI,
- TILEPRO_OPC_SLTI_SN,
- TILEPRO_OPC_SLTI_U,
- TILEPRO_OPC_SLTI_U_SN,
- TILEPRO_OPC_SLTIB,
- TILEPRO_OPC_SLTIB_SN,
- TILEPRO_OPC_SLTIB_U,
- TILEPRO_OPC_SLTIB_U_SN,
- TILEPRO_OPC_SLTIH,
- TILEPRO_OPC_SLTIH_SN,
- TILEPRO_OPC_SLTIH_U,
- TILEPRO_OPC_SLTIH_U_SN,
- TILEPRO_OPC_SNE,
- TILEPRO_OPC_SNE_SN,
- TILEPRO_OPC_SNEB,
- TILEPRO_OPC_SNEB_SN,
- TILEPRO_OPC_SNEH,
- TILEPRO_OPC_SNEH_SN,
- TILEPRO_OPC_SRA,
- TILEPRO_OPC_SRA_SN,
- TILEPRO_OPC_SRAB,
- TILEPRO_OPC_SRAB_SN,
- TILEPRO_OPC_SRAH,
- TILEPRO_OPC_SRAH_SN,
- TILEPRO_OPC_SRAI,
- TILEPRO_OPC_SRAI_SN,
- TILEPRO_OPC_SRAIB,
- TILEPRO_OPC_SRAIB_SN,
- TILEPRO_OPC_SRAIH,
- TILEPRO_OPC_SRAIH_SN,
- TILEPRO_OPC_SUB,
- TILEPRO_OPC_SUB_SN,
- TILEPRO_OPC_SUBB,
- TILEPRO_OPC_SUBB_SN,
- TILEPRO_OPC_SUBBS_U,
- TILEPRO_OPC_SUBBS_U_SN,
- TILEPRO_OPC_SUBH,
- TILEPRO_OPC_SUBH_SN,
- TILEPRO_OPC_SUBHS,
- TILEPRO_OPC_SUBHS_SN,
- TILEPRO_OPC_SUBS,
- TILEPRO_OPC_SUBS_SN,
- TILEPRO_OPC_SW,
- TILEPRO_OPC_SWADD,
- TILEPRO_OPC_SWINT0,
- TILEPRO_OPC_SWINT1,
- TILEPRO_OPC_SWINT2,
- TILEPRO_OPC_SWINT3,
- TILEPRO_OPC_TBLIDXB0,
- TILEPRO_OPC_TBLIDXB0_SN,
- TILEPRO_OPC_TBLIDXB1,
- TILEPRO_OPC_TBLIDXB1_SN,
- TILEPRO_OPC_TBLIDXB2,
- TILEPRO_OPC_TBLIDXB2_SN,
- TILEPRO_OPC_TBLIDXB3,
- TILEPRO_OPC_TBLIDXB3_SN,
- TILEPRO_OPC_TNS,
- TILEPRO_OPC_TNS_SN,
- TILEPRO_OPC_WH64,
- TILEPRO_OPC_XOR,
- TILEPRO_OPC_XOR_SN,
- TILEPRO_OPC_XORI,
- TILEPRO_OPC_XORI_SN,
- TILEPRO_OPC_NONE
-} tilepro_mnemonic;
-
-
-
-
-typedef enum
-{
- TILEPRO_PIPELINE_X0,
- TILEPRO_PIPELINE_X1,
- TILEPRO_PIPELINE_Y0,
- TILEPRO_PIPELINE_Y1,
- TILEPRO_PIPELINE_Y2,
-} tilepro_pipeline;
-
-#define tilepro_is_x_pipeline(p) ((int)(p) <= (int)TILEPRO_PIPELINE_X1)
-
-typedef enum
-{
- TILEPRO_OP_TYPE_REGISTER,
- TILEPRO_OP_TYPE_IMMEDIATE,
- TILEPRO_OP_TYPE_ADDRESS,
- TILEPRO_OP_TYPE_SPR
-} tilepro_operand_type;
-
-struct tilepro_operand
-{
- /* Is this operand a register, immediate or address? */
- tilepro_operand_type type;
-
- /* The default relocation type for this operand. */
- signed int default_reloc : 16;
-
- /* How many bits is this value? (used for range checking) */
- unsigned int num_bits : 5;
-
- /* Is the value signed? (used for range checking) */
- unsigned int is_signed : 1;
-
- /* Is this operand a source register? */
- unsigned int is_src_reg : 1;
-
- /* Is this operand written? (i.e. is it a destination register) */
- unsigned int is_dest_reg : 1;
-
- /* Is this operand PC-relative? */
- unsigned int is_pc_relative : 1;
-
- /* By how many bits do we right shift the value before inserting? */
- unsigned int rightshift : 2;
-
- /* Return the bits for this operand to be ORed into an existing bundle. */
- tilepro_bundle_bits (*insert) (int op);
-
- /* Extract this operand and return it. */
- unsigned int (*extract) (tilepro_bundle_bits bundle);
-};
-
-
-extern const struct tilepro_operand tilepro_operands[];
-
-/* One finite-state machine per pipe for rapid instruction decoding. */
-extern const unsigned short * const
-tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS];
-
-
-struct tilepro_opcode
-{
- /* The opcode mnemonic, e.g. "add" */
- const char *name;
-
- /* The enum value for this mnemonic. */
- tilepro_mnemonic mnemonic;
-
- /* A bit mask of which of the five pipes this instruction
- is compatible with:
- X0 0x01
- X1 0x02
- Y0 0x04
- Y1 0x08
- Y2 0x10 */
- unsigned char pipes;
-
- /* How many operands are there? */
- unsigned char num_operands;
-
- /* Which register does this write implicitly, or TREG_ZERO if none? */
- unsigned char implicitly_written_register;
-
- /* Can this be bundled with other instructions (almost always true). */
- unsigned char can_bundle;
-
- /* The description of the operands. Each of these is an
- * index into the tilepro_operands[] table. */
- unsigned char operands[TILEPRO_NUM_PIPELINE_ENCODINGS][TILEPRO_MAX_OPERANDS];
-
-};
-
-extern const struct tilepro_opcode tilepro_opcodes[];
-
-
-/* Used for non-textual disassembly into structs. */
-struct tilepro_decoded_instruction
-{
- const struct tilepro_opcode *opcode;
- const struct tilepro_operand *operands[TILEPRO_MAX_OPERANDS];
- int operand_values[TILEPRO_MAX_OPERANDS];
-};
-
-
-/* Disassemble a bundle into a struct for machine processing. */
-extern int parse_insn_tilepro(tilepro_bundle_bits bits,
- unsigned int pc,
- struct tilepro_decoded_instruction
- decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE]);
-
-
-/* Given a set of bundle bits and a specific pipe, returns which
- * instruction the bundle contains in that pipe.
- */
-extern const struct tilepro_opcode *
-find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe);
-
-
-
-#endif /* opcode_tilepro_h */
diff --git a/arch/tile/include/asm/tile-desc_64.h b/arch/tile/include/asm/tile-desc_64.h
deleted file mode 100644
index 1819efcba54d..000000000000
--- a/arch/tile/include/asm/tile-desc_64.h
+++ /dev/null
@@ -1,483 +0,0 @@
-/* TILE-Gx opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- *
- *
- *
- */
-
-#ifndef opcode_tilegx_h
-#define opcode_tilegx_h
-
-#include <arch/opcode.h>
-
-
-enum
-{
- TILEGX_MAX_OPERANDS = 4 /* bfexts */
-};
-
-typedef enum
-{
- TILEGX_OPC_BPT,
- TILEGX_OPC_INFO,
- TILEGX_OPC_INFOL,
- TILEGX_OPC_MOVE,
- TILEGX_OPC_MOVEI,
- TILEGX_OPC_MOVELI,
- TILEGX_OPC_PREFETCH,
- TILEGX_OPC_PREFETCH_ADD_L1,
- TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
- TILEGX_OPC_PREFETCH_ADD_L2,
- TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
- TILEGX_OPC_PREFETCH_ADD_L3,
- TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
- TILEGX_OPC_PREFETCH_L1,
- TILEGX_OPC_PREFETCH_L1_FAULT,
- TILEGX_OPC_PREFETCH_L2,
- TILEGX_OPC_PREFETCH_L2_FAULT,
- TILEGX_OPC_PREFETCH_L3,
- TILEGX_OPC_PREFETCH_L3_FAULT,
- TILEGX_OPC_RAISE,
- TILEGX_OPC_ADD,
- TILEGX_OPC_ADDI,
- TILEGX_OPC_ADDLI,
- TILEGX_OPC_ADDX,
- TILEGX_OPC_ADDXI,
- TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXSC,
- TILEGX_OPC_AND,
- TILEGX_OPC_ANDI,
- TILEGX_OPC_BEQZ,
- TILEGX_OPC_BEQZT,
- TILEGX_OPC_BFEXTS,
- TILEGX_OPC_BFEXTU,
- TILEGX_OPC_BFINS,
- TILEGX_OPC_BGEZ,
- TILEGX_OPC_BGEZT,
- TILEGX_OPC_BGTZ,
- TILEGX_OPC_BGTZT,
- TILEGX_OPC_BLBC,
- TILEGX_OPC_BLBCT,
- TILEGX_OPC_BLBS,
- TILEGX_OPC_BLBST,
- TILEGX_OPC_BLEZ,
- TILEGX_OPC_BLEZT,
- TILEGX_OPC_BLTZ,
- TILEGX_OPC_BLTZT,
- TILEGX_OPC_BNEZ,
- TILEGX_OPC_BNEZT,
- TILEGX_OPC_CLZ,
- TILEGX_OPC_CMOVEQZ,
- TILEGX_OPC_CMOVNEZ,
- TILEGX_OPC_CMPEQ,
- TILEGX_OPC_CMPEQI,
- TILEGX_OPC_CMPEXCH,
- TILEGX_OPC_CMPEXCH4,
- TILEGX_OPC_CMPLES,
- TILEGX_OPC_CMPLEU,
- TILEGX_OPC_CMPLTS,
- TILEGX_OPC_CMPLTSI,
- TILEGX_OPC_CMPLTU,
- TILEGX_OPC_CMPLTUI,
- TILEGX_OPC_CMPNE,
- TILEGX_OPC_CMUL,
- TILEGX_OPC_CMULA,
- TILEGX_OPC_CMULAF,
- TILEGX_OPC_CMULF,
- TILEGX_OPC_CMULFR,
- TILEGX_OPC_CMULH,
- TILEGX_OPC_CMULHR,
- TILEGX_OPC_CRC32_32,
- TILEGX_OPC_CRC32_8,
- TILEGX_OPC_CTZ,
- TILEGX_OPC_DBLALIGN,
- TILEGX_OPC_DBLALIGN2,
- TILEGX_OPC_DBLALIGN4,
- TILEGX_OPC_DBLALIGN6,
- TILEGX_OPC_DRAIN,
- TILEGX_OPC_DTLBPR,
- TILEGX_OPC_EXCH,
- TILEGX_OPC_EXCH4,
- TILEGX_OPC_FDOUBLE_ADD_FLAGS,
- TILEGX_OPC_FDOUBLE_ADDSUB,
- TILEGX_OPC_FDOUBLE_MUL_FLAGS,
- TILEGX_OPC_FDOUBLE_PACK1,
- TILEGX_OPC_FDOUBLE_PACK2,
- TILEGX_OPC_FDOUBLE_SUB_FLAGS,
- TILEGX_OPC_FDOUBLE_UNPACK_MAX,
- TILEGX_OPC_FDOUBLE_UNPACK_MIN,
- TILEGX_OPC_FETCHADD,
- TILEGX_OPC_FETCHADD4,
- TILEGX_OPC_FETCHADDGEZ,
- TILEGX_OPC_FETCHADDGEZ4,
- TILEGX_OPC_FETCHAND,
- TILEGX_OPC_FETCHAND4,
- TILEGX_OPC_FETCHOR,
- TILEGX_OPC_FETCHOR4,
- TILEGX_OPC_FINV,
- TILEGX_OPC_FLUSH,
- TILEGX_OPC_FLUSHWB,
- TILEGX_OPC_FNOP,
- TILEGX_OPC_FSINGLE_ADD1,
- TILEGX_OPC_FSINGLE_ADDSUB2,
- TILEGX_OPC_FSINGLE_MUL1,
- TILEGX_OPC_FSINGLE_MUL2,
- TILEGX_OPC_FSINGLE_PACK1,
- TILEGX_OPC_FSINGLE_PACK2,
- TILEGX_OPC_FSINGLE_SUB1,
- TILEGX_OPC_ICOH,
- TILEGX_OPC_ILL,
- TILEGX_OPC_INV,
- TILEGX_OPC_IRET,
- TILEGX_OPC_J,
- TILEGX_OPC_JAL,
- TILEGX_OPC_JALR,
- TILEGX_OPC_JALRP,
- TILEGX_OPC_JR,
- TILEGX_OPC_JRP,
- TILEGX_OPC_LD,
- TILEGX_OPC_LD1S,
- TILEGX_OPC_LD1S_ADD,
- TILEGX_OPC_LD1U,
- TILEGX_OPC_LD1U_ADD,
- TILEGX_OPC_LD2S,
- TILEGX_OPC_LD2S_ADD,
- TILEGX_OPC_LD2U,
- TILEGX_OPC_LD2U_ADD,
- TILEGX_OPC_LD4S,
- TILEGX_OPC_LD4S_ADD,
- TILEGX_OPC_LD4U,
- TILEGX_OPC_LD4U_ADD,
- TILEGX_OPC_LD_ADD,
- TILEGX_OPC_LDNA,
- TILEGX_OPC_LDNA_ADD,
- TILEGX_OPC_LDNT,
- TILEGX_OPC_LDNT1S,
- TILEGX_OPC_LDNT1S_ADD,
- TILEGX_OPC_LDNT1U,
- TILEGX_OPC_LDNT1U_ADD,
- TILEGX_OPC_LDNT2S,
- TILEGX_OPC_LDNT2S_ADD,
- TILEGX_OPC_LDNT2U,
- TILEGX_OPC_LDNT2U_ADD,
- TILEGX_OPC_LDNT4S,
- TILEGX_OPC_LDNT4S_ADD,
- TILEGX_OPC_LDNT4U,
- TILEGX_OPC_LDNT4U_ADD,
- TILEGX_OPC_LDNT_ADD,
- TILEGX_OPC_LNK,
- TILEGX_OPC_MF,
- TILEGX_OPC_MFSPR,
- TILEGX_OPC_MM,
- TILEGX_OPC_MNZ,
- TILEGX_OPC_MTSPR,
- TILEGX_OPC_MUL_HS_HS,
- TILEGX_OPC_MUL_HS_HU,
- TILEGX_OPC_MUL_HS_LS,
- TILEGX_OPC_MUL_HS_LU,
- TILEGX_OPC_MUL_HU_HU,
- TILEGX_OPC_MUL_HU_LS,
- TILEGX_OPC_MUL_HU_LU,
- TILEGX_OPC_MUL_LS_LS,
- TILEGX_OPC_MUL_LS_LU,
- TILEGX_OPC_MUL_LU_LU,
- TILEGX_OPC_MULA_HS_HS,
- TILEGX_OPC_MULA_HS_HU,
- TILEGX_OPC_MULA_HS_LS,
- TILEGX_OPC_MULA_HS_LU,
- TILEGX_OPC_MULA_HU_HU,
- TILEGX_OPC_MULA_HU_LS,
- TILEGX_OPC_MULA_HU_LU,
- TILEGX_OPC_MULA_LS_LS,
- TILEGX_OPC_MULA_LS_LU,
- TILEGX_OPC_MULA_LU_LU,
- TILEGX_OPC_MULAX,
- TILEGX_OPC_MULX,
- TILEGX_OPC_MZ,
- TILEGX_OPC_NAP,
- TILEGX_OPC_NOP,
- TILEGX_OPC_NOR,
- TILEGX_OPC_OR,
- TILEGX_OPC_ORI,
- TILEGX_OPC_PCNT,
- TILEGX_OPC_REVBITS,
- TILEGX_OPC_REVBYTES,
- TILEGX_OPC_ROTL,
- TILEGX_OPC_ROTLI,
- TILEGX_OPC_SHL,
- TILEGX_OPC_SHL16INSLI,
- TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADDX,
- TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADDX,
- TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADDX,
- TILEGX_OPC_SHLI,
- TILEGX_OPC_SHLX,
- TILEGX_OPC_SHLXI,
- TILEGX_OPC_SHRS,
- TILEGX_OPC_SHRSI,
- TILEGX_OPC_SHRU,
- TILEGX_OPC_SHRUI,
- TILEGX_OPC_SHRUX,
- TILEGX_OPC_SHRUXI,
- TILEGX_OPC_SHUFFLEBYTES,
- TILEGX_OPC_ST,
- TILEGX_OPC_ST1,
- TILEGX_OPC_ST1_ADD,
- TILEGX_OPC_ST2,
- TILEGX_OPC_ST2_ADD,
- TILEGX_OPC_ST4,
- TILEGX_OPC_ST4_ADD,
- TILEGX_OPC_ST_ADD,
- TILEGX_OPC_STNT,
- TILEGX_OPC_STNT1,
- TILEGX_OPC_STNT1_ADD,
- TILEGX_OPC_STNT2,
- TILEGX_OPC_STNT2_ADD,
- TILEGX_OPC_STNT4,
- TILEGX_OPC_STNT4_ADD,
- TILEGX_OPC_STNT_ADD,
- TILEGX_OPC_SUB,
- TILEGX_OPC_SUBX,
- TILEGX_OPC_SUBXSC,
- TILEGX_OPC_SWINT0,
- TILEGX_OPC_SWINT1,
- TILEGX_OPC_SWINT2,
- TILEGX_OPC_SWINT3,
- TILEGX_OPC_TBLIDXB0,
- TILEGX_OPC_TBLIDXB1,
- TILEGX_OPC_TBLIDXB2,
- TILEGX_OPC_TBLIDXB3,
- TILEGX_OPC_V1ADD,
- TILEGX_OPC_V1ADDI,
- TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADIFFU,
- TILEGX_OPC_V1AVGU,
- TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPEQI,
- TILEGX_OPC_V1CMPLES,
- TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTSI,
- TILEGX_OPC_V1CMPLTU,
- TILEGX_OPC_V1CMPLTUI,
- TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1DDOTPU,
- TILEGX_OPC_V1DDOTPUA,
- TILEGX_OPC_V1DDOTPUS,
- TILEGX_OPC_V1DDOTPUSA,
- TILEGX_OPC_V1DOTP,
- TILEGX_OPC_V1DOTPA,
- TILEGX_OPC_V1DOTPU,
- TILEGX_OPC_V1DOTPUA,
- TILEGX_OPC_V1DOTPUS,
- TILEGX_OPC_V1DOTPUSA,
- TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_L,
- TILEGX_OPC_V1MAXU,
- TILEGX_OPC_V1MAXUI,
- TILEGX_OPC_V1MINU,
- TILEGX_OPC_V1MINUI,
- TILEGX_OPC_V1MNZ,
- TILEGX_OPC_V1MULTU,
- TILEGX_OPC_V1MULU,
- TILEGX_OPC_V1MULUS,
- TILEGX_OPC_V1MZ,
- TILEGX_OPC_V1SADAU,
- TILEGX_OPC_V1SADU,
- TILEGX_OPC_V1SHL,
- TILEGX_OPC_V1SHLI,
- TILEGX_OPC_V1SHRS,
- TILEGX_OPC_V1SHRSI,
- TILEGX_OPC_V1SHRU,
- TILEGX_OPC_V1SHRUI,
- TILEGX_OPC_V1SUB,
- TILEGX_OPC_V1SUBUC,
- TILEGX_OPC_V2ADD,
- TILEGX_OPC_V2ADDI,
- TILEGX_OPC_V2ADDSC,
- TILEGX_OPC_V2ADIFFS,
- TILEGX_OPC_V2AVGS,
- TILEGX_OPC_V2CMPEQ,
- TILEGX_OPC_V2CMPEQI,
- TILEGX_OPC_V2CMPLES,
- TILEGX_OPC_V2CMPLEU,
- TILEGX_OPC_V2CMPLTS,
- TILEGX_OPC_V2CMPLTSI,
- TILEGX_OPC_V2CMPLTU,
- TILEGX_OPC_V2CMPLTUI,
- TILEGX_OPC_V2CMPNE,
- TILEGX_OPC_V2DOTP,
- TILEGX_OPC_V2DOTPA,
- TILEGX_OPC_V2INT_H,
- TILEGX_OPC_V2INT_L,
- TILEGX_OPC_V2MAXS,
- TILEGX_OPC_V2MAXSI,
- TILEGX_OPC_V2MINS,
- TILEGX_OPC_V2MINSI,
- TILEGX_OPC_V2MNZ,
- TILEGX_OPC_V2MULFSC,
- TILEGX_OPC_V2MULS,
- TILEGX_OPC_V2MULTS,
- TILEGX_OPC_V2MZ,
- TILEGX_OPC_V2PACKH,
- TILEGX_OPC_V2PACKL,
- TILEGX_OPC_V2PACKUC,
- TILEGX_OPC_V2SADAS,
- TILEGX_OPC_V2SADAU,
- TILEGX_OPC_V2SADS,
- TILEGX_OPC_V2SADU,
- TILEGX_OPC_V2SHL,
- TILEGX_OPC_V2SHLI,
- TILEGX_OPC_V2SHLSC,
- TILEGX_OPC_V2SHRS,
- TILEGX_OPC_V2SHRSI,
- TILEGX_OPC_V2SHRU,
- TILEGX_OPC_V2SHRUI,
- TILEGX_OPC_V2SUB,
- TILEGX_OPC_V2SUBSC,
- TILEGX_OPC_V4ADD,
- TILEGX_OPC_V4ADDSC,
- TILEGX_OPC_V4INT_H,
- TILEGX_OPC_V4INT_L,
- TILEGX_OPC_V4PACKSC,
- TILEGX_OPC_V4SHL,
- TILEGX_OPC_V4SHLSC,
- TILEGX_OPC_V4SHRS,
- TILEGX_OPC_V4SHRU,
- TILEGX_OPC_V4SUB,
- TILEGX_OPC_V4SUBSC,
- TILEGX_OPC_WH64,
- TILEGX_OPC_XOR,
- TILEGX_OPC_XORI,
- TILEGX_OPC_NONE
-} tilegx_mnemonic;
-
-
-
-typedef enum
-{
- TILEGX_PIPELINE_X0,
- TILEGX_PIPELINE_X1,
- TILEGX_PIPELINE_Y0,
- TILEGX_PIPELINE_Y1,
- TILEGX_PIPELINE_Y2,
-} tilegx_pipeline;
-
-#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1)
-
-typedef enum
-{
- TILEGX_OP_TYPE_REGISTER,
- TILEGX_OP_TYPE_IMMEDIATE,
- TILEGX_OP_TYPE_ADDRESS,
- TILEGX_OP_TYPE_SPR
-} tilegx_operand_type;
-
-struct tilegx_operand
-{
- /* Is this operand a register, immediate or address? */
- tilegx_operand_type type;
-
- /* The default relocation type for this operand. */
- signed int default_reloc : 16;
-
- /* How many bits is this value? (used for range checking) */
- unsigned int num_bits : 5;
-
- /* Is the value signed? (used for range checking) */
- unsigned int is_signed : 1;
-
- /* Is this operand a source register? */
- unsigned int is_src_reg : 1;
-
- /* Is this operand written? (i.e. is it a destination register) */
- unsigned int is_dest_reg : 1;
-
- /* Is this operand PC-relative? */
- unsigned int is_pc_relative : 1;
-
- /* By how many bits do we right shift the value before inserting? */
- unsigned int rightshift : 2;
-
- /* Return the bits for this operand to be ORed into an existing bundle. */
- tilegx_bundle_bits (*insert) (int op);
-
- /* Extract this operand and return it. */
- unsigned int (*extract) (tilegx_bundle_bits bundle);
-};
-
-
-extern const struct tilegx_operand tilegx_operands[];
-
-/* One finite-state machine per pipe for rapid instruction decoding. */
-extern const unsigned short * const
-tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];
-
-
-struct tilegx_opcode
-{
- /* The opcode mnemonic, e.g. "add" */
- const char *name;
-
- /* The enum value for this mnemonic. */
- tilegx_mnemonic mnemonic;
-
- /* A bit mask of which of the five pipes this instruction
- is compatible with:
- X0 0x01
- X1 0x02
- Y0 0x04
- Y1 0x08
- Y2 0x10 */
- unsigned char pipes;
-
- /* How many operands are there? */
- unsigned char num_operands;
-
- /* Which register does this write implicitly, or TREG_ZERO if none? */
- unsigned char implicitly_written_register;
-
- /* Can this be bundled with other instructions (almost always true). */
- unsigned char can_bundle;
-
- /* The description of the operands. Each of these is an
- * index into the tilegx_operands[] table. */
- unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];
-
-};
-
-extern const struct tilegx_opcode tilegx_opcodes[];
-
-/* Used for non-textual disassembly into structs. */
-struct tilegx_decoded_instruction
-{
- const struct tilegx_opcode *opcode;
- const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
- long long operand_values[TILEGX_MAX_OPERANDS];
-};
-
-
-/* Disassemble a bundle into a struct for machine processing. */
-extern int parse_insn_tilegx(tilegx_bundle_bits bits,
- unsigned long long pc,
- struct tilegx_decoded_instruction
- decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]);
-
-
-
-#endif /* opcode_tilegx_h */
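
parse_insn_tilegx() above fills one decoded slot per occupied pipe in the
bundle. A hedged sketch of a caller (assuming, as the port's users of this
API suggest, that the return value is the number of decoded slots;
TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE comes from <arch/opcode.h>):

    static void dump_bundle(tilegx_bundle_bits bits, unsigned long long pc)
    {
    	struct tilegx_decoded_instruction
    		decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
    	int i, n = parse_insn_tilegx(bits, pc, decoded);

    	for (i = 0; i < n; i++)
    		pr_info("%s\n", decoded[i].opcode->name);
    }
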
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h
deleted file mode 100644
index dc987d53e2a9..000000000000
--- a/arch/tile/include/asm/timex.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_TIMEX_H
-#define _ASM_TILE_TIMEX_H
-
-/*
- * This rate should be a multiple of the possible HZ values (100, 250, 1000)
- * and a fraction of the possible hardware timer frequencies. Our timer
- * frequency is highly tunable but also quite precise, so for the primary use
- * of this value (setting ACT_HZ from HZ) we just pick a value that causes
- * ACT_HZ to be set to HZ. We make the value somewhat large just to be
- * more robust in case someone tries out a new value of HZ.
- */
-#define CLOCK_TICK_RATE 1000000
-
-typedef unsigned long long cycles_t;
-
-#if CHIP_HAS_SPLIT_CYCLE()
-cycles_t get_cycles(void);
-#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
-#else
-static inline cycles_t get_cycles(void)
-{
- return __insn_mfspr(SPR_CYCLE);
-}
-#define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */
-#endif
-
-cycles_t get_clock_rate(void);
-
-/* Convert nanoseconds to core clock cycles. */
-cycles_t ns2cycles(unsigned long nsecs);
-
-/* Called at cpu initialization to set some low-level constants. */
-void setup_clock(void);
-
-/* Called at cpu initialization to start the tile-timer clock device. */
-void setup_tile_timer(void);
-
-#endif /* _ASM_TILE_TIMEX_H */
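
ns2cycles() above is a rate conversion: cycles = nsecs * clock_rate / 10^9.
A naive sketch of the computation (illustrative; a production version must
scale more carefully to avoid the 64-bit overflow this form can hit for
large nsecs):

    static cycles_t ns2cycles_naive(unsigned long nsecs)
    {
    	/* Overflows once nsecs * clock_rate exceeds 2^64. */
    	return (get_clock_rate() * (cycles_t)nsecs) / 1000000000ULL;
    }
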
diff --git a/arch/tile/include/asm/tlb.h b/arch/tile/include/asm/tlb.h
deleted file mode 100644
index 4a891a1a8df3..000000000000
--- a/arch/tile/include/asm/tlb.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_TLB_H
-#define _ASM_TILE_TLB_H
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-#include <asm-generic/tlb.h>
-
-#endif /* _ASM_TILE_TLB_H */
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h
deleted file mode 100644
index dcf91b25a1e5..000000000000
--- a/arch/tile/include/asm/tlbflush.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_TLBFLUSH_H
-#define _ASM_TILE_TLBFLUSH_H
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <asm/cacheflush.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-/*
- * Rather than associating each mm with its own ASID, we just use
- * ASIDs to allow us to lazily flush the TLB when we switch mms.
- * This way we only have to do an actual TLB flush on mm switch
- * every time we wrap ASIDs, not every single time we switch.
- *
- * FIXME: We might improve performance by keeping ASIDs around
- * properly, though since the hypervisor direct-maps VAs to TSB
- * entries, we're likely to have lost at least the executable page
- * mappings by the time we switch back to the original mm.
- */
-DECLARE_PER_CPU(int, current_asid);
-
-/* The hypervisor tells us what ASIDs are available to us. */
-extern int min_asid, max_asid;
-
-/* Pass as vma pointer for non-executable mapping, if no vma available. */
-#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)
-
-/* Flush a single user page on this cpu. */
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr,
- unsigned long page_size)
-{
- int rc = hv_flush_page(addr, page_size);
- if (rc < 0)
- panic("hv_flush_page(%#lx,%#lx) failed: %d",
- addr, page_size, rc);
- if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
- __flush_icache();
-}
-
-/* Flush range of user pages on this cpu. */
-static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
- unsigned long addr,
- unsigned long page_size,
- unsigned long len)
-{
- int rc = hv_flush_pages(addr, page_size, len);
- if (rc < 0)
- panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d",
- addr, page_size, len, rc);
- if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
- __flush_icache();
-}
-
-/* Flush all user pages on this cpu. */
-static inline void local_flush_tlb(void)
-{
- int rc = hv_flush_all(1); /* preserve global mappings */
- if (rc < 0)
- panic("hv_flush_all(1) failed: %d", rc);
- __flush_icache();
-}
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-static inline void local_flush_tlb_all(void)
-{
- int i;
- for (i = 0; ; ++i) {
- HV_VirtAddrRange r = hv_inquire_virtual(i);
- if (r.size == 0)
- break;
- local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size);
- local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size);
- }
-}
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
- *
- * Here (as in vm_area_struct), "end" means the first byte after
- * our end address.
- */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_page_mm(struct vm_area_struct *,
- struct mm_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *,
- unsigned long start, unsigned long end);
-
-#define flush_tlb() flush_tlb_current_task()
-
-#endif /* _ASM_TILE_TLBFLUSH_H */
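
The ASID scheme described at the top of this header defers full flushes to
ASID wrap. A hypothetical sketch of that wrap logic (not the deleted
implementation; only the names declared above are real):

    static void pick_new_asid(void)
    {
    	int asid = __this_cpu_read(current_asid) + 1;

    	if (asid > max_asid) {		/* wrapped: recycle ASIDs */
    		asid = min_asid;
    		local_flush_tlb();	/* one flush per wrap, not per switch */
    	}
    	__this_cpu_write(current_asid, asid);
    }
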
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
deleted file mode 100644
index 635a0a4596f0..000000000000
--- a/arch/tile/include/asm/topology.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_TOPOLOGY_H
-#define _ASM_TILE_TOPOLOGY_H
-
-#ifdef CONFIG_NUMA
-
-#include <linux/cpumask.h>
-
-/* Mappings between logical cpu number and node number. */
-extern struct cpumask node_2_cpu_mask[];
-extern char cpu_2_node[];
-
-/* Returns the number of the node containing CPU 'cpu'. */
-static inline int cpu_to_node(int cpu)
-{
- return cpu_2_node[cpu];
-}
-
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
- return &node_2_cpu_mask[node];
-}
-
-/* For now, use numa node -1 for global allocation. */
-#define pcibus_to_node(bus) ((void)(bus), -1)
-
-#endif /* CONFIG_NUMA */
-
-#include <asm-generic/topology.h>
-
-#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu) ((void)(cpu), 0)
-#define topology_core_id(cpu) (cpu)
-#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
-#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
-#endif
-
-#endif /* _ASM_TILE_TOPOLOGY_H */
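
A short usage sketch of the NUMA helpers above (hypothetical caller):

    static void log_node_siblings(int cpu)
    {
    	int node = cpu_to_node(cpu);
    	int sibling;

    	for_each_cpu(sibling, cpumask_of_node(node))
    		pr_info("cpu %d is on node %d\n", sibling, node);
    }
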
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
deleted file mode 100644
index 11c82270c1f5..000000000000
--- a/arch/tile/include/asm/traps.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_TRAPS_H
-#define _ASM_TILE_TRAPS_H
-
-#ifndef __ASSEMBLY__
-#include <arch/chip.h>
-
-/* mm/fault.c */
-void do_page_fault(struct pt_regs *, int fault_num,
- unsigned long address, unsigned long write);
-#if CHIP_HAS_TILE_DMA()
-void do_async_page_fault(struct pt_regs *);
-#endif
-
-#ifndef __tilegx__
-/*
- * We return this structure in registers to avoid having to write
- * additional save/restore code in the intvec.S caller.
- */
-struct intvec_state {
- void *handler;
- unsigned long vecnum;
- unsigned long fault_num;
- unsigned long info;
- unsigned long retval;
-};
-struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
- unsigned long address,
- unsigned long info);
-#endif
-
-/* kernel/traps.c */
-void do_trap(struct pt_regs *, int fault_num, unsigned long reason);
-void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
-
-/* kernel/time.c */
-void do_timer_interrupt(struct pt_regs *, int fault_num);
-
-/* kernel/messaging.c */
-void hv_message_intr(struct pt_regs *, int intnum);
-
-#define TILE_NMI_DUMP_STACK 1 /* Dump stack for sysrq+'l' */
-
-/* kernel/process.c */
-void do_nmi_dump_stack(struct pt_regs *regs);
-
-/* kernel/traps.c */
-void do_nmi(struct pt_regs *, int fault_num, unsigned long reason);
-
-/* kernel/irq.c */
-void tile_dev_intr(struct pt_regs *, int intnum);
-
-#ifdef CONFIG_HARDWALL
-/* kernel/hardwall.c */
-void do_hardwall_trap(struct pt_regs *, int fault_num);
-#endif
-
-/* kernel/ptrace.c */
-void do_breakpoint(struct pt_regs *, int fault_num);
-
-
-#ifdef __tilegx__
-/* kernel/single_step.c */
-void gx_singlestep_handle(struct pt_regs *, int fault_num);
-
-/* kernel/intvec_64.S */
-void fill_ra_stack(void);
-
-/* Handle unalign data fixup. */
-extern void do_unaligned(struct pt_regs *regs, int vecnum);
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __tilegx__
-/* 128 byte JIT per unalign fixup. */
-#define UNALIGN_JIT_SHIFT 7
-#endif
-
-#endif /* _ASM_TILE_TRAPS_H */
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
deleted file mode 100644
index cb4fbe7e4f88..000000000000
--- a/arch/tile/include/asm/uaccess.h
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_UACCESS_H
-#define _ASM_TILE_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/mm.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
-
-#define KERNEL_DS MAKE_MM_SEG(-1UL)
-#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
-#ifndef __tilegx__
-/*
- * We could allow mapping all 16 MB at 0xfc000000, but we set up a
- * special hack in arch_setup_additional_pages() to auto-create a mapping
- * for the first 16 KB, and it would seem strange to have different
- * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
- */
-static inline int is_arch_mappable_range(unsigned long addr,
- unsigned long size)
-{
- return (addr >= MEM_USER_INTRPT &&
- addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
- size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
-}
-#define is_arch_mappable_range is_arch_mappable_range
-#else
-#define is_arch_mappable_range(addr, size) 0
-#endif
-
-/*
- * Note that using this definition ignores is_arch_mappable_range(),
- * so on tilepro code that uses user_addr_max() is constrained not
- * to reference the tilepro user-interrupt region.
- */
-#define user_addr_max() (current_thread_info()->addr_limit.seg)
-
-/*
- * Test whether a block of memory is a valid user space address.
- * Returns 0 if the range is valid, nonzero otherwise.
- */
-int __range_ok(unsigned long addr, unsigned long size);
-
-/**
- * access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- * to write to a block, it is always safe to read from it.
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
- * Note that, depending on architecture, this function probably just
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
-#define access_ok(type, addr, size) ({ \
- __chk_user_ptr(addr); \
- likely(__range_ok((unsigned long)(addr), (size)) == 0); \
-})
-
-#include <asm/extable.h>
-
-/*
- * This is a type: either unsigned long, if the argument fits into
- * that type, or otherwise unsigned long long.
- */
-#define __inttype(x) \
- __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-
-/*
- * Support macros for __get_user().
- * Note that __get_user() and __put_user() assume proper alignment.
- */
-
-#ifdef __LP64__
-#define _ASM_PTR ".quad"
-#define _ASM_ALIGN ".align 8"
-#else
-#define _ASM_PTR ".long"
-#define _ASM_ALIGN ".align 4"
-#endif
-
-#define __get_user_asm(OP, x, ptr, ret) \
- asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "0: { movei %1, 0; movei %0, %3 }\n" \
- "j 9f\n" \
- ".section __ex_table,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 1b, 0b\n" \
- ".popsection\n" \
- "9:" \
- : "=r" (ret), "=r" (x) \
- : "r" (ptr), "i" (-EFAULT))
-
-#ifdef __tilegx__
-#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
-#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
-#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
-#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
-#else
-#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
-#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
-#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
-#ifdef __LITTLE_ENDIAN
-#define __lo32(a, b) a
-#define __hi32(a, b) b
-#else
-#define __lo32(a, b) b
-#define __hi32(a, b) a
-#endif
-#define __get_user_8(x, ptr, ret) \
- ({ \
- unsigned int __a, __b; \
- asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n" \
- "2: { lw %2, %2; movei %0, 0 }\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "0: { movei %1, 0; movei %2, 0 }\n" \
- "{ movei %0, %4; j 9f }\n" \
- ".section __ex_table,\"a\"\n" \
- ".align 4\n" \
- ".word 1b, 0b\n" \
- ".word 2b, 0b\n" \
- ".popsection\n" \
- "9:" \
- : "=r" (ret), "=r" (__a), "=&r" (__b) \
- : "r" (ptr), "i" (-EFAULT)); \
- (x) = (__force __typeof(x))(__inttype(x)) \
- (((u64)__hi32(__a, __b) << 32) | \
- __lo32(__a, __b)); \
- })
-#endif
-
-extern int __get_user_bad(void)
- __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
-
-/**
- * __get_user: - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- */
-#define __get_user(x, ptr) \
- ({ \
- int __ret; \
- typeof(x) _x; \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: __get_user_1(_x, ptr, __ret); break; \
- case 2: __get_user_2(_x, ptr, __ret); break; \
- case 4: __get_user_4(_x, ptr, __ret); break; \
- case 8: __get_user_8(_x, ptr, __ret); break; \
- default: __ret = __get_user_bad(); break; \
- } \
- (x) = (typeof(*(ptr))) _x; \
- __ret; \
- })
-
-/* Support macros for __put_user(). */
-
-#define __put_user_asm(OP, x, ptr, ret) \
- asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "0: { movei %0, %3; j 9f }\n" \
- ".section __ex_table,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 1b, 0b\n" \
- ".popsection\n" \
- "9:" \
- : "=r" (ret) \
- : "r" (ptr), "r" (x), "i" (-EFAULT))
-
-#ifdef __tilegx__
-#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
-#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
-#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
-#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
-#else
-#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
-#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
-#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
-#define __put_user_8(x, ptr, ret) \
- ({ \
- u64 __x = (__force __inttype(x))(x); \
- int __lo = (int) __x, __hi = (int) (__x >> 32); \
- asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \
- "2: { sw %0, %3; movei %0, 0 }\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "0: { movei %0, %4; j 9f }\n" \
- ".section __ex_table,\"a\"\n" \
- ".align 4\n" \
- ".word 1b, 0b\n" \
- ".word 2b, 0b\n" \
- ".popsection\n" \
- "9:" \
- : "=&r" (ret) \
- : "r" (ptr), "r" (__lo32(__lo, __hi)), \
- "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
- })
-#endif
-
-extern int __put_user_bad(void)
- __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
-
-/**
- * __put_user: - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user(x, ptr) \
-({ \
- int __ret; \
- typeof(*(ptr)) _x = (x); \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: __put_user_1(_x, ptr, __ret); break; \
- case 2: __put_user_2(_x, ptr, __ret); break; \
- case 4: __put_user_4(_x, ptr, __ret); break; \
- case 8: __put_user_8(_x, ptr, __ret); break; \
- default: __ret = __put_user_bad(); break; \
- } \
- __ret; \
-})
-
-/*
- * The versions of get_user and put_user without initial underscores
- * check the address of their arguments to make sure they are not
- * in kernel space.
- */
-#define put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __user *__Pu_addr = (ptr); \
- access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ? \
- __put_user((x), (__Pu_addr)) : \
- -EFAULT; \
-})
-
-#define get_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) const __user *__Gu_addr = (ptr); \
- access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ? \
- __get_user((x), (__Gu_addr)) : \
- ((x) = 0, -EFAULT); \
-})
-
-extern unsigned long __must_check
-raw_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check
-raw_copy_from_user(void *to, const void __user *from, unsigned long n);
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#ifdef __tilegx__
-extern unsigned long raw_copy_in_user(
- void __user *to, const void __user *from, unsigned long n);
-#endif
-
-
-extern long strnlen_user(const char __user *str, long n);
-extern long strncpy_from_user(char *dst, const char __user *src, long);
-
-/**
- * clear_user: - Zero a block of memory in user space.
- * @mem: Destination address, in user space.
- * @len: Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
-static inline unsigned long __must_check __clear_user(
- void __user *mem, unsigned long len)
-{
- might_fault();
- return clear_user_asm(mem, len);
-}
-static inline unsigned long __must_check clear_user(
- void __user *mem, unsigned long len)
-{
- if (access_ok(VERIFY_WRITE, mem, len))
- return __clear_user(mem, len);
- return len;
-}
-
-/**
- * flush_user: - Flush a block of memory in user space from cache.
- * @mem: Destination address, in user space.
- * @len: Number of bytes to flush.
- *
- * Returns number of bytes that could not be flushed.
- * On success, this will be zero.
- */
-extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
-static inline unsigned long __must_check __flush_user(
- void __user *mem, unsigned long len)
-{
- int retval;
-
- might_fault();
- retval = flush_user_asm(mem, len);
- mb_incoherent();
- return retval;
-}
-
-static inline unsigned long __must_check flush_user(
- void __user *mem, unsigned long len)
-{
- if (access_ok(VERIFY_WRITE, mem, len))
- return __flush_user(mem, len);
- return len;
-}
-
-/**
- * finv_user: - Flush-inval a block of memory in user space from cache.
- * @mem: Destination address, in user space.
- * @len: Number of bytes to invalidate.
- *
- * Returns number of bytes that could not be flush-invalidated.
- * On success, this will be zero.
- */
-extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
-static inline unsigned long __must_check __finv_user(
- void __user *mem, unsigned long len)
-{
- int retval;
-
- might_fault();
- retval = finv_user_asm(mem, len);
- mb_incoherent();
- return retval;
-}
-static inline unsigned long __must_check finv_user(
- void __user *mem, unsigned long len)
-{
- if (access_ok(VERIFY_WRITE, mem, len))
- return __finv_user(mem, len);
- return len;
-}
-
-#endif /* _ASM_TILE_UACCESS_H */
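
The intended calling pattern for the accessors above: get_user()/put_user()
perform the access_ok() check themselves, while the __-prefixed variants skip
it and require the caller to have checked. A minimal sketch:

    static long read_user_flag(int __user *uptr, int *out)
    {
    	int v;

    	if (get_user(v, uptr))	/* access_ok() + __get_user() */
    		return -EFAULT;	/* v was zeroed on fault */
    	*out = v;
    	return 0;
    }
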
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h
deleted file mode 100644
index 5a58a0d11449..000000000000
--- a/arch/tile/include/asm/unaligned.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_UNALIGNED_H
-#define _ASM_TILE_UNALIGNED_H
-
-/*
- * We could implement faster get_unaligned_[be/le]64 using the ldna
- * instruction on tilegx; however, we need to either copy all of the
- * other generic functions to here (which is pretty ugly) or else
- * modify both the generic code and other arch code to allow arch
- * specific unaligned data access functions. Given these functions
- * are not often called, we'll stick with the generic version.
- */
-#include <asm-generic/unaligned.h>
-
-/*
- * Is the kernel doing fixups of unaligned accesses? If <0, no kernel
- * intervention occurs and SIGBUS is delivered with no data address
- * info. If 0, the kernel single-steps the instruction to discover
- * the data address to provide with the SIGBUS. If 1, the kernel does
- * a fixup.
- */
-extern int unaligned_fixup;
-
-/* Is the kernel printing on each unaligned fixup? */
-extern int unaligned_printk;
-
-/* Number of unaligned fixups performed */
-extern unsigned int unaligned_fixup_count;
-
-#endif /* _ASM_TILE_UNALIGNED_H */
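
The unaligned_fixup tri-state above, restated as code (the three helpers are
hypothetical names for exposition; the deleted trap-handler logic is more
involved):

    static void handle_unaligned_policy(void)
    {
    	if (unaligned_fixup < 0)
    		deliver_sigbus_no_addr();	/* SIGBUS, no address info */
    	else if (unaligned_fixup == 0)
    		single_step_for_addr();		/* then SIGBUS with address */
    	else
    		emulate_unaligned_access();	/* kernel fixes it up */
    }
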
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
deleted file mode 100644
index 940831fe9e94..000000000000
--- a/arch/tile/include/asm/unistd.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-/* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
-#ifdef CONFIG_COMPAT
-#define __ARCH_WANT_SYS_LLSEEK
-#endif
-#define __ARCH_WANT_SYS_NEWFSTATAT
-#define __ARCH_WANT_SYS_CLONE
-#include <uapi/asm/unistd.h>
diff --git a/arch/tile/include/asm/user.h b/arch/tile/include/asm/user.h
deleted file mode 100644
index cbc8b4d5a5ce..000000000000
--- a/arch/tile/include/asm/user.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef _ASM_TILE_USER_H
-#define _ASM_TILE_USER_H
-
-/* This header is for a.out file formats, which TILE does not support. */
-
-#endif /* _ASM_TILE_USER_H */
diff --git a/arch/tile/include/asm/vdso.h b/arch/tile/include/asm/vdso.h
deleted file mode 100644
index 9b069692153f..000000000000
--- a/arch/tile/include/asm/vdso.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __TILE_VDSO_H__
-#define __TILE_VDSO_H__
-
-#include <linux/seqlock.h>
-#include <linux/types.h>
-
-/*
- * Note about the vdso_data structure:
- *
- * NEVER USE IT IN USERSPACE CODE DIRECTLY.  The layout of the
- * structure is supposed to be known only to the functions in the vdso
- * itself and may change without notice.
- */
-
-struct vdso_data {
- seqcount_t tz_seq; /* Timezone seqlock */
- seqcount_t tb_seq; /* Timebase seqlock */
- __u64 cycle_last; /* TOD clock for xtime */
- __u64 mask; /* Cycle mask */
- __u32 mult; /* Cycle to nanosecond multiplier */
- __u32 shift; /* Cycle to nanosecond divisor (power of two) */
- __u64 wall_time_sec;
- __u64 wall_time_snsec;
- __u64 monotonic_time_sec;
- __u64 monotonic_time_snsec;
- __u64 wall_time_coarse_sec;
- __u64 wall_time_coarse_nsec;
- __u64 monotonic_time_coarse_sec;
- __u64 monotonic_time_coarse_nsec;
- __u32 tz_minuteswest; /* Minutes west of Greenwich */
- __u32 tz_dsttime; /* Type of dst correction */
-};
-
-extern struct vdso_data *vdso_data;
-
-/* __vdso_rt_sigreturn is defined with the addresses in the vdso page. */
-extern void __vdso_rt_sigreturn(void);
-
-extern int setup_vdso_pages(void);
-
-#endif /* __TILE_VDSO_H__ */
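
The tb_seq/tz_seq fields imply the standard seqcount reader protocol from <linux/seqlock.h>. A hedged sketch of how a vDSO-side reader would consume one field (the actual tile vgettimeofday implementation lives elsewhere in this patch):

    /* Sketch only: retry the read while a writer holds tb_seq. */
    static u64 vdso_read_wall_sec(const struct vdso_data *d)
    {
            unsigned int seq;
            u64 sec;

            do {
                    seq = read_seqcount_begin(&d->tb_seq);
                    sec = d->wall_time_sec;
            } while (read_seqcount_retry(&d->tb_seq, seq));
            return sec;
    }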
diff --git a/arch/tile/include/asm/vga.h b/arch/tile/include/asm/vga.h
deleted file mode 100644
index 7b46e754d611..000000000000
--- a/arch/tile/include/asm/vga.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Access to VGA videoram.
- */
-
-#ifndef _ASM_TILE_VGA_H
-#define _ASM_TILE_VGA_H
-
-#include <asm/io.h>
-
-#define VT_BUF_HAVE_RW
-
-static inline void scr_writew(u16 val, volatile u16 *addr)
-{
- __raw_writew(val, (volatile u16 __iomem *) addr);
-}
-
-static inline u16 scr_readw(volatile const u16 *addr)
-{
- return __raw_readw((volatile const u16 __iomem *) addr);
-}
-
-#define vga_readb(a) readb((u8 __iomem *)(a))
-#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))
-
-#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s))
-
-#endif
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h
deleted file mode 100644
index 2f2515867760..000000000000
--- a/arch/tile/include/asm/word-at-a-time.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_WORD_AT_A_TIME_H
-#define _ASM_WORD_AT_A_TIME_H
-
-#include <asm/byteorder.h>
-
-struct word_at_a_time { /* unused */ };
-#define WORD_AT_A_TIME_CONSTANTS {}
-
-/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
-static inline unsigned long has_zero(unsigned long val, unsigned long *data,
- const struct word_at_a_time *c)
-{
-#ifdef __tilegx__
- unsigned long mask = __insn_v1cmpeqi(val, 0);
-#else /* tilepro */
- unsigned long mask = __insn_seqib(val, 0);
-#endif
- *data = mask;
- return mask;
-}
-
-/* These operations are both nops. */
-#define prep_zero_mask(val, data, c) (data)
-#define create_zero_mask(data) (data)
-
-/* And this operation just depends on endianness. */
-static inline long find_zero(unsigned long mask)
-{
-#ifdef __BIG_ENDIAN
- return __builtin_clzl(mask) >> 3;
-#else
- return __builtin_ctzl(mask) >> 3;
-#endif
-}
-
-#ifdef __BIG_ENDIAN
-#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
-#else
-#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
-#endif
-
-#endif /* _ASM_WORD_AT_A_TIME_H */
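
For readers without the tile SIMD instructions, the same zero-byte search can be written with the classic portable bit trick. A self-contained little-endian userspace sketch (find_zero_byte is ours, not a kernel helper):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Portable stand-in for has_zero() + find_zero(): the subtract/mask
     * trick sets bit 0x80 at (and possibly after) the first zero byte;
     * ctz of the mask then yields the byte index. */
    static long find_zero_byte(uint64_t word)
    {
            uint64_t mask = (word - 0x0101010101010101ULL) & ~word &
                            0x8080808080808080ULL;

            if (!mask)
                    return -1;
            return __builtin_ctzll(mask) >> 3;
    }

    int main(void)
    {
            const char buf[8] = { 'a', 'b', 'c', 0, 'x', 'y', 'z', 'w' };
            uint64_t w;

            memcpy(&w, buf, sizeof(w));
            printf("first zero byte at index %ld\n", find_zero_byte(w));
            return 0;   /* prints 3 */
    }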
diff --git a/arch/tile/include/gxio/common.h b/arch/tile/include/gxio/common.h
deleted file mode 100644
index 724595a24d04..000000000000
--- a/arch/tile/include/gxio/common.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _GXIO_COMMON_H_
-#define _GXIO_COMMON_H_
-
-/*
- * Routines shared between the various GXIO device components.
- */
-
-#include <hv/iorpc.h>
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <linux/io.h>
-
-/* Define the standard gxio MMIO functions using kernel functions. */
-#define __gxio_mmio_read8(addr) readb(addr)
-#define __gxio_mmio_read16(addr) readw(addr)
-#define __gxio_mmio_read32(addr) readl(addr)
-#define __gxio_mmio_read64(addr) readq(addr)
-#define __gxio_mmio_write8(addr, val) writeb((val), (addr))
-#define __gxio_mmio_write16(addr, val) writew((val), (addr))
-#define __gxio_mmio_write32(addr, val) writel((val), (addr))
-#define __gxio_mmio_write64(addr, val) writeq((val), (addr))
-#define __gxio_mmio_read(addr) __gxio_mmio_read64(addr)
-#define __gxio_mmio_write(addr, val) __gxio_mmio_write64((addr), (val))
-
-#endif /* !_GXIO_COMMON_H_ */
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
deleted file mode 100644
index c8fd47edba30..000000000000
--- a/arch/tile/include/gxio/dma_queue.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _GXIO_DMA_QUEUE_H_
-#define _GXIO_DMA_QUEUE_H_
-
-/*
- * DMA queue management APIs shared between TRIO and mPIPE.
- */
-
-#include <gxio/common.h>
-
-/* The credit counter lives in the high 32 bits. */
-#define DMA_QUEUE_CREDIT_SHIFT 32
-
-/*
- * State object that tracks a DMA queue's head and tail indices, as
- * well as the number of commands posted and completed. The
- * structure is accessed via a thread-safe, lock-free algorithm.
- */
-typedef struct {
- /*
- * Address of a MPIPE_EDMA_POST_REGION_VAL_t,
- * TRIO_PUSH_DMA_REGION_VAL_t, or TRIO_PULL_DMA_REGION_VAL_t
- * register.  These registers have identical encodings and provide
- * information about how many commands have been processed.
- */
- void *post_region_addr;
-
- /*
- * A lazily-updated count of how many edescs the hardware has
- * completed.
- */
- uint64_t hw_complete_count __attribute__ ((aligned(64)));
-
- /*
- * High 32 bits are a count of available egress command credits,
- * low 24 bits are the next egress "slot".
- */
- int64_t credits_and_next_index;
-
-} __gxio_dma_queue_t;
-
-/* Initialize a dma queue. */
-extern void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
- void *post_region_addr,
- unsigned int num_entries);
-
-/*
- * Update the "credits_and_next_index" and "hw_complete_count" fields
- * based on pending hardware completions. Note that some other thread
- * may have already done this and, importantly, may still be in the
- * process of updating "credits_and_next_index".
- */
-extern void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue);
-
-/* Wait for credits to become available. */
-extern int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
- int64_t modifier);
-
-/* Reserve slots in the queue, optionally waiting for slots to become
- * available, and optionally returning a "completion_slot" suitable for
- * direct comparison to "hw_complete_count".
- */
-static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
- unsigned int num, bool wait,
- bool completion)
-{
- uint64_t slot;
-
- /*
- * Try to reserve 'num' egress command slots. We do this by
- * constructing a constant that subtracts N credits and adds N to
- * the index, and using fetchaddgez to only apply it if the credits
- * count doesn't go negative.
- */
- int64_t modifier = (((int64_t)(-num)) << DMA_QUEUE_CREDIT_SHIFT) | num;
- int64_t old =
- __insn_fetchaddgez(&dma_queue->credits_and_next_index,
- modifier);
-
- if (unlikely(old + modifier < 0)) {
- /*
- * We're out of credits. Try once to get more by checking for
- * completed egress commands. If that fails, wait or fail.
- */
- __gxio_dma_queue_update_credits(dma_queue);
- old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
- modifier);
- if (old + modifier < 0) {
- if (wait)
- old = __gxio_dma_queue_wait_for_credits
- (dma_queue, modifier);
- else
- return GXIO_ERR_DMA_CREDITS;
- }
- }
-
- /* The bottom 24 bits of old encode the "slot". */
- slot = (old & 0xffffff);
-
- if (completion) {
- /*
- * A "completion_slot" is a "slot" which can be compared to
- * "hw_complete_count" at any time in the future. To convert
- * "slot" into a "completion_slot", we access "hw_complete_count"
- * once (knowing that we have reserved a slot, so the value will
- * be essentially accurate), and combine its high 40 bits with
- * the 24 bit "slot", and handle "wrapping" by adding "1 << 24"
- * if the result is LESS than "hw_complete_count".
- */
- uint64_t complete;
- complete = READ_ONCE(dma_queue->hw_complete_count);
- slot |= (complete & 0xffffffffff000000);
- if (slot < complete)
- slot += 0x1000000;
- }
-
- /*
- * If any of the slots we just reserved is a multiple of 256, collect
- * some egress credits, update "hw_complete_count", and make sure the
- * index doesn't overflow into the credits.
- */
- if (unlikely(((old + num) & 0xff) < num)) {
- __gxio_dma_queue_update_credits(dma_queue);
-
- /* Make sure the index doesn't overflow into the credits. */
-#ifdef __BIG_ENDIAN__
- *(((uint8_t *)&dma_queue->credits_and_next_index) + 4) = 0;
-#else
- *(((uint8_t *)&dma_queue->credits_and_next_index) + 3) = 0;
-#endif
- }
-
- return slot;
-}
-
-/* Non-inlinable "__gxio_dma_queue_reserve(..., true)". */
-extern int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
- unsigned int num, int wait);
-
-/* Check whether a particular "completion slot" has completed.
- *
- * Note that this function requires a "completion slot", and thus
- * cannot be used with the result of any "reserve_fast" function.
- */
-extern int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
- int64_t completion_slot, int update);
-
-#endif /* !_GXIO_DMA_QUEUE_H_ */
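
A worked example of the credits_and_next_index packing that __gxio_dma_queue_reserve() manipulates; plain C arithmetic only, since the atomicity comes from the tile fetchaddgez instruction:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int num = 3;                   /* slots to reserve */
            int64_t state = (100LL << 32) | 7;      /* 100 credits, slot 7 */
            /* Same value the header builds: -num credits, +num index. */
            int64_t modifier = -((int64_t)num << 32) + num;
            int64_t old = state;                    /* fetchaddgez returns old */

            state += modifier;
            assert(old + modifier >= 0);            /* credits stayed >= 0 */
            assert((state >> 32) == 97);            /* 100 - 3 credits left */
            assert((state & 0xffffff) == 10);       /* next slot: 7 + 3 */
            assert((old & 0xffffff) == 7);          /* reservation starts at 7 */
            return 0;
    }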
diff --git a/arch/tile/include/gxio/iorpc_globals.h b/arch/tile/include/gxio/iorpc_globals.h
deleted file mode 100644
index 52c721f8dad9..000000000000
--- a/arch/tile/include/gxio/iorpc_globals.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#ifndef __IORPC_LINUX_RPC_H__
-#define __IORPC_LINUX_RPC_H__
-
-#include <hv/iorpc.h>
-
-#include <linux/string.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-
-#define IORPC_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
-#define IORPC_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
-#define IORPC_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
-#define IORPC_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-
-int __iorpc_arm_pollfd(int fd, int pollfd_cookie);
-
-int __iorpc_close_pollfd(int fd, int pollfd_cookie);
-
-int __iorpc_get_mmio_base(int fd, HV_PTE *base);
-
-int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size);
-
-#endif /* !__IORPC_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
deleted file mode 100644
index 4cda03de734f..000000000000
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#ifndef __GXIO_MPIPE_LINUX_RPC_H__
-#define __GXIO_MPIPE_LINUX_RPC_H__
-
-#include <hv/iorpc.h>
-
-#include <hv/drv_mpipe_intf.h>
-#include <asm/page.h>
-#include <gxio/kiorpc.h>
-#include <gxio/mpipe.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-
-#define GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1200)
-#define GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1201)
-
-#define GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1203)
-#define GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1204)
-#define GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1205)
-#define GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1206)
-#define GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1207)
-#define GXIO_MPIPE_OP_INIT_NOTIF_GROUP IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1208)
-#define GXIO_MPIPE_OP_ALLOC_BUCKETS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1209)
-#define GXIO_MPIPE_OP_INIT_BUCKET IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120a)
-#define GXIO_MPIPE_OP_ALLOC_EDMA_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120b)
-#define GXIO_MPIPE_OP_INIT_EDMA_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x120c)
-
-#define GXIO_MPIPE_OP_COMMIT_RULES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120f)
-#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
-#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
-#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
-#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213)
-
-#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e)
-#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f)
-#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220)
-#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221)
-#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222)
-#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
-#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
-#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
-#define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-
-int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
- void *mem_va, size_t mem_size,
- unsigned int mem_flags, unsigned int stack,
- unsigned int buffer_size_enum);
-
-
-int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
- size_t mem_size, unsigned int mem_flags,
- unsigned int ring);
-
-int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
- int inter_x, int inter_y,
- int inter_ipi, int inter_event,
- unsigned int ring);
-
-int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
- unsigned int ring);
-
-int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
- unsigned int group,
- gxio_mpipe_notif_group_bits_t bits);
-
-int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
- unsigned int first, unsigned int flags);
-
-int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
- MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info);
-
-int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
- size_t mem_size, unsigned int mem_flags,
- unsigned int ring, unsigned int channel);
-
-
-int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
- size_t blob_size);
-
-int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
- unsigned int iotlb, HV_PTE pte,
- unsigned int flags);
-
-int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
- _gxio_mpipe_link_name_t name, unsigned int flags);
-
-int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac);
-
-int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
- uint32_t attr, int64_t val);
-
-int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
- uint64_t *nsec, uint64_t *cycles);
-
-int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
- uint64_t nsec, uint64_t cycles);
-
-int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context,
- int64_t nsec);
-
-int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
- int32_t ppb);
-
-int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
-
-int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
-
-int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base);
-
-int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
- unsigned long offset, unsigned long size);
-
-#endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
deleted file mode 100644
index f0b04284468b..000000000000
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#ifndef __GXIO_MPIPE_INFO_LINUX_RPC_H__
-#define __GXIO_MPIPE_INFO_LINUX_RPC_H__
-
-#include <hv/iorpc.h>
-
-#include <hv/drv_mpipe_intf.h>
-#include <asm/page.h>
-#include <gxio/kiorpc.h>
-#include <gxio/mpipe.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-
-
-#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
-#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
-#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
-#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-
-
-int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
- _gxio_mpipe_link_name_t name);
-
-int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
- unsigned int idx,
- _gxio_mpipe_link_name_t *name,
- _gxio_mpipe_link_mac_t *mac);
-
-int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
- HV_PTE *base);
-
-int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
- unsigned long offset, unsigned long size);
-
-#endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
deleted file mode 100644
index 376a4f771167..000000000000
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#ifndef __GXIO_TRIO_LINUX_RPC_H__
-#define __GXIO_TRIO_LINUX_RPC_H__
-
-#include <hv/iorpc.h>
-
-#include <hv/drv_trio_intf.h>
-#include <gxio/trio.h>
-#include <gxio/kiorpc.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-
-#define GXIO_TRIO_OP_DEALLOC_ASID IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
-#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401)
-
-#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
-
-#define GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
-#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
-
-#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
-
-#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
-#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f)
-#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420)
-#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421)
-
-#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423)
-#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424)
-#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425)
-#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
-#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-
-int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
- unsigned int first, unsigned int flags);
-
-
-int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-
-int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
- unsigned int pio_region, unsigned int mac,
- uint32_t bus_address_hi, unsigned int flags);
-
-
-int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
- unsigned int map, unsigned long va,
- uint64_t size, unsigned int asid,
- unsigned int mac, uint64_t bus_address,
- unsigned int node,
- unsigned int order_mode);
-
-int gxio_trio_get_port_property(gxio_trio_context_t *context,
- struct pcie_trio_ports_property *trio_ports);
-
-int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event,
- unsigned int mac, unsigned int intx);
-
-int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event,
- unsigned int mac, unsigned int mem_map,
- uint64_t mem_map_base, uint64_t mem_map_limit,
- unsigned int asid);
-
-
-int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
- uint16_t mrs, unsigned int mac);
-
-int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac);
-
-int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac);
-
-int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base);
-
-int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
- unsigned long offset, unsigned long size);
-
-#endif /* !__GXIO_TRIO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_uart.h b/arch/tile/include/gxio/iorpc_uart.h
deleted file mode 100644
index 55429d48ea56..000000000000
--- a/arch/tile/include/gxio/iorpc_uart.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#ifndef __GXIO_UART_LINUX_RPC_H__
-#define __GXIO_UART_LINUX_RPC_H__
-
-#include <hv/iorpc.h>
-
-#include <hv/drv_uart_intf.h>
-#include <gxio/uart.h>
-#include <gxio/kiorpc.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-
-#define GXIO_UART_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1900)
-#define GXIO_UART_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
-#define GXIO_UART_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-
-int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event);
-
-int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base);
-
-int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
- unsigned long offset, unsigned long size);
-
-#endif /* !__GXIO_UART_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h
deleted file mode 100644
index 79962a97de8e..000000000000
--- a/arch/tile/include/gxio/iorpc_usb_host.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* This file is machine-generated; DO NOT EDIT! */
-#ifndef __GXIO_USB_HOST_LINUX_RPC_H__
-#define __GXIO_USB_HOST_LINUX_RPC_H__
-
-#include <hv/iorpc.h>
-
-#include <hv/drv_usb_host_intf.h>
-#include <asm/page.h>
-#include <gxio/kiorpc.h>
-#include <gxio/usb_host.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-
-#define GXIO_USB_HOST_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1800)
-#define GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1801)
-#define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
-#define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-
-int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
- int inter_y, int inter_ipi, int inter_event);
-
-int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
- HV_PTE pte, unsigned int flags);
-
-int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context,
- HV_PTE *base);
-
-int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
- unsigned long offset, unsigned long size);
-
-#endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/kiorpc.h b/arch/tile/include/gxio/kiorpc.h
deleted file mode 100644
index ee5820979ff3..000000000000
--- a/arch/tile/include/gxio/kiorpc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Support routines for kernel IORPC drivers.
- */
-
-#ifndef _GXIO_KIORPC_H
-#define _GXIO_KIORPC_H
-
-#include <linux/types.h>
-#include <asm/page.h>
-#include <arch/chip.h>
-
-#if CHIP_HAS_MMIO()
-void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
- unsigned long size);
-#endif
-
-#endif /* _GXIO_KIORPC_H */
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
deleted file mode 100644
index 73e83a187866..000000000000
--- a/arch/tile/include/gxio/mpipe.h
+++ /dev/null
@@ -1,1871 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _GXIO_MPIPE_H_
-#define _GXIO_MPIPE_H_
-
-/*
- *
- * An API for allocating, configuring, and manipulating mPIPE hardware
- * resources.
- */
-
-#include <gxio/common.h>
-#include <gxio/dma_queue.h>
-
-#include <linux/time.h>
-
-#include <arch/mpipe_def.h>
-#include <arch/mpipe_shm.h>
-
-#include <hv/drv_mpipe_intf.h>
-#include <hv/iorpc.h>
-
-/*
- *
- * The TILE-Gx mPIPE(tm) shim provides Ethernet connectivity, packet
- * classification, and packet load balancing services. The
- * gxio_mpipe_ API, declared in <gxio/mpipe.h>, allows applications to
- * allocate mPIPE IO channels, configure packet distribution
- * parameters, and send and receive Ethernet packets. The API is
- * designed to be a minimal wrapper around the mPIPE hardware, making
- * system calls only where necessary to preserve inter-process
- * protection guarantees.
- *
- * The APIs described below allow the programmer to allocate and
- * configure mPIPE resources. As described below, the mPIPE is a
- * single shared hardware device that provides partitionable resources
- * that are shared between all applications in the system. The
- * gxio_mpipe_ API allows userspace code to make resource request
- * calls to the hypervisor, which in turn keeps track of the
- * resources in use by all applications, maintains protection
- * guarantees, and resets resources upon application shutdown.
- *
- * We strongly recommend reading the mPIPE section of the IO Device
- * Guide (UG404) before working with this API. Most functions in the
- * gxio_mpipe_ API are directly analogous to hardware interfaces and
- * the documentation assumes that the reader understands those
- * hardware interfaces.
- *
- * @section mpipe__ingress mPIPE Ingress Hardware Resources
- *
- * The mPIPE ingress hardware provides extensive hardware offload for
- * tasks like packet header parsing, load balancing, and memory
- * management. This section provides a brief introduction to the
- * hardware components and the gxio_mpipe_ calls used to manage them;
- * see the IO Device Guide for a much more detailed description of the
- * mPIPE's capabilities.
- *
- * When a packet arrives at one of the mPIPE's Ethernet MACs, it is
- * assigned a channel number indicating which MAC received it. It
- * then proceeds through the following hardware pipeline:
- *
- * @subsection mpipe__classification Classification
- *
- * A set of classification processors run header parsing code on each
- * incoming packet, extracting information including the destination
- * MAC address, VLAN, Ethernet type, and five-tuple hash. Some of
- * this information is then used to choose which buffer stack will be
- * used to hold the packet, and which bucket will be used by the load
- * balancer to determine which application will receive the packet.
- *
- * The rules by which the buffer stack and bucket are chosen can be
- * configured via the @ref gxio_mpipe_classifier API. A given app can
- * specify multiple rules, each one specifying a bucket range and a
- * set of buffer stacks to be used for packets matching the rule.
- * Each rule can optionally specify a restricted set of channels,
- * VLANs, and/or dMACs, in which it is interested. By default, a
- * given rule starts out matching all channels associated with the
- * mPIPE context's set of open links; all VLANs; and all dMACs.
- * Subsequent restrictions can then be added.
- *
- * @subsection mpipe__load_balancing Load Balancing
- *
- * The mPIPE load balancer is responsible for choosing the NotifRing
- * to which the packet will be delivered. This decision is based on
- * the bucket number indicated by the classification program. In
- * general, the bucket number is based on some number of low bits of
- * the packet's flow hash (applications that aren't interested in flow
- * hashing use a single bucket). Each load balancer bucket keeps a
- * record of the NotifRing to which packets directed to that bucket
- * are currently being delivered. Based on the bucket's load
- * balancing mode (@ref gxio_mpipe_bucket_mode_t), the load balancer
- * either forwards the packet to the previously assigned NotifRing or
- * decides to choose a new NotifRing. If a new NotifRing is required,
- * the load balancer chooses the least loaded ring in the NotifGroup
- * associated with the bucket.
- *
- * The load balancer is a shared resource. Each application needs to
- * explicitly allocate NotifRings, NotifGroups, and buckets, using
- * gxio_mpipe_alloc_notif_rings(), gxio_mpipe_alloc_notif_groups(),
- * and gxio_mpipe_alloc_buckets(). Then the application needs to
- * configure them using gxio_mpipe_init_notif_ring() and
- * gxio_mpipe_init_notif_group_and_buckets().
- *
- * @subsection mpipe__buffers Buffer Selection and Packet Delivery
- *
- * Once the load balancer has chosen the destination NotifRing, the
- * mPIPE DMA engine pops at least one buffer off of the 'buffer stack'
- * chosen by the classification program and DMAs the packet data into
- * that buffer. Each buffer stack provides a hardware-accelerated
- * stack of data buffers with the same size. If the packet data is
- * larger than the buffers provided by the chosen buffer stack, the
- * mPIPE hardware pops off multiple buffers and chains the packet data
- * through a multi-buffer linked list. Once the packet data is
- * delivered to the buffer(s), the mPIPE hardware writes the
- * ::gxio_mpipe_idesc_t metadata object (calculated by the classifier)
- * into the NotifRing and increments the number of packets delivered
- * to that ring.
- *
- * Applications can push buffers onto a buffer stack by calling
- * gxio_mpipe_push_buffer() or by egressing a packet with the
- * ::gxio_mpipe_edesc_t::hwb bit set, indicating that the egressed
- * buffers should be returned to the stack.
- *
- * Applications can allocate and initialize buffer stacks with the
- * gxio_mpipe_alloc_buffer_stacks() and gxio_mpipe_init_buffer_stack()
- * APIs.
- *
- * The application must also register the memory pages that will hold
- * packets. This requires calling gxio_mpipe_register_page() for each
- * memory page that will hold packets allocated by the application for
- * a given buffer stack. Since each buffer stack is limited to 16
- * registered pages, it may be necessary to use huge pages, or even
- * extremely huge pages, to hold all the buffers.
- *
- * @subsection mpipe__iqueue NotifRings
- *
- * Each NotifRing is a region of shared memory, allocated by the
- * application, to which the mPIPE delivers packet descriptors
- * (::gxio_mpipe_idesc_t). The application can allocate them via
- * gxio_mpipe_alloc_notif_rings(). The application can then either
- * explicitly initialize them with gxio_mpipe_init_notif_ring() and
- * then read from them manually, or can make use of the convenience
- * wrappers provided by @ref gxio_mpipe_wrappers.
- *
- * @section mpipe__egress mPIPE Egress Hardware
- *
- * Applications use eDMA rings to queue packets for egress. The
- * application can allocate them via gxio_mpipe_alloc_edma_rings().
- * The application can then either explicitly initialize them with
- * gxio_mpipe_init_edma_ring() and then write to them manually, or
- * can make use of the convenience wrappers provided by
- * @ref gxio_mpipe_wrappers.
- *
- * @section gxio__shortcomings Plans for Future API Revisions
- *
- * The API defined here is only an initial version of the mPIPE API.
- * Future plans include:
- *
- * - Higher level wrapper functions to provide common initialization
- * patterns. This should help users start writing mPIPE programs
- * without having to learn the details of the hardware.
- *
- * - Support for reset and deallocation of resources, including
- * cleanup upon application shutdown.
- *
- * - Support for calling these APIs in the BME.
- *
- * - Support for IO interrupts.
- *
- * - Clearer definitions of thread safety guarantees.
- *
- * @section gxio__mpipe_examples Examples
- *
- * See the following mPIPE example programs for more information about
- * allocating mPIPE resources and using them in real applications:
- *
- * - @ref mpipe/ingress/app.c : Receiving packets.
- *
- * - @ref mpipe/forward/app.c : Forwarding packets.
- *
- * Note that there are several more examples.
- */
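
A condensed sketch of the ingress setup sequence the overview describes, using only prototypes visible in this header; error handling is elided, and ring_mem/ring_mem_size stand for memory the caller has already allocated:

    gxio_mpipe_context_t ctx;
    gxio_mpipe_notif_group_bits_t bits = { { 0 } };
    int ring, group;

    gxio_mpipe_init(&ctx, 0);                  /* open mPIPE shim 0 */

    ring = gxio_mpipe_alloc_notif_rings(&ctx, 1, 0, 0);
    gxio_mpipe_init_notif_ring(&ctx, ring, ring_mem, ring_mem_size, 0);

    group = gxio_mpipe_alloc_notif_groups(&ctx, 1, 0, 0);
    gxio_mpipe_notif_group_add_ring(&bits, ring);
    gxio_mpipe_init_notif_group(&ctx, group, bits);

    gxio_mpipe_alloc_buckets(&ctx, 16, 0, 0);  /* steer flows; see below */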
-
-/* Flags that can be passed to resource allocation functions. */
-enum gxio_mpipe_alloc_flags_e {
- /* Require an allocation to start at a specified resource index. */
- GXIO_MPIPE_ALLOC_FIXED = HV_MPIPE_ALLOC_FIXED,
-};
-
-/* Flags that can be passed to memory registration functions. */
-enum gxio_mpipe_mem_flags_e {
- /* Do not fill L3 when writing, and invalidate lines upon egress. */
- GXIO_MPIPE_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT,
-
- /* L3 cache fills should only populate IO cache ways. */
- GXIO_MPIPE_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN,
-};
-
-/* An ingress packet descriptor. When a packet arrives, the mPIPE
- * hardware generates this structure and writes it into a NotifRing.
- */
-typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
-
-/* An egress command descriptor. Applications write this structure
- * into eDMA rings and the hardware performs the indicated operation
- * (normally involving egressing some bytes). Note that egressing a
- * single packet may involve multiple egress command descriptors.
- */
-typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
-
-/*
- * Max # of mpipe instances. 2 currently.
- */
-#define GXIO_MPIPE_INSTANCE_MAX HV_MPIPE_INSTANCE_MAX
-
-#define NR_MPIPE_MAX GXIO_MPIPE_INSTANCE_MAX
-
-/* Get the "va" field from an "idesc".
- *
- * This is the address at which the ingress hardware copied the first
- * byte of the packet.
- *
- * If the classifier detected a custom header, then this will point to
- * the custom header, and gxio_mpipe_idesc_get_l2_start() will point
- * to the actual L2 header.
- *
- * Note that this value may be misleading if "idesc->be" is set.
- *
- * @param idesc An ingress packet descriptor.
- */
-static inline unsigned char *gxio_mpipe_idesc_get_va(gxio_mpipe_idesc_t *idesc)
-{
- return (unsigned char *)(long)idesc->va;
-}
-
-/* Get the "xfer_size" from an "idesc".
- *
- * This is the actual number of packet bytes transferred into memory
- * by the hardware.
- *
- * Note that this value may be misleading if "idesc->be" is set.
- *
- * @param idesc An ingress packet descriptor.
- *
- * ISSUE: Is this the best name for this?
- * FIXME: Add more docs about chaining, clipping, etc.
- */
-static inline unsigned int gxio_mpipe_idesc_get_xfer_size(gxio_mpipe_idesc_t
- *idesc)
-{
- return idesc->l2_size;
-}
-
-/* Get the "l2_offset" from an "idesc".
- *
- * Extremely customized classifiers might not support this function.
- *
- * This is the number of bytes between the "va" and the L2 header.
- *
- * The L2 header consists of a destination mac address, a source mac
- * address, and an initial ethertype. Various initial ethertypes
- * allow encoding extra information in the L2 header, often including
- * a vlan, and/or a new ethertype.
- *
- * Note that the "l2_offset" will be non-zero if (and only if) the
- * classifier processed a custom header for the packet.
- *
- * @param idesc An ingress packet descriptor.
- */
-static inline uint8_t gxio_mpipe_idesc_get_l2_offset(gxio_mpipe_idesc_t *idesc)
-{
- return (idesc->custom1 >> 32) & 0xFF;
-}
-
-/* Get the "l2_start" from an "idesc".
- *
- * This is simply gxio_mpipe_idesc_get_va() plus
- * gxio_mpipe_idesc_get_l2_offset().
- *
- * @param idesc An ingress packet descriptor.
- */
-static inline unsigned char *gxio_mpipe_idesc_get_l2_start(gxio_mpipe_idesc_t
- *idesc)
-{
- unsigned char *va = gxio_mpipe_idesc_get_va(idesc);
- return va + gxio_mpipe_idesc_get_l2_offset(idesc);
-}
-
-/* Get the "l2_length" from an "idesc".
- *
- * This is simply gxio_mpipe_idesc_get_xfer_size() minus
- * gxio_mpipe_idesc_get_l2_offset().
- *
- * @param idesc An ingress packet descriptor.
- */
-static inline unsigned int gxio_mpipe_idesc_get_l2_length(gxio_mpipe_idesc_t
- *idesc)
-{
- unsigned int xfer_size = idesc->l2_size;
- return xfer_size - gxio_mpipe_idesc_get_l2_offset(idesc);
-}
-
-/* A context object used to manage mPIPE hardware resources. */
-typedef struct {
-
- /* File descriptor for calling up to Linux (and thus the HV). */
- int fd;
-
- /* Corresponding mpipe instance #. */
- int instance;
-
- /* The VA at which configuration registers are mapped. */
- char *mmio_cfg_base;
-
- /* The VA at which IDMA, EDMA, and buffer manager are mapped. */
- char *mmio_fast_base;
-
- /* The "initialized" buffer stacks. */
- gxio_mpipe_rules_stacks_t __stacks;
-
-} gxio_mpipe_context_t;
-
-/* This is only used internally, but it's most easily made visible here. */
-typedef gxio_mpipe_context_t gxio_mpipe_info_context_t;
-
-/* Initialize an mPIPE context.
- *
- * This function allocates an mPIPE "service domain" and maps the MMIO
- * registers into the caller's VA space.
- *
- * @param context Context object to be initialized.
- * @param mpipe_instance Instance number of mPIPE shim to be controlled via
- * context.
- */
-extern int gxio_mpipe_init(gxio_mpipe_context_t *context,
- unsigned int mpipe_instance);
-
-/* Destroy an mPIPE context.
- *
- * This function frees the mPIPE "service domain" and unmaps the MMIO
- * registers from the caller's VA space.
- *
- * If a user process exits without calling this routine, the kernel
- * will destroy the mPIPE context as part of process teardown.
- *
- * @param context Context object to be destroyed.
- */
-extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context);
-
-/*****************************************************************
- * Buffer Stacks *
- ******************************************************************/
-
-/* Allocate a set of buffer stacks.
- *
- * The return value is NOT interesting if count is zero.
- *
- * @param context An initialized mPIPE context.
- * @param count Number of stacks required.
- * @param first Index of first stack if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
- * otherwise ignored.
- * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
- * @return Index of first allocated buffer stack, or
- * ::GXIO_MPIPE_ERR_NO_BUFFER_STACK if allocation failed.
- */
-extern int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
- unsigned int count,
- unsigned int first,
- unsigned int flags);
-
-/* Enum codes for buffer sizes supported by mPIPE. */
-typedef enum {
- /* 128 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_128 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128,
- /* 256 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_256 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256,
- /* 512 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_512 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512,
- /* 1024 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_1024 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024,
- /* 1664 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_1664 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664,
- /* 4096 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_4096 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096,
- /* 10368 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_10368 =
- MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368,
- /* 16384 byte packet data buffer. */
- GXIO_MPIPE_BUFFER_SIZE_16384 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384
-} gxio_mpipe_buffer_size_enum_t;
-
-/* Convert a buffer size in bytes into a buffer size enum. */
-extern gxio_mpipe_buffer_size_enum_t
-gxio_mpipe_buffer_size_to_buffer_size_enum(size_t size);
-
-/* Convert a buffer size enum into a buffer size in bytes. */
-extern size_t
-gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
- buffer_size_enum);
-
-/* Calculate the number of bytes required to store a given number of
- * buffers in the memory registered with a buffer stack via
- * gxio_mpipe_init_buffer_stack().
- */
-extern size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers);
-
-/* Initialize a buffer stack. This function binds a region of memory
- * to be used by the hardware for storing buffer addresses pushed via
- * gxio_mpipe_push_buffer() or as the result of sending a buffer out
- * the egress with the 'push to stack when done' bit set. Once this
- * function returns, the memory region's contents may be arbitrarily
- * modified by the hardware at any time and software should not access
- * the memory region again.
- *
- * @param context An initialized mPIPE context.
- * @param stack The buffer stack index.
- * @param buffer_size_enum The size of each buffer in the buffer stack,
- * as an enum.
- * @param mem The address of the buffer stack. This memory must be
- * physically contiguous and aligned to a 64kB boundary.
- * @param mem_size The size of the buffer stack, in bytes.
- * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
- * @return Zero on success, ::GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE if
- * buffer_size_enum is invalid, ::GXIO_MPIPE_ERR_BAD_BUFFER_STACK if
- * stack has not been allocated.
- */
-extern int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
- unsigned int stack,
- gxio_mpipe_buffer_size_enum_t
- buffer_size_enum, void *mem,
- size_t mem_size,
- unsigned int mem_flags);
-
-/* Push a buffer onto a previously initialized buffer stack.
- *
- * The size of the buffer being pushed must match the size that was
- * registered with gxio_mpipe_init_buffer_stack(). All packet buffer
- * addresses are 128-byte aligned; the low 7 bits of the specified
- * buffer address will be ignored.
- *
- * @param context An initialized mPIPE context.
- * @param stack The buffer stack index.
- * @param buffer The buffer (the low seven bits are ignored).
- */
-static inline void gxio_mpipe_push_buffer(gxio_mpipe_context_t *context,
- unsigned int stack, void *buffer)
-{
- MPIPE_BSM_REGION_ADDR_t offset = { {0} };
- MPIPE_BSM_REGION_VAL_t val = { {0} };
-
- /*
- * The mmio_fast_base region starts at the IDMA region, so subtract
- * off that initial offset.
- */
- offset.region =
- MPIPE_MMIO_ADDR__REGION_VAL_BSM -
- MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- offset.stack = stack;
-
-#if __SIZEOF_POINTER__ == 4
- val.va = ((ulong) buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
-#else
- val.va = ((long)buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
-#endif
-
- __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
-}
-
-/* Pop a buffer off of a previously initialized buffer stack.
- *
- * @param context An initialized mPIPE context.
- * @param stack The buffer stack index.
- * @return The buffer, or NULL if the stack is empty.
- */
-static inline void *gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context,
- unsigned int stack)
-{
- MPIPE_BSM_REGION_ADDR_t offset = { {0} };
-
- /*
- * The mmio_fast_base region starts at the IDMA region, so subtract
- * off that initial offset.
- */
- offset.region =
- MPIPE_MMIO_ADDR__REGION_VAL_BSM -
- MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- offset.stack = stack;
-
- while (1) {
- /*
- * Case 1: val.c == ..._UNCHAINED, va is non-zero.
- * Case 2: val.c == ..._INVALID, va is zero.
- * Case 3: val.c == ..._NOT_RDY, va is zero.
- */
- MPIPE_BSM_REGION_VAL_t val;
- val.word =
- __gxio_mmio_read(context->mmio_fast_base +
- offset.word);
-
- /*
- * Handle case 1 and 2 by returning the buffer (or NULL).
- * Handle case 3 by waiting for the prefetch buffer to refill.
- */
- if (val.c != MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY)
- return (void *)((unsigned long)val.va <<
- MPIPE_BSM_REGION_VAL__VA_SHIFT);
- }
-}
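
A small usage sketch pairing the two helpers above, where ctx is an initialized context and stack was set up via gxio_mpipe_init_buffer_stack():

    void *buf = gxio_mpipe_pop_buffer(&ctx, stack);

    if (buf) {
            /* ... fill or inspect the 128-byte-aligned buffer ... */
            gxio_mpipe_push_buffer(&ctx, stack, buf);
    }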
-
-/*****************************************************************
- * NotifRings *
- ******************************************************************/
-
-/* Allocate a set of NotifRings.
- *
- * The return value is NOT interesting if count is zero.
- *
- * Note that NotifRings are allocated in chunks, so allocating one at
- * a time is much less efficient than allocating several at once.
- *
- * @param context An initialized mPIPE context.
- * @param count Number of NotifRings required.
- * @param first Index of first NotifRing if ::GXIO_MPIPE_ALLOC_FIXED flag
- * is set, otherwise ignored.
- * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
- * @return Index of first allocated NotifRing, or
- * ::GXIO_MPIPE_ERR_NO_NOTIF_RING if allocation failed.
- */
-extern int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-/* Initialize a NotifRing, using the given memory and size.
- *
- * @param context An initialized mPIPE context.
- * @param ring The NotifRing index.
- * @param mem A physically contiguous region of memory to be filled
- * with a ring of ::gxio_mpipe_idesc_t structures.
- * @param mem_size Number of bytes in the ring. Must be 128, 512,
- * 2048, or 65536 * sizeof(gxio_mpipe_idesc_t).
- * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
- *
- * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_NOTIF_RING or
- * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
- */
-extern int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
- unsigned int ring,
- void *mem, size_t mem_size,
- unsigned int mem_flags);
-
-/* Configure an interrupt to be sent to a tile on incoming NotifRing
- * traffic. Once an interrupt is sent for a particular ring, no more
- * will be sent until gxio_mpipe_enable_notif_ring_interrupt() is called.
- *
- * @param context An initialized mPIPE context.
- * @param x X coordinate of interrupt target tile.
- * @param y Y coordinate of interrupt target tile.
- * @param i Index of the IPI register which will receive the interrupt.
- * @param e Specific event which will be set in the target IPI register when
- * the interrupt occurs.
- * @param ring The NotifRing index.
- * @return Zero on success, GXIO_ERR_INVAL if params are out of range.
- */
-extern int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t
- *context, int x, int y,
- int i, int e,
- unsigned int ring);
-
-/* Enable an interrupt on incoming NotifRing traffic.
- *
- * @param context An initialized mPIPE context.
- * @param ring The NotifRing index.
- * @return Zero on success, GXIO_ERR_INVAL if params are out of range.
- */
-extern int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t
- *context, unsigned int ring);
-
-/* Map all of a client's memory via the given IOTLB.
- * @param context An initialized mPIPE context.
- * @param iotlb IOTLB index.
- * @param pte Page table entry.
- * @param flags Flags.
- * @return Zero on success, or a negative error code.
- */
-extern int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
- unsigned int iotlb, HV_PTE pte,
- unsigned int flags);
-
-/*****************************************************************
- * Notif Groups *
- ******************************************************************/
-
-/* Allocate a set of NotifGroups.
- *
- * The return value is NOT interesting if count is zero.
- *
- * @param context An initialized mPIPE context.
- * @param count Number of NotifGroups required.
- * @param first Index of first NotifGroup if ::GXIO_MPIPE_ALLOC_FIXED flag
- * is set, otherwise ignored.
- * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
- * @return Index of first allocated NotifGroup, or
- * ::GXIO_MPIPE_ERR_NO_NOTIF_GROUP if allocation failed.
- */
-extern int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
- unsigned int count,
- unsigned int first,
- unsigned int flags);
-
-/* Add a NotifRing to a NotifGroup. This only sets a bit in the
- * application's 'group' object; the hardware NotifGroup can be
- * initialized by passing 'group' to gxio_mpipe_init_notif_group() or
- * gxio_mpipe_init_notif_group_and_buckets().
- */
-static inline void
-gxio_mpipe_notif_group_add_ring(gxio_mpipe_notif_group_bits_t *bits, int ring)
-{
- bits->ring_mask[ring / 64] |= (1ull << (ring % 64));
-}
-
-/* Set a particular NotifGroup bitmask. Since the load balancer
- * makes decisions based on both bucket and NotifGroup state, most
- * applications should use gxio_mpipe_init_notif_group_and_buckets()
- * rather than using this function to configure just a NotifGroup.
- */
-extern int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
- unsigned int group,
- gxio_mpipe_notif_group_bits_t bits);
-
-/*****************************************************************
- * Load Balancer *
- ******************************************************************/
-
-/* Allocate a set of load balancer buckets.
- *
- * The return value is NOT interesting if count is zero.
- *
- * Note that buckets are allocated in chunks, so allocating one at
- * a time is much less efficient than allocating several at once.
- *
- * Note that the buckets are actually divided into two sub-ranges, with
- * different sizes and different chunk sizes, and the sub-range you get
- * by default is determined by the size of the request. Allocations
- * cannot span the two sub-ranges.
- *
- * @param context An initialized mPIPE context.
- * @param count Number of buckets required.
- * @param first Index of first bucket if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
- * otherwise ignored.
- * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
- * @return Index of first allocated bucket, or
- * ::GXIO_MPIPE_ERR_NO_BUCKET if allocation failed.
- */
-extern int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-/* The legal modes for gxio_mpipe_bucket_info_t and
- * gxio_mpipe_init_notif_group_and_buckets().
- *
- * All modes except ::GXIO_MPIPE_BUCKET_ROUND_ROBIN expect that the user
- * will allocate a power-of-two number of buckets and initialize them
- * to the same mode. The classifier program then uses the appropriate
- * number of low bits from the incoming packet's flow hash to choose a
- * load balancer bucket. Based on that bucket's load balancing mode,
- * reference count, and currently active NotifRing, the load balancer
- * chooses the NotifRing to which the packet will be delivered.
- */
-typedef enum {
- /* All packets for a bucket go to the same NotifRing unless the
- * NotifRing gets full, in which case packets will be dropped. If
- * the bucket reference count ever reaches zero, a new NotifRing may
- * be chosen.
- */
- GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY =
- MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA,
-
- /* All packets for a bucket always go to the same NotifRing.
- */
- GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY =
- MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED,
-
- /* All packets for a bucket go to the least full NotifRing in the
- * group, providing round-robin load balancing behavior.
- */
- GXIO_MPIPE_BUCKET_ROUND_ROBIN =
- MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK,
-
- /* All packets for a bucket go to the same NotifRing unless the
- * NotifRing gets full, at which point the bucket starts using the
- * least full NotifRing in the group. If all NotifRings in the
- * group are full, packets will be dropped.
- */
- GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY =
- MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY,
-
- /* All packets for a bucket go to the same NotifRing unless the
- * NotifRing gets full, or a random timer fires, at which point the
- * bucket starts using the least full NotifRing in the group. If
- * all NotifRings in the group are full, packets will be dropped.
- * WARNING: This mode is BROKEN on chips with fewer than 64 tiles.
- */
- GXIO_MPIPE_BUCKET_PREFER_FLOW_LOCALITY =
- MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND,
-
-} gxio_mpipe_bucket_mode_t;
-
-/* Copy a set of bucket initialization values into the mPIPE
- * hardware. Since the load balancer makes decisions based on both
- * bucket and NotifGroup state, most applications should use
- * gxio_mpipe_init_notif_group_and_buckets() rather than using this
- * function to configure a single bucket.
- *
- * @param context An initialized mPIPE context.
- * @param bucket Bucket index to be initialized.
- * @param bucket_info Initial reference count, NotifRing index, and mode.
- * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET on failure.
- */
-extern int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context,
- unsigned int bucket,
- gxio_mpipe_bucket_info_t bucket_info);
-
-/* Initializes a group and range of buckets and range of rings such
- * that the load balancer runs a particular load balancing function.
- *
- * First, the group is initialized with the given rings.
- *
- * Second, each bucket is initialized with the mode and group, and a
- * ring chosen round-robin from the given rings.
- *
- * Normally, the classifier picks a bucket, and then the load balancer
- * picks a ring, based on the bucket's mode, group, and current ring,
- * possibly updating the bucket's ring.
- *
- * @param context An initialized mPIPE context.
- * @param group The group.
- * @param ring The first ring.
- * @param num_rings The number of rings.
- * @param bucket The first bucket.
- * @param num_buckets The number of buckets.
- * @param mode The load balancing mode.
- *
- * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET,
- * ::GXIO_MPIPE_ERR_BAD_NOTIF_GROUP, or
- * ::GXIO_MPIPE_ERR_BAD_NOTIF_RING on failure.
- */
-extern int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t
- *context,
- unsigned int group,
- unsigned int ring,
- unsigned int num_rings,
- unsigned int bucket,
- unsigned int num_buckets,
- gxio_mpipe_bucket_mode_t
- mode);
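-
-/* Illustrative setup sketch (not part of the original API): assuming
- * "ring" is the first of "num_rings" NotifRings already allocated via
- * gxio_mpipe_alloc_notif_rings() and initialized, allocate a group and
- * a power-of-two set of buckets and bind them together with static
- * flow affinity. Error handling is abbreviated.
- */
-static inline int example_setup_load_balancer(gxio_mpipe_context_t *context,
-					      unsigned int ring,
-					      unsigned int num_rings)
-{
-	unsigned int num_buckets = 256;	/* Must be a power of two. */
-	int group = gxio_mpipe_alloc_notif_groups(context, 1, 0, 0);
-	int bucket = gxio_mpipe_alloc_buckets(context, num_buckets, 0, 0);
-	if (group < 0 || bucket < 0)
-		return -1;
-	return gxio_mpipe_init_notif_group_and_buckets(context, group,
-		ring, num_rings, bucket, num_buckets,
-		GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
-}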
-
-/* Return credits to a NotifRing and/or bucket.
- *
- * @param context An initialized mPIPE context.
- * @param ring The NotifRing index, or -1.
- * @param bucket The bucket, or -1.
- * @param count The number of credits to return.
- */
-static inline void gxio_mpipe_credit(gxio_mpipe_context_t *context,
- int ring, int bucket, unsigned int count)
-{
- /* NOTE: Fancy struct initialization would break "C89" header test. */
-
- MPIPE_IDMA_RELEASE_REGION_ADDR_t offset = { {0} };
- MPIPE_IDMA_RELEASE_REGION_VAL_t val = { {0} };
-
- /*
- * The mmio_fast_base region starts at the IDMA region, so subtract
- * off that initial offset.
- */
- offset.region =
- MPIPE_MMIO_ADDR__REGION_VAL_IDMA -
- MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- offset.ring = ring;
- offset.bucket = bucket;
- offset.ring_enable = (ring >= 0);
- offset.bucket_enable = (bucket >= 0);
- val.count = count;
-
- __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
-}
-
-/*****************************************************************
- * Egress Rings *
- ******************************************************************/
-
-/* Allocate a set of eDMA rings.
- *
- * The return value is NOT interesting if count is zero.
- *
- * @param context An initialized mPIPE context.
- * @param count Number of eDMA rings required.
- * @param first Index of first eDMA ring if ::GXIO_MPIPE_ALLOC_FIXED flag
- * is set, otherwise ignored.
- * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
- * @return Index of first allocated eDMA ring, or
- * ::GXIO_MPIPE_ERR_NO_EDMA_RING if allocation failed.
- */
-extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-/* Initialize an eDMA ring, using the given memory and size.
- *
- * @param context An initialized mPIPE context.
- * @param ering The eDMA ring index.
- * @param channel The channel to use. This must be one of the channels
- * associated with the context's set of open links.
- * @param mem A physically contiguous region of memory to be filled
- * with a ring of ::gxio_mpipe_edesc_t structures.
- * @param mem_size Number of bytes in the ring. Must be 512, 2048,
- * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
- * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
- *
- * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
- * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
- */
-extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
- unsigned int ering, unsigned int channel,
- void *mem, size_t mem_size,
- unsigned int mem_flags);
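-
-/* Illustrative sketch (not part of the original API): allocate one
- * eDMA ring and bind it to 512 edescs worth of memory. "mem" must be
- * physically contiguous, and its memory flags are the caller's choice.
- */
-static inline int example_setup_edma_ring(gxio_mpipe_context_t *context,
-					  unsigned int channel,
-					  void *mem, unsigned int mem_flags)
-{
-	int ering = gxio_mpipe_alloc_edma_rings(context, 1, 0, 0);
-	if (ering < 0)
-		return ering;
-	/* 512 entries times sizeof(gxio_mpipe_edesc_t), i.e. 16 bytes. */
-	return gxio_mpipe_init_edma_ring(context, ering, channel,
-					 mem, 512 * 16, mem_flags);
-}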
-
-/* Set the "max_blks", "min_snf_blks", and "db" fields of
- * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring.
- *
- * The global pool of dynamic blocks will be automatically adjusted.
- *
- * This function should not be called after any egress has been done
- * on the edma ring.
- *
- * Most applications should just use gxio_mpipe_equeue_set_snf_size().
- *
- * @param context An initialized mPIPE context.
- * @param ering The eDMA ring index.
- * @param max_blks The number of blocks to dedicate to the ring
- * (normally min_snf_blks + 1). Must be greater than min_snf_blks.
- * @param min_snf_blks The number of blocks which must be stored
- * prior to starting to send the packet (normally 12).
- * @param db Whether to allow use of dynamic blocks by the ring
- * (normally 1).
- *
- * @return 0 on success, negative on error.
- */
-extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
- unsigned int ering,
- unsigned int max_blks,
- unsigned int min_snf_blks,
- unsigned int db);
-
-/*****************************************************************
- * Classifier Program *
- ******************************************************************/
-
-/*
- *
- * Functions for loading or configuring the mPIPE classifier program.
- *
- * The mPIPE classification processors all run a special "classifier"
- * program which, for each incoming packet, parses the packet headers,
- * encodes some packet metadata in the "idesc", and either drops the
- * packet, or picks a notif ring to handle the packet, and a buffer
- * stack to contain the packet, usually based on the channel, VLAN,
- * dMAC, flow hash, and packet size, under the guidance of the "rules"
- * API described below.
- *
- * @section gxio_mpipe_classifier_default Default Classifier
- *
- * The MDE provides a simple "default" classifier program. It is
- * shipped as source in "$TILERA_ROOT/src/sys/mpipe/classifier.c",
- * which serves as its official documentation. It is shipped as a
- * binary program in "$TILERA_ROOT/tile/boot/classifier", which is
- * automatically included in bootroms created by "tile-monitor", and
- * is automatically loaded by the hypervisor at boot time.
- *
- * The L2 analysis handles LLC packets, SNAP packets, and "VLAN
- * wrappers" (keeping the outer VLAN).
- *
- * The L3 analysis handles IPv4 and IPv6, dropping packets with bad
- * IPv4 header checksums, requesting computation of a TCP/UDP checksum
- * if appropriate, and hashing the dest and src IP addresses, plus the
- * ports for TCP/UDP packets, into the flow hash. No special analysis
- * is done for "fragmented" packets or "tunneling" protocols. Thus,
- * the first fragment of a fragmented TCP/UDP packet is hashed using
- * src/dest IP address and ports and all subsequent fragments are only
- * hashed according to src/dest IP address.
- *
- * The L3 analysis handles other packets too, hashing the dMAC and
- * sMAC into a flow hash.
- *
- * The channel, VLAN, and dMAC are used to pick a "rule" (see the
- * "rules" APIs below), which in turn is used to pick a buffer stack
- * (based on the packet size) and a bucket (based on the flow hash).
- *
- * To receive traffic matching a particular channel/VLAN/dMAC
- * pattern, an application should allocate its own buffer stacks and
- * load balancer buckets, and map traffic to those stacks and buckets,
- * as described by the "rules" API below.
- *
- * Various packet metadata is encoded in the idesc. The flow hash is
- * four bytes at 0x0C. The VLAN is two bytes at 0x10. The ethtype is
- * two bytes at 0x12. The l3 start is one byte at 0x14. The l4 start
- * is one byte at 0x15 for IPv4 and IPv6 packets, and otherwise zero.
- * The protocol is one byte at 0x16 for IPv4 and IPv6 packets, and
- * otherwise zero.
- *
- * @section gxio_mpipe_classifier_custom Custom Classifiers
- *
- * A custom classifier may be created using "tile-mpipe-cc" with a
- * customized version of the default classifier sources.
- *
- * The custom classifier may be included in bootroms using the
- * "--classifier" option to "tile-monitor", or loaded dynamically
- * using gxio_mpipe_classifier_load_from_file().
- *
- * Be aware that "extreme" customizations may break the assumptions of
- * the "rules" APIs described below, but simple customizations, such
- * as adding new packet metadata, should be fine.
- */
-
-/* A set of classifier rules, plus a context. */
-typedef struct {
-
- /* The context. */
- gxio_mpipe_context_t *context;
-
- /* The actual rules. */
- gxio_mpipe_rules_list_t list;
-
-} gxio_mpipe_rules_t;
-
-/* Initialize a classifier program rules list.
- *
- * This function can be called on a previously initialized rules list
- * to discard any previously added rules.
- *
- * @param rules Rules list to initialize.
- * @param context An initialized mPIPE context.
- */
-extern void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
- gxio_mpipe_context_t *context);
-
-/* Begin a new rule on the indicated rules list.
- *
- * Note that an empty rule matches all packets, but an empty rule list
- * matches no packets.
- *
- * @param rules Rules list to which new rule is appended.
- * @param bucket First load balancer bucket to which packets will be
- * delivered.
- * @param num_buckets Number of buckets (must be a power of two) across
- * which packets will be distributed based on the "flow hash".
- * @param stacks Either NULL, to assign each packet to the smallest
- * initialized buffer stack which does not induce chaining (and to
- * drop packets which exceed the largest initialized buffer stack
- * buffer size), or an array, with each entry indicating which buffer
- * stack should be used for packets up to that size (with 255
- * indicating that those packets should be dropped).
- * @return 0 on success, or a negative error code on failure.
- */
-extern int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
- unsigned int bucket,
- unsigned int num_buckets,
- gxio_mpipe_rules_stacks_t *stacks);
-
-/* Set the headroom of the current rule.
- *
- * @param rules Rules list whose current rule will be modified.
- * @param headroom The headroom.
- * @return 0 on success, or a negative error code on failure.
- */
-extern int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules,
- uint8_t headroom);
-
-/* Indicate that packets from a particular channel can be delivered
- * to the buckets and buffer stacks associated with the current rule.
- *
- * Channels added must be associated with links opened by the mPIPE context
- * used in gxio_mpipe_rules_init(). A rule with no channels is equivalent
- * to a rule naming all such associated channels.
- *
- * @param rules Rules list whose current rule will be modified.
- * @param channel The channel to add.
- * @return 0 on success, or a negative error code on failure.
- */
-extern int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
- unsigned int channel);
-
-/* Commit rules.
- *
- * The rules are sent to the hypervisor, where they are combined with
- * the rules from other apps, and used to program the hardware classifier.
- *
- * Note that if this function returns an error, then the rules will NOT
- * have been committed, even if the error is due to interactions with
- * rules from another app.
- *
- * @param rules Rules list to commit.
- * @return 0 on success, or a negative error code on failure.
- */
-extern int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules);
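-
-/* Illustrative sketch (not part of the original API): steer all
- * traffic from one channel to a power-of-two range of buckets, using
- * the default buffer stack selection (stacks == NULL). Intermediate
- * error checks are omitted for brevity.
- */
-static inline int example_install_rules(gxio_mpipe_context_t *context,
-					unsigned int channel,
-					unsigned int bucket,
-					unsigned int num_buckets)
-{
-	gxio_mpipe_rules_t rules;
-	gxio_mpipe_rules_init(&rules, context);
-	gxio_mpipe_rules_begin(&rules, bucket, num_buckets, NULL);
-	gxio_mpipe_rules_add_channel(&rules, channel);
-	/* Nothing takes effect until the rules are committed. */
-	return gxio_mpipe_rules_commit(&rules);
-}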
-
-/*****************************************************************
- * Ingress Queue Wrapper *
- ******************************************************************/
-
-/*
- *
- * Convenience functions for receiving packets from a NotifRing and
- * sending packets via an eDMA ring.
- *
- * The mpipe ingress and egress hardware uses shared memory packet
- * descriptors to describe packets that have arrived on ingress or
- * are destined for egress. These descriptors are stored in shared
- * memory ring buffers and written or read by hardware as necessary.
- * The gxio library provides wrapper functions that manage the head and
- * tail pointers for these rings, allowing the user to easily read or
- * write packet descriptors.
- *
- * The initialization interface for ingress and egress rings is quite
- * similar. For example, to create an ingress queue, the user passes
- * a ::gxio_mpipe_iqueue_t state object, a ring number from
- * gxio_mpipe_alloc_notif_rings(), and the address of memory to hold a
- * ring buffer to the gxio_mpipe_iqueue_init() function. The function
- * returns success when the state object has been initialized and the
- * hardware configured to deliver packets to the specified ring
- * buffer. Similarly, gxio_mpipe_equeue_init() takes a
- * ::gxio_mpipe_equeue_t state object, a ring number from
- * gxio_mpipe_alloc_edma_rings(), and a shared memory buffer.
- *
- * @section gxio_mpipe_iqueue Working with Ingress Queues
- *
- * Once initialized, the gxio_mpipe_iqueue_t API provides two flows
- * for getting the ::gxio_mpipe_idesc_t packet descriptor associated
- * with incoming packets. The simplest is to call
- * gxio_mpipe_iqueue_get() or gxio_mpipe_iqueue_try_get(). These
- * functions copy the oldest packet descriptor out of the NotifRing and
- * into a descriptor provided by the caller. They also immediately
- * inform the hardware that a descriptor has been processed.
- *
- * For applications with stringent performance requirements, higher
- * efficiency can be achieved by avoiding the packet descriptor copy
- * and processing multiple descriptors at once. The
- * gxio_mpipe_iqueue_peek() and gxio_mpipe_iqueue_try_peek() functions
- * allow such optimizations. These functions provide a pointer to the
- * next valid ingress descriptor in the NotifRing's shared memory ring
- * buffer, and a count of how many contiguous descriptors are ready to
- * be processed. The application can then process any number of those
- * descriptors in place, calling gxio_mpipe_iqueue_consume() to inform
- * the hardware after each one has been processed.
- *
- * @section gxio_mpipe_equeue Working with Egress Queues
- *
- * Similarly, the egress queue API provides a high-performance
- * interface plus a simple wrapper for use in posting
- * ::gxio_mpipe_edesc_t egress packet descriptors. The simple
- * version, gxio_mpipe_equeue_put(), allows the programmer to wait for
- * an eDMA ring slot to become available and write a single descriptor
- * into the ring.
- *
- * Alternatively, you can reserve slots in the eDMA ring using
- * gxio_mpipe_equeue_reserve() or gxio_mpipe_equeue_try_reserve(), and
- * then fill in each slot using gxio_mpipe_equeue_put_at(). This
- * capability can be used to amortize the cost of reserving slots
- * across several packets. It also allows gather operations to be
- * performed on a shared equeue, by ensuring that the edescs for all
- * the fragments are contiguous in the eDMA ring.
- *
- * The gxio_mpipe_equeue_reserve() and gxio_mpipe_equeue_try_reserve()
- * functions return a 63-bit "completion slot", which is actually a
- * sequence number, the low bits of which indicate the ring buffer
- * index and the high bits the number of times the application has
- * gone around the egress ring buffer. The extra bits allow an
- * application to check for egress completion by calling
- * gxio_mpipe_equeue_is_complete() to see whether a particular 'slot'
- * number has finished. Given the maximum packet rates of the Gx
- * processor, the 63-bit slot number will never wrap.
- *
- * In practice, most applications use the ::gxio_mpipe_edesc_t::hwb
- * bit to indicate that the buffers containing egress packet data
- * should be pushed onto a buffer stack when egress is complete. Such
- * applications generally do not need to know when an egress operation
- * completes (since there is no need to free a buffer post-egress),
- * and thus can use the optimized gxio_mpipe_equeue_reserve_fast() or
- * gxio_mpipe_equeue_try_reserve_fast() functions, which return a
- * 24-bit "slot", instead of a 63-bit "completion slot".
- *
- * Once a slot has been "reserved", it MUST be filled. If the
- * application reserves a slot and then decides that it does not
- * actually need it, it can set the ::gxio_mpipe_edesc_t::ns (no send)
- * bit on the descriptor passed to gxio_mpipe_equeue_put_at() to
- * indicate that no data should be sent. This technique can also be
- * used to drop an incoming packet, instead of forwarding it, since
- * any buffer will still be pushed onto the buffer stack when the
- * egress descriptor is processed.
- */
-
-/* A convenient interface to a NotifRing, for use by a single thread.
- */
-typedef struct {
-
- /* The context. */
- gxio_mpipe_context_t *context;
-
- /* The actual NotifRing. */
- gxio_mpipe_idesc_t *idescs;
-
- /* The number of entries. */
- unsigned long num_entries;
-
- /* The number of entries minus one. */
- unsigned long mask_num_entries;
-
- /* The log2() of the number of entries. */
- unsigned long log2_num_entries;
-
- /* The next entry. */
- unsigned int head;
-
- /* The NotifRing id. */
- unsigned int ring;
-
-#ifdef __BIG_ENDIAN__
- /* The number of byteswapped entries. */
- unsigned int swapped;
-#endif
-
-} gxio_mpipe_iqueue_t;
-
-/* Initialize an "iqueue".
- *
- * Takes the iqueue plus the same args as gxio_mpipe_init_notif_ring().
- */
-extern int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
- gxio_mpipe_context_t *context,
- unsigned int ring,
- void *mem, size_t mem_size,
- unsigned int mem_flags);
-
-/* Advance over some old entries in an iqueue.
- *
- * Please see the documentation for gxio_mpipe_iqueue_consume().
- *
- * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
- * @param count The number of entries to advance over.
- */
-static inline void gxio_mpipe_iqueue_advance(gxio_mpipe_iqueue_t *iqueue,
- int count)
-{
- /* Advance with proper wrap. */
- int head = iqueue->head + count;
- iqueue->head =
- (head & iqueue->mask_num_entries) +
- (head >> iqueue->log2_num_entries);
-
-#ifdef __BIG_ENDIAN__
- /* HACK: Track swapped entries. */
- iqueue->swapped -= count;
-#endif
-}
-
-/* Release the ring and bucket for an old entry in an iqueue.
- *
- * Releasing the ring allows more packets to be delivered to the ring.
- *
- * Releasing the bucket allows flows using the bucket to be moved to a
- * new ring when using GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY.
- *
- * This function is shorthand for "gxio_mpipe_credit(iqueue->context,
- * iqueue->ring, idesc->bucket_id, 1)", and it may be more convenient
- * to make that underlying call, using those values, instead of
- * tracking the entire "idesc".
- *
- * If packet processing is deferred, optimal performance requires that
- * the releasing be deferred as well.
- *
- * Please see the documentation for gxio_mpipe_iqueue_consume().
- *
- * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
- * @param idesc The descriptor which was processed.
- */
-static inline void gxio_mpipe_iqueue_release(gxio_mpipe_iqueue_t *iqueue,
- gxio_mpipe_idesc_t *idesc)
-{
- gxio_mpipe_credit(iqueue->context, iqueue->ring, idesc->bucket_id, 1);
-}
-
-/* Consume a packet from an "iqueue".
- *
- * After processing packets peeked at via gxio_mpipe_iqueue_peek()
- * or gxio_mpipe_iqueue_try_peek(), you must call this function, or
- * gxio_mpipe_iqueue_advance() plus gxio_mpipe_iqueue_release(), to
- * advance over those entries, and release their rings and buckets.
- *
- * You may call this function as each packet is processed, or you can
- * wait until several packets have been processed.
- *
- * Note that if you are using a single bucket, and you are handling
- * batches of N packets, then you can replace several calls to this
- * function with calls to "gxio_mpipe_iqueue_advance(iqueue, N)" and
- * "gxio_mpipe_credit(iqueue->context, iqueue->ring, bucket, N)".
- *
- * Note that if your classifier sets "idesc->nr", then you should
- * explicitly call "gxio_mpipe_iqueue_advance(iqueue, 1)" plus
- * "gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, 1)", to
- * avoid incorrectly crediting the (unused) bucket.
- *
- * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
- * @param idesc The descriptor which was processed.
- */
-static inline void gxio_mpipe_iqueue_consume(gxio_mpipe_iqueue_t *iqueue,
- gxio_mpipe_idesc_t *idesc)
-{
- gxio_mpipe_iqueue_advance(iqueue, 1);
- gxio_mpipe_iqueue_release(iqueue, idesc);
-}
-
-/* Peek at the next packet(s) in an "iqueue", without waiting.
- *
- * If no packets are available, fills idesc_ref with NULL, and then
- * returns ::GXIO_MPIPE_ERR_IQUEUE_EMPTY. Otherwise, fills idesc_ref
- * with the address of the next valid packet descriptor, and returns
- * the maximum number of valid descriptors which can be processed.
- * You may process fewer descriptors if desired.
- *
- * Call gxio_mpipe_iqueue_consume() on each packet once it has been
- * processed (or dropped), to allow more packets to be delivered.
- *
- * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
- * @param idesc_ref A pointer to a packet descriptor pointer.
- * @return The (positive) number of packets which can be processed,
- * or ::GXIO_MPIPE_ERR_IQUEUE_EMPTY if no packets are available.
- */
-static inline int gxio_mpipe_iqueue_try_peek(gxio_mpipe_iqueue_t *iqueue,
- gxio_mpipe_idesc_t **idesc_ref)
-{
- gxio_mpipe_idesc_t *next;
-
- uint64_t head = iqueue->head;
- uint64_t tail = __gxio_mmio_read(iqueue->idescs);
-
- /* Available entries. */
- uint64_t avail =
- (tail >= head) ? (tail - head) : (iqueue->num_entries - head);
-
- if (avail == 0) {
- *idesc_ref = NULL;
- return GXIO_MPIPE_ERR_IQUEUE_EMPTY;
- }
-
- next = &iqueue->idescs[head];
-
- /* ISSUE: Is this helpful? */
- __insn_prefetch(next);
-
-#ifdef __BIG_ENDIAN__
- /* HACK: Swap new entries directly in memory. */
- {
- int i, j;
- for (i = iqueue->swapped; i < avail; i++) {
- for (j = 0; j < 8; j++)
- next[i].words[j] =
- __builtin_bswap64(next[i].words[j]);
- }
- iqueue->swapped = avail;
- }
-#endif
-
- *idesc_ref = next;
-
- return avail;
-}
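-
-/* Illustrative receive loop sketch (not part of the original API):
- * peek at a batch of descriptors, process them in place, and consume
- * each one. "process_packet" is a placeholder for application logic.
- */
-static inline void example_poll_iqueue(gxio_mpipe_iqueue_t *iqueue)
-{
-	gxio_mpipe_idesc_t *idesc;
-	int i, n;
-
-	n = gxio_mpipe_iqueue_try_peek(iqueue, &idesc);
-	if (n < 0)
-		return;
-	for (i = 0; i < n; i++) {
-		/* process_packet(&idesc[i]); */
-		gxio_mpipe_iqueue_consume(iqueue, &idesc[i]);
-	}
-}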
-
-/* Drop a packet by pushing its buffer (if appropriate).
- *
- * NOTE: The caller must still call gxio_mpipe_iqueue_consume() if idesc
- * came from gxio_mpipe_iqueue_try_peek() or gxio_mpipe_iqueue_peek().
- *
- * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
- * @param idesc A packet descriptor.
- */
-static inline void gxio_mpipe_iqueue_drop(gxio_mpipe_iqueue_t *iqueue,
- gxio_mpipe_idesc_t *idesc)
-{
- /* FIXME: Handle "chaining" properly. */
-
- if (!idesc->be) {
- unsigned char *va = gxio_mpipe_idesc_get_va(idesc);
- gxio_mpipe_push_buffer(iqueue->context, idesc->stack_idx, va);
- }
-}
-
-/*****************************************************************
- * Egress Queue Wrapper *
- ******************************************************************/
-
-/* A convenient, thread-safe interface to an eDMA ring. */
-typedef struct {
-
- /* State object for tracking head and tail pointers. */
- __gxio_dma_queue_t dma_queue;
-
- /* The ring entries. */
- gxio_mpipe_edesc_t *edescs;
-
- /* The number of entries minus one. */
- unsigned long mask_num_entries;
-
- /* The log2() of the number of entries. */
- unsigned long log2_num_entries;
-
- /* The context. */
- gxio_mpipe_context_t *context;
-
- /* The ering. */
- unsigned int ering;
-
- /* The channel. */
- unsigned int channel;
-
-} gxio_mpipe_equeue_t;
-
-/* Initialize an "equeue".
- *
- * This function uses gxio_mpipe_init_edma_ring() to initialize the
- * underlying edma_ring using the provided arguments.
- *
- * @param equeue An egress queue to be initialized.
- * @param context An initialized mPIPE context.
- * @param ering The eDMA ring index.
- * @param channel The channel to use. This must be one of the channels
- * associated with the context's set of open links.
- * @param mem A physically contiguous region of memory to be filled
- * with a ring of ::gxio_mpipe_edesc_t structures.
- * @param mem_size Number of bytes in the ring. Must be 512, 2048,
- * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
- * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
- *
- * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
- * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
- */
-extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
- gxio_mpipe_context_t *context,
- unsigned int ering,
- unsigned int channel,
- void *mem, unsigned int mem_size,
- unsigned int mem_flags);
-
-/* Reserve completion slots for edescs.
- *
- * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
- *
- * This function is slower than gxio_mpipe_equeue_reserve_fast(), but
- * returns a full 64-bit completion slot, which can be used with
- * gxio_mpipe_equeue_is_complete().
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param num Number of slots to reserve (must be non-zero).
- * @return The first reserved completion slot, or a negative error code.
- */
-static inline int64_t gxio_mpipe_equeue_reserve(gxio_mpipe_equeue_t *equeue,
- unsigned int num)
-{
- return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, true);
-}
-
-/* Reserve completion slots for edescs, if possible.
- *
- * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
- *
- * This function is slower than gxio_mpipe_equeue_try_reserve_fast(),
- * but returns a full 64-bit completion slot, which can be used with
- * gxio_mpipe_equeue_is_complete().
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param num Number of slots to reserve (must be non-zero).
- * @return The first reserved completion slot, or a negative error code.
- */
-static inline int64_t gxio_mpipe_equeue_try_reserve(gxio_mpipe_equeue_t
- *equeue, unsigned int num)
-{
- return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, false);
-}
-
-/* Reserve slots for edescs.
- *
- * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
- *
- * This function is faster than gxio_mpipe_equeue_reserve(), but
- * returns a 24-bit slot (instead of a 64-bit completion slot), which
- * thus cannot be used with gxio_mpipe_equeue_is_complete().
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param num Number of slots to reserve (should be non-zero).
- * @return The first reserved slot, or a negative error code.
- */
-static inline int64_t gxio_mpipe_equeue_reserve_fast(gxio_mpipe_equeue_t
- *equeue, unsigned int num)
-{
- return __gxio_dma_queue_reserve(&equeue->dma_queue, num, true, false);
-}
-
-/* Reserve slots for edescs, if possible.
- *
- * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
- *
- * This function is faster than gxio_mpipe_equeue_try_reserve(), but
- * returns a 24-bit slot (instead of a 64-bit completion slot), which
- * thus cannot be used with gxio_mpipe_equeue_is_complete().
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param num Number of slots to reserve (should be non-zero).
- * @return The first reserved slot, or a negative error code.
- */
-static inline int64_t gxio_mpipe_equeue_try_reserve_fast(gxio_mpipe_equeue_t
- *equeue,
- unsigned int num)
-{
- return __gxio_dma_queue_reserve(&equeue->dma_queue, num, false, false);
-}
-
-/*
- * HACK: This helper function tricks gcc 4.6 into avoiding saving
- * a copy of "edesc->words[0]" on the stack for no obvious reason.
- */
-
-static inline void gxio_mpipe_equeue_put_at_aux(gxio_mpipe_equeue_t *equeue,
- uint_reg_t ew[2],
- unsigned long slot)
-{
- unsigned long edma_slot = slot & equeue->mask_num_entries;
- gxio_mpipe_edesc_t *edesc_p = &equeue->edescs[edma_slot];
-
- /*
- * ISSUE: Could set eDMA ring to be on generation 1 at start, which
- * would avoid the negation here, perhaps allowing "__insn_bfins()".
- */
- ew[0] |= !((slot >> equeue->log2_num_entries) & 1);
-
- /*
- * NOTE: We use "__gxio_mmio_write64()", plus the fact that the eDMA
- * queue alignment restrictions ensure that these two words are on
- * the same cacheline, to force proper ordering between the stores.
- */
- __gxio_mmio_write64(&edesc_p->words[1], ew[1]);
- __gxio_mmio_write64(&edesc_p->words[0], ew[0]);
-}
-
-/* Post an edesc to a given slot in an equeue.
- *
- * This function copies the supplied edesc into entry "slot mod N" in
- * the underlying ring, setting the "gen" bit to the appropriate value
- * based on "(slot mod N*2)", where "N" is the size of the ring. Note
- * that the higher bits of slot are unused, and thus, this function
- * can handle "slots" as well as "completion slots".
- *
- * Normally this function is used to fill in slots reserved by
- * gxio_mpipe_equeue_try_reserve(), gxio_mpipe_equeue_reserve(),
- * gxio_mpipe_equeue_try_reserve_fast(), or
- * gxio_mpipe_equeue_reserve_fast().
- *
- * This function can also be used without "reserving" slots, if the
- * application KNOWS that the ring can never overflow, for example, by
- * pushing fewer buffers into the buffer stacks than there are total
- * slots in the equeue, but this is NOT recommended.
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param edesc The egress descriptor to be posted.
- * @param slot An egress slot (only the low bits are actually used).
- */
-static inline void gxio_mpipe_equeue_put_at(gxio_mpipe_equeue_t *equeue,
- gxio_mpipe_edesc_t edesc,
- unsigned long slot)
-{
- gxio_mpipe_equeue_put_at_aux(equeue, edesc.words, slot);
-}
-
-/* Post an edesc to the next slot in an equeue.
- *
- * This is a convenience wrapper around
- * gxio_mpipe_equeue_reserve_fast() and gxio_mpipe_equeue_put_at().
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param edesc The egress descriptor to be posted.
- * @return 0 on success.
- */
-static inline int gxio_mpipe_equeue_put(gxio_mpipe_equeue_t *equeue,
- gxio_mpipe_edesc_t edesc)
-{
- int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, 1);
- if (slot < 0)
- return (int)slot;
-
- gxio_mpipe_equeue_put_at(equeue, edesc, slot);
-
- return 0;
-}
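-
-/* Illustrative sketch (not part of the original API): post a batch of
- * already-filled edescs with a single fast reservation, amortizing the
- * reservation cost across the batch.
- */
-static inline int example_put_batch(gxio_mpipe_equeue_t *equeue,
-				    gxio_mpipe_edesc_t *edescs,
-				    unsigned int num)
-{
-	unsigned int i;
-	int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, num);
-	if (slot < 0)
-		return (int)slot;
-	for (i = 0; i < num; i++)
-		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot + i);
-	return 0;
-}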
-
-/* Ask the mPIPE hardware to egress outstanding packets immediately.
- *
- * This call is not necessary, but may slightly reduce overall latency.
- *
- * Technically, you should flush all gxio_mpipe_equeue_put_at() writes
- * to memory before calling this function, to ensure the descriptors
- * are visible in memory before the mPIPE hardware actually looks for
- * them. But this should be very rare, and the only side effect would
- * be increased latency, so it is up to the caller to decide whether
- * or not to flush memory.
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- */
-static inline void gxio_mpipe_equeue_flush(gxio_mpipe_equeue_t *equeue)
-{
- /* Use "ring_idx = 0" and "count = 0" to "wake up" the eDMA ring. */
- MPIPE_EDMA_POST_REGION_VAL_t val = { {0} };
- /* Flush the write buffers. */
- __insn_flushwb();
- __gxio_mmio_write(equeue->dma_queue.post_region_addr, val.word);
-}
-
-/* Determine if a given edesc has been completed.
- *
- * Note that this function requires a "completion slot", and thus may
- * NOT be used with a "slot" from gxio_mpipe_equeue_reserve_fast() or
- * gxio_mpipe_equeue_try_reserve_fast().
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param completion_slot The completion slot used by the edesc.
- * @param update If true, and the desc does not appear to have completed
- * yet, then update any software cache of the hardware completion counter,
- * and check again. This should normally be true.
- * @return True iff the given edesc has been completed.
- */
-static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
- int64_t completion_slot,
- int update)
-{
- return __gxio_dma_queue_is_complete(&equeue->dma_queue,
- completion_slot, update);
-}
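-
-/* Illustrative sketch (not part of the original API): egress a packet
- * whose buffer is freed by software, using a full completion slot to
- * learn when the hardware has consumed the descriptor. A real
- * application would bound the spin or do other work between checks.
- */
-static inline int example_put_and_wait(gxio_mpipe_equeue_t *equeue,
-				       gxio_mpipe_edesc_t edesc)
-{
-	int64_t slot = gxio_mpipe_equeue_reserve(equeue, 1);
-	if (slot < 0)
-		return (int)slot;
-	gxio_mpipe_equeue_put_at(equeue, edesc, slot);
-	while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1))
-		;
-	return 0;
-}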
-
-/* Set the snf (store and forward) size for an equeue.
- *
- * The snf size for an equeue defaults to 1536, and encodes the size
- * of the largest packet for which egress is guaranteed to avoid
- * transmission underruns and/or corrupt checksums under heavy load.
- *
- * The snf size affects a global resource pool which cannot support,
- * for example, all 24 equeues each requesting an snf size of 8K.
- *
- * To ensure that jumbo packets can be egressed properly, the snf size
- * should be set to the size of the largest possible packet, which
- * will usually be limited by the size of the app's largest buffer.
- *
- * This is a convenience wrapper around
- * gxio_mpipe_config_edma_ring_blks().
- *
- * This function should not be called after any egress has been done
- * on the equeue.
- *
- * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
- * @param size The snf size, in bytes.
- * @return Zero on success, negative error otherwise.
- */
-static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue,
- size_t size)
-{
- int blks = (size + 127) / 128;
- return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering,
- blks + 1, blks, 1);
-}
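-
-/* For example (illustrative only), an application whose largest buffer
- * holds a 9000-byte jumbo frame would, before any egress:
- *
- *	gxio_mpipe_equeue_set_snf_size(&equeue, 9000);
- */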
-
-/*****************************************************************
- * Link Management *
- ******************************************************************/
-
-/*
- *
- * Functions for manipulating and sensing the state and configuration
- * of physical network links.
- *
- * @section gxio_mpipe_link_perm Link Permissions
- *
- * Opening a link (with gxio_mpipe_link_open()) requests a set of link
- * permissions, which control what may be done with the link, and potentially
- * what permissions may be granted to other processes.
- *
- * Data permission allows the process to receive packets from the link by
- * specifying the link's channel number in mPIPE packet distribution rules,
- * and to send packets to the link by using the link's channel number as
- * the target for an eDMA ring.
- *
- * Stats permission allows the process to retrieve link attributes (such as
- * the speeds it is capable of running at, or whether it is currently up), and
- * to read and write certain statistics-related registers in the link's MAC.
- *
- * Control permission allows the process to retrieve and modify link attributes
- * (so that it may, for example, bring the link up and take it down), and
- * read and write many registers in the link's MAC and PHY.
- *
- * Any permission may be requested as shared, which allows other processes
- * to also request shared permission, or exclusive, which prevents other
- * processes from requesting it. In keeping with GXIO's typical usage in
- * an embedded environment, the defaults for all permissions are shared.
- *
- * Permissions are granted on a first-come, first-served basis, so if two
- * applications request an exclusive permission on the same link, the one
- * to run first will win. Note, however, that some system components, like
- * the kernel Ethernet driver, may get an opportunity to open links before
- * any applications run.
- *
- * @section gxio_mpipe_link_names Link Names
- *
- * Link names are of the form gbe<em>number</em> (for Gigabit Ethernet),
- * xgbe<em>number</em> (for 10 Gigabit Ethernet), loop<em>number</em> (for
- * internal mPIPE loopback), or ilk<em>number</em>/<em>channel</em>
- * (for Interlaken links); for instance, gbe0, xgbe1, loop3, and
- * ilk0/12 are all possible link names. The correspondence between
- * the link name and an mPIPE instance number or mPIPE channel number is
- * system-dependent; not all links will exist on all systems, and the set
- * of numbers used for a particular link type may not start at zero and may
- * not be contiguous. Use gxio_mpipe_link_enumerate_mac() to retrieve
- * the set of links which exist on a system, and always use
- * gxio_mpipe_link_instance() to determine which mPIPE controls a
- * particular link.
- *
- * Note that in some cases, links may share hardware, such as PHYs, or
- * internal mPIPE buffers; in these cases, only one of the links may be
- * opened at a time. This is especially common with xgbe and gbe ports,
- * since each xgbe port uses 4 SERDES lanes, each of which may also be
- * configured as one gbe port.
- *
- * @section gxio_mpipe_link_states Link States
- *
- * The mPIPE link management model revolves around three different states,
- * which are maintained for each link:
- *
- * 1. The <em>current</em> link state: is the link up now, and if so, at
- * what speed?
- *
- * 2. The <em>desired</em> link state: what do we want the link state to be?
- * The system is always working to make this state the current state;
- * thus, if the desired state is up, and the link is down, we'll be
- * constantly trying to bring it up, automatically.
- *
- * 3. The <em>possible</em> link state: what speeds are valid for this
- * particular link? Or, in other words, what are the capabilities of
- * the link hardware?
- *
- * These link states are not, strictly speaking, related to application
- * state; they may be manipulated at any time, whether or not the link
- * is currently being used for data transfer. However, for convenience,
- * gxio_mpipe_link_open() and gxio_mpipe_link_close() (or application exit)
- * can affect the link state. These implicit link management operations
- * may be modified or disabled by the use of link open flags.
- *
- * From an application, you can use gxio_mpipe_link_get_attr()
- * and gxio_mpipe_link_set_attr() to manipulate the link states.
- * gxio_mpipe_link_get_attr() with ::GXIO_MPIPE_LINK_POSSIBLE_STATE
- * gets you the possible link state. gxio_mpipe_link_get_attr() with
- * ::GXIO_MPIPE_LINK_CURRENT_STATE gets you the current link state.
- * Finally, gxio_mpipe_link_set_attr() and gxio_mpipe_link_get_attr()
- * with ::GXIO_MPIPE_LINK_DESIRED_STATE allow you to modify or retrieve
- * the desired link state.
- *
- * If you want to manage a link from a part of your application which isn't
- * involved in packet processing, you can use the ::GXIO_MPIPE_LINK_NO_DATA
- * flag on a gxio_mpipe_link_open() call. This opens the link, but does
- * not request data permission, so it does not conflict with any exclusive
- * permissions which may be held by other processes. You can then use
- * gxio_mpipe_link_get_attr() and gxio_mpipe_link_set_attr() on this link
- * object to bring up or take down the link.
- *
- * Some links support link state bits which support various loopback
- * modes. ::GXIO_MPIPE_LINK_LOOP_MAC tests datapaths within the Tile
- * Processor itself; ::GXIO_MPIPE_LINK_LOOP_PHY tests the datapath between
- * the Tile Processor and the external physical layer interface chip; and
- * ::GXIO_MPIPE_LINK_LOOP_EXT tests the entire network datapath with the
- * aid of an external loopback connector. In addition to enabling hardware
- * testing, such configuration can be useful for software testing.
- *
- * When LOOP_MAC or LOOP_PHY is enabled, packets transmitted on a channel
- * will be received by that channel, instead of being emitted on the
- * physical link, and packets received on the physical link will be ignored.
- * Other than that, all standard GXIO operations work as you might expect.
- * Note that loopback operation requires that the link be brought up using
- * one or more of the GXIO_MPIPE_LINK_SPEED_xxx link state bits.
- *
- * Those familiar with previous versions of the MDE on TILEPro hardware
- * will notice significant similarities between the NetIO link management
- * model and the mPIPE link management model. However, the NetIO model
- * was developed in stages, and some of its features -- for instance,
- * the default setting of certain flags -- were shaped by the need to be
- * compatible with previous versions of NetIO. Since the features provided
- * by the mPIPE hardware and the mPIPE GXIO library are significantly
- * different than those provided by NetIO, in some cases, we have made
- * different choices in the mPIPE link management API. Thus, please read
- * this documentation carefully before assuming that mPIPE link management
- * operations are exactly equivalent to their NetIO counterparts.
- */
-
-/* An object used to manage mPIPE link state and resources. */
-typedef struct {
- /* The overall mPIPE context. */
- gxio_mpipe_context_t *context;
-
- /* The channel number used by this link. */
- uint8_t channel;
-
- /* The MAC index used by this link. */
- uint8_t mac;
-} gxio_mpipe_link_t;
-
-/* Translate a link name to the instance number of the mPIPE shim which is
- * connected to that link. This call does not verify whether the link is
- * currently available, and does not reserve any link resources;
- * gxio_mpipe_link_open() must be called to perform those functions.
- *
- * Typically applications will call this function to translate a link name
- * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
- * instance number, to initialize the mPIPE shim; and then call
- * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
- * context, to configure the link.
- *
- * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
- * @return The mPIPE instance number which is associated with the named
- * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
- * not exist.
- */
-extern int gxio_mpipe_link_instance(const char *link_name);
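-
-/* Illustrative sketch (not part of the original API) of the open
- * sequence described above, for a hypothetical link named "gbe0".
- * The context is assumed to have already been initialized by passing
- * the returned instance number to gxio_mpipe_init().
- */
-static inline int example_open_gbe0(gxio_mpipe_context_t *context,
-				    gxio_mpipe_link_t *link)
-{
-	int instance = gxio_mpipe_link_instance("gbe0");
-	if (instance < 0)
-		return instance;
-	return gxio_mpipe_link_open(link, context, "gbe0", 0);
-}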
-
-/* Retrieve one of this system's legal link names, and its MAC address.
- *
- * @param index Link name index. If a system supports N legal link names,
- * then indices between 0 and N - 1, inclusive, each correspond to one of
- * those names. Thus, to retrieve all of a system's legal link names,
- * call this function in a loop, starting with an index of zero, and
- * incrementing it once per iteration until -1 is returned.
- * @param link_name Pointer to the buffer which will receive the retrieved
- * link name. The buffer should contain space for at least
- * ::GXIO_MPIPE_LINK_NAME_LEN bytes; the returned name, including the
- * terminating null byte, will be no longer than that.
- * @param mac_addr Pointer to the buffer which will receive the retrieved
- *  MAC address. The buffer should contain space for at least 6 bytes.
- * @return Zero if a link name was successfully retrieved; -1 if one was
- * not.
- */
-extern int gxio_mpipe_link_enumerate_mac(int index, char *link_name,
- uint8_t *mac_addr);
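-
-/* Illustrative sketch (not part of the original API): walk the table
- * of legal link names and their MAC addresses.
- */
-static inline void example_enumerate_links(void)
-{
-	char name[GXIO_MPIPE_LINK_NAME_LEN];
-	uint8_t mac[6];
-	int i;
-
-	for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) == 0; i++) {
-		/* Each iteration yields one (name, mac) pair. */
-	}
-}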
-
-/* Open an mPIPE link.
- *
- * A link must be opened before it may be used to send or receive packets,
- * and before its state may be examined or changed. Depending up on the
- * link's intended use, one or more link permissions may be requested via
- * the flags parameter; see @ref gxio_mpipe_link_perm. In addition, flags
- * may request that the link's state be modified at open time. See @ref
- * gxio_mpipe_link_states and @ref gxio_mpipe_link_open_flags for more detail.
- *
- * @param link A link state object, which will be initialized if this
- * function completes successfully.
- * @param context An initialized mPIPE context.
- * @param link_name Name of the link.
- * @param flags Zero or more @ref gxio_mpipe_link_open_flags, ORed together.
- * @return 0 if the link was successfully opened, or a negative error code.
- *
- */
-extern int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
- gxio_mpipe_context_t *context,
- const char *link_name, unsigned int flags);
-
-/* Close an mPIPE link.
- *
- * Closing a link makes it available for use by other processes. Once
- * a link has been closed, packets may no longer be sent on or received
- * from the link, and its state may not be examined or changed.
- *
- * @param link A link state object, which will no longer be initialized
- * if this function completes successfully.
- * @return 0 if the link was successfully closed, or a negative error code.
- *
- */
-extern int gxio_mpipe_link_close(gxio_mpipe_link_t *link);
-
-/* Return a link's channel number.
- *
- * @param link A properly initialized link state object.
- * @return The channel number for the link.
- */
-static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
-{
- return link->channel;
-}
-
-/* Set a link attribute.
- *
- * @param link A properly initialized link state object.
- * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs.
- * @param val New value of the attribute.
- * @return 0 if the attribute was successfully set, or a negative error
- * code.
- */
-extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
- int64_t val);
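-
-/* Illustrative sketch (not part of the original API): take a link
- * administratively down by clearing its desired state. This assumes
- * that a desired state with no GXIO_MPIPE_LINK_SPEED_xxx bits set
- * (i.e. zero) encodes "down".
- */
-static inline int example_link_down(gxio_mpipe_link_t *link)
-{
-	return gxio_mpipe_link_set_attr(link, GXIO_MPIPE_LINK_DESIRED_STATE,
-					0);
-}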
-
-/*****************************************************************
- *                         Timestamp                             *
- ******************************************************************/
-
-/* Get the mPIPE timestamp at the moment this routine is called.
- *
- * @param context An initialized mPIPE context.
- * @param ts A timespec64 structure in which the current time is returned.
- * @return If the call was successful, zero; otherwise, a negative error
- * code.
- */
-extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
- struct timespec64 *ts);
-
-/* Set the timestamp of mPIPE.
- *
- * @param context An initialized mPIPE context.
- * @param ts A timespec64 structure holding the time to be set.
- * @return If the call was successful, zero; otherwise, a negative error
- * code.
- */
-extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
- const struct timespec64 *ts);
-
-/* Adjust the timestamp of mPIPE.
- *
- * @param context An initialized mPIPE context.
- * @param delta A signed time offset to adjust, in nanoseconds.
- * The absolute value of this parameter must be less than or
- * equal to 1000000000.
- * @return If the call was successful, zero; otherwise, a negative error
- * code.
- */
-extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
- int64_t delta);
-
-/* Adjust the mPIPE timestamp clock frequency.
- *
- * @param context An initialized mPIPE context.
- * @param ppb A 32-bit signed PPB (parts per billion) value by which to
- * adjust the clock frequency. The absolute value of ppb must be less
- * than or equal to 1000000000. Values less than about 30000 will
- * generally cause a ::GXIO_ERR_INVAL return due to the granularity of
- * the hardware that converts reference clock cycles into seconds and
- * nanoseconds.
- * @return If the call was successful, zero; otherwise, a negative error
- * code.
- */
-extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
- int32_t ppb);
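-
-/* For example (illustrative only), a clock observed to gain 50
- * microseconds per second is running 50000 ppb fast, and could be
- * slowed with:
- *
- *	gxio_mpipe_adjust_timestamp_freq(context, -50000);
- */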
-
-#endif /* !_GXIO_MPIPE_H_ */
diff --git a/arch/tile/include/gxio/trio.h b/arch/tile/include/gxio/trio.h
deleted file mode 100644
index df10a662cc25..000000000000
--- a/arch/tile/include/gxio/trio.h
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- *
- * An API for allocating, configuring, and manipulating TRIO hardware
- * resources
- */
-
-/*
- *
- * The TILE-Gx TRIO shim provides connections to external devices via
- * PCIe or other transaction IO standards. The gxio_trio_ API,
- * declared in <gxio/trio.h>, allows applications to allocate and
- * configure TRIO IO resources like DMA command rings, memory map
- * windows, and device interrupts. The following sections introduce
- * the various components of the API. We strongly recommend reading
- * the TRIO section of the IO Device Guide (UG404) before working with
- * this API.
- *
- * @section trio__ingress TRIO Ingress Hardware Resources
- *
- * The TRIO ingress hardware is responsible for examining incoming
- * PCIe or StreamIO packets and choosing a processing mechanism based
- * on the packets' bus address. The gxio_trio_ API can be used to
- * configure different handlers for different ranges of bus address
- * space. The user can configure "mapped memory" and "scatter queue"
- * regions to match incoming packets within 4kB-aligned ranges of bus
- * addresses. Each range specifies a different set of mapping
- * parameters to be applied when handling the ingress packet. The
- * following sections describe how to work with MapMem and scatter
- * queue regions.
- *
- * @subsection trio__mapmem TRIO MapMem Regions
- *
- * TRIO mapped memory (or MapMem) regions allow the user to map
- * incoming read and write requests directly to the application's
- * memory space. MapMem regions are allocated via
- * gxio_trio_alloc_memory_maps(). Given an integer MapMem number,
- * applications can use gxio_trio_init_memory_map() to specify the
- * range of bus addresses that will match the region and the range of
- * virtual addresses to which those packets will be applied.
- *
- * As with many other gxio APIs, the programmer must be sure to
- * register memory pages that will be used with MapMem regions. Pages
- * can be registered with TRIO by allocating an ASID (address space
- * identifier) and then using gxio_trio_register_page() to register up to
- * 16 pages with the hardware. The initialization functions for
- * resources that require registered memory (MapMem, scatter queues,
- * push DMA, and pull DMA) then take an 'asid' parameter in order to
- * configure which set of registered pages is used by each resource.
- *
- * @subsection trio__scatter_queue TRIO Scatter Queues
- *
- * The TRIO shim's scatter queue regions allow users to dynamically
- * map buffers from a large address space into a small range of bus
- * addresses. This is particularly helpful for PCIe endpoint devices,
- * where the host generally limits the size of BARs to tens of
- * megabytes.
- *
- * Each scatter queue consists of a memory map region, a queue of
- * tile-side buffer VAs to be mapped to that region, and a bus-mapped
- * "doorbell" register that the remote endpoint can write to trigger a
- * dequeue of the current buffer VA, thus swapping in a new buffer.
- * The VAs pushed onto a scatter queue must be 4kB aligned, so
- * applications may need to use higher-level protocols to inform
- * remote entities that they should apply some additional, sub-4kB
- * offset when reading or writing the scatter queue region. For more
- * information, see the IO Device Guide (UG404).
- *
- * @section trio__egress TRIO Egress Hardware Resources
- *
- * The TRIO shim supports two mechanisms for egress packet generation:
- * programmed IO (PIO) and push/pull DMA. PIO allows applications to
- * create MMIO mappings for PCIe or StreamIO address space, such that
- * the application can generate word-sized read or write transactions
- * by issuing load or store instructions. Push and pull DMA are tuned
- * for larger transactions; they use specialized hardware engines to
- * transfer large blocks of data at line rate.
- *
- * @subsection trio__pio TRIO Programmed IO
- *
- * Programmed IO allows applications to create MMIO mappings for PCIe
- * or StreamIO address space. The hardware PIO regions support access
- * to PCIe configuration, IO, and memory space, but the gxio_trio API
- * only supports memory space accesses. PIO regions are allocated
- * with gxio_trio_alloc_pio_regions() and initialized via
- * gxio_trio_init_pio_region(). Once a region is bound to a range of
- * bus address via the initialization function, the application can
- * use gxio_trio_map_pio_region() to create MMIO mappings from its VA
- * space onto the range of bus addresses supported by the PIO region.
- *
- * @subsection trio_dma TRIO Push and Pull DMA
- *
- * The TRIO push and pull DMA engines allow users to copy blocks of
- * data between application memory and the bus. Push DMA generates
- * write packets that copy from application memory to the bus and pull
- * DMA generates read packets that copy from the bus into application
- * memory. The DMA engines are managed via an API that is very
- * similar to the mPIPE eDMA interface. For a detailed explanation of
- * the eDMA queue API, see @ref gxio_mpipe_wrappers.
- *
- * Push and pull DMA queues are allocated via
- * gxio_trio_alloc_push_dma_ring() / gxio_trio_alloc_pull_dma_ring().
- * Once allocated, users generally use a ::gxio_trio_dma_queue_t
- * object to manage the queue, providing easy wrappers for reserving
- * command slots in the DMA command ring, filling those slots, and
- * waiting for commands to complete. DMA queues can be initialized
- * via gxio_trio_init_push_dma_queue() or
- * gxio_trio_init_pull_dma_queue().
- *
- * See @ref trio/push_dma/app.c for an example of how to use push DMA.
- *
- * @section trio_shortcomings Plans for Future API Revisions
- *
- * The simulation framework is incomplete. Future features include:
- *
- * - Support for reset and deallocation of resources.
- *
- * - Support for pull DMA.
- *
- * - Support for interrupt regions and user-space interrupt delivery.
- *
- * - Support for getting BAR mappings and reserving regions of BAR
- * address space.
- */
-#ifndef _GXIO_TRIO_H_
-#define _GXIO_TRIO_H_
-
-#include <linux/types.h>
-
-#include <gxio/common.h>
-#include <gxio/dma_queue.h>
-
-#include <arch/trio_constants.h>
-#include <arch/trio.h>
-#include <arch/trio_pcie_intfc.h>
-#include <arch/trio_pcie_rc.h>
-#include <arch/trio_shm.h>
-#include <hv/drv_trio_intf.h>
-#include <hv/iorpc.h>
-
-/* A context object used to manage TRIO hardware resources. */
-typedef struct {
-
- /* File descriptor for calling up to Linux (and thus the HV). */
- int fd;
-
- /* The VA at which the MAC MMIO registers are mapped. */
- char *mmio_base_mac;
-
- /* The VAs at which the PIO config spaces are mapped, one per PCIe MAC.
- Gx36 has at most 3 PCIe MACs per TRIO shim. */
- char *mmio_base_pio_cfg[TILEGX_TRIO_PCIES];
-
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
- /* Index of the shared PIO region for PCI config access. */
- int pio_cfg_index;
-#else
- /* Index of the PIO region for PCI config access per MAC. */
- int pio_cfg_index[TILEGX_TRIO_PCIES];
-#endif
-
- /* The VA at which the push DMA MMIO registers are mapped. */
- char *mmio_push_dma[TRIO_NUM_PUSH_DMA_RINGS];
-
- /* The VA at which the pull DMA MMIO registers are mapped. */
- char *mmio_pull_dma[TRIO_NUM_PULL_DMA_RINGS];
-
- /* Application space ID. */
- unsigned int asid;
-
-} gxio_trio_context_t;
-
-/* Command descriptor for push or pull DMA. */
-typedef TRIO_DMA_DESC_t gxio_trio_dma_desc_t;
-
-/* A convenient, thread-safe interface to an eDMA ring. */
-typedef struct {
-
- /* State object for tracking head and tail pointers. */
- __gxio_dma_queue_t dma_queue;
-
- /* The ring entries. */
- gxio_trio_dma_desc_t *dma_descs;
-
- /* The number of entries minus one. */
- unsigned long mask_num_entries;
-
- /* The log2() of the number of entries. */
- unsigned int log2_num_entries;
-
-} gxio_trio_dma_queue_t;
-
-/* Initialize a TRIO context.
- *
- * This function allocates a TRIO "service domain" and maps the MMIO
- * registers into the caller's VA space.
- *
- * @param trio_index Which TRIO shim; Gx36 must pass 0.
- * @param context Context object to be initialized.
- */
-extern int gxio_trio_init(gxio_trio_context_t *context,
- unsigned int trio_index);
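
For example, a minimal initialization on Gx36, which has a single TRIO
shim:

    gxio_trio_context_t context;
    int rc = gxio_trio_init(&context, 0);
    if (rc != 0)
        return rc;    /* GXIO_ERR_xxx code */
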
-
-/* This indicates that an ASID hasn't been allocated. */
-#define GXIO_ASID_NULL -1
-
-/* Ordering modes for map memory regions and scatter queue regions. */
-typedef enum gxio_trio_order_mode_e {
- /* Writes are not ordered. Reads always wait for previous writes. */
- GXIO_TRIO_ORDER_MODE_UNORDERED =
- TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED,
- /* Both writes and reads wait for previous transactions to complete. */
- GXIO_TRIO_ORDER_MODE_STRICT =
- TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT,
- /* Writes are ordered unless the incoming packet has the
- relaxed-ordering attributes set. */
- GXIO_TRIO_ORDER_MODE_OBEY_PACKET =
- TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD
-} gxio_trio_order_mode_t;
-
-/* Initialize a memory mapping region.
- *
- * @param context An initialized TRIO context.
- * @param map A memory map region allocated by gxio_trio_alloc_memory_map().
- * @param target_mem VA of backing memory, should be registered via
- * gxio_trio_register_page() and aligned to 4kB.
- * @param target_size Length of the memory mapping, must be a multiple
- * of 4kB.
- * @param asid ASID to be used for Tile-side address translation.
- * @param mac MAC number.
- * @param bus_address Bus address at which the mapping starts.
- * @param order_mode Memory ordering mode for this mapping.
- * @return Zero on success, else ::GXIO_TRIO_ERR_BAD_MEMORY_MAP,
- * ::GXIO_TRIO_ERR_BAD_ASID, or ::GXIO_TRIO_ERR_BAD_BUS_RANGE.
- */
-extern int gxio_trio_init_memory_map(gxio_trio_context_t *context,
- unsigned int map, void *target_mem,
- size_t target_size, unsigned int asid,
- unsigned int mac, uint64_t bus_address,
- gxio_trio_order_mode_t order_mode);
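
A hedged usage sketch, assuming gxio_trio_alloc_memory_map() follows the
same (context, count, first, flags) pattern as the other allocators, and
that target_mem is 4kB-aligned memory already registered via
gxio_trio_register_page() (gxio_trio_alloc_asids() is declared later in
this header):

    int asid = gxio_trio_alloc_asids(context, 1, 0, 0);
    int map = gxio_trio_alloc_memory_map(context, 1, 0, 0);
    if (asid < 0 || map < 0)
        return -1;
    int rc = gxio_trio_init_memory_map(context, map, target_mem,
                                       0x10000 /* 64kB */, asid,
                                       /* mac */ 0, /* bus_address */ 0,
                                       GXIO_TRIO_ORDER_MODE_UNORDERED);
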
-
-/* Flags that can be passed to resource allocation functions. */
-enum gxio_trio_alloc_flags_e {
- GXIO_TRIO_ALLOC_FIXED = HV_TRIO_ALLOC_FIXED,
-};
-
-/* Flags that can be passed to memory registration functions. */
-enum gxio_trio_mem_flags_e {
- /* Do not fill L3 when writing, and invalidate lines upon egress. */
- GXIO_TRIO_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT,
-
- /* L3 cache fills should only populate IO cache ways. */
- GXIO_TRIO_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN,
-};
-
-/* Flag indicating a request generator uses a special traffic
- class. */
-#define GXIO_TRIO_FLAG_TRAFFIC_CLASS(N) HV_TRIO_FLAG_TC(N)
-
-/* Flag indicating a request generator uses a virtual function
- number. */
-#define GXIO_TRIO_FLAG_VFUNC(N) HV_TRIO_FLAG_VFUNC(N)
-
-/*****************************************************************
- * Memory Registration *
- ******************************************************************/
-
-/* Allocate Application Space Identifiers (ASIDs). Each ASID can
- * register up to 16 page translations. ASIDs are used by memory map
- * regions, scatter queues, and DMA queues to translate application
- * VAs into memory system PAs.
- *
- * @param context An initialized TRIO context.
- * @param count Number of ASIDs required.
- * @param first Index of first ASID if ::GXIO_TRIO_ALLOC_FIXED flag
- * is set, otherwise ignored.
- * @param flags Flag bits, including bits from ::gxio_trio_alloc_flags_e.
- * @return Index of first ASID, or ::GXIO_TRIO_ERR_NO_ASID if allocation
- * failed.
- */
-extern int gxio_trio_alloc_asids(gxio_trio_context_t *context,
- unsigned int count, unsigned int first,
- unsigned int flags);
-
-#endif /* ! _GXIO_TRIO_H_ */
diff --git a/arch/tile/include/gxio/uart.h b/arch/tile/include/gxio/uart.h
deleted file mode 100644
index 438ee7e46c7b..000000000000
--- a/arch/tile/include/gxio/uart.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _GXIO_UART_H_
-#define _GXIO_UART_H_
-
-#include "common.h"
-
-#include <hv/drv_uart_intf.h>
-#include <hv/iorpc.h>
-
-/*
- *
- * An API for manipulating the UART interface.
- */
-
-/*
- *
- * The Rshim allows access to the processor's UART interface.
- */
-
-/* A context object used to manage UART resources. */
-typedef struct {
-
- /* File descriptor for calling up to the hypervisor. */
- int fd;
-
- /* The VA at which our MMIO registers are mapped. */
- char *mmio_base;
-
-} gxio_uart_context_t;
-
-/* Request UART interrupts.
- *
- * Request that interrupts be delivered to a tile when the UART's
- * Receive FIFO is written, or the Write FIFO is read.
- *
- * @param context Pointer to a properly initialized gxio_uart_context_t.
- * @param bind_cpu_x X coordinate of CPU to which interrupt will be delivered.
- * @param bind_cpu_y Y coordinate of CPU to which interrupt will be delivered.
- * @param bind_interrupt IPI interrupt number.
- * @param bind_event Sub-interrupt event bit number; a negative value
- * disables the interrupt.
- * @return Zero if all of the requested UART events were successfully
- * configured to generate interrupts.
- */
-extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context,
- int bind_cpu_x,
- int bind_cpu_y,
- int bind_interrupt, int bind_event);
-
-/* Initialize a UART context.
- *
- * A properly initialized context must be obtained before any of the other
- * gxio_uart routines may be used.
- *
- * @param context Pointer to a gxio_uart_context_t, which will be initialized
- * by this routine, if it succeeds.
- * @param uart_index Index of the UART to use.
- * @return Zero if the context was successfully initialized, else a
- * GXIO_ERR_xxx error code.
- */
-extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index);
-
-/* Destroy a UART context.
- *
- * Once destroyed, a context may not be used with any gxio_uart routines
- * other than gxio_uart_init(). After this routine returns, no further
- * interrupts requested on this context will be delivered. The state and
- * configuration of the hardware which had been attached to this context are
- * unchanged by this operation.
- *
- * @param context Pointer to a gxio_uart_context_t.
- * @return Zero if the context was successfully destroyed, else a
- * GXIO_ERR_xxx error code.
- */
-extern int gxio_uart_destroy(gxio_uart_context_t *context);
-
-/* Write a UART register.
- * @param context Pointer to a gxio_uart_context_t.
- * @param offset UART register offset.
- * @param word Data to be written to the UART register.
- */
-extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
- uint64_t word);
-
-/* Read a UART register.
- * @param context Pointer to a gxio_uart_context_t.
- * @param offset UART register offset.
- * @return Data read from the UART register.
- */
-extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
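
Putting these routines together, a minimal hedged sketch (UART_REG_OFFSET
is a stand-in for a real register offset from <arch/uart.h>):

    gxio_uart_context_t ctx;
    if (gxio_uart_init(&ctx, 0) != 0)
        return -1;
    gxio_uart_write(&ctx, UART_REG_OFFSET, 0x55);
    uint64_t val = gxio_uart_read(&ctx, UART_REG_OFFSET);
    gxio_uart_destroy(&ctx);
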
-
-#endif /* _GXIO_UART_H_ */
diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h
deleted file mode 100644
index 93c9636d2dd7..000000000000
--- a/arch/tile/include/gxio/usb_host.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef _GXIO_USB_H_
-#define _GXIO_USB_H_
-
-#include <gxio/common.h>
-
-#include <hv/drv_usb_host_intf.h>
-#include <hv/iorpc.h>
-
-/*
- *
- * An API for manipulating the USB host interface.
- */
-
-/*
- *
- * The USB shim allows access to the processor's Universal Serial Bus
- * connections.
- */
-
-/* A context object used to manage USB hardware resources. */
-typedef struct {
-
- /* File descriptor for calling up to the hypervisor. */
- int fd;
-
- /* The VA at which our MMIO registers are mapped. */
- char *mmio_base;
-} gxio_usb_host_context_t;
-
-/* Initialize a USB context.
- *
- * A properly initialized context must be obtained before any of the other
- * gxio_usb_host routines may be used.
- *
- * @param context Pointer to a gxio_usb_host_context_t, which will be
- * initialized by this routine, if it succeeds.
- * @param usb_index Index of the USB shim to use.
- * @param is_ehci Nonzero to use the EHCI interface; zero to use the OHCI
- * interface.
- * @return Zero if the context was successfully initialized, else a
- * GXIO_ERR_xxx error code.
- */
-extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
- int is_ehci);
-
-/* Destroy a USB context.
- *
- * Once destroyed, a context may not be used with any gxio_usb_host routines
- * other than gxio_usb_host_init(). After this routine returns, no further
- * interrupts or signals requested on this context will be delivered. The
- * state and configuration of the hardware which had been attached to this
- * context are unchanged by this operation.
- *
- * @param context Pointer to a gxio_usb_host_context_t.
- * @return Zero if the context was successfully destroyed, else a
- * GXIO_ERR_xxx error code.
- */
-extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context);
-
-/* Retrieve the address of the shim's MMIO registers.
- *
- * @param context Pointer to a properly initialized gxio_usb_host_context_t.
- * @return The address of the shim's MMIO registers.
- */
-extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context);
-
-/* Retrieve the length of the shim's MMIO registers.
- *
- * @param context Pointer to a properly initialized gxio_usb_host_context_t.
- * @return The length of the shim's MMIO registers.
- */
-extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
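
A minimal hedged sketch of the lifecycle defined above, bringing up shim
0 in EHCI mode and locating its registers for a host-controller stack:

    gxio_usb_host_context_t ctx;
    if (gxio_usb_host_init(&ctx, 0, /* is_ehci */ 1) != 0)
        return -1;
    void *regs = gxio_usb_host_get_reg_start(&ctx);
    size_t len = gxio_usb_host_get_reg_len(&ctx);
    /* ... hand regs/len to the EHCI driver ... */
    gxio_usb_host_destroy(&ctx);
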
-
-#endif /* _GXIO_USB_H_ */
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
deleted file mode 100644
index ff7f50f970a5..000000000000
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * Interface definitions for the mpipe driver.
- */
-
-#ifndef _SYS_HV_DRV_MPIPE_INTF_H
-#define _SYS_HV_DRV_MPIPE_INTF_H
-
-#include <arch/mpipe.h>
-#include <arch/mpipe_constants.h>
-
-
-/** Number of mPIPE instances supported. */
-#define HV_MPIPE_INSTANCE_MAX (2)
-
-/** Number of buffer stacks (32). */
-#define HV_MPIPE_NUM_BUFFER_STACKS \
- (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
-
-/** Number of NotifRings (256). */
-#define HV_MPIPE_NUM_NOTIF_RINGS (MPIPE_NUM_NOTIF_RINGS)
-
-/** Number of NotifGroups (32). */
-#define HV_MPIPE_NUM_NOTIF_GROUPS (MPIPE_NUM_NOTIF_GROUPS)
-
-/** Number of buckets (4160). */
-#define HV_MPIPE_NUM_BUCKETS (MPIPE_NUM_BUCKETS)
-
-/** Number of "lo" buckets (4096). */
-#define HV_MPIPE_NUM_LO_BUCKETS 4096
-
-/** Number of "hi" buckets (64). */
-#define HV_MPIPE_NUM_HI_BUCKETS \
- (HV_MPIPE_NUM_BUCKETS - HV_MPIPE_NUM_LO_BUCKETS)
-
-/** Number of edma rings (24). */
-#define HV_MPIPE_NUM_EDMA_RINGS \
- (MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH)
-
-
-
-
-/** A flag bit indicating a fixed resource allocation. */
-#define HV_MPIPE_ALLOC_FIXED 0x01
-
-/** Offset for the config register MMIO region. */
-#define HV_MPIPE_CONFIG_MMIO_OFFSET \
- (MPIPE_MMIO_ADDR__REGION_VAL_CFG << MPIPE_MMIO_ADDR__REGION_SHIFT)
-
-/** Size of the config register MMIO region. */
-#define HV_MPIPE_CONFIG_MMIO_SIZE (64 * 1024)
-
-/** Offset for the fast register MMIO region (IDMA, EDMA, buffer stack). */
-#define HV_MPIPE_FAST_MMIO_OFFSET \
- (MPIPE_MMIO_ADDR__REGION_VAL_IDMA << MPIPE_MMIO_ADDR__REGION_SHIFT)
-
-/** Size of the fast register MMIO region (IDMA, EDMA, buffer stack). */
-#define HV_MPIPE_FAST_MMIO_SIZE \
- ((MPIPE_MMIO_ADDR__REGION_VAL_BSM + 1 - MPIPE_MMIO_ADDR__REGION_VAL_IDMA) \
- << MPIPE_MMIO_ADDR__REGION_SHIFT)
-
-
-/*
- * Each type of resource allocation comes in quantized chunks, where
- * XXX_BITS is the number of chunks, and XXX_RES_PER_BIT is the number
- * of resources in each chunk.
- */
-
-/** Number of buffer stack chunks available (32). */
-#define HV_MPIPE_ALLOC_BUFFER_STACKS_BITS \
- MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH
-
-/** Granularity of buffer stack allocation (1). */
-#define HV_MPIPE_ALLOC_BUFFER_STACKS_RES_PER_BIT \
- (HV_MPIPE_NUM_BUFFER_STACKS / HV_MPIPE_ALLOC_BUFFER_STACKS_BITS)
-
-/** Number of NotifRing chunks available (32). */
-#define HV_MPIPE_ALLOC_NOTIF_RINGS_BITS \
- MPIPE_MMIO_INIT_DAT_GX36_0__NOTIF_RING_MASK_WIDTH
-
-/** Granularity of NotifRing allocation (8). */
-#define HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT \
- (HV_MPIPE_NUM_NOTIF_RINGS / HV_MPIPE_ALLOC_NOTIF_RINGS_BITS)
-
-/** Number of NotifGroup chunks available (32). */
-#define HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS \
- HV_MPIPE_NUM_NOTIF_GROUPS
-
-/** Granularity of NotifGroup allocation (1). */
-#define HV_MPIPE_ALLOC_NOTIF_GROUPS_RES_PER_BIT \
- (HV_MPIPE_NUM_NOTIF_GROUPS / HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS)
-
-/** Number of lo bucket chunks available (16). */
-#define HV_MPIPE_ALLOC_LO_BUCKETS_BITS \
- MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_LO_WIDTH
-
-/** Granularity of lo bucket allocation (256). */
-#define HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT \
- (HV_MPIPE_NUM_LO_BUCKETS / HV_MPIPE_ALLOC_LO_BUCKETS_BITS)
-
-/** Number of hi bucket chunks available (16). */
-#define HV_MPIPE_ALLOC_HI_BUCKETS_BITS \
- MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_HI_WIDTH
-
-/** Granularity of hi bucket allocation (4). */
-#define HV_MPIPE_ALLOC_HI_BUCKETS_RES_PER_BIT \
- (HV_MPIPE_NUM_HI_BUCKETS / HV_MPIPE_ALLOC_HI_BUCKETS_BITS)
-
-/** Number of eDMA ring chunks available (24). */
-#define HV_MPIPE_ALLOC_EDMA_RINGS_BITS \
- MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH
-
-/** Granularity of eDMA ring allocation (1). */
-#define HV_MPIPE_ALLOC_EDMA_RINGS_RES_PER_BIT \
- (HV_MPIPE_NUM_EDMA_RINGS / HV_MPIPE_ALLOC_EDMA_RINGS_BITS)
-
-
-
-
-/** Bit vector encoding which NotifRings are in a NotifGroup. */
-typedef struct
-{
- /** The actual bits. */
- uint64_t ring_mask[4];
-
-} gxio_mpipe_notif_group_bits_t;
-
-
-/** Another name for MPIPE_LBL_INIT_DAT_BSTS_TBL_t. */
-typedef MPIPE_LBL_INIT_DAT_BSTS_TBL_t gxio_mpipe_bucket_info_t;
-
-
-
-/** Eight buffer stack ids. */
-typedef struct
-{
- /** The stacks. */
- uint8_t stacks[8];
-
-} gxio_mpipe_rules_stacks_t;
-
-
-/** A destination mac address. */
-typedef struct
-{
- /** The octets. */
- uint8_t octets[6];
-
-} gxio_mpipe_rules_dmac_t;
-
-
-/** A vlan. */
-typedef uint16_t gxio_mpipe_rules_vlan_t;
-
-
-
-/** Maximum number of characters in a link name. */
-#define GXIO_MPIPE_LINK_NAME_LEN 32
-
-
-/** Structure holding a link name. Only needed, and only typedef'ed,
- * because the IORPC stub generator only handles types which are single
- * words coming before the parameter name. */
-typedef struct
-{
- /** The name itself. */
- char name[GXIO_MPIPE_LINK_NAME_LEN];
-}
-_gxio_mpipe_link_name_t;
-
-/** Maximum number of characters in a symbol name. */
-#define GXIO_MPIPE_SYMBOL_NAME_LEN 128
-
-
-/** Structure holding a symbol name. Only needed, and only typedef'ed,
- * because the IORPC stub generator only handles types which are single
- * words coming before the parameter name. */
-typedef struct
-{
- /** The name itself. */
- char name[GXIO_MPIPE_SYMBOL_NAME_LEN];
-}
-_gxio_mpipe_symbol_name_t;
-
-
-/** Structure holding a MAC address. */
-typedef struct
-{
- /** The address. */
- uint8_t mac[6];
-}
-_gxio_mpipe_link_mac_t;
-
-
-
-/** Request shared data permission -- that is, the ability to send and
- * receive packets -- on the specified link. Other processes may also
- * request shared data permission on the same link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
- */
-#define GXIO_MPIPE_LINK_DATA 0x00000001UL
-
-/** Do not request data permission on the specified link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
- */
-#define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL
-
-/** Request exclusive data permission -- that is, the ability to send and
- * receive packets -- on the specified link. No other processes may
- * request data permission on this link, and if any process already has
- * data permission on it, this open will fail.
- *
- * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
- */
-#define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL
-
-/** Request shared stats permission -- that is, the ability to read and write
- * registers which contain link statistics, and to get link attributes --
- * on the specified link. Other processes may also request shared stats
- * permission on the same link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
- */
-#define GXIO_MPIPE_LINK_STATS 0x00000008UL
-
-/** Do not request stats permission on the specified link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
- */
-#define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL
-
-/** Request exclusive stats permission -- that is, the ability to read and
- * write registers which contain link statistics, and to get link
- * attributes -- on the specified link. No other processes may request
- * stats permission on this link, and if any process already
- * has stats permission on it, this open will fail.
- *
- * Requesting exclusive stats permission is normally a very bad idea, since
- * it prevents programs like mpipe-stat from providing information on this
- * link. Applications should only do this if they use MAC statistics
- * registers, and cannot tolerate any of the clear-on-read registers being
- * reset by other statistics programs.
- *
- * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
- */
-#define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL
-
-/** Request shared control permission -- that is, the ability to modify link
- * attributes, and read and write MAC and MDIO registers -- on the
- * specified link. Other processes may also request shared control
- * permission on the same link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
- */
-#define GXIO_MPIPE_LINK_CTL 0x00000040UL
-
-/** Do not request control permission on the specified link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
- */
-#define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL
-
-/** Request exclusive control permission -- that is, the ability to modify
- * link attributes, and read and write MAC and MDIO registers -- on the
- * specified link. No other processes may request control permission on
- * this link, and if any process already has control permission on it,
- * this open will fail.
- *
- * Requesting exclusive control permission is not always a good idea, since
- * it prevents programs like mpipe-link from configuring the link.
- *
- * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
- */
-#define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL
-
-/** Set the desired state of the link to up, allowing any speeds which are
- * supported by the link hardware, as part of this open operation; do not
- * change the desired state of the link when it is closed or the process
- * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
- * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
- * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
- */
-#define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL
-
-/** Set the desired state of the link to up, allowing any speeds which are
- * supported by the link hardware, as part of this open operation; when the
- * link is closed or this process exits, if no other process has the link
- * open, set the desired state of the link to down. No more than one of
- * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
- * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
- * specified in a gxio_mpipe_link_open() call. If none are specified,
- * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
- */
-#define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL
-
-/** Do not change the desired state of the link as part of the open
- * operation; when the link is closed or this process exits, if no other
- * process has the link open, set the desired state of the link to down.
- * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
- * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
- * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
- * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
- */
-#define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL
-
-/** Do not change the desired state of the link as part of the open
- * operation; do not change the desired state of the link when it is
- * closed or the process exits. No more than one of
- * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
- * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
- * specified in a gxio_mpipe_link_open() call. If none are specified,
- * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
- */
-#define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL
-
-/** Request that this open call not complete until the network link is up.
- * The process will wait as long as necessary for this to happen;
- * applications which wish to abandon waiting for the link after a
- * specific time period should not specify this flag when opening a link,
- * but should instead call gxio_mpipe_link_wait() afterward. The link
- * must be opened with stats permission. Note that this flag by itself
- * does not change the desired link state; if other open flags or previous
- * link state changes have not requested a desired state of up, the open
- * call will never complete. This flag is not available to kernel
- * clients.
- */
-#define GXIO_MPIPE_LINK_WAIT 0x00002000UL
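
To make the flag groups concrete, a hedged sketch of composing them for
gxio_mpipe_link_open(), which is declared in gxio/mpipe.h; its exact
parameter list is assumed here, so the call itself is shown as a comment:

    /* Shared data, exclusive control; bring the link up now and take
     * it back down on close.  The unspecified STATS group defaults to
     * GXIO_MPIPE_LINK_STATS. */
    unsigned int flags = GXIO_MPIPE_LINK_DATA |
                         GXIO_MPIPE_LINK_EXCL_CTL |
                         GXIO_MPIPE_LINK_AUTO_UPDOWN;
    /* gxio_mpipe_link_open(&link, &mpipe_context, "gbe0", flags); */
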
-
-
-/*
- * Note: link attributes must fit in 24 bits, since we use the top 8 bits
- * of the IORPC offset word for the channel number.
- */
-
-/** Determine whether jumbo frames may be received. If this attribute's
- * value is nonzero, the MAC will accept frames of up to 10240 bytes.
- * If the value is zero, the MAC will only accept frames of up to 1544
- * bytes. The default value is zero. */
-#define GXIO_MPIPE_LINK_RECEIVE_JUMBO 0x010000
-
-/** Determine whether to send pause frames on this link if the mPIPE packet
- * FIFO is nearly full. If the value is zero, pause frames are not sent.
- * If the value is nonzero, it is the delay value which will be sent in any
- * pause frames which are output, in units of 512 bit times.
- *
- * Bear in mind that in almost all circumstances, the mPIPE packet FIFO
- * will never fill up, since mPIPE will empty it as fast as or faster than
- * the incoming data rate, by either delivering or dropping packets. The
- * only situation in which this is not true is if the memory and cache
- * subsystem is extremely heavily loaded, and mPIPE cannot perform DMA of
- * packet data to memory in a timely fashion. In particular, pause frames
- * will <em>not</em> be sent if packets cannot be delivered because
- * NotifRings are full, buckets are full, or buffers are not available in
- * a buffer stack. */
-#define GXIO_MPIPE_LINK_SEND_PAUSE 0x020000
-
-/** Determine whether to suspend output on the receipt of pause frames.
- * If the value is nonzero, the mPIPE shim will suspend output on the link's
- * channel when a pause frame is received. If the value is zero, pause
- * frames will be ignored. The default value is zero. */
-#define GXIO_MPIPE_LINK_RECEIVE_PAUSE 0x030000
-
-/** Interface MAC address. The value is a 6-byte MAC address, in the least
- * significant 48 bits of the value; in other words, an address which would
- * be printed as '12:34:56:78:90:AB' in IEEE 802 canonical format would
- * be returned as 0x1234567890ab.
- *
- * Depending upon the overall system design, a MAC address may or may not
- * be available for each interface. Note that the interface's MAC address
- * does not limit the packets received on its channel, although the
- * classifier's rules could be configured to do that. Similarly, the MAC
- * address is not used when transmitting packets, although applications
- * could certainly decide to use the assigned address as a source MAC
- * address when doing so. This attribute may only be retrieved with
- * gxio_mpipe_link_get_attr(); it may not be modified.
- */
-#define GXIO_MPIPE_LINK_MAC 0x040000
-
-/** Determine whether to discard egress packets on link down. If this value
- * is nonzero, packets sent on this link while the link is down will be
- * discarded. If this value is zero, no packets will be sent on this link
- * while it is down. The default value is one. */
-#define GXIO_MPIPE_LINK_DISCARD_IF_DOWN 0x050000
-
-/** Possible link state. The value is a combination of link state flags,
- * ORed together, that indicate link modes which are actually supported by
- * the hardware. This attribute may only be retrieved with
- * gxio_mpipe_link_get_attr(); it may not be modified. */
-#define GXIO_MPIPE_LINK_POSSIBLE_STATE 0x060000
-
-/** Current link state. The value is a combination of link state flags,
- * ORed together, that indicate the current state of the hardware. If the
- * link is down, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will be zero;
- * if the link is up, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will
- * result in exactly one of the speed values, indicating the current speed.
- * This attribute may only be retrieved with gxio_mpipe_link_get_attr(); it
- * may not be modified. */
-#define GXIO_MPIPE_LINK_CURRENT_STATE 0x070000
-
-/** Desired link state. The value is a combination of flags, which specify
- * the desired state for the link. With gxio_mpipe_link_set_attr(), this
- * will, in the background, attempt to bring up the link using whichever of
- * the requested flags are reasonable, or take down the link if the flags
- * are zero. The actual link up or down operation may happen after this
- * call completes. If the link state changes in the future, the system
- * will continue to try to get back to the desired link state; for
- * instance, if the link is brought up successfully, and then the network
- * cable is disconnected, the link will go down. However, the desired
- * state of the link is still up, so if the cable is reconnected, the link
- * will be brought up again.
- *
- * With gxio_mpipe_link_get_attr(), this will indicate the desired state
- * for the link, as set with a previous gxio_mpipe_link_set_attr() call,
- * or implicitly by a gxio_mpipe_link_open() or link close operation.
- * This may not reflect the current state of the link; to get that, use
- * ::GXIO_MPIPE_LINK_CURRENT_STATE.
- */
-#define GXIO_MPIPE_LINK_DESIRED_STATE 0x080000
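
A hedged sketch of the attribute calls (the (link, attr, value) shape is
inferred from this documentation, not copied from gxio/mpipe.h): enable
jumbo receive, then read back the desired state:

    gxio_mpipe_link_set_attr(&link, GXIO_MPIPE_LINK_RECEIVE_JUMBO, 1);
    int64_t desired;
    gxio_mpipe_link_get_attr(&link, GXIO_MPIPE_LINK_DESIRED_STATE, &desired);
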
-
-
-
-/** Link can run, should run, or is running at 10 Mbps. */
-#define GXIO_MPIPE_LINK_10M 0x0000000000000001UL
-
-/** Link can run, should run, or is running at 100 Mbps. */
-#define GXIO_MPIPE_LINK_100M 0x0000000000000002UL
-
-/** Link can run, should run, or is running at 1 Gbps. */
-#define GXIO_MPIPE_LINK_1G 0x0000000000000004UL
-
-/** Link can run, should run, or is running at 10 Gbps. */
-#define GXIO_MPIPE_LINK_10G 0x0000000000000008UL
-
-/** Link can run, should run, or is running at 20 Gbps. */
-#define GXIO_MPIPE_LINK_20G 0x0000000000000010UL
-
-/** Link can run, should run, or is running at 25 Gbps. */
-#define GXIO_MPIPE_LINK_25G 0x0000000000000020UL
-
-/** Link can run, should run, or is running at 50 Gbps. */
-#define GXIO_MPIPE_LINK_50G 0x0000000000000040UL
-
-/** Link should run at the highest speed supported by the link and by
- * the device connected to the link. Only usable as a value for
- * the link's desired state; never returned as a value for the current
- * or possible states. */
-#define GXIO_MPIPE_LINK_ANYSPEED 0x0000000000000800UL
-
-/** All legal link speeds. This value is provided for use in extracting
- * the speed-related subset of the link state flags; it is not intended
- * to be set directly as a value for one of the GXIO_MPIPE_LINK_xxx_STATE
- * attributes. A link is up or is requested to be up if its current or
- * desired state, respectively, ANDed with this value, is nonzero. */
-#define GXIO_MPIPE_LINK_SPEED_MASK 0x0000000000000FFFUL
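
For example, the up/down test described above reduces to a mask check,
with current_state obtained via ::GXIO_MPIPE_LINK_CURRENT_STATE:

    if (current_state & GXIO_MPIPE_LINK_SPEED_MASK) {
        /* Link is up; exactly one speed bit is set. */
    }
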
-
-/** Link can run, should run, or is running in MAC loopback mode. This
- * loops transmitted packets back to the receiver, inside the Tile
- * Processor. */
-#define GXIO_MPIPE_LINK_LOOP_MAC 0x0000000000001000UL
-
-/** Link can run, should run, or is running in PHY loopback mode. This
- * loops transmitted packets back to the receiver, inside the external
- * PHY chip. */
-#define GXIO_MPIPE_LINK_LOOP_PHY 0x0000000000002000UL
-
-/** Link can run, should run, or is running in external loopback mode.
- * This requires that an external loopback plug be installed on the
- * Ethernet port. Note that only some links require that this be
- * configured via the gxio_mpipe_link routines; other links can do
- * external loopback with the plug and no special configuration. */
-#define GXIO_MPIPE_LINK_LOOP_EXT 0x0000000000004000UL
-
-/** All legal loopback types. */
-#define GXIO_MPIPE_LINK_LOOP_MASK 0x000000000000F000UL
-
-/** Link can run, should run, or is running in full-duplex mode.
- * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
- * specified in a set of desired state flags, both are assumed. */
-#define GXIO_MPIPE_LINK_FDX 0x0000000000010000UL
-
-/** Link can run, should run, or is running in half-duplex mode.
- * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
- * specified in a set of desired state flags, both are assumed. */
-#define GXIO_MPIPE_LINK_HDX 0x0000000000020000UL
-
-
-/** An individual rule. */
-typedef struct
-{
- /** The total size. */
- uint16_t size;
-
- /** The priority. */
- int16_t priority;
-
- /** The "headroom" in each buffer. */
- uint8_t headroom;
-
- /** The "tailroom" in each buffer. */
- uint8_t tailroom;
-
- /** The "capacity" of the largest buffer. */
- uint16_t capacity;
-
- /** The mask for converting a flow hash into a bucket. */
- uint16_t bucket_mask;
-
- /** The offset for converting a flow hash into a bucket. */
- uint16_t bucket_first;
-
- /** The buffer stack ids. */
- gxio_mpipe_rules_stacks_t stacks;
-
- /** The actual channels. */
- uint32_t channel_bits;
-
- /** The number of dmacs. */
- uint16_t num_dmacs;
-
- /** The number of vlans. */
- uint16_t num_vlans;
-
- /** The actual dmacs and vlans. */
- uint8_t dmacs_and_vlans[];
-
-} gxio_mpipe_rules_rule_t;
-
-
-/** A list of classifier rules. */
-typedef struct
-{
- /** The offset to the end of the current rule. */
- uint16_t tail;
-
- /** The offset to the start of the current rule. */
- uint16_t head;
-
- /** The actual rules. */
- uint8_t rules[4096 - 4];
-
-} gxio_mpipe_rules_list_t;
-
-
-
-
-/** mPIPE statistics structure. These counters include all relevant
- * events occurring on all links within the mPIPE shim. */
-typedef struct
-{
- /** Number of ingress packets dropped for any reason. */
- uint64_t ingress_drops;
- /** Number of ingress packets dropped because a buffer stack was empty. */
- uint64_t ingress_drops_no_buf;
- /** Number of ingress packets dropped or truncated due to lack of space in
- * the iPkt buffer. */
- uint64_t ingress_drops_ipkt;
- /** Number of ingress packets dropped by the classifier or load balancer. */
- uint64_t ingress_drops_cls_lb;
- /** Total number of ingress packets. */
- uint64_t ingress_packets;
- /** Total number of egress packets. */
- uint64_t egress_packets;
- /** Total number of ingress bytes. */
- uint64_t ingress_bytes;
- /** Total number of egress bytes. */
- uint64_t egress_bytes;
-}
-gxio_mpipe_stats_t;
-
-
-#endif /* _SYS_HV_DRV_MPIPE_INTF_H */
diff --git a/arch/tile/include/hv/drv_mshim_intf.h b/arch/tile/include/hv/drv_mshim_intf.h
deleted file mode 100644
index c6ef3bdc55cf..000000000000
--- a/arch/tile/include/hv/drv_mshim_intf.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file drv_mshim_intf.h
- * Interface definitions for the Linux EDAC memory controller driver.
- */
-
-#ifndef _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
-#define _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
-
-/** Number of memory controllers in the public API. */
-#define TILE_MAX_MSHIMS 4
-
-/** Memory info under each memory controller. */
-struct mshim_mem_info
-{
- uint64_t mem_size; /**< Total memory size in bytes. */
- uint8_t mem_type; /**< Memory type, DDR2 or DDR3. */
- uint8_t mem_ecc; /**< Memory supports ECC. */
-};
-
-/**
- * DIMM error structure.
- * For now, only correctable errors are counted and the mshim doesn't record
- * the error PA; the HV panics upon uncorrectable errors.
- */
-struct mshim_mem_error
-{
- uint32_t sbe_count; /**< Number of single-bit errors. */
-};
-
-/** Read this offset to get the memory info per mshim. */
-#define MSHIM_MEM_INFO_OFF 0x100
-
-/** Read this offset to check DIMM error. */
-#define MSHIM_MEM_ERROR_OFF 0x200
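
A hedged sketch of this offset protocol, assuming the generic hypervisor
device-read call hv_dev_pread() from <hv/hypervisor.h> and an
already-opened mshim device handle:

    struct mshim_mem_info info;
    if (hv_dev_pread(mshim_devhdl, 0, (HV_VirtAddr)&info,
                     sizeof(info), MSHIM_MEM_INFO_OFF) != sizeof(info))
        return -EIO;
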
-
-#endif /* _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H */
diff --git a/arch/tile/include/hv/drv_pcie_rc_intf.h b/arch/tile/include/hv/drv_pcie_rc_intf.h
deleted file mode 100644
index 9bd2243bece0..000000000000
--- a/arch/tile/include/hv/drv_pcie_rc_intf.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file drv_pcie_rc_intf.h
- * Interface definitions for the PCIE Root Complex.
- */
-
-#ifndef _SYS_HV_DRV_PCIE_RC_INTF_H
-#define _SYS_HV_DRV_PCIE_RC_INTF_H
-
-/** File offset for reading the interrupt base number used for PCIE legacy
- interrupts and the PLX Gen 1 requirement flag. */
-#define PCIE_RC_CONFIG_MASK_OFF 0
-
-
-/**
- * Structure used for obtaining PCIe config information, read from the PCIE
- * subsystem /ctl file at initialization
- */
-typedef struct pcie_rc_config
-{
- int intr; /**< interrupt number used for downcall */
- int plx_gen1; /**< flag for PLX Gen 1 configuration */
-} pcie_rc_config_t;
-
-#endif /* _SYS_HV_DRV_PCIE_RC_INTF_H */
diff --git a/arch/tile/include/hv/drv_srom_intf.h b/arch/tile/include/hv/drv_srom_intf.h
deleted file mode 100644
index 6395faa6d9e6..000000000000
--- a/arch/tile/include/hv/drv_srom_intf.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file drv_srom_intf.h
- * Interface definitions for the SPI Flash ROM driver.
- */
-
-#ifndef _SYS_HV_INCLUDE_DRV_SROM_INTF_H
-#define _SYS_HV_INCLUDE_DRV_SROM_INTF_H
-
-/** Read this offset to get the total device size. */
-#define SROM_TOTAL_SIZE_OFF 0xF0000000
-
-/** Read this offset to get the device sector size. */
-#define SROM_SECTOR_SIZE_OFF 0xF0000004
-
-/** Read this offset to get the device page size. */
-#define SROM_PAGE_SIZE_OFF 0xF0000008
-
-/** Write this offset to flush any pending writes. */
-#define SROM_FLUSH_OFF 0xF1000000
-
-/** Write this offset, plus the byte offset of the start of a sector, to
- * erase a sector. Any write data is ignored, but there must be at least
- * one byte of write data. Only applies when the driver is in MTD mode.
- */
-#define SROM_ERASE_OFF 0xF2000000
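
The same hedged hv_dev_pread()/hv_dev_pwrite() offset protocol sketched
for the mshim driver applies here; for instance, reading four bytes at
SROM_TOTAL_SIZE_OFF yields the device size:

    uint32_t total_size;
    hv_dev_pread(srom_devhdl, 0, (HV_VirtAddr)&total_size,
                 sizeof(total_size), SROM_TOTAL_SIZE_OFF);
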
-
-#endif /* _SYS_HV_INCLUDE_DRV_SROM_INTF_H */
diff --git a/arch/tile/include/hv/drv_trio_intf.h b/arch/tile/include/hv/drv_trio_intf.h
deleted file mode 100644
index 237e04dee66c..000000000000
--- a/arch/tile/include/hv/drv_trio_intf.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * Interface definitions for the trio driver.
- */
-
-#ifndef _SYS_HV_DRV_TRIO_INTF_H
-#define _SYS_HV_DRV_TRIO_INTF_H
-
-#include <arch/trio.h>
-
-/** The vendor ID for all Tilera processors. */
-#define TILERA_VENDOR_ID 0x1a41
-
-/** The device ID for the Gx36 processor. */
-#define TILERA_GX36_DEV_ID 0x0200
-
-/** Device ID for our internal bridge when running as RC. */
-#define TILERA_GX36_RC_DEV_ID 0x2000
-
-/** Maximum number of TRIO interfaces. */
-#define TILEGX_NUM_TRIO 2
-
-/** Gx36 has max 3 PCIe MACs per TRIO interface. */
-#define TILEGX_TRIO_PCIES 3
-
-/** Specify port properties for a PCIe MAC. */
-struct pcie_port_property
-{
- /** If true, the link can be configured in PCIe root complex mode. */
- uint8_t allow_rc: 1;
-
- /** If true, the link can be configured in PCIe endpoint mode. */
- uint8_t allow_ep: 1;
-
- /** If true, the link can be configured in StreamIO mode. */
- uint8_t allow_sio: 1;
-
- /** If true, the link is allowed to support 1-lane operation. Software
- * will not consider it an error if the link comes up as a x1 link. */
- uint8_t allow_x1: 1;
-
- /** If true, the link is allowed to support 2-lane operation. Software
- * will not consider it an error if the link comes up as a x2 link. */
- uint8_t allow_x2: 1;
-
- /** If true, the link is allowed to support 4-lane operation. Software
- * will not consider it an error if the link comes up as a x4 link. */
- uint8_t allow_x4: 1;
-
- /** If true, the link is allowed to support 8-lane operation. Software
- * will not consider it an error if the link comes up as a x8 link. */
- uint8_t allow_x8: 1;
-
- /** If true, this link is connected to a device which may or may not
- * be present. */
- uint8_t removable: 1;
-
-};
-
-/** Selectors used when configuring a char stream interrupt. */
-typedef enum pcie_stream_intr_config_sel_e
-{
- /** Interrupt configuration for memory map regions. */
- MEM_MAP_SEL,
-
- /** Interrupt configuration for push DMAs. */
- PUSH_DMA_SEL,
-
- /** Interrupt configuration for pull DMAs. */
- PULL_DMA_SEL,
-}
-pcie_stream_intr_config_sel_t;
-
-
-/** The mmap file offset (PA) of the TRIO config region. */
-#define HV_TRIO_CONFIG_OFFSET \
- ((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_CFG << \
- TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT)
-
-/** The maximum size of the TRIO config region. */
-#define HV_TRIO_CONFIG_SIZE \
- (1ULL << TRIO_CFG_REGION_ADDR__REGION_SHIFT)
-
-/** Size of the config region mapped into the client. We can't use
- * TRIO_MMIO_ADDRESS_SPACE__OFFSET_WIDTH because it
- * would require the kernel to allocate 4GB of VA space
- * from the VMALLOC region, which has a total range
- * of only 4GB.
- */
-#define HV_TRIO_CONFIG_IOREMAP_SIZE \
- ((uint64_t) 1 << TRIO_CFG_REGION_ADDR__PROT_SHIFT)
-
-/** The mmap file offset (PA) of a scatter queue region. */
-#define HV_TRIO_SQ_OFFSET(queue) \
- (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_SQ << \
- TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
- ((queue) << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT))
-
-/** The maximum size of a scatter queue region. */
-#define HV_TRIO_SQ_SIZE \
- (1ULL << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT)
-
-
-/** The "hardware MMIO region" of the first PIO region. */
-#define HV_TRIO_FIRST_PIO_REGION 8
-
-/** The mmap file offset (PA) of a PIO region. */
-#define HV_TRIO_PIO_OFFSET(region) \
- (((unsigned long long)(region) + HV_TRIO_FIRST_PIO_REGION) \
- << TRIO_PIO_REGIONS_ADDR__REGION_SHIFT)
-
-/** The maximum size of a PIO region. */
-#define HV_TRIO_PIO_SIZE (1ULL << TRIO_PIO_REGIONS_ADDR__ADDR_WIDTH)
-
-
-/** The mmap file offset (PA) of a push DMA region. */
-#define HV_TRIO_PUSH_DMA_OFFSET(ring) \
- (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PUSH_DMA << \
- TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
- ((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT))
-
-/** The mmap file offset (PA) of a pull DMA region. */
-#define HV_TRIO_PULL_DMA_OFFSET(ring) \
- (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PULL_DMA << \
- TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
- ((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT))
-
-/** The maximum size of a DMA region. */
-#define HV_TRIO_DMA_REGION_SIZE \
- (1ULL << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT)
-
-
-/** The mmap file offset (PA) of a Mem-Map interrupt region. */
-#define HV_TRIO_MEM_MAP_INTR_OFFSET(map) \
- (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_MEM << \
- TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
- ((map) << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT))
-
-/** The maximum size of a Mem-Map interrupt region. */
-#define HV_TRIO_MEM_MAP_INTR_SIZE \
- (1ULL << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT)
-
-
-/** A flag bit indicating a fixed resource allocation. */
-#define HV_TRIO_ALLOC_FIXED 0x01
-
-/** TRIO requires that all mappings have 4kB aligned start addresses. */
-#define HV_TRIO_PAGE_SHIFT 12
-
-/** TRIO requires that all mappings have 4kB aligned start addresses. */
-#define HV_TRIO_PAGE_SIZE (1ull << HV_TRIO_PAGE_SHIFT)
-
-
-/* Specify all PCIe port properties for a TRIO. */
-struct pcie_trio_ports_property
-{
- struct pcie_port_property ports[TILEGX_TRIO_PCIES];
-
- /** Set if this TRIO belongs to a Gx72 device. */
- uint8_t is_gx72;
-};
-
-/* Flags indicating traffic class. */
-#define HV_TRIO_FLAG_TC_SHIFT 4
-#define HV_TRIO_FLAG_TC_RMASK 0xf
-#define HV_TRIO_FLAG_TC(N) \
- ((((N) & HV_TRIO_FLAG_TC_RMASK) + 1) << HV_TRIO_FLAG_TC_SHIFT)
-
-/* Flags indicating virtual functions. */
-#define HV_TRIO_FLAG_VFUNC_SHIFT 8
-#define HV_TRIO_FLAG_VFUNC_RMASK 0xff
-#define HV_TRIO_FLAG_VFUNC(N) \
- ((((N) & HV_TRIO_FLAG_VFUNC_RMASK) + 1) << HV_TRIO_FLAG_VFUNC_SHIFT)
-
-
-/* Flag indicating an ordered PIO region. */
-#define HV_TRIO_PIO_FLAG_ORDERED (1 << 16)
-
-/* Flags indicating special types of PIO regions. */
-#define HV_TRIO_PIO_FLAG_SPACE_SHIFT 17
-#define HV_TRIO_PIO_FLAG_SPACE_MASK (0x3 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
-#define HV_TRIO_PIO_FLAG_CONFIG_SPACE (0x1 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
-#define HV_TRIO_PIO_FLAG_IO_SPACE (0x2 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
-
-
-#endif /* _SYS_HV_DRV_TRIO_INTF_H */
diff --git a/arch/tile/include/hv/drv_uart_intf.h b/arch/tile/include/hv/drv_uart_intf.h
deleted file mode 100644
index f5379e2404fd..000000000000
--- a/arch/tile/include/hv/drv_uart_intf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * Interface definitions for the UART driver.
- */
-
-#ifndef _SYS_HV_DRV_UART_INTF_H
-#define _SYS_HV_DRV_UART_INTF_H
-
-#include <arch/uart.h>
-
-/** Number of UART ports supported. */
-#define TILEGX_UART_NR 2
-
-/** The mmap file offset (PA) of the UART MMIO region. */
-#define HV_UART_MMIO_OFFSET 0
-
-/** The maximum size of the UART's MMIO region (64K bytes). */
-#define HV_UART_MMIO_SIZE (1UL << 16)
-
-#endif /* _SYS_HV_DRV_UART_INTF_H */
diff --git a/arch/tile/include/hv/drv_usb_host_intf.h b/arch/tile/include/hv/drv_usb_host_intf.h
deleted file mode 100644
index 24ce774a3f1d..000000000000
--- a/arch/tile/include/hv/drv_usb_host_intf.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * Interface definitions for the USB host driver.
- */
-
-#ifndef _SYS_HV_DRV_USB_HOST_INTF_H
-#define _SYS_HV_DRV_USB_HOST_INTF_H
-
-#include <arch/usb_host.h>
-
-
-/** Offset for the EHCI register MMIO region. */
-#define HV_USB_HOST_MMIO_OFFSET_EHCI ((uint64_t) USB_HOST_HCCAPBASE_REG)
-
-/** Offset for the OHCI register MMIO region. */
-#define HV_USB_HOST_MMIO_OFFSET_OHCI ((uint64_t) USB_HOST_OHCD_HC_REVISION_REG)
-
-/** Size of the register MMIO region. This turns out to be the same for
- * both EHCI and OHCI. */
-#define HV_USB_HOST_MMIO_SIZE ((uint64_t) 0x1000)
-
-/** The number of service domains supported by the USB host shim. */
-#define HV_USB_HOST_NUM_SVC_DOM 1
-
-
-#endif /* _SYS_HV_DRV_USB_HOST_INTF_H */
diff --git a/arch/tile/include/hv/drv_xgbe_impl.h b/arch/tile/include/hv/drv_xgbe_impl.h
deleted file mode 100644
index 3a73b2b44913..000000000000
--- a/arch/tile/include/hv/drv_xgbe_impl.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file drivers/xgbe/impl.h
- * Implementation details for the NetIO library.
- */
-
-#ifndef __DRV_XGBE_IMPL_H__
-#define __DRV_XGBE_IMPL_H__
-
-#include <hv/netio_errors.h>
-#include <hv/netio_intf.h>
-#include <hv/drv_xgbe_intf.h>
-
-
-/** How many groups we have (log2). */
-#define LOG2_NUM_GROUPS (12)
-/** How many groups we have. */
-#define NUM_GROUPS (1 << LOG2_NUM_GROUPS)
-
-/** Number of output requests we'll buffer per tile. */
-#define EPP_REQS_PER_TILE (32)
-
-/** Words used in an eDMA command without checksum acceleration. */
-#define EDMA_WDS_NO_CSUM 8
-/** Words used in an eDMA command with checksum acceleration. */
-#define EDMA_WDS_CSUM 10
-/** Total available words in the eDMA command FIFO. */
-#define EDMA_WDS_TOTAL 128
-
-
-/*
- * FIXME: These definitions are internal and should have underscores!
- * NOTE: The actual numeric values here are intentional and allow us to
- * optimize the concept "if small ... else if large ... else ...", by
- * checking for the low bit being set, and then for non-zero.
- * These are used as array indices, so they must have the values (0, 1, 2)
- * in some order.
- */
-#define SIZE_SMALL (1) /**< Small packet queue. */
-#define SIZE_LARGE (2) /**< Large packet queue. */
-#define SIZE_JUMBO (0) /**< Jumbo packet queue. */
-
-/** The number of "SIZE_xxx" values. */
-#define NETIO_NUM_SIZES 3
-
-
-/*
- * Default numbers of packets for IPP drivers. These values are chosen
- * such that CIPP1 will not overflow its L2 cache.
- */
-
-/** The default number of small packets. */
-#define NETIO_DEFAULT_SMALL_PACKETS 2750
-/** The default number of large packets. */
-#define NETIO_DEFAULT_LARGE_PACKETS 2500
-/** The default number of jumbo packets. */
-#define NETIO_DEFAULT_JUMBO_PACKETS 250
-
-
-/** Log2 of the size of a memory arena. */
-#define NETIO_ARENA_SHIFT 24 /* 16 MB */
-/** Size of a memory arena. */
-#define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT)
-
-
-/** A queue of packets.
- *
- * This structure partially defines a queue of packets waiting to be
- * processed. The queue as a whole is written to by an interrupt handler and
- * read by non-interrupt code; this data structure is what's touched by the
- * interrupt handler. The other part of the queue state, the read offset, is
- * kept in user space, not in hypervisor space, so it is in a separate data
- * structure.
- *
- * The read offset (__packet_receive_read in the user part of the queue
- * structure) points to the next packet to be read. When the read offset is
- * equal to the write offset, the queue is empty; therefore the queue must
- * contain one more slot than the required maximum queue size.
- *
- * Here's an example of all 3 state variables and what they mean. All
- * pointers move left to right.
- *
- * @code
- * I I V V V V I I I I
- * 0 1 2 3 4 5 6 7 8 9 10
- * ^ ^ ^ ^
- * | | |
- * | | __last_packet_plus_one
- * | __buffer_write
- * __packet_receive_read
- * @endcode
- *
- * This queue has 10 slots, and thus can hold 9 packets (__last_packet_plus_one
- * = 10). The read pointer is at 2, and the write pointer is at 6; thus,
- * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining
- * slots are invalid (do not contain a packet).
- */
-typedef struct {
- /** Byte offset of the next notify packet to be written: zero for the first
- * packet on the queue, sizeof (netio_pkt_t) for the second packet on the
- * queue, etc. */
- volatile uint32_t __packet_write;
-
- /** Offset of the packet after the last valid packet (i.e., when any
- * pointer is incremented to this value, it wraps back to zero). */
- uint32_t __last_packet_plus_one;
-}
-__netio_packet_queue_t;
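
The empty test implied by that invariant, as a sketch (the read offset,
__packet_receive_read, lives in the user half of the queue state,
netio_queue_user_impl_t, defined later in this file):

    static inline int packet_queue_empty(__netio_packet_queue_t *q,
                                         uint32_t read_offset)
    {
        return q->__packet_write == read_offset;
    }
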
-
-
-/** A queue of buffers.
- *
- * This structure partially defines a queue of empty buffers which have been
- * obtained via requests to the IPP. (The elements of the queue are packet
- * handles, which are transformed into a full netio_pkt_t when the buffer is
- * retrieved.) The queue as a whole is written to by an interrupt handler and
- * read by non-interrupt code; this data structure is what's touched by the
- * interrupt handler. The other parts of the queue state, the read offset and
- * requested write offset, are kept in user space, not in hypervisor space, so
- * they are in a separate data structure.
- *
- * The read offset (__buffer_read in the user part of the queue structure)
- * points to the next buffer to be read. When the read offset is equal to the
- * write offset, the queue is empty; therefore the queue must contain one more
- * slot than the required maximum queue size.
- *
- * The requested write offset (__buffer_requested_write in the user part of
- * the queue structure) points to the slot which will hold the next buffer we
- * request from the IPP, once we get around to sending such a request. When
- * the requested write offset is equal to the write offset, no requests for
- * new buffers are outstanding; when the requested write offset is one greater
- * than the read offset, no more requests may be sent.
- *
- * Note that, unlike the packet_queue, the buffer_queue places incoming
- * buffers at decreasing addresses. This makes the check for "is it time to
- * wrap the buffer pointer" cheaper in the assembly code which receives new
- * buffers, and means that the value which defines the queue size,
- * __last_buffer, is different than in the packet queue. Also, the offset
- * used in the packet_queue is already scaled by the size of a packet; here we
- * use unscaled slot indices for the offsets. (These differences are
- * historical, and in the future it's possible that the packet_queue will look
- * more like this queue.)
- *
- * Here's an example of all 4 state variables and what they mean.  Remember:
- * all pointers move right to left.
- *
- * @code
- * V V V I I R R V V V
- * 0 1 2 3 4 5 6 7 8 9
- * ^ ^ ^ ^
- * | | | |
- * | | | __last_buffer
- * | | __buffer_write
- * | __buffer_requested_write
- * __buffer_read
- * @endcode
- *
- * This queue has 10 slots, and thus can hold 9 buffers (__last_buffer = 9).
- * The read pointer is at 2, and the write pointer is at 6; thus, there are
- * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write
- * pointer is at 4; thus, requests have been made to the IPP for buffers which
- * will be placed in slots 6 and 5 when they arrive. Finally, the remaining
- * slots are invalid (do not contain a buffer).
- */
-typedef struct
-{
- /** Ordinal number of the next buffer to be written: 0 for the first slot in
- * the queue, 1 for the second slot in the queue, etc. */
- volatile uint32_t __buffer_write;
-
- /** Ordinal number of the last buffer (i.e., when any pointer is decremented
- * below zero, it is reloaded with this value). */
- uint32_t __last_buffer;
-}
-__netio_buffer_queue_t;
-
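-/* Illustrative only: stepping a buffer-queue index.  Unlike the packet
- * queue, these are unscaled slot numbers that move toward lower
- * addresses, reloading from __last_buffer when decremented below zero.
- */
-static inline uint32_t
-__example_buffer_index_step(const __netio_buffer_queue_t* q, uint32_t idx)
-{
-  return (idx == 0) ? q->__last_buffer : idx - 1;
-}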
-
-/**
- * An object for providing Ethernet packets to a process.
- */
-typedef struct __netio_queue_impl_t
-{
- /** The queue of packets waiting to be received. */
- __netio_packet_queue_t __packet_receive_queue;
- /** The intr bit mask that IDs this device. */
- unsigned int __intr_id;
- /** Offset to queues of empty buffers, one per size. */
- uint32_t __buffer_queue[NETIO_NUM_SIZES];
- /** The address of the first EPP tile, or -1 if no EPP. */
- /* ISSUE: Actually this is always "0" or "~0". */
- uint32_t __epp_location;
- /** The queue ID that this queue represents. */
- unsigned int __queue_id;
- /** Number of acknowledgements received. */
- volatile uint32_t __acks_received;
- /** Last completion number received for packet_sendv. */
- volatile uint32_t __last_completion_rcv;
- /** Number of packets allowed to be outstanding. */
- uint32_t __max_outstanding;
- /** First VA available for packets. */
- void* __va_0;
- /** First VA in second range available for packets. */
- void* __va_1;
- /** Padding to align the "__packets" field to the size of a netio_pkt_t. */
- uint32_t __padding[3];
- /** The packets themselves. */
- netio_pkt_t __packets[0];
-}
-netio_queue_impl_t;
-
-
-/**
- * An object for managing the user end of a NetIO queue.
- */
-typedef struct __netio_queue_user_impl_t
-{
- /** The next incoming packet to be read. */
- uint32_t __packet_receive_read;
- /** The next empty buffers to be read, one index per size. */
- uint8_t __buffer_read[NETIO_NUM_SIZES];
- /** Where the empty buffer we next request from the IPP will go, one index
- * per size. */
- uint8_t __buffer_requested_write[NETIO_NUM_SIZES];
- /** PCIe interface flag. */
- uint8_t __pcie;
- /** Number of packets left to be received before we send a credit update. */
- uint32_t __receive_credit_remaining;
- /** Value placed in __receive_credit_remaining when it reaches zero. */
- uint32_t __receive_credit_interval;
- /** First fast I/O routine index. */
- uint32_t __fastio_index;
- /** Number of acknowledgements expected. */
- uint32_t __acks_outstanding;
- /** Last completion number requested. */
- uint32_t __last_completion_req;
- /** File descriptor for driver. */
- int __fd;
-}
-netio_queue_user_impl_t;
-
-
-#define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */
-#define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */
-
-
-/** Internal structure used to convey packet send information to the
- * hypervisor. FIXME: Actually, it's not used for that anymore, but
- * netio_packet_send() still uses it internally.
- */
-typedef struct
-{
- uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */
- uint16_t transfer_size; /**< Size of packet */
- uint32_t va; /**< VA of start of packet */
- __netio_pkt_handle_t handle; /**< Packet handle */
- uint32_t csum0; /**< First checksum word */
- uint32_t csum1; /**< Second checksum word */
-}
-__netio_send_cmd_t;
-
-
-/** Flags used in two contexts:
- * - As the "flags" member in the __netio_send_cmd_t, above; used only
- * for netio_pkt_send_{prepare,commit}.
- * - As part of the flags passed to the various send packet fast I/O calls.
- */
-
-/** Need acknowledgement on this packet. Note that some code in the
- * normal send_pkt fast I/O handler assumes that this is equal to 1. */
-#define __NETIO_SEND_FLG_ACK 0x1
-
-/** Do checksum on this packet. (Only used with the __netio_send_cmd_t;
- * normal packet sends use a special fast I/O index to denote checksumming,
- * and multi-segment sends test the checksum descriptor.) */
-#define __NETIO_SEND_FLG_CSUM 0x2
-
-/** Get a completion on this packet. Only used with multi-segment sends. */
-#define __NETIO_SEND_FLG_COMPLETION 0x4
-
-/** Position of the number-of-extra-segments value in the flags word.
- Only used with multi-segment sends. */
-#define __NETIO_SEND_FLG_XSEG_SHIFT 3
-
-/** Width of the number-of-extra-segments value in the flags word. */
-#define __NETIO_SEND_FLG_XSEG_WIDTH 2
-
-#endif /* __DRV_XGBE_IMPL_H__ */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
deleted file mode 100644
index 2a20b266d944..000000000000
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ /dev/null
@@ -1,615 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file drv_xgbe_intf.h
- * Interface to the hypervisor XGBE driver.
- */
-
-#ifndef __DRV_XGBE_INTF_H__
-#define __DRV_XGBE_INTF_H__
-
-/**
- * An object for forwarding VAs and PAs to the hypervisor.
- * @ingroup types
- *
- * This allows the supervisor to specify a number of areas of memory to
- * store packet buffers.
- */
-typedef struct
-{
- /** The physical address of the memory. */
- HV_PhysAddr pa;
- /** Page table entry for the memory. This is only used to derive the
- * memory's caching mode; the PA bits are ignored. */
- HV_PTE pte;
- /** The virtual address of the memory. */
- HV_VirtAddr va;
- /** Size (in bytes) of the memory area. */
- int size;
-
-}
-netio_ipp_address_t;
-
-/** The various pread/pwrite offsets into the hypervisor-level driver.
- * @ingroup types
- */
-typedef enum
-{
- /** Inform the Linux driver of the address of the NetIO arena memory.
- * This offset is actually only used to convey information from netio
- * to the Linux driver; it never makes it from there to the hypervisor.
- * Write-only; takes a uint32_t specifying the VA address. */
- NETIO_FIXED_ADDR = 0x5000000000000000ULL,
-
- /** Inform the Linux driver of the size of the NetIO arena memory.
- * This offset is actually only used to convey information from netio
- * to the Linux driver; it never makes it from there to the hypervisor.
- * Write-only; takes a uint32_t specifying the VA size. */
- NETIO_FIXED_SIZE = 0x5100000000000000ULL,
-
- /** Register current tile with IPP. Write then read: write, takes a
- * netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */
- NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL,
-
- /** Unregister current tile from IPP. Write-only, takes a dummy argument. */
- NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL,
-
- /** Start packets flowing. Write-only, takes a dummy argument. */
- NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL,
-
- /** Stop packets flowing. Write-only, takes a dummy argument. */
- NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL,
-
- /** Configure group (typically we group on VLAN). Write-only: takes an
- * array of netio_group_t's, low 24 bits of the offset is the base group
- * number times the size of a netio_group_t. */
- NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL,
-
- /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low
- * 24 bits of the offset is the base bucket number times the size of a
- * netio_bucket_t. */
- NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL,
-
- /** Get/set a parameter. Read or write: read or write data is the parameter
- * value, low 32 bits of the offset is a __netio_getset_offset_t. */
- NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL,
-
- /** Get fast I/O index. Read-only; returns a 4-byte base index value. */
- NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL,
-
- /** Configure hijack IP address. Packets with this IPv4 dest address
- * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address
- * in some standard form. FIXME: Define the form! */
- NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL,
-
- /**
- * Offsets beyond this point are reserved for the supervisor (although that
- * enforcement must be done by the supervisor driver itself).
- */
- NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL,
-
- /** Register I/O memory. Write-only, takes a netio_ipp_address_t. */
- NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL,
-
- /** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */
- NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL,
-
- /* Offsets greater than 0x7FFFFFFF can't be used directly from Linux
- * userspace code due to limitations in the pread/pwrite syscalls. */
-
- /** Drain LIPP buffers. */
- NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL,
-
- /** Supply a netio_ipp_address_t to be used as shared memory for the
- * LEPP command queue. */
- NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL,
-
- /* 0xFC... is currently unused. */
-
- /** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */
- NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL,
-
- /** Start IPP/EPP tiles. Write-only, takes a dummy argument. */
- NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL,
-
- /** Supply packet arena. Write-only, takes an array of
- * netio_ipp_address_t values. */
- NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL,
-} netio_hv_offset_t;
-
-/** Extract the base offset from an offset */
-#define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL)
-/** Extract the local offset from an offset */
-#define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL)
-
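-/* Worked example (not original text): composing and decomposing the
- * offset for a group-configuration write with base group 5:
- *
- *   off = NETIO_IPP_INPUT_GROUP_CFG_OFF + 5 * sizeof(netio_group_t);
- *   NETIO_BASE_OFFSET(off)  == 0x6400000000000000ULL (the operation)
- *   NETIO_LOCAL_OFFSET(off) == 5 * sizeof(netio_group_t) (the group)
- */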
-
-/**
- * Get/set offset.
- */
-typedef union
-{
- struct
- {
- uint64_t addr:48; /**< Class-specific address */
- unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */
- unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */
- }
- bits; /**< Bitfields */
- uint64_t word; /**< Aggregated value to use as the offset */
-}
-__netio_getset_offset_t;
-
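-/* A hedged sketch of building the offset word for NETIO_IPP_PARAM_OFF;
- * "cls" and "addr" stand in for a real class (e.g. NETIO_PARAM) and a
- * class-specific address.  Only the union layout is taken from above.
- */
-static inline uint64_t
-__example_getset_offset(unsigned int cls, uint64_t addr)
-{
-  __netio_getset_offset_t off;
-  off.bits.addr = addr;                        /* Low 48 bits kept. */
-  off.bits.class = cls;
-  off.bits.opcode = NETIO_IPP_PARAM_OFF >> 56; /* High 8 bits: 0x66. */
-  return off.word;                             /* pread/pwrite offset. */
-}
-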
-/**
- * Fast I/O index offsets (must be contiguous).
- */
-typedef enum
-{
- NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */
- NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */
- NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */
- NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */
- NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */
- NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */
- NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */
- NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */
-} netio_fastio_index_t;
-
-/** 3-word return type for Fast I/O call. */
-typedef struct
-{
- int err; /**< Error code. */
- uint32_t val0; /**< Value. Meaning depends upon the specific call. */
- uint32_t val1; /**< Value. Meaning depends upon the specific call. */
-} netio_fastio_rv3_t;
-
-/** 0-argument fast I/O call */
-int __netio_fastio0(uint32_t fastio_index);
-/** 1-argument fast I/O call */
-int __netio_fastio1(uint32_t fastio_index, uint32_t arg0);
-/** 3-argument fast I/O call, 3-word return value */
-netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0,
- uint32_t arg1, uint32_t arg2);
-/** 4-argument fast I/O call */
-int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
- uint32_t arg2, uint32_t arg3);
-/** 6-argument fast I/O call */
-int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
- uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5);
-/** 9-argument fast I/O call */
-int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
- uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5,
- uint32_t arg6, uint32_t arg7, uint32_t arg8);
-
-/** Allocate an empty packet.
- * @param fastio_index Fast I/O index.
- * @param size Size of the packet to allocate.
- */
-#define __netio_fastio_allocate(fastio_index, size) \
- __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size)
-
-/** Free a buffer.
- * @param fastio_index Fast I/O index.
- * @param handle Handle for the packet to free.
- */
-#define __netio_fastio_free_buffer(fastio_index, handle) \
- __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle)
-
-/** Increment our receive credits.
- * @param fastio_index Fast I/O index.
- * @param credits Number of credits to add.
- */
-#define __netio_fastio_return_credits(fastio_index, credits) \
- __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits)
-
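-/* Sketch (an assumption about usage, not original code) of the credit
- * pattern implied by netio_queue_user_impl_t: each received packet
- * decrements __receive_credit_remaining, and at zero the accumulated
- * credits are returned in one batch and the counter is reloaded.
- */
-static inline void
-__example_credit_update(netio_queue_user_impl_t* uq)
-{
-  if (--uq->__receive_credit_remaining == 0)
-  {
-    __netio_fastio_return_credits(uq->__fastio_index,
-                                  uq->__receive_credit_interval);
-    uq->__receive_credit_remaining = uq->__receive_credit_interval;
-  }
-}
-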
-/** Send packet, no checksum.
- * @param fastio_index Fast I/O index.
- * @param ackflag Nonzero if we want an ack.
- * @param size Size of the packet.
- * @param va Virtual address of start of packet.
- * @param handle Packet handle.
- */
-#define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \
- __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \
- size, va, handle)
-
-/** Send packet, calculate checksum.
- * @param fastio_index Fast I/O index.
- * @param ackflag Nonzero if we want an ack.
- * @param size Size of the packet.
- * @param va Virtual address of start of packet.
- * @param handle Packet handle.
- * @param csum0 Shim checksum header.
- * @param csum1 Checksum seed.
- */
-#define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \
- csum0, csum1) \
- __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \
- size, va, handle, csum0, csum1)
-
-
-/** Format for the "csum0" argument to the __netio_fastio_send routines
- * and LEPP. Note that this is currently exactly identical to the
- * ShimProtocolOffloadHeader.
- */
-typedef union
-{
- struct
- {
- unsigned int start_byte:7; /**< The first byte to be checksummed */
- unsigned int count:14; /**< Number of bytes to be checksummed. */
- unsigned int destination_byte:7; /**< The byte to write the checksum to. */
- unsigned int reserved:4; /**< Reserved. */
- } bits; /**< Decomposed method of access. */
- unsigned int word; /**< To send out the IDN. */
-} __netio_checksum_header_t;
-
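-/* Illustrative helper: filling a checksum descriptor.  The offset
- * arguments are hypothetical; only the bitfield layout comes from the
- * union above.
- */
-static inline unsigned int
-__example_csum0(unsigned int first_byte, unsigned int nbytes,
-                unsigned int dest_byte)
-{
-  __netio_checksum_header_t h;
-  h.bits.start_byte = first_byte;      /* First byte to checksum. */
-  h.bits.count = nbytes;               /* Number of bytes to cover. */
-  h.bits.destination_byte = dest_byte; /* Where the result is stored. */
-  h.bits.reserved = 0;
-  return h.word;                       /* Pass as the "csum0" argument. */
-}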
-
-/** Sendv packet with 1 or 2 segments.
- * @param fastio_index Fast I/O index.
- * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
- * 1 in next 2 bits; expected checksum in high 16 bits.
- * @param confno Confirmation number to request, if notify flag set.
- * @param csum0 Checksum descriptor; if zero, no checksum.
- * @param va_F Virtual address of first segment.
- * @param va_L Virtual address of last segment, if 2 segments.
- * @param len_F_L Length of first segment in low 16 bits; length of last
- * segment, if 2 segments, in high 16 bits.
- */
-#define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \
- va_F, va_L, len_F_L) \
- __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
- csum0, va_F, va_L, len_F_L)
-
-/** Send packet on PCIe interface.
- * @param fastio_index Fast I/O index.
- * @param flags Ack/csum/notify flags in low 3 bits.
- * @param confno Confirmation number to request, if notify flag set.
- * @param csum0 Checksum descriptor; hard-wired to 0, not needed for PCIe.
- * @param va_F Virtual address of the packet buffer.
- * @param va_L Virtual address of last segment, if 2 segments; hard-wired to 0.
- * @param len_F_L Length of the packet buffer in low 16 bits.
- */
-#define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \
- va_F, va_L, len_F_L) \
- __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \
- csum0, va_F, va_L, len_F_L)
-
-/** Sendv packet with 3 or 4 segments.
- * @param fastio_index Fast I/O index.
- * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
- * 1 in next 2 bits; expected checksum in high 16 bits.
- * @param confno Confirmation number to request, if notify flag set.
- * @param csum0 Checksum descriptor; if zero, no checksum.
- * @param va_F Virtual address of first segment.
- * @param va_L Virtual address of last segment (third segment if 3 segments,
- * fourth segment if 4 segments).
- * @param len_F_L Length of first segment in low 16 bits; length of last
- * segment in high 16 bits.
- * @param va_M0 Virtual address of "middle 0" segment; this segment is sent
- * second when there are three segments, and third if there are four.
- * @param va_M1 Virtual address of "middle 1" segment; this segment is sent
- * second when there are four segments.
- * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle
- * 1 segment, if 4 segments, in high 16 bits.
- */
-#define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \
- va_L, len_F_L, va_M0, va_M1, len_M0_M1) \
- __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
- csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1)
-
-/** Send vector of packets.
- * @param fastio_index Fast I/O index.
- * @param seqno Number of packets transmitted so far on this interface;
- * used to decide which packets should be acknowledged.
- * @param nentries Number of entries in vector.
- * @param va Virtual address of start of vector entry array.
- * @return 3-word netio_fastio_rv3_t structure. The structure's err member
- * is an error code, or zero if no error. The val0 member is the
- * updated value of seqno; it has been incremented by 1 for each
- * packet sent. That increment may be less than nentries if an
- * error occurred, or if some of the entries in the vector contain
- * handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the
- * updated value of nentries; it has been decremented by 1 for each
- * vector entry processed. Again, that decrement may be less than
- * nentries (leaving the returned value positive) if an error
- * occurred.
- */
-#define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \
- __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \
- nentries, va)
-
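-/* Sketch of consuming the 3-word return value documented above; the
- * helper name and calling context are assumptions.
- */
-static inline int
-__example_send_vector(uint32_t fastio_index, uint32_t* seqno,
-                      uint32_t nentries, uint32_t va)
-{
-  netio_fastio_rv3_t rv =
-    __netio_fastio_send_pkt_vec(fastio_index, *seqno, nentries, va);
-  *seqno = rv.val0;  /* Updated seqno: +1 per packet actually sent. */
-  /* rv.val1 is the updated nentries; positive means a partial send. */
-  return rv.err;     /* Zero if no error. */
-}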
-
-/** An egress DMA command for LEPP. */
-typedef struct
-{
- /** Is this a TSO transfer?
- *
- * NOTE: This field is always 0, to distinguish it from
- * lepp_tso_cmd_t. It must come first!
- */
- uint8_t tso : 1;
-
- /** Unused padding bits. */
- uint8_t _unused : 3;
-
- /** Should this packet be sent directly from caches instead of DRAM,
- * using hash-for-home to locate the packet data?
- */
- uint8_t hash_for_home : 1;
-
- /** Should we compute a checksum? */
- uint8_t compute_checksum : 1;
-
- /** Is this the final buffer for this packet?
- *
- * A single packet can be split over several input buffers (a "gather"
- * operation). This flag indicates that this is the last buffer
- * in a packet.
- */
- uint8_t end_of_packet : 1;
-
- /** Should LEPP advance 'comp_busy' when this DMA is fully finished? */
- uint8_t send_completion : 1;
-
- /** High bits of Client Physical Address of the start of the buffer
- * to be egressed.
- *
- * NOTE: Only 6 bits are actually needed here, as CPAs are
- * currently 38 bits. So two bits could be scavenged from this.
- */
- uint8_t cpa_hi;
-
- /** The number of bytes to be egressed. */
- uint16_t length;
-
- /** Low 32 bits of Client Physical Address of the start of the buffer
- * to be egressed.
- */
- uint32_t cpa_lo;
-
- /** Checksum information (only used if 'compute_checksum'). */
- __netio_checksum_header_t checksum_data;
-
-} lepp_cmd_t;
-
-
-/** A chunk of physical memory for a TSO egress. */
-typedef struct
-{
- /** The low bits of the CPA. */
- uint32_t cpa_lo;
- /** The high bits of the CPA. */
- uint16_t cpa_hi : 15;
- /** Should this packet be sent directly from caches instead of DRAM,
- * using hash-for-home to locate the packet data?
- */
- uint16_t hash_for_home : 1;
- /** The length in bytes. */
- uint16_t length;
-} lepp_frag_t;
-
-
-/** An LEPP command that handles TSO. */
-typedef struct
-{
- /** Is this a TSO transfer?
- *
- * NOTE: This field is always 1, to distinguish it from
- * lepp_cmd_t. It must come first!
- */
- uint8_t tso : 1;
-
- /** Unused padding bits. */
- uint8_t _unused : 7;
-
- /** Size of the header[] array in bytes. It must be in the range
- * [40, 127], which are the smallest header for a TCP packet over
- * Ethernet and the maximum possible prepend size supported by
- * hardware, respectively. Note that the array storage must be
- * padded out to a multiple of four bytes so that the following
- * LEPP command is aligned properly.
- */
- uint8_t header_size;
-
- /** Byte offset of the IP header in header[]. */
- uint8_t ip_offset;
-
- /** Byte offset of the TCP header in header[]. */
- uint8_t tcp_offset;
-
- /** The number of bytes to use for the payload of each packet,
- * except of course the last one, which may not have enough bytes.
- * This means that each Ethernet packet except the last will have a
- * size of header_size + payload_size.
- */
- uint16_t payload_size;
-
- /** The length of the 'frags' array that follows this struct. */
- uint16_t num_frags;
-
- /** The actual frags. */
- lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */];
-
- /*
- * The packet header template logically follows frags[],
- * but you can't declare that in C.
- *
- * uint32_t header[header_size_in_words_rounded_up];
- */
-
-} lepp_tso_cmd_t;
-
-
-/** An LEPP completion ring entry. */
-typedef void* lepp_comp_t;
-
-
-/** Maximum number of frags for one TSO command. This is adapted from
- * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
- * our page size of exactly 65536. We add one for a "body" fragment.
- */
-#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
-
-/** Total number of bytes needed for an lepp_tso_cmd_t. */
-#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
- (sizeof(lepp_tso_cmd_t) + \
- (num_frags) * sizeof(lepp_frag_t) + \
- (((header_size) + 3) & -4))
-
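-/* Worked example (illustrative): num_frags == 3, header_size == 54
- * (Ethernet + IPv4 + TCP with no options).  (((54) + 3) & -4) rounds
- * the header storage up to 56 bytes, so the command occupies
- * sizeof(lepp_tso_cmd_t) + 3 * sizeof(lepp_frag_t) + 56 bytes and the
- * next command in the ring stays 4-byte aligned.
- */
-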
-/** The size of the lepp "cmd" queue. */
-#define LEPP_CMD_QUEUE_BYTES \
- (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \
- (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t))
-
-/** The largest possible command that can go in lepp_queue_t::cmds[]. */
-#define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128)
-
-/** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive).
- */
-#define LEPP_CMD_LIMIT \
- (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE)
-
-/** The maximum number of completions in an LEPP queue. */
-#define LEPP_COMP_QUEUE_SIZE \
- ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t))
-
-/** Increment an index modulo the queue size. */
-#define LEPP_QINC(var) \
- (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1))
-
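-/* Portable restatement (an assumption about intent: __insn_mnz(x, y)
- * is the tile "move if non-zero" instruction, yielding y when x != 0
- * and 0 otherwise).  At var == LEPP_COMP_QUEUE_SIZE - 1 the first
- * operand is zero, so the index wraps back to 0.
- */
-#define LEPP_QINC_PORTABLE(var) \
-  ((var) = ((var) == LEPP_COMP_QUEUE_SIZE - 1) ? 0 : (var) + 1)
-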
-/** A queue used to convey egress commands from the client to LEPP. */
-typedef struct
-{
- /** Index of first completion not yet processed by user code.
- * If this is equal to comp_busy, there are no such completions.
- *
- * NOTE: This is only read/written by the user.
- */
- unsigned int comp_head;
-
- /** Index of first completion record not yet completed.
- * If this is equal to comp_tail, there are no such completions.
- * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever
- * a command with the 'completion' bit set is finished.
- *
- * NOTE: This is only written by LEPP, only read by the user.
- */
- volatile unsigned int comp_busy;
-
- /** Index of the first empty slot in the completion ring.
- * Entries from this up to but not including comp_head (in ring order)
- * can be filled in with completion data.
- *
- * NOTE: This is only read/written by the user.
- */
- unsigned int comp_tail;
-
- /** Byte index of first command enqueued for LEPP but not yet processed.
- *
- * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
- *
- * NOTE: LEPP advances this counter as soon as it no longer needs
- * the cmds[] storage for this entry, but the transfer is not actually
- * complete (i.e. the buffer pointed to by the command is no longer
- * needed) until comp_busy advances.
- *
- * If this is equal to cmd_tail, the ring is empty.
- *
- * NOTE: This is only written by LEPP, only read by the user.
- */
- volatile unsigned int cmd_head;
-
- /** Byte index of first empty slot in the command ring. This field can
- * be incremented up to but not equal to cmd_head (because that would
- * mean the ring is empty).
- *
- * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
- *
- * NOTE: This is read/written by the user, only read by LEPP.
- */
- volatile unsigned int cmd_tail;
-
- /** A ring of variable-sized egress DMA commands.
- *
- * NOTE: Only written by the user, only read by LEPP.
- */
- char cmds[LEPP_CMD_QUEUE_BYTES]
- __attribute__((aligned(CHIP_L2_LINE_SIZE())));
-
- /** A ring of user completion data.
- * NOTE: Only read/written by the user.
- */
- lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE]
- __attribute__((aligned(CHIP_L2_LINE_SIZE())));
-} lepp_queue_t;
-
-
-/** An internal helper function for determining the number of entries
- * available in a ring buffer, given that there is one sentinel.
- */
-static inline unsigned int
-_lepp_num_free_slots(unsigned int head, unsigned int tail)
-{
- /*
- * One entry is reserved for use as a sentinel, to distinguish
- * "empty" from "full". So we compute
- * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without a slow % operation.
- */
- return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0);
-}
-
-
-/** Returns how many new comp entries can be enqueued. */
-static inline unsigned int
-lepp_num_free_comp_slots(const lepp_queue_t* q)
-{
- return _lepp_num_free_slots(q->comp_head, q->comp_tail);
-}
-
-static inline int
-lepp_qsub(int v1, int v2)
-{
- int delta = v1 - v2;
- return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE);
-}
-
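-/* Worked example of the sentinel arithmetic, using a made-up
- * LEPP_COMP_QUEUE_SIZE of 8: with head == 2 and tail == 5,
- * _lepp_num_free_slots(2, 5) == (2 - 5 - 1) + 8 == 4, i.e. slots 5, 6,
- * 7 and 0 may be filled, slot 1 is the sentinel, and slots 2, 3 and 4
- * are in use.  Correspondingly lepp_qsub(5, 2) == 3 occupied slots.
- */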
-
-/** FIXME: Check this from linux, via a new "pwrite()" call. */
-#define LIPP_VERSION 1
-
-
-/** We use exactly two bytes of alignment padding. */
-#define LIPP_PACKET_PADDING 2
-
-/** The minimum size of a "small" buffer (including the padding). */
-#define LIPP_SMALL_PACKET_SIZE 128
-
-/*
- * NOTE: The following two values should total to less than around
- * 13582, to keep the total size used for "lipp_state_t" below 64K.
- */
-
-/** The maximum number of "small" buffers.
- * This is enough for 53 network cpus with 128 credits. Note that
- * if these are exhausted, we will fall back to using large buffers.
- */
-#define LIPP_SMALL_BUFFERS 6785
-
-/** The maximum number of "large" buffers.
- * This is enough for 53 network cpus with 128 credits.
- */
-#define LIPP_LARGE_BUFFERS 6785
-
-#endif /* __DRV_XGBE_INTF_H__ */
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
deleted file mode 100644
index f10b332b3b65..000000000000
--- a/arch/tile/include/hv/hypervisor.h
+++ /dev/null
@@ -1,2656 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file hypervisor.h
- * The hypervisor's public API.
- */
-
-#ifndef _HV_HV_H
-#define _HV_HV_H
-
-#include <arch/chip.h>
-
-/* Linux builds want unsigned long constants, but assembler wants numbers */
-#ifdef __ASSEMBLER__
-/** One, for assembler */
-#define __HV_SIZE_ONE 1
-#elif !defined(__tile__) && CHIP_VA_WIDTH() > 32
-/** One, for 64-bit on host */
-#define __HV_SIZE_ONE 1ULL
-#else
-/** One, for Linux */
-#define __HV_SIZE_ONE 1UL
-#endif
-
-/** The log2 of the span of a level-1 page table, in bytes.
- */
-#define HV_LOG2_L1_SPAN 32
-
-/** The span of a level-1 page table, in bytes.
- */
-#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
-
-/** The log2 of the initial size of small pages, in bytes.
- * See HV_DEFAULT_PAGE_SIZE_SMALL.
- */
-#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
-
-/** The initial size of small pages, in bytes. This value should be verified
- * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
- * It may also be modified when installing a new context.
- */
-#define HV_DEFAULT_PAGE_SIZE_SMALL \
- (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
-
-/** The log2 of the initial size of large pages, in bytes.
- * See HV_DEFAULT_PAGE_SIZE_LARGE.
- */
-#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
-
-/** The initial size of large pages, in bytes. This value should be verified
- * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
- * It may also be modified when installing a new context.
- */
-#define HV_DEFAULT_PAGE_SIZE_LARGE \
- (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
-
-#if CHIP_VA_WIDTH() > 32
-
-/** The log2 of the initial size of jumbo pages, in bytes.
- * See HV_DEFAULT_PAGE_SIZE_JUMBO.
- */
-#define HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO 32
-
-/** The initial size of jumbo pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO).
- * It may also be modified when installing a new context.
- */
-#define HV_DEFAULT_PAGE_SIZE_JUMBO \
- (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO)
-
-#endif
-
-/** The log2 of the granularity at which page tables must be aligned;
- * in other words, the CPA for a page table must have this many zero
- * bits at the bottom of the address.
- */
-#define HV_LOG2_PAGE_TABLE_ALIGN 11
-
-/** The granularity at which page tables must be aligned.
- */
-#define HV_PAGE_TABLE_ALIGN (__HV_SIZE_ONE << HV_LOG2_PAGE_TABLE_ALIGN)
-
-/** Normal start of hypervisor glue in client physical memory. */
-#define HV_GLUE_START_CPA 0x10000
-
-/** This much space is reserved at HV_GLUE_START_CPA
- * for the hypervisor glue. The client program must start at
- * some address higher than this, and in particular the address of
- * its text section should be equal to zero modulo HV_PAGE_SIZE_LARGE
- * so that relative offsets to the HV glue are correct.
- */
-#define HV_GLUE_RESERVED_SIZE 0x10000
-
-/** Each entry in the hv dispatch array takes this many bytes. */
-#define HV_DISPATCH_ENTRY_SIZE 32
-
-/** Version of the hypervisor interface defined by this file */
-#define _HV_VERSION 13
-
-/** Last version of the hypervisor interface with old hv_init() ABI.
- *
- * The change from version 12 to version 13 corresponds to launching
- * the client by default at PL2 instead of PL1 (corresponding to the
- * hv itself running at PL3 instead of PL2). To make this explicit,
- * the hv_init() API was also extended so the client can report its
- * desired PL, resulting in a more helpful failure diagnostic. If you
- * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl
- * argument, the hypervisor will assume client_pl = 1.
- *
- * Note that this is a deprecated solution and we do not expect to
- * support clients of the Tilera hypervisor running at PL1 indefinitely.
- */
-#define _HV_VERSION_OLD_HV_INIT 12
-
-/* Index into hypervisor interface dispatch code blocks.
- *
- * Hypervisor calls are invoked from user space by calling code
- * at an address HV_BASE_ADDRESS + (index) * HV_DISPATCH_ENTRY_SIZE,
- * where index is one of these enum values.
- *
- * Normally a supervisor is expected to produce a set of symbols
- * starting at HV_BASE_ADDRESS that obey this convention, but a user
- * program could call directly through function pointers if desired.
- *
- * These numbers are part of the binary API and will not be changed
- * without updating HV_VERSION, which should be a rare event.
- */
-
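-/* A hedged sketch of the convention described above: calling through a
- * raw function pointer instead of the glue symbols.  HV_BASE_ADDRESS is
- * assumed to exist elsewhere; it is not defined in this header.
- */
-#if 0 /* illustration only */
-typedef long (*__hv_dispatch_fn)(int);
-static long __example_raw_sysconf(int query)
-{
-  __hv_dispatch_fn fn = (__hv_dispatch_fn)
-    (HV_BASE_ADDRESS + HV_DISPATCH_SYSCONF * HV_DISPATCH_ENTRY_SIZE);
-  return fn(query);
-}
-#endif
-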
-/** reserved. */
-#define _HV_DISPATCH_RESERVED 0
-
-/** hv_init */
-#define HV_DISPATCH_INIT 1
-
-/** hv_install_context */
-#define HV_DISPATCH_INSTALL_CONTEXT 2
-
-/** hv_sysconf */
-#define HV_DISPATCH_SYSCONF 3
-
-/** hv_get_rtc */
-#define HV_DISPATCH_GET_RTC 4
-
-/** hv_set_rtc */
-#define HV_DISPATCH_SET_RTC 5
-
-/** hv_flush_asid */
-#define HV_DISPATCH_FLUSH_ASID 6
-
-/** hv_flush_page */
-#define HV_DISPATCH_FLUSH_PAGE 7
-
-/** hv_flush_pages */
-#define HV_DISPATCH_FLUSH_PAGES 8
-
-/** hv_restart */
-#define HV_DISPATCH_RESTART 9
-
-/** hv_halt */
-#define HV_DISPATCH_HALT 10
-
-/** hv_power_off */
-#define HV_DISPATCH_POWER_OFF 11
-
-/** hv_inquire_physical */
-#define HV_DISPATCH_INQUIRE_PHYSICAL 12
-
-/** hv_inquire_memory_controller */
-#define HV_DISPATCH_INQUIRE_MEMORY_CONTROLLER 13
-
-/** hv_inquire_virtual */
-#define HV_DISPATCH_INQUIRE_VIRTUAL 14
-
-/** hv_inquire_asid */
-#define HV_DISPATCH_INQUIRE_ASID 15
-
-/** hv_nanosleep */
-#define HV_DISPATCH_NANOSLEEP 16
-
-/** hv_console_read_if_ready */
-#define HV_DISPATCH_CONSOLE_READ_IF_READY 17
-
-/** hv_console_write */
-#define HV_DISPATCH_CONSOLE_WRITE 18
-
-/** hv_downcall_dispatch */
-#define HV_DISPATCH_DOWNCALL_DISPATCH 19
-
-/** hv_inquire_topology */
-#define HV_DISPATCH_INQUIRE_TOPOLOGY 20
-
-/** hv_fs_findfile */
-#define HV_DISPATCH_FS_FINDFILE 21
-
-/** hv_fs_fstat */
-#define HV_DISPATCH_FS_FSTAT 22
-
-/** hv_fs_pread */
-#define HV_DISPATCH_FS_PREAD 23
-
-/** hv_physaddr_read64 */
-#define HV_DISPATCH_PHYSADDR_READ64 24
-
-/** hv_physaddr_write64 */
-#define HV_DISPATCH_PHYSADDR_WRITE64 25
-
-/** hv_get_command_line */
-#define HV_DISPATCH_GET_COMMAND_LINE 26
-
-/** hv_set_caching */
-#define HV_DISPATCH_SET_CACHING 27
-
-/** hv_bzero_page */
-#define HV_DISPATCH_BZERO_PAGE 28
-
-/** hv_register_message_state */
-#define HV_DISPATCH_REGISTER_MESSAGE_STATE 29
-
-/** hv_send_message */
-#define HV_DISPATCH_SEND_MESSAGE 30
-
-/** hv_receive_message */
-#define HV_DISPATCH_RECEIVE_MESSAGE 31
-
-/** hv_inquire_context */
-#define HV_DISPATCH_INQUIRE_CONTEXT 32
-
-/** hv_start_all_tiles */
-#define HV_DISPATCH_START_ALL_TILES 33
-
-/** hv_dev_open */
-#define HV_DISPATCH_DEV_OPEN 34
-
-/** hv_dev_close */
-#define HV_DISPATCH_DEV_CLOSE 35
-
-/** hv_dev_pread */
-#define HV_DISPATCH_DEV_PREAD 36
-
-/** hv_dev_pwrite */
-#define HV_DISPATCH_DEV_PWRITE 37
-
-/** hv_dev_poll */
-#define HV_DISPATCH_DEV_POLL 38
-
-/** hv_dev_poll_cancel */
-#define HV_DISPATCH_DEV_POLL_CANCEL 39
-
-/** hv_dev_preada */
-#define HV_DISPATCH_DEV_PREADA 40
-
-/** hv_dev_pwritea */
-#define HV_DISPATCH_DEV_PWRITEA 41
-
-/** hv_flush_remote */
-#define HV_DISPATCH_FLUSH_REMOTE 42
-
-/** hv_console_putc */
-#define HV_DISPATCH_CONSOLE_PUTC 43
-
-/** hv_inquire_tiles */
-#define HV_DISPATCH_INQUIRE_TILES 44
-
-/** hv_confstr */
-#define HV_DISPATCH_CONFSTR 45
-
-/** hv_reexec */
-#define HV_DISPATCH_REEXEC 46
-
-/** hv_set_command_line */
-#define HV_DISPATCH_SET_COMMAND_LINE 47
-
-#if !CHIP_HAS_IPI()
-
-/** hv_clear_intr */
-#define HV_DISPATCH_CLEAR_INTR 48
-
-/** hv_enable_intr */
-#define HV_DISPATCH_ENABLE_INTR 49
-
-/** hv_disable_intr */
-#define HV_DISPATCH_DISABLE_INTR 50
-
-/** hv_raise_intr */
-#define HV_DISPATCH_RAISE_INTR 51
-
-/** hv_trigger_ipi */
-#define HV_DISPATCH_TRIGGER_IPI 52
-
-#endif /* !CHIP_HAS_IPI() */
-
-/** hv_store_mapping */
-#define HV_DISPATCH_STORE_MAPPING 53
-
-/** hv_inquire_realpa */
-#define HV_DISPATCH_INQUIRE_REALPA 54
-
-/** hv_flush_all */
-#define HV_DISPATCH_FLUSH_ALL 55
-
-#if CHIP_HAS_IPI()
-/** hv_get_ipi_pte */
-#define HV_DISPATCH_GET_IPI_PTE 56
-#endif
-
-/** hv_set_pte_super_shift */
-#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57
-
-/** hv_console_set_ipi */
-#define HV_DISPATCH_CONSOLE_SET_IPI 63
-
-/** hv_send_nmi */
-#define HV_DISPATCH_SEND_NMI 65
-
-/** One more than the largest dispatch value */
-#define _HV_DISPATCH_END 66
-
-
-#ifndef __ASSEMBLER__
-
-#ifdef __KERNEL__
-#include <asm/types.h>
-typedef u32 __hv32; /**< 32-bit value */
-typedef u64 __hv64; /**< 64-bit value */
-#else
-#include <stdint.h>
-typedef uint32_t __hv32; /**< 32-bit value */
-typedef uint64_t __hv64; /**< 64-bit value */
-#endif
-
-
-/** Hypervisor physical address. */
-typedef __hv64 HV_PhysAddr;
-
-#if CHIP_VA_WIDTH() > 32
-/** Hypervisor virtual address. */
-typedef __hv64 HV_VirtAddr;
-#else
-/** Hypervisor virtual address. */
-typedef __hv32 HV_VirtAddr;
-#endif /* CHIP_VA_WIDTH() > 32 */
-
-/** Hypervisor ASID. */
-typedef unsigned int HV_ASID;
-
-/** Hypervisor tile location for a memory access
- * ("location overridden target").
- */
-typedef unsigned int HV_LOTAR;
-
-/** Hypervisor size of a page. */
-typedef unsigned long HV_PageSize;
-
-/** A page table entry.
- */
-typedef struct
-{
- __hv64 val; /**< Value of PTE */
-} HV_PTE;
-
-/** Hypervisor error code. */
-typedef int HV_Errno;
-
-#endif /* !__ASSEMBLER__ */
-
-#define HV_OK 0 /**< No error */
-#define HV_EINVAL -801 /**< Invalid argument */
-#define HV_ENODEV -802 /**< No such device */
-#define HV_ENOENT -803 /**< No such file or directory */
-#define HV_EBADF -804 /**< Bad file number */
-#define HV_EFAULT -805 /**< Bad address */
-#define HV_ERECIP -806 /**< Bad recipients */
-#define HV_E2BIG -807 /**< Message too big */
-#define HV_ENOTSUP -808 /**< Service not supported */
-#define HV_EBUSY -809 /**< Device busy */
-#define HV_ENOSYS -810 /**< Invalid syscall */
-#define HV_EPERM -811 /**< No permission */
-#define HV_ENOTREADY -812 /**< Device not ready */
-#define HV_EIO -813 /**< I/O error */
-#define HV_ENOMEM -814 /**< Out of memory */
-#define HV_EAGAIN -815 /**< Try again */
-
-#define HV_ERR_MAX -801 /**< Largest HV error code */
-#define HV_ERR_MIN -815 /**< Smallest HV error code */
-
-#ifndef __ASSEMBLER__
-
-/** Pass HV_VERSION to hv_init to request this version of the interface. */
-typedef enum {
- HV_VERSION = _HV_VERSION,
- HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT,
-
-} HV_VersionNumber;
-
-/** Initializes the hypervisor.
- *
- * @param interface_version_number The version of the hypervisor interface
- * that this program expects, typically HV_VERSION.
- * @param chip_num Architecture number of the chip the client was built for.
- * @param chip_rev_num Revision number of the chip the client was built for.
- * @param client_pl Privilege level the client is built for
- * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT).
- */
-void hv_init(HV_VersionNumber interface_version_number,
- int chip_num, int chip_rev_num, int client_pl);
-
-
-/** Queries we can make for hv_sysconf().
- *
- * These numbers are part of the binary API and guaranteed not to change.
- */
-typedef enum {
- /** An invalid value; do not use. */
- _HV_SYSCONF_RESERVED = 0,
-
- /** The length of the glue section containing the hv_ procs, in bytes. */
- HV_SYSCONF_GLUE_SIZE = 1,
-
- /** The size of small pages, in bytes. */
- HV_SYSCONF_PAGE_SIZE_SMALL = 2,
-
- /** The size of large pages, in bytes. */
- HV_SYSCONF_PAGE_SIZE_LARGE = 3,
-
- /** Processor clock speed, in hertz. */
- HV_SYSCONF_CPU_SPEED = 4,
-
- /** Processor temperature, in degrees Kelvin. The value
- * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees
- * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates
- * that the temperature has hit an upper limit and is no longer being
- * accurately tracked.
- */
- HV_SYSCONF_CPU_TEMP = 5,
-
- /** Board temperature, in degrees Kelvin. The value
- * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees
- * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates
- * that the temperature has hit an upper limit and is no longer being
- * accurately tracked.
- */
- HV_SYSCONF_BOARD_TEMP = 6,
-
- /** Legal page size bitmask for hv_install_context().
- * For example, if 16KB and 64KB small pages are supported,
- * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
- */
- HV_SYSCONF_VALID_PAGE_SIZES = 7,
-
- /** The size of jumbo pages, in bytes.
- * If no jumbo pages are available, zero will be returned.
- */
- HV_SYSCONF_PAGE_SIZE_JUMBO = 8,
-
-} HV_SysconfQuery;
-
-/** Offset to subtract from returned Kelvin temperature to get degrees
- Celsius. */
-#define HV_SYSCONF_TEMP_KTOC 273
-
-/** Pseudo-temperature value indicating that the temperature has
- * pegged at its upper limit and is no longer accurate; note that this is
- * the value after subtracting HV_SYSCONF_TEMP_KTOC. */
-#define HV_SYSCONF_OVERTEMP 999
-
-/** Query a configuration value from the hypervisor.
- * @param query Which value is requested (HV_SYSCONF_xxx).
- * @return The requested value, or -1 if the requested value is illegal or
- * unavailable.
- */
-long hv_sysconf(HV_SysconfQuery query);
-
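-/* Usage sketch (illustrative helper, not part of the API): convert the
- * CPU temperature to Celsius using the constants defined above.
- */
-static inline long __example_cpu_temp_celsius(void)
-{
-  long t = hv_sysconf(HV_SYSCONF_CPU_TEMP) - HV_SYSCONF_TEMP_KTOC;
-  /* t == HV_SYSCONF_OVERTEMP means the reading is no longer accurate. */
-  return t;
-}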
-
-/** Queries we can make for hv_confstr().
- *
- * These numbers are part of the binary API and guaranteed not to change.
- */
-typedef enum {
- /** An invalid value; do not use. */
- _HV_CONFSTR_RESERVED = 0,
-
- /** Board part number. */
- HV_CONFSTR_BOARD_PART_NUM = 1,
-
- /** Board serial number. */
- HV_CONFSTR_BOARD_SERIAL_NUM = 2,
-
- /** Chip serial number. */
- HV_CONFSTR_CHIP_SERIAL_NUM = 3,
-
- /** Board revision level. */
- HV_CONFSTR_BOARD_REV = 4,
-
- /** Hypervisor software version. */
- HV_CONFSTR_HV_SW_VER = 5,
-
- /** The name for this chip model. */
- HV_CONFSTR_CHIP_MODEL = 6,
-
- /** Human-readable board description. */
- HV_CONFSTR_BOARD_DESC = 7,
-
- /** Human-readable description of the hypervisor configuration. */
- HV_CONFSTR_HV_CONFIG = 8,
-
- /** Human-readable version string for the boot image (for instance,
- * who built it and when, what configuration file was used). */
- HV_CONFSTR_HV_CONFIG_VER = 9,
-
- /** Mezzanine part number. */
- HV_CONFSTR_MEZZ_PART_NUM = 10,
-
- /** Mezzanine serial number. */
- HV_CONFSTR_MEZZ_SERIAL_NUM = 11,
-
- /** Mezzanine revision level. */
- HV_CONFSTR_MEZZ_REV = 12,
-
- /** Human-readable mezzanine description. */
- HV_CONFSTR_MEZZ_DESC = 13,
-
- /** Control path for the onboard network switch. */
- HV_CONFSTR_SWITCH_CONTROL = 14,
-
- /** Chip revision level. */
- HV_CONFSTR_CHIP_REV = 15,
-
- /** CPU module part number. */
- HV_CONFSTR_CPUMOD_PART_NUM = 16,
-
- /** CPU module serial number. */
- HV_CONFSTR_CPUMOD_SERIAL_NUM = 17,
-
- /** CPU module revision level. */
- HV_CONFSTR_CPUMOD_REV = 18,
-
- /** Human-readable CPU module description. */
- HV_CONFSTR_CPUMOD_DESC = 19,
-
- /** Per-tile hypervisor statistics. When this identifier is specified,
- * the hv_confstr call takes two extra arguments. The first is the
- * HV_XY_TO_LOTAR of the target tile's coordinates. The second is
- * a flag word. The only current flag is the lowest bit, which means
- * "zero out the stats instead of retrieving them"; in this case the
- * buffer and buffer length are ignored. */
- HV_CONFSTR_HV_STATS = 20
-
-} HV_ConfstrQuery;
-
-/** Query a configuration string from the hypervisor.
- *
- * @param query Identifier for the specific string to be retrieved
- * (HV_CONFSTR_xxx). Some strings may require or permit extra
- * arguments to be appended which select specific objects to be
- * described; see the string descriptions above.
- * @param buf Buffer in which to place the string.
- * @param len Length of the buffer.
- * @return If query is valid, then the length of the corresponding string,
- * including the trailing null; if this is greater than len, the string
- * was truncated. If query is invalid, HV_EINVAL. If the specified
- * buffer is not writable by the client, HV_EFAULT.
- */
-int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len, ...);
-
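-/* Illustrative call (names assumed): fetch the chip model string and
- * detect truncation per the @return convention above.
- */
-static inline int __example_chip_model(char* buf, int len)
-{
-  int n = hv_confstr(HV_CONFSTR_CHIP_MODEL,
-                     (HV_VirtAddr)(unsigned long)buf, len);
-  if (n < 0)
-    return n;               /* HV_EINVAL or HV_EFAULT. */
-  return (n > len) ? 1 : 0; /* Nonzero: the string was truncated. */
-}
-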
-/** Tile coordinate */
-typedef struct
-{
- /** X coordinate, relative to supervisor's top-left coordinate */
- int x;
-
- /** Y coordinate, relative to supervisor's top-left coordinate */
- int y;
-} HV_Coord;
-
-
-#if CHIP_HAS_IPI()
-
-/** Get the PTE for sending an IPI to a particular tile.
- *
- * @param tile Tile which will receive the IPI.
- * @param pl Indicates which IPI registers: 0 = IPI_0, 1 = IPI_1.
- * @param pte Filled with resulting PTE.
- * @result Zero if no error, non-zero for invalid parameters.
- */
-int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte);
-
-/** Configure the console interrupt.
- *
- * When the console client interrupt is enabled, the hypervisor will
- * deliver the specified IPI to the client in the following situations:
- *
- * - The console has at least one character available for input.
- *
- * - The console can accept new characters for output, and the last call
- * to hv_console_write() did not write all of the characters requested
- * by the client.
- *
- * Note that in some system configurations, the console interrupt will not
- * be available; clients should be prepared for this routine to fail and
- * to fall back to periodic console polling in that case.
- *
- * @param ipi Index of the IPI register which will receive the interrupt.
- * @param event IPI event number for console interrupt. If less than 0,
- * disable the console IPI interrupt.
- * @param coord Tile to be targeted for console interrupt.
- * @return 0 on success; otherwise HV_EINVAL for an illegal parameter, or
- *         HV_ENOTSUP if console interrupts are not available.
- */
-int hv_console_set_ipi(int ipi, int event, HV_Coord coord);
-
-#else /* !CHIP_HAS_IPI() */
-
-/** A set of interrupts. */
-typedef __hv32 HV_IntrMask;
-
-/** The low interrupt numbers are reserved for use by the client in
- * delivering IPIs. Any interrupt numbers higher than this value are
- * reserved for use by HV device drivers. */
-#define HV_MAX_IPI_INTERRUPT 7
-
-/** Enable a set of device interrupts.
- *
- * @param enab_mask Bitmap of interrupts to enable.
- */
-void hv_enable_intr(HV_IntrMask enab_mask);
-
-/** Disable a set of device interrupts.
- *
- * @param disab_mask Bitmap of interrupts to disable.
- */
-void hv_disable_intr(HV_IntrMask disab_mask);
-
-/** Clear a set of device interrupts.
- *
- * @param clear_mask Bitmap of interrupts to clear.
- */
-void hv_clear_intr(HV_IntrMask clear_mask);
-
-/** Raise a set of device interrupts.
- *
- * @param raise_mask Bitmap of interrupts to raise.
- */
-void hv_raise_intr(HV_IntrMask raise_mask);
-
-/** Trigger a one-shot interrupt on some tile
- *
- * @param tile Which tile to interrupt.
- * @param interrupt Interrupt number to trigger; must be between 0 and
- * HV_MAX_IPI_INTERRUPT.
- * @return HV_OK on success, or a hypervisor error code.
- */
-HV_Errno hv_trigger_ipi(HV_Coord tile, int interrupt);
-
-#endif /* !CHIP_HAS_IPI() */
-
-/** Store memory mapping in debug memory so that external debugger can read it.
- * A maximum of 16 entries can be stored.
- *
- * @param va VA of memory that is mapped.
- * @param len Length of mapped memory.
- * @param pa PA of memory that is mapped.
- * @return 0 on success, -1 if the maximum number of mappings is exceeded.
- */
-int hv_store_mapping(HV_VirtAddr va, unsigned int len, HV_PhysAddr pa);
-
-/** Given a client PA and a length, return its real (HV) PA.
- *
- * @param cpa Client physical address.
- * @param len Length of mapped memory.
- * @return physical address, or -1 if cpa or len is not valid.
- */
-HV_PhysAddr hv_inquire_realpa(HV_PhysAddr cpa, unsigned int len);
-
-/** RTC return flag for no RTC chip present.
- */
-#define HV_RTC_NO_CHIP 0x1
-
-/** RTC return flag for low-voltage condition, indicating that the battery
- * has died and the time read is unreliable.
- */
-#define HV_RTC_LOW_VOLTAGE 0x2
-
-/** Date/Time of day */
-typedef struct {
-#if CHIP_WORD_SIZE() > 32
- __hv64 tm_sec; /**< Seconds, 0-59 */
- __hv64 tm_min; /**< Minutes, 0-59 */
- __hv64 tm_hour; /**< Hours, 0-23 */
- __hv64 tm_mday; /**< Day of month, 0-30 */
- __hv64 tm_mon; /**< Month, 0-11 */
- __hv64 tm_year; /**< Years since 1900, 0-199 */
- __hv64 flags; /**< Return flags, 0 if no error */
-#else
- __hv32 tm_sec; /**< Seconds, 0-59 */
- __hv32 tm_min; /**< Minutes, 0-59 */
- __hv32 tm_hour; /**< Hours, 0-23 */
- __hv32 tm_mday; /**< Day of month, 0-30 */
- __hv32 tm_mon; /**< Month, 0-11 */
- __hv32 tm_year; /**< Years since 1900, 0-199 */
- __hv32 flags; /**< Return flags, 0 if no error */
-#endif
-} HV_RTCTime;
-
-/** Read the current time-of-day clock.
- * @return HV_RTCTime of current time (GMT).
- */
-HV_RTCTime hv_get_rtc(void);
-
-
-/** Set the current time-of-day clock.
- * @param time time to reset time-of-day to (GMT).
- */
-void hv_set_rtc(HV_RTCTime time);
-
-/** Installs a context, comprising a page table and other attributes.
- *
- * Once this service completes, page_table will be used to translate
- * subsequent virtual address references to physical memory.
- *
- * Installing a context does not cause an implicit TLB flush. Before
- * reusing an ASID value for a different address space, the client is
- * expected to flush old references from the TLB with hv_flush_asid().
- * (Alternately, hv_flush_all() may be used to flush many ASIDs at once.)
- * After invalidating a page table entry, changing its attributes, or
- * changing its target CPA, the client is expected to flush old references
- * from the TLB with hv_flush_page() or hv_flush_pages(). Making a
- * previously invalid page valid does not require a flush.
- *
- * Specifying an invalid ASID, or an invalid CPA (client physical address)
- * (either as page_table_pointer, or within the referenced table),
- * or another page table data item documented as above as illegal may
- * lead to client termination; since the validation of the table is
- * done as needed, this may happen before the service returns, or at
- * some later time, or never, depending upon the client's pattern of
- * memory references. Page table entries which supply translations for
- * invalid virtual addresses may result in client termination, or may
- * be silently ignored. "Invalid" in this context means a value which
- * was not provided to the client via the appropriate hv_inquire_* routine.
- *
- * To support changing the instruction VAs at the same time as
- * installing the new page table, this call explicitly supports
- * setting the "lr" register to a different address and then jumping
- * directly to the hv_install_context() routine. In this case, the
- * new page table does not need to contain any mapping for the
- * hv_install_context address itself.
- *
- * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
- * if multiple flags are specified, HV_EINVAL is returned.
- * Specifying none of the flags results in using the default page size.
- * All cores participating in a given client must request the same
- * page size, or the results are undefined.
- *
- * @param page_table Root of the page table.
- * @param access PTE providing info on how to read the page table. This
- * value must be consistent between multiple tiles sharing a page table,
- * and must also be consistent with any virtual mappings the client
- * may be using to access the page table.
- * @param asid HV_ASID the page table is to be used for.
- * @param flags Context flags, denoting attributes or privileges of the
- * current context (HV_CTX_xxx).
- * @return Zero on success, or a hypervisor error code on failure.
- */
-int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
- __hv32 flags);
-
-#endif /* !__ASSEMBLER__ */
-
-#define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from
- PL0. */
-
-#define HV_CTX_PG_SM_4K 0x10 /**< Use 4K small pages, if available. */
-#define HV_CTX_PG_SM_16K 0x20 /**< Use 16K small pages, if available. */
-#define HV_CTX_PG_SM_64K 0x40 /**< Use 64K small pages, if available. */
-#define HV_CTX_PG_SM_MASK 0xf0 /**< Mask of all possible small pages. */
-
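-/* Example flag selection (illustrative): a client wanting 64K small
- * pages would first confirm HV_CTX_PG_SM_64K is set in
- * hv_sysconf(HV_SYSCONF_VALID_PAGE_SIZES), then pass exactly that one
- * flag to hv_install_context(); combining two HV_CTX_PG_SM_* flags
- * yields HV_EINVAL, and passing none selects the default page size.
- */
-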
-#ifndef __ASSEMBLER__
-
-
-/** Set the number of pages ganged together by HV_PTE_SUPER at a
- * particular level of the page table.
- *
- * The current TILE-Gx hardware only supports powers of four
- * (i.e. log2_count must be a multiple of two), and the requested
- * "super" page size must be less than the span of the next level in
- * the page table. The largest size that can be requested is 64GB.
- *
- * The shift value is initially "0" for all page table levels,
- * indicating that the HV_PTE_SUPER bit is effectively ignored.
- *
- * If you change the count from one non-zero value to another, the
- * hypervisor will flush the entire TLB and TSB to avoid confusion.
- *
- * @param level Page table level (0, 1, or 2)
- * @param log2_count Base-2 log of the number of pages to gang together,
- * i.e. how much to shift left the base page size for the super page size.
- * @return Zero on success, or a hypervisor error code on failure.
- */
-int hv_set_pte_super_shift(int level, int log2_count);
-
-
-/** Value returned from hv_inquire_context(). */
-typedef struct
-{
- /** Physical address of page table */
- HV_PhysAddr page_table;
-
- /** PTE which defines access method for top of page table */
- HV_PTE access;
-
- /** ASID associated with this page table */
- HV_ASID asid;
-
- /** Context flags */
- __hv32 flags;
-} HV_Context;
-
-/** Retrieve information about the currently installed context.
- * @return The data passed to the last successful hv_install_context call.
- */
-HV_Context hv_inquire_context(void);
-
-
-/** Flushes all translations associated with the named address space
- * identifier from the TLB and any other hypervisor data structures.
- * Translations installed with the "global" bit are not flushed.
- *
- * Specifying an invalid ASID may lead to client termination. "Invalid"
- * in this context means a value which was not provided to the client
- * via <tt>hv_inquire_asid()</tt>.
- *
- * @param asid HV_ASID whose entries are to be flushed.
- * @return Zero on success, or a hypervisor error code on failure.
-*/
-int hv_flush_asid(HV_ASID asid);
-
-
-/** Flushes all translations associated with the named virtual address
- * and page size from the TLB and other hypervisor data structures. Only
- * pages visible to the current ASID are affected; note that this includes
- * global pages in addition to pages specific to the current ASID.
- *
- * The supplied VA need not be aligned; it may be anywhere in the
- * subject page.
- *
- * Specifying an invalid virtual address may lead to client termination,
- * or may silently succeed. "Invalid" in this context means a value
- * which was not provided to the client via hv_inquire_virtual.
- *
- * @param address Address of the page to flush.
- * @param page_size Size of pages to assume.
- * @return Zero on success, or a hypervisor error code on failure.
- */
-int hv_flush_page(HV_VirtAddr address, HV_PageSize page_size);
-
-
-/** Flushes all translations associated with the named virtual address range
- * and page size from the TLB and other hypervisor data structures. Only
- * pages visible to the current ASID are affected; note that this includes
- * global pages in addition to pages specific to the current ASID.
- *
- * The supplied VA need not be aligned; it may be anywhere in the
- * subject page.
- *
- * Specifying an invalid virtual address may lead to client termination,
- * or may silently succeed. "Invalid" in this context means a value
- * which was not provided to the client via hv_inquire_virtual.
- *
- * @param start Address to flush.
- * @param page_size Size of pages to assume.
- * @param size The number of bytes to flush. Any page in the range
- * [start, start + size) will be flushed from the TLB.
- * @return Zero on success, or a hypervisor error code on failure.
- */
-int hv_flush_pages(HV_VirtAddr start, HV_PageSize page_size,
- unsigned long size);
-
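-/* Sketch of the flush discipline described at hv_install_context():
- * after modifying valid PTEs, flush the affected range before relying
- * on the new translation.  The helper name is an assumption.
- */
-static inline int __example_remap_flush(HV_VirtAddr va, unsigned long bytes,
-                                        HV_PageSize pgsize)
-{
-  /* ...PTEs covering [va, va + bytes) were just changed... */
-  return hv_flush_pages(va, pgsize, bytes);
-}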
-
-/** Flushes all non-global translations (if preserve_global is true),
- * or absolutely all translations (if preserve_global is false).
- *
- * @param preserve_global Non-zero if we want to preserve "global" mappings.
- * @return Zero on success, or a hypervisor error code on failure.
-*/
-int hv_flush_all(int preserve_global);
-
-
-/** Restart machine with optional restart command and optional args.
- * @param cmd Const pointer to command to restart with, or NULL
- * @param args Const pointer to argument string to restart with, or NULL
- */
-void hv_restart(HV_VirtAddr cmd, HV_VirtAddr args);
-
-
-/** Halt machine. */
-void hv_halt(void);
-
-
-/** Power off machine. */
-void hv_power_off(void);
-
-
-/** Re-enter virtual-is-physical memory translation mode and restart
- * execution at a given address.
- * @param entry Client physical address at which to begin execution.
- * @return A hypervisor error code on failure; if the operation is
- * successful the call does not return.
- */
-int hv_reexec(HV_PhysAddr entry);
-
-
-/** Chip topology */
-typedef struct
-{
- /** Relative coordinates of the querying tile */
- HV_Coord coord;
-
- /** Width of the querying supervisor's tile rectangle. */
- int width;
-
- /** Height of the querying supervisor's tile rectangle. */
- int height;
-
-} HV_Topology;
-
-/** Returns information about the tile coordinate system.
- *
- * Each supervisor is given a rectangle of tiles it potentially controls.
- * These tiles are labeled using a relative coordinate system with (0,0) as
- * the upper left tile regardless of their physical location on the chip.
- *
- * This call returns both the size of that rectangle and the position
- * within that rectangle of the querying tile.
- *
- * Not all tiles within that rectangle may be available to the supervisor;
- * to get the precise set of available tiles, you must also call
- * hv_inquire_tiles(HV_INQ_TILES_AVAIL, ...).
- **/
-HV_Topology hv_inquire_topology(void);
-
-/** Sets of tiles we can retrieve with hv_inquire_tiles().
- *
- * These numbers are part of the binary API and guaranteed not to change.
- */
-typedef enum {
- /** An invalid value; do not use. */
- _HV_INQ_TILES_RESERVED = 0,
-
- /** All available tiles within the supervisor's tile rectangle. */
- HV_INQ_TILES_AVAIL = 1,
-
- /** The set of tiles used for hash-for-home caching. */
- HV_INQ_TILES_HFH_CACHE = 2,
-
- /** The set of tiles that can be legally used as a LOTAR for a PTE. */
- HV_INQ_TILES_LOTAR = 3,
-
- /** The set of "shared" driver tiles that the hypervisor may
- * periodically interrupt. */
- HV_INQ_TILES_SHARED = 4
-} HV_InqTileSet;
-
-/** Returns specific information about various sets of tiles within the
- * supervisor's tile rectangle.
- *
- * @param set Which set of tiles to retrieve.
- * @param cpumask Pointer to a returned bitmask (in row-major order,
- * supervisor-relative) of tiles. The low bit of the first word
- * corresponds to the tile at the upper left-hand corner of the
- * supervisor's rectangle. In order for the supervisor to know the
- * buffer length to supply, it should first call hv_inquire_topology.
- * @param length Number of bytes available for the returned bitmask.
- **/
-HV_Errno hv_inquire_tiles(HV_InqTileSet set, HV_VirtAddr cpumask, int length);
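-
-/* Usage sketch: size the bitmask from the topology before calling
- * hv_inquire_tiles(), as recommended above. The caller's buffer is
- * "mask_bytes" long; one bit per tile, row-major.
- */
-static inline HV_Errno example_get_avail_tiles(unsigned long *mask,
-                                               int mask_bytes)
-{
-  HV_Topology topo = hv_inquire_topology();
-  if (mask_bytes * 8 < topo.width * topo.height)
-    return HV_EINVAL;  /* buffer too small for the rectangle */
-  return hv_inquire_tiles(HV_INQ_TILES_AVAIL, (HV_VirtAddr)mask,
-                          mask_bytes);
-}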
-
-
-/** An identifier for a memory controller. Multiple memory controllers
- * may be connected to one chip, and this uniquely identifies each one.
- */
-typedef int HV_MemoryController;
-
-/** A range of physical memory. */
-typedef struct
-{
- HV_PhysAddr start; /**< Starting address. */
- __hv64 size; /**< Size in bytes. */
- HV_MemoryController controller; /**< Which memory controller owns this. */
-} HV_PhysAddrRange;
-
-/** Returns information about a range of physical memory.
- *
- * hv_inquire_physical() returns one of the ranges of client
- * physical addresses which are available to this client.
- *
- * The first range is retrieved by specifying an idx of 0, and
- * successive ranges are returned with subsequent idx values. Ranges
- * are ordered by increasing start address (i.e., as idx increases,
- * so does start), do not overlap, and do not touch (i.e., the
- * available memory is described with the fewest possible ranges).
- *
- * If an out-of-range idx value is specified, the returned size will be zero.
- * A client can count the number of ranges by increasing idx until the
- * returned size is zero. There will always be at least one valid range.
- *
- * Some clients might not be prepared to deal with more than one
- * physical address range; they still ought to call this routine and
- * issue a warning message if they're given more than one range, on the
- * theory that whoever configured the hypervisor to provide that memory
- * should know that it's being wasted.
- */
-HV_PhysAddrRange hv_inquire_physical(int idx);
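-
-/* Usage sketch: walk idx until a zero-size range comes back, per the
- * contract above. The same idiom applies to hv_inquire_virtual() and
- * hv_inquire_asid().
- */
-static inline int example_count_physical_ranges(void)
-{
-  int idx = 0;
-  while (hv_inquire_physical(idx).size != 0)
-    idx++;
-  return idx;  /* always >= 1 */
-}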
-
-/** Possible DIMM types. */
-typedef enum
-{
- NO_DIMM = 0, /**< No DIMM */
- DDR2 = 1, /**< DDR2 */
- DDR3 = 2 /**< DDR3 */
-} HV_DIMM_Type;
-
-#ifdef __tilegx__
-
-/** Log2 of minimum DIMM bytes supported by the memory controller. */
-#define HV_MSH_MIN_DIMM_SIZE_SHIFT 29
-
-/** Max number of DIMMs contained by one memory controller. */
-#define HV_MSH_MAX_DIMMS 8
-
-#else
-
-/** Log2 of minimum DIMM bytes supported by the memory controller. */
-#define HV_MSH_MIN_DIMM_SIZE_SHIFT 26
-
-/** Max number of DIMMs contained by one memory controller. */
-#define HV_MSH_MAX_DIMMS 2
-
-#endif
-
-/** Number of bits to right-shift to get the DIMM type. */
-#define HV_DIMM_TYPE_SHIFT 0
-
-/** Bits to mask to get the DIMM type. */
-#define HV_DIMM_TYPE_MASK 0xf
-
-/** Number of bits to right-shift to get the DIMM size. */
-#define HV_DIMM_SIZE_SHIFT 4
-
-/** Bits to mask to get the DIMM size. */
-#define HV_DIMM_SIZE_MASK 0xf
-
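-/* Decoding sketch, assuming a packed DIMM descriptor as implied by the
- * shift/mask macros above (the inquiry returning such a descriptor is
- * not part of this excerpt). The size field is taken to be a log2
- * increment over the controller's minimum DIMM size.
- */
-static inline HV_DIMM_Type example_dimm_type(unsigned int dimm)
-{
-  return (HV_DIMM_Type)((dimm >> HV_DIMM_TYPE_SHIFT) & HV_DIMM_TYPE_MASK);
-}
-
-static inline unsigned long long example_dimm_bytes(unsigned int dimm)
-{
-  unsigned int sz = (dimm >> HV_DIMM_SIZE_SHIFT) & HV_DIMM_SIZE_MASK;
-  return 1ULL << (HV_MSH_MIN_DIMM_SIZE_SHIFT + sz);
-}
-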
-/** Memory controller information. */
-typedef struct
-{
- HV_Coord coord; /**< Relative tile coordinates of the port used by a
- specified tile to communicate with this controller. */
- __hv64 speed; /**< Speed of this controller in bytes per second. */
-} HV_MemoryControllerInfo;
-
-/** Returns information about a particular memory controller.
- *
- * hv_inquire_memory_controller(coord,idx) returns information about a
- * particular controller. Two pieces of information are returned:
- * - The relative coordinates of the port on the controller that the specified
- * tile would use to contact it. The relative coordinates may lie
- * outside the supervisor's rectangle, i.e. the controller may not
- * be attached to a node managed by the querying node's supervisor.
- * In particular note that x or y may be negative.
- * - The speed of the memory controller. (This is a not-to-exceed value
- * based on the raw hardware data rate, and may not be achievable in
- * practice; it is provided to give clients information on the relative
- * performance of the available controllers.)
- *
- * Clients should avoid calling this interface with invalid values;
- * a client that does so may be terminated.
- * @param coord Tile for which to calculate the relative port position.
- * @param controller Index of the controller; identical to value returned
- * from other routines like hv_inquire_physical.
- * @return Information about the controller.
- */
-HV_MemoryControllerInfo hv_inquire_memory_controller(HV_Coord coord,
- int controller);
-
-
-/** A range of virtual memory. */
-typedef struct
-{
- HV_VirtAddr start; /**< Starting address. */
- __hv64 size; /**< Size in bytes. */
-} HV_VirtAddrRange;
-
-/** Returns information about a range of virtual memory.
- *
- * hv_inquire_virtual() returns one of the ranges of client
- * virtual addresses which are available to this client.
- *
- * The first range is retrieved by specifying an idx of 0, and
- * successive ranges are returned with subsequent idx values. Ranges
- * are ordered by increasing start address (i.e., as idx increases,
- * so does start), do not overlap, and do not touch (i.e., the
- * available memory is described with the fewest possible ranges).
- *
- * If an out-of-range idx value is specified, the returned size will be zero.
- * A client can count the number of ranges by increasing idx until the
- * returned size is zero. There will always be at least one valid range.
- *
- * Some clients may well have various virtual addresses hardwired
- * into themselves; for instance, their instruction stream may
- * have been compiled expecting to live at a particular address.
- * Such clients should use this interface to verify they've been
- * given the virtual address space they expect, and issue a (potentially
- * fatal) warning message otherwise.
- *
- * Note that the returned size is a __hv64, not a __hv32, so it is
- * possible to express a single range spanning the entire 32-bit
- * address space.
- */
-HV_VirtAddrRange hv_inquire_virtual(int idx);
-
-
-/** A range of ASID values. */
-typedef struct
-{
- HV_ASID start; /**< First ASID in the range. */
- unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
-} HV_ASIDRange;
-
-/** Returns information about a range of ASIDs.
- *
- * hv_inquire_asid() returns one of the ranges of address
- * space identifiers which are available to this client.
- *
- * The first range is retrieved by specifying an idx of 0, and
- * successive ranges are returned with subsequent idx values. Ranges
- * are ordered by increasing start value (i.e., as idx increases,
- * so does start), do not overlap, and do not touch (i.e., the
- * available ASIDs are described with the fewest possible ranges).
- *
- * If an out-of-range idx value is specified, the returned size will be zero.
- * A client can count the number of ranges by increasing idx until the
- * returned size is zero. There will always be at least one valid range.
- */
-HV_ASIDRange hv_inquire_asid(int idx);
-
-
-/** Waits for at least the specified number of nanoseconds then returns.
- *
- * NOTE: this deprecated function currently assumes a 750 MHz clock,
- * and is thus not generally suitable for use. New code should call
- * hv_sysconf(HV_SYSCONF_CPU_SPEED), compute a cycle count to wait for,
- * and delay by looping while checking the cycle counter SPR.
- *
- * @param nanosecs The number of nanoseconds to sleep.
- */
-void hv_nanosleep(int nanosecs);
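-
-/* Replacement sketch for the deprecated call above: derive a cycle
- * count from the real clock rate and spin on the cycle counter.
- * get_cycle_count() is a hypothetical helper standing in for a read
- * of the chip's cycle counter SPR; error checking is elided.
- */
-extern unsigned long long get_cycle_count(void);  /* hypothetical */
-
-static inline void example_ndelay(unsigned long nanosecs)
-{
-  unsigned long long hz = hv_sysconf(HV_SYSCONF_CPU_SPEED);
-  unsigned long long cycles = (hz / 1000000) * nanosecs / 1000 + 1;
-  unsigned long long start = get_cycle_count();
-  while (get_cycle_count() - start < cycles)
-    ;  /* spin */
-}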
-
-
-/** Reads a character from the console without blocking.
- *
- * @return A value from 0 to 255 is the character successfully read;
- * a negative value means no character was ready.
- */
-int hv_console_read_if_ready(void);
-
-
-/** Writes a character to the console, blocking if the console is busy.
- *
- * This call cannot fail. If the console is broken for some reason,
- * output will simply vanish.
- * @param byte Character to write.
- */
-void hv_console_putc(int byte);
-
-
-/** Writes a string to the console, blocking if the console is busy.
- * @param bytes Pointer to characters to write.
- * @param len Number of characters to write.
- * @return Number of characters written, or HV_EFAULT if the buffer is invalid.
- */
-int hv_console_write(HV_VirtAddr bytes, int len);
-
-
-/** Dispatch the next interrupt from the client downcall mechanism.
- *
- * The hypervisor uses downcalls to notify the client of asynchronous
- * events. Some of these events are hypervisor-created (like incoming
- * messages). Some are regular interrupts which initially occur in
- * the hypervisor, and are normally handled directly by the client;
- * when these occur in a client's interrupt critical section, they must
- * be delivered through the downcall mechanism.
- *
- * A downcall is initially delivered to the client as an INTCTRL_CL
- * interrupt, where CL is the client's PL. Upon entry to the INTCTRL_CL
- * vector, the client must immediately invoke the hv_downcall_dispatch
- * service. This service will not return; instead it will cause one of
- * the client's actual downcall-handling interrupt vectors to be entered.
- * The EX_CONTEXT registers in the client will be set so that when the
- * client irets, it will return to the code which was interrupted by the
- * INTCTRL_CL interrupt.
- *
- * Under some circumstances, the firing of INTCTRL_CL can race with
- * the lowering of a device interrupt. In such a case, the
- * hv_downcall_dispatch service may issue an iret instruction instead
- * of entering one of the client's actual downcall-handling interrupt
- * vectors. This will return execution to the location that was
- * interrupted by INTCTRL_CL.
- *
- * Any saving of registers should be done by the actual handling
- * vectors; no registers should be changed by the INTCTRL_CL handler.
- * In particular, the client should not use a jal instruction to invoke
- * the hv_downcall_dispatch service, as that would overwrite the client's
- * lr register. Note that the hv_downcall_dispatch service may overwrite
- * one or more of the client's system save registers.
- *
- * The client must not modify the INTCTRL_CL_STATUS SPR. The hypervisor
- * will set this register to cause a downcall to happen, and will clear
- * it when no further downcalls are pending.
- *
- * When a downcall vector is entered, the INTCTRL_CL interrupt will be
- * masked. When the client is done processing a downcall, and is ready
- * to accept another, it must unmask this interrupt; if more downcalls
- * are pending, this will cause the INTCTRL_CL vector to be reentered.
- * Currently the following interrupt vectors can be entered through a
- * downcall:
- *
- * INT_MESSAGE_RCV_DWNCL (hypervisor message available)
- * INT_DEV_INTR_DWNCL (device interrupt)
- * INT_DMATLB_MISS_DWNCL (DMA TLB miss)
- * INT_SNITLB_MISS_DWNCL (SNI TLB miss)
- * INT_DMATLB_ACCESS_DWNCL (DMA TLB access violation)
- */
-void hv_downcall_dispatch(void);
-
-#endif /* !__ASSEMBLER__ */
-
-/** We use actual interrupt vectors which never occur (they're only there
- * to allow setting MPLs for related SPRs) for our downcall vectors.
- */
-/** Message receive downcall interrupt vector */
-#define INT_MESSAGE_RCV_DWNCL INT_BOOT_ACCESS
-/** DMA TLB miss downcall interrupt vector */
-#define INT_DMATLB_MISS_DWNCL INT_DMA_ASID
-/** Static network processor instruction TLB miss interrupt vector */
-#define INT_SNITLB_MISS_DWNCL INT_SNI_ASID
-/** DMA TLB access violation downcall interrupt vector */
-#define INT_DMATLB_ACCESS_DWNCL INT_DMA_CPL
-/** Device interrupt downcall interrupt vector */
-#define INT_DEV_INTR_DWNCL INT_WORLD_ACCESS
-/** NMI downcall interrupt vector */
-#define INT_NMI_DWNCL 64
-
-#define HV_NMI_FLAG_FORCE 0x1 /**< Force an NMI downcall regardless of
- the ICS bit of the client. */
-
-#ifndef __ASSEMBLER__
-
-/** Requests the inode for a specific full pathname.
- *
- * Performs a lookup in the hypervisor filesystem for a given filename.
- * Multiple calls with the same filename will always return the same inode.
- * If there is no such filename, HV_ENOENT is returned.
- * A bad filename pointer may result in HV_EFAULT instead.
- *
- * @param filename Constant pointer to name of requested file
- * @return Inode of requested file
- */
-int hv_fs_findfile(HV_VirtAddr filename);
-
-
-/** Data returned from an fstat request.
- * Note that this structure should be no more than 40 bytes in size so
- * that it can always be returned completely in registers.
- */
-typedef struct
-{
- int size; /**< Size of file (or HV_Errno on error) */
- unsigned int flags; /**< Flags (see HV_FS_FSTAT_FLAGS) */
-} HV_FS_StatInfo;
-
-/** Bitmask flags for fstat request */
-typedef enum
-{
- HV_FS_ISDIR = 0x0001 /**< Is the entry a directory? */
-} HV_FS_FSTAT_FLAGS;
-
-/** Get stat information on a given file inode.
- *
- * Return information on the file with the given inode.
- *
- * If the HV_FS_ISDIR bit is set, the "file" is a directory.  Reading
- * it will return NUL-separated filenames (no directory part) relative
- * to the path to the inode of the directory "file". These can be
- * appended to the path to the directory "file" after a forward slash
- * to create additional filenames. Note that it is not required
- * that all valid paths be decomposable into valid parent directories;
- * a filesystem may validly have just a few files, none of which have
- * HV_FS_ISDIR set.  However, if clients wish to enumerate the
- * files in the filesystem, it is recommended to include all the
- * appropriate parent directory "files" to give a consistent view.
- *
- * An invalid file inode will cause an HV_EBADF error to be returned.
- *
- * @param inode The inode number of the query
- * @return An HV_FS_StatInfo structure
- */
-HV_FS_StatInfo hv_fs_fstat(int inode);
-
-
-/** Read data from a specific hypervisor file.
- * On error, may return HV_EBADF for a bad inode or HV_EFAULT for a bad buf.
- * Reads near the end of the file will return fewer bytes than requested.
- * Reads at or beyond the end of a file will return zero.
- *
- * @param inode the hypervisor file to read
- * @param buf the buffer to read data into
- * @param length the number of bytes of data to read
- * @param offset the offset into the file to read the data from
- * @return number of bytes successfully read, or an HV_Errno code
- */
-int hv_fs_pread(int inode, HV_VirtAddr buf, int length, int offset);
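-
-/* Usage sketch: look up a hypervisor file by name and read it in one
- * pass. Returns the byte count read, or a negative HV_Errno.
- */
-static inline int example_read_hv_file(HV_VirtAddr name, HV_VirtAddr buf,
-                                       int buflen)
-{
-  HV_FS_StatInfo st;
-  int inode = hv_fs_findfile(name);
-  if (inode < 0)
-    return inode;       /* HV_ENOENT or HV_EFAULT */
-  st = hv_fs_fstat(inode);
-  if (st.size < 0)
-    return st.size;     /* HV_Errno reported via the size field */
-  if (st.size > buflen)
-    return HV_E2BIG;    /* caller's buffer is too small */
-  return hv_fs_pread(inode, buf, st.size, 0);
-}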
-
-
-/** Read a 64-bit word from the specified physical address.
- * The address must be 8-byte aligned.
- * Specifying an invalid physical address will lead to client termination.
- * @param addr The physical address to read
- * @param access The PTE describing how to read the memory
- * @return The 64-bit value read from the given address
- */
-unsigned long long hv_physaddr_read64(HV_PhysAddr addr, HV_PTE access);
-
-
-/** Write a 64-bit word to the specified physical address.
- * The address must be 8-byte aligned.
- * Specifying an invalid physical address will lead to client termination.
- * @param addr The physical address to write
- * @param access The PTE that says how to write the memory
- * @param val The 64-bit value to write to the given address
- */
-void hv_physaddr_write64(HV_PhysAddr addr, HV_PTE access,
- unsigned long long val);
-
-
-/** Get the value of the command-line for the supervisor, if any.
- * This will not include the filename of the booted supervisor, but may
- * include configured-in boot arguments or the hv_restart() arguments.
- * If the buffer is not long enough the hypervisor will NUL the first
- * character of the buffer but not write any other data.
- * @param buf The virtual address to write the command-line string to.
- * @param length The length of buf, in characters.
- * @return The actual length of the command line, including the trailing NUL
- * (may be larger than "length").
- */
-int hv_get_command_line(HV_VirtAddr buf, int length);
-
-
-/** Set a new value for the command-line for the supervisor, which will
- * be returned from subsequent invocations of hv_get_command_line() on
- * this tile.
- * @param buf The virtual address to read the command-line string from.
- * @param length The length of buf, in characters; must be no more than
- * HV_COMMAND_LINE_LEN.
- * @return Zero if successful, or a hypervisor error code.
- */
-HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
-
-/** Maximum size of a command line passed to hv_set_command_line(); note
- * that a line returned from hv_get_command_line() could be larger than
- * this.*/
-#define HV_COMMAND_LINE_LEN 256
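-
-/* Usage sketch: fetch the command line, detecting truncation from the
- * return value as described above.
- */
-static inline int example_get_cmdline(char *buf, int len)
-{
-  int needed = hv_get_command_line((HV_VirtAddr)buf, len);
-  if (needed > len)
-    return -1;    /* truncated: only buf[0] was NULed; "needed" bytes required */
-  return needed;  /* length including the trailing NUL */
-}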
-
-/** Tell the hypervisor how to cache non-priority pages
- * (its own as well as pages explicitly represented in page tables).
- * Normally these will be represented as red/black pages, but
- * when the supervisor starts to allocate "priority" pages in the PTE
- * the hypervisor will need to start marking those pages as (e.g.) "red"
- * and non-priority pages as either "black" (if they cache-alias
- * with the existing priority pages) or "red/black" (if they don't).
- * The bitmask provides information on which parts of the cache
- * have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a 4KB region of the
- * cache starting at (N * 4KB) is in use by a "priority" page.
- * The portion of cache used by a particular page can be computed
- * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
- * all the "4KB" bits corresponding to the actual page size.
- * @param bitmask A bitmap of priority page set values
- */
-void hv_set_caching(unsigned long bitmask);
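-
-/* Computation sketch for the bitmask described above: mark every 4KB
- * region of the L2 covered by a priority page at physical address
- * "pa". CHIP_L2_CACHE_SIZE() comes from <chip.h>; the mask is assumed
- * to fit in an unsigned long.
- */
-static inline unsigned long example_priority_mask(HV_PhysAddr pa,
-                                                  unsigned long page_size)
-{
-  unsigned long regions = CHIP_L2_CACHE_SIZE() / 0x1000;
-  unsigned long first = (unsigned long)(pa % CHIP_L2_CACHE_SIZE()) / 0x1000;
-  unsigned long mask = 0;
-  unsigned long i;
-  for (i = 0; i < page_size / 0x1000 && i < regions; ++i)
-    mask |= 1UL << ((first + i) % regions);
-  return mask;  /* OR across all priority pages, then hv_set_caching() */
-}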
-
-
-/** Zero out a specified number of pages.
- * The va and size must both be multiples of 4096.
- * Caches are bypassed and memory is directly set to zero.
- * This API is implemented only in the magic hypervisor and is intended
- * to provide a performance boost to the minimal supervisor by
- * giving it a fast way to zero memory pages when allocating them.
- * @param va Virtual address where the page has been mapped
- * @param size Number of bytes (must be a page size multiple)
- */
-void hv_bzero_page(HV_VirtAddr va, unsigned int size);
-
-
-/** State object for the hypervisor messaging subsystem. */
-typedef struct
-{
-#if CHIP_VA_WIDTH() > 32
- __hv64 opaque[2]; /**< No user-serviceable parts inside */
-#else
- __hv32 opaque[2]; /**< No user-serviceable parts inside */
-#endif
-}
-HV_MsgState;
-
-/** Register to receive incoming messages.
- *
- * This routine configures the current tile so that it can receive
- * incoming messages. It must be called before the client can receive
- * messages with the hv_receive_message routine, and must be called on
- * each tile which will receive messages.
- *
- * msgstate is the virtual address of a state object of type HV_MsgState.
- * Once the state is registered, the client must not read or write the
- * state object; doing so will cause undefined results.
- *
- * If this routine is called with msgstate set to 0, the client's message
- * state will be freed and it will no longer be able to receive messages.
- * Note that this may cause the loss of any as-yet-undelivered messages
- * for the client.
- *
- * If another client attempts to send a message to a client which has
- * not yet called hv_register_message_state, or which has freed its
- * message state, the message will not be delivered, as if the client
- * had insufficient buffering.
- *
- * This routine returns HV_OK if the registration was successful, and
- * HV_EINVAL if the supplied state object is unsuitable. Note that some
- * errors may not be detected during this routine, but might be detected
- * during a subsequent message delivery.
- * @param msgstate State object.
- **/
-HV_Errno hv_register_message_state(HV_MsgState* msgstate);
-
-/** Possible message recipient states. */
-typedef enum
-{
- HV_TO_BE_SENT, /**< Not sent (not attempted, or recipient not ready) */
- HV_SENT, /**< Successfully sent */
- HV_BAD_RECIP /**< Bad recipient coordinates (permanent error) */
-} HV_Recip_State;
-
-/** Message recipient. */
-typedef struct
-{
- /** X coordinate, relative to supervisor's top-left coordinate */
- unsigned int x:11;
-
- /** Y coordinate, relative to supervisor's top-left coordinate */
- unsigned int y:11;
-
- /** Status of this recipient */
- HV_Recip_State state:10;
-} HV_Recipient;
-
-/** Send a message to a set of recipients.
- *
- * This routine sends a message to a set of recipients.
- *
- * recips is an array of HV_Recipient structures. Each specifies a tile,
- * and a message state; initially, it is expected that the state will
- * be set to HV_TO_BE_SENT. nrecip specifies the number of recipients
- * in the recips array.
- *
- * For each recipient whose state is HV_TO_BE_SENT, the hypervisor attempts
- * to send that tile the specified message. In order to successfully
- * receive the message, the receiver must be a valid tile to which the
- * sender has access, must not be the sending tile itself, and must have
- * sufficient free buffer space. (The hypervisor guarantees that each
- * tile which has called hv_register_message_state() will be able to
- * buffer one message from every other tile which can legally send to it;
- * more space may be provided but is not guaranteed.) If an invalid tile
- * is specified, the recipient's state is set to HV_BAD_RECIP; this is a
- * permanent delivery error. If the message is successfully delivered
- * to the recipient's buffer, the recipient's state is set to HV_SENT.
- * Otherwise, the recipient's state is unchanged. Message delivery is
- * synchronous; all attempts to send messages are completed before this
- * routine returns.
- *
- * If no permanent delivery errors were encountered, the routine returns
- * the number of messages successfully sent: that is, the number of
- * recipients whose states changed from HV_TO_BE_SENT to HV_SENT during
- * this operation. If any permanent delivery errors were encountered,
- * the routine returns HV_ERECIP. In the event of permanent delivery
- * errors, it may be the case that delivery was not attempted to all
- * recipients; if any messages were successfully delivered, however,
- * recipients' state values will be updated appropriately.
- *
- * It is explicitly legal to specify a recipient structure whose state
- * is not HV_TO_BE_SENT; such a recipient is ignored. One suggested way
- * of using hv_send_message to send a message to multiple tiles is to set
- * up a list of recipients, and then call the routine repeatedly with the
- * same list, each time accumulating the number of messages successfully
- * sent, until all messages are sent, a permanent error is encountered,
- * or the desired number of attempts have been made. When used in this
- * way, the routine will deliver each message no more than once to each
- * recipient.
- *
- * Note that a message being successfully delivered to the recipient's
- * buffer space does not guarantee that it is received by the recipient,
- * either immediately or at any time in the future; the recipient might
- * never call hv_receive_message, or could register a different state
- * buffer, losing the message.
- *
- * Specifying the same recipient more than once in the recipient list
- * is an error, which will not result in an error return but which may
- * or may not result in more than one message being delivered to the
- * recipient tile.
- *
- * buf and buflen specify the message to be sent. buf is a virtual address
- * which must be currently mapped in the client's page table; if not, the
- * routine returns HV_EFAULT. buflen must be greater than zero and less
- * than or equal to HV_MAX_MESSAGE_SIZE, and nrecip must be less than the
- * number of tiles to which the sender has access; if not, the routine
- * returns HV_EINVAL.
- * @param recips List of recipients.
- * @param nrecip Number of recipients.
- * @param buf Address of message data.
- * @param buflen Length of message data.
- **/
-int hv_send_message(HV_Recipient *recips, int nrecip,
- HV_VirtAddr buf, int buflen);
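-
-/* Usage sketch of the retry idiom suggested above: call repeatedly
- * with the same recipient list, accumulating successes, until all
- * messages are sent, a permanent error occurs, or the attempt budget
- * runs out.
- */
-static inline int example_broadcast(HV_Recipient *recips, int nrecip,
-                                    HV_VirtAddr buf, int buflen,
-                                    int max_attempts)
-{
-  int sent = 0, tries;
-  for (tries = 0; tries < max_attempts && sent < nrecip; ++tries) {
-    int rc = hv_send_message(recips, nrecip, buf, buflen);
-    if (rc < 0)
-      return rc;  /* HV_ERECIP, HV_EFAULT or HV_EINVAL */
-    sent += rc;
-  }
-  return sent;
-}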
-
-/** Maximum hypervisor message size, in bytes */
-#define HV_MAX_MESSAGE_SIZE 28
-
-
-/** Return value from hv_receive_message() */
-typedef struct
-{
- int msglen; /**< Message length in bytes, or an error code */
- __hv32 source; /**< Code identifying message sender (HV_MSG_xxx) */
-} HV_RcvMsgInfo;
-
-#define HV_MSG_TILE 0x0 /**< Message source is another tile */
-#define HV_MSG_INTR 0x1 /**< Message source is a driver interrupt */
-
-/** Receive a message.
- *
- * This routine retrieves a message from the client's incoming message
- * buffer.
- *
- * Multiple messages sent from a particular sending tile to a particular
- * receiving tile are received in the order that they were sent; however,
- * no ordering is guaranteed between messages sent by different tiles.
- *
- * Whenever a client's message buffer is empty, the first message
- * subsequently received will cause the client's MESSAGE_RCV_DWNCL
- * interrupt vector to be invoked through the interrupt downcall mechanism
- * (see the description of the hv_downcall_dispatch() routine for details
- * on downcalls).
- *
- * Another message-available downcall will not occur until a call to
- * this routine is made when the message buffer is empty, and a message
- * subsequently arrives. Note that such a downcall could occur while
- * this routine is executing. If the calling code does not wish this
- * to happen, it is recommended that this routine be called with the
- * INTCTRL_1 interrupt masked, or inside an interrupt critical section.
- *
- * msgstate is the value previously passed to hv_register_message_state().
- * buf is the virtual address of the buffer into which the message will
- * be written; buflen is the length of the buffer.
- *
- * This routine returns an HV_RcvMsgInfo structure. The msglen member
- * of that structure is the length of the message received, zero if no
- * message is available, or HV_E2BIG if the message is too large for the
- * specified buffer. If the message is too large, it is not consumed,
- * and may be retrieved by a subsequent call to this routine specifying
- * a sufficiently large buffer. A buffer which is HV_MAX_MESSAGE_SIZE
- * bytes long is guaranteed to be able to receive any possible message.
- *
- * The source member of the HV_RcvMsgInfo structure describes the sender
- * of the message. For messages sent by another client tile via an
- * hv_send_message() call, this value is HV_MSG_TILE; for messages sent
- * as a result of a device interrupt, this value is HV_MSG_INTR.
- */
-HV_RcvMsgInfo hv_receive_message(HV_MsgState msgstate, HV_VirtAddr buf,
- int buflen);
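-
-/* Usage sketch: drain pending messages, e.g. from an
- * INT_MESSAGE_RCV_DWNCL handler. handle_tile_msg()/handle_intr_msg()
- * are hypothetical client policy hooks.
- */
-extern void handle_tile_msg(const char *buf, int len);  /* hypothetical */
-extern void handle_intr_msg(const char *buf, int len);  /* hypothetical */
-
-static inline void example_drain_messages(HV_MsgState msgstate)
-{
-  char buf[HV_MAX_MESSAGE_SIZE];  /* fits any possible message */
-  for (;;) {
-    HV_RcvMsgInfo rmi = hv_receive_message(msgstate, (HV_VirtAddr)buf,
-                                           sizeof(buf));
-    if (rmi.msglen <= 0)
-      break;  /* empty (0) or error; HV_E2BIG cannot occur here */
-    if (rmi.source == HV_MSG_TILE)
-      handle_tile_msg(buf, rmi.msglen);
-    else
-      handle_intr_msg(buf, rmi.msglen);
-  }
-}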
-
-
-/** Start remaining tiles owned by this supervisor. Initially, only one tile
- * executes the client program; after it calls this service, the other tiles
- * are started. This allows the initial tile to do one-time configuration
- * of shared data structures without having to lock them against simultaneous
- * access.
- */
-void hv_start_all_tiles(void);
-
-
-/** Open a hypervisor device.
- *
- * This service initializes an I/O device and its hypervisor driver software,
- * and makes it available for use. The open operation is per-device per-chip;
- * once it has been performed, the device handle returned may be used in other
- * device services calls made by any tile.
- *
- * @param name Name of the device. A base device name is just a text string
- * (say, "pcie"). If there is more than one instance of a device, the
- * base name is followed by a slash and a device number (say, "pcie/0").
- * Some devices may support further structure beneath those components;
- * most notably, devices which require control operations do so by
- * supporting reads and/or writes to a control device whose name
- * includes a trailing "/ctl" (say, "pcie/0/ctl").
- * @param flags Flags (HV_DEV_xxx).
- * @return A positive integer device handle, or a negative error code.
- */
-int hv_dev_open(HV_VirtAddr name, __hv32 flags);
-
-
-/** Close a hypervisor device.
- *
- * This service uninitializes an I/O device and its hypervisor driver
- * software, and makes it unavailable for use. The close operation is
- * per-device per-chip; once it has been performed, the device is no longer
- * available. Normally there is no need to ever call the close service.
- *
- * @param devhdl Device handle of the device to be closed.
- * @return Zero if the close is successful, otherwise, a negative error code.
- */
-int hv_dev_close(int devhdl);
-
-
-/** Read data from a hypervisor device synchronously.
- *
- * This service transfers data from a hypervisor device to a memory buffer.
- * When the service returns, the data has been written to the memory buffer,
- * and the buffer will not be further modified by the driver.
- *
- * No ordering is guaranteed between requests issued from different tiles.
- *
- * Devices may choose to support both the synchronous and asynchronous read
- * operations, only one of them, or neither of them.
- *
- * @param devhdl Device handle of the device to be read from.
- * @param flags Flags (HV_DEV_xxx).
- * @param va Virtual address of the target data buffer. This buffer must
- * be mapped in the currently installed page table; if not, HV_EFAULT
- * may be returned.
- * @param len Number of bytes to be transferred.
- * @param offset Driver-dependent offset. For a random-access device, this is
- * often a byte offset from the beginning of the device; in other cases,
- * like on a control device, it may have a different meaning.
- * @return A non-negative value if the read was at least partially successful;
- * otherwise, a negative error code. The precise interpretation of
- * the return value is driver-dependent, but many drivers will return
- * the number of bytes successfully transferred.
- */
-int hv_dev_pread(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len,
- __hv64 offset);
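-
-/* Usage sketch: one-shot blocking read, using the "pcie/0" naming
- * convention described at hv_dev_open(). The device name, flags and
- * offset semantics are driver-dependent.
- */
-static inline int example_dev_read_once(char *buf, __hv32 len,
-                                        __hv64 offset)
-{
-  int rc;
-  int devhdl = hv_dev_open((HV_VirtAddr)"pcie/0", 0);
-  if (devhdl < 0)
-    return devhdl;
-  rc = hv_dev_pread(devhdl, 0, (HV_VirtAddr)buf, len, offset);
-  hv_dev_close(devhdl);
-  return rc;
-}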
-
-#define HV_DEV_NB_EMPTY 0x1 /**< Don't block when no bytes of data can
- be transferred. */
-#define HV_DEV_NB_PARTIAL 0x2 /**< Don't block when some bytes, but not all
- of the requested bytes, can be
- transferred. */
-#define HV_DEV_NOCACHE 0x4 /**< The caller warrants that none of the
- cache lines which might contain data
- from the requested buffer are valid.
- Useful with asynchronous operations
- only. */
-
-#define HV_DEV_ALLFLAGS (HV_DEV_NB_EMPTY | HV_DEV_NB_PARTIAL | \
- HV_DEV_NOCACHE) /**< All HV_DEV_xxx flags */
-
-/** Write data to a hypervisor device synchronously.
- *
- * This service transfers data from a memory buffer to a hypervisor device.
- * When the service returns, the data has been read from the memory buffer,
- * and the buffer may be overwritten by the client; the data may not
- * necessarily have been conveyed to the actual hardware I/O interface.
- *
- * No ordering is guaranteed between requests issued from different tiles.
- *
- * Devices may choose to support both the synchronous and asynchronous write
- * operations, only one of them, or neither of them.
- *
- * @param devhdl Device handle of the device to be written to.
- * @param flags Flags (HV_DEV_xxx).
- * @param va Virtual address of the source data buffer. This buffer must
- * be mapped in the currently installed page table; if not, HV_EFAULT
- * may be returned.
- * @param len Number of bytes to be transferred.
- * @param offset Driver-dependent offset. For a random-access device, this is
- * often a byte offset from the beginning of the device; in other cases,
- * like on a control device, it may have a different meaning.
- * @return A non-negative value if the write was at least partially successful;
- * otherwise, a negative error code. The precise interpretation of
- * the return value is driver-dependent, but many drivers will return
- * the number of bytes successfully transferred.
- */
-int hv_dev_pwrite(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len,
- __hv64 offset);
-
-
-/** Interrupt arguments, used in the asynchronous I/O interfaces. */
-#if CHIP_VA_WIDTH() > 32
-typedef __hv64 HV_IntArg;
-#else
-typedef __hv32 HV_IntArg;
-#endif
-
-/** Interrupt messages are delivered via the same mechanism as normal
- * messages, but have a message source of HV_MSG_INTR.  The message is
- * formatted as an HV_IntrMsg structure.
- */
-typedef struct
-{
- HV_IntArg intarg; /**< Interrupt argument, passed to the poll/preada/pwritea
- services */
- HV_IntArg intdata; /**< Interrupt-specific interrupt data */
-} HV_IntrMsg;
-
-/** Request an interrupt message when a device condition is satisfied.
- *
- * This service requests that an interrupt message be delivered to the
- * requesting tile when a device becomes readable or writable, or when any
- * data queued to the device via previous write operations from this tile
- * has been actually sent out on the hardware I/O interface. Devices may
- * choose to support any, all, or none of the available conditions.
- *
- * If multiple conditions are specified, only one message will be
- * delivered. If the event mask delivered to that interrupt handler
- * indicates that some of the conditions have not yet occurred, the
- * client must issue another poll() call if it wishes to wait for those
- * conditions.
- *
- * Only one poll may be outstanding per device handle per tile. If more than
- * one tile is polling on the same device and condition, they will all be
- * notified when it happens. Because of this, clients may not assume that
- * the condition signaled is necessarily still true when they request a
- * subsequent service; for instance, the readable data which caused the
- * poll call to interrupt may have been read by another tile in the interim.
- *
- * The notification interrupt message could come directly, or via the
- * downcall (intctrl1) method, depending on what the tile is doing
- * when the condition is satisfied. Note that it is possible for the
- * requested interrupt to be delivered after this service is called but
- * before it returns.
- *
- * @param devhdl Device handle of the device to be polled.
- * @param events Flags denoting the events which will cause the interrupt to
- * be delivered (HV_DEVPOLL_xxx).
- * @param intarg Value which will be delivered as the intarg member of the
- * eventual interrupt message; the intdata member will be set to a
- * mask of HV_DEVPOLL_xxx values indicating which conditions have been
- * satisfied.
- * @return Zero if the interrupt was successfully scheduled; otherwise, a
- * negative error code.
- */
-int hv_dev_poll(int devhdl, __hv32 events, HV_IntArg intarg);
-
-#define HV_DEVPOLL_READ 0x1 /**< Test device for readability */
-#define HV_DEVPOLL_WRITE 0x2 /**< Test device for writability */
-#define HV_DEVPOLL_FLUSH 0x4 /**< Test device for output drained */
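-
-/* Usage sketch: request one readability interrupt. The eventual
- * HV_IntrMsg (defined above) arrives with intarg == cookie and
- * intdata holding the satisfied HV_DEVPOLL_xxx mask.
- */
-static inline int example_wait_readable(int devhdl, HV_IntArg cookie)
-{
-  return hv_dev_poll(devhdl, HV_DEVPOLL_READ, cookie);
-}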
-
-
-/** Cancel a request for an interrupt when a device event occurs.
- *
- * This service requests that no interrupt be delivered when the events
- * noted in the last-issued poll() call happen. Once this service returns,
- * the interrupt has been canceled; however, it is possible for the interrupt
- * to be delivered after this service is called but before it returns.
- *
- * @param devhdl Device handle of the device on which to cancel polling.
- * @return Zero if the poll was successfully canceled; otherwise, a negative
- * error code.
- */
-int hv_dev_poll_cancel(int devhdl);
-
-
-/** NMI information */
-typedef struct
-{
- /** Result: negative error, or HV_NMI_RESULT_xxx. */
- int result;
-
- /** PC from interrupted remote core (if result != HV_NMI_RESULT_FAIL_HV). */
- HV_VirtAddr pc;
-
-} HV_NMI_Info;
-
-/** NMI issued successfully. */
-#define HV_NMI_RESULT_OK 0
-
-/** NMI not issued: remote tile running at client PL with ICS set. */
-#define HV_NMI_RESULT_FAIL_ICS 1
-
-/** NMI not issued: remote tile waiting in hypervisor. */
-#define HV_NMI_RESULT_FAIL_HV 2
-
-/** Force an NMI downcall regardless of the ICS bit of the client. */
-#define HV_NMI_FLAG_FORCE 0x1
-
-/** Send an NMI interrupt request to a particular tile.
- *
- * This will cause the NMI to be issued on the remote tile regardless
- * of the state of the client interrupt mask. However, if the remote
- * tile is in the hypervisor, it will not execute the NMI, and
- * HV_NMI_RESULT_FAIL_HV will be returned. Similarly, if the remote
- * tile is in a client interrupt critical section at the time of the
- * NMI, it will not execute the NMI, and HV_NMI_RESULT_FAIL_ICS will
- * be returned. In this second case, however, if HV_NMI_FLAG_FORCE
- * is set in flags, then the remote tile will enter its NMI interrupt
- * vector regardless. Forcing the NMI vector during an interrupt
- * critical section will mean that the client can not safely continue
- * execution after handling the interrupt.
- *
- * @param tile Tile to which the NMI request is sent.
- * @param info NMI information which is defined by and interpreted by the
- * supervisor, is passed to the specified tile, and is
- * stored in the SPR register SYSTEM_SAVE_{CLIENT_PL}_2 on the
- * specified tile when entering the NMI handler routine.
- * Typically, this parameter stores the NMI type, or an aligned
- * VA plus some special bits, etc.
- * @param flags Flags (HV_NMI_FLAG_xxx).
- * @return Information about the requested NMI.
- */
-HV_NMI_Info hv_send_nmi(HV_Coord tile, unsigned long info, __hv64 flags);
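-
-/* Usage sketch: NMI a remote tile, forcing delivery only as a second
- * resort, since a forced NMI in an interrupt critical section is not
- * safely resumable (see above).
- */
-static inline int example_nmi_tile(HV_Coord tile, unsigned long info)
-{
-  HV_NMI_Info ni = hv_send_nmi(tile, info, 0);
-  if (ni.result == HV_NMI_RESULT_FAIL_ICS)
-    ni = hv_send_nmi(tile, info, HV_NMI_FLAG_FORCE);
-  return ni.result;
-}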
-
-
-/** Scatter-gather list for preada/pwritea calls. */
-typedef struct
-#if CHIP_VA_WIDTH() <= 32
-__attribute__ ((packed, aligned(4)))
-#endif
-{
- HV_PhysAddr pa; /**< Client physical address of the buffer segment. */
- HV_PTE pte; /**< Page table entry describing the caching and location
- override characteristics of the buffer segment. Some
- drivers ignore this element and will require that
- the NOCACHE flag be set on their requests. */
- __hv32 len; /**< Length of the buffer segment. */
-} HV_SGL;
-
-#define HV_SGL_MAXLEN 16 /**< Maximum number of entries in a scatter-gather
- list */
-
-/** Read data from a hypervisor device asynchronously.
- *
- * This service transfers data from a hypervisor device to a memory buffer.
- * When the service returns, the read has been scheduled. When the read
- * completes, an interrupt message will be delivered, and the buffer will
- * not be further modified by the driver.
- *
- * The number of possible outstanding asynchronous requests is defined by
- * each driver, but it is recommended that it be at least two requests
- * per tile per device.
- *
- * No ordering is guaranteed between synchronous and asynchronous requests,
- * even those issued on the same tile.
- *
- * The completion interrupt message could come directly, or via the downcall
- * (intctrl1) method, depending on what the tile is doing when the read
- * completes. Interrupts do not coalesce; one is delivered for each
- * asynchronous I/O request. Note that it is possible for the requested
- * interrupt to be delivered after this service is called but before it
- * returns.
- *
- * Devices may choose to support both the synchronous and asynchronous read
- * operations, only one of them, or neither of them.
- *
- * @param devhdl Device handle of the device to be read from.
- * @param flags Flags (HV_DEV_xxx).
- * @param sgl_len Number of elements in the scatter-gather list.
- * @param sgl Scatter-gather list describing the memory to which data will be
- * written.
- * @param offset Driver-dependent offset. For a random-access device, this is
- * often a byte offset from the beginning of the device; in other cases,
- * like on a control device, it may have a different meaning.
- * @param intarg Value which will be delivered as the intarg member of the
- * eventual interrupt message; the intdata member will be set to the
- * normal return value from the read request.
- * @return Zero if the read was successfully scheduled; otherwise, a negative
- * error code. Note that some drivers may choose to pre-validate
- * their arguments, and may thus detect certain device error
- * conditions at this time rather than when the completion notification
- * occurs, but this is not required.
- */
-int hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len,
- HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg);
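-
-/* Usage sketch: single-segment asynchronous read. "pte" must describe
- * the buffer segment; some drivers instead require the HV_DEV_NOCACHE
- * flag, in which case the caller must warrant that no cache lines
- * covering the buffer are valid.
- */
-static inline int example_async_read(int devhdl, HV_PhysAddr pa,
-                                     HV_PTE pte, __hv32 len,
-                                     __hv64 offset, HV_IntArg cookie)
-{
-  HV_SGL sgl = { pa, pte, len };
-  return hv_dev_preada(devhdl, 0, 1, &sgl, offset, cookie);
-}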
-
-
-/** Write data to a hypervisor device asynchronously.
- *
- * This service transfers data from a memory buffer to a hypervisor
- * device. When the service returns, the write has been scheduled.
- * When the write completes, an interrupt message will be delivered,
- * and the buffer may be overwritten by the client; the data may not
- * necessarily have been conveyed to the actual hardware I/O interface.
- *
- * The number of possible outstanding asynchronous requests is defined by
- * each driver, but it is recommended that it be at least two requests
- * per tile per device.
- *
- * No ordering is guaranteed between synchronous and asynchronous requests,
- * even those issued on the same tile.
- *
- * The completion interrupt message could come directly, or via the downcall
- * (intctrl1) method, depending on what the tile is doing when the write
- * completes. Interrupts do not coalesce; one is delivered for each
- * asynchronous I/O request. Note that it is possible for the requested
- * interrupt to be delivered after this service is called but before it
- * returns.
- *
- * Devices may choose to support both the synchronous and asynchronous write
- * operations, only one of them, or neither of them.
- *
- * @param devhdl Device handle of the device to be written to.
- * @param flags Flags (HV_DEV_xxx).
- * @param sgl_len Number of elements in the scatter-gather list.
- * @param sgl Scatter-gather list describing the memory from which data will be
- * read.
- * @param offset Driver-dependent offset. For a random-access device, this is
- * often a byte offset from the beginning of the device; in other cases,
- * like on a control device, it may have a different meaning.
- * @param intarg Value which will be delivered as the intarg member of the
- * eventual interrupt message; the intdata member will be set to the
- * normal return value from the write request.
- * @return Zero if the write was successfully scheduled; otherwise, a negative
- * error code. Note that some drivers may choose to pre-validate
- * their arguments, and may thus detect certain device error
- * conditions at this time rather than when the completion notification
- * occurs, but this is not required.
- */
-int hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len,
- HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg);
-
-
-/** Define a pair of tile and ASID to identify a user process context. */
-typedef struct
-{
- /** X coordinate, relative to supervisor's top-left coordinate */
- unsigned int x:11;
-
- /** Y coordinate, relative to supervisor's top-left coordinate */
- unsigned int y:11;
-
- /** ASID of the process on this x,y tile */
- HV_ASID asid:10;
-} HV_Remote_ASID;
-
-/** Flush cache and/or TLB state on remote tiles.
- *
- * @param cache_pa Client physical address to flush from cache (ignored if
- * the length encoded in cache_control is zero, or if
- * HV_FLUSH_EVICT_L2 is set, or if cache_cpumask is NULL).
- * @param cache_control This argument allows you to specify a length of
- * physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN).
- * You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache.
- * You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache.
- * HV_FLUSH_ALL flushes all caches.
- * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of
- * tile indices to perform cache flush on. The low bit of the first
- * word corresponds to the tile at the upper left-hand corner of the
- * supervisor's rectangle. If passed as a NULL pointer, equivalent
- * to an empty bitmask. On chips which support hash-for-home caching,
- * if passed as -1, equivalent to a mask containing tiles which could
- * be doing hash-for-home caching.
- * @param tlb_va Virtual address to flush from TLB (ignored if
- * tlb_length is zero or tlb_cpumask is NULL).
- * @param tlb_length Number of bytes of data to flush from the TLB.
- * @param tlb_pgsize Page size to use for TLB flushes.
- * tlb_va and tlb_length need not be aligned to this size.
- * @param tlb_cpumask Bitmask for tlb flush, like cache_cpumask.
- * If passed as a NULL pointer, equivalent to an empty bitmask.
- * @param asids Pointer to an HV_Remote_ASID array of tile/ASID pairs to flush.
- * @param asidcount Number of HV_Remote_ASID entries in asids[].
- * @return Zero for success, or else HV_EINVAL or HV_EFAULT for errors that
- * are detected while parsing the arguments.
- */
-int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
- unsigned long* cache_cpumask,
- HV_VirtAddr tlb_va, unsigned long tlb_length,
- unsigned long tlb_pgsize, unsigned long* tlb_cpumask,
- HV_Remote_ASID* asids, int asidcount);
-
-/** Include in cache_control to ensure a flush of the entire L2. */
-#define HV_FLUSH_EVICT_L2 (1UL << 31)
-
-/** Include in cache_control to ensure a flush of the entire L1I. */
-#define HV_FLUSH_EVICT_L1I (1UL << 30)
-
-/** Maximum legal size to use for the "length" component of cache_control. */
-#define HV_FLUSH_MAX_CACHE_LEN ((1UL << 30) - 1)
-
-/** Use for cache_control to ensure a flush of all caches. */
-#define HV_FLUSH_ALL -1UL
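-
-/* Usage sketch: evict the whole L2 on the tiles in "mask", with no
- * TLB or ASID flushing. cache_pa is ignored when HV_FLUSH_EVICT_L2 is
- * set, per the parameter description above.
- */
-static inline int example_evict_l2(unsigned long *mask)
-{
-  return hv_flush_remote(0, HV_FLUSH_EVICT_L2, mask,
-                         0, 0, 0, 0, 0, 0);
-}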
-
-#else /* __ASSEMBLER__ */
-
-/** Include in cache_control to ensure a flush of the entire L2. */
-#define HV_FLUSH_EVICT_L2 (1 << 31)
-
-/** Include in cache_control to ensure a flush of the entire L1I. */
-#define HV_FLUSH_EVICT_L1I (1 << 30)
-
-/** Maximum legal size to use for the "length" component of cache_control. */
-#define HV_FLUSH_MAX_CACHE_LEN ((1 << 30) - 1)
-
-/** Use for cache_control to ensure a flush of all caches. */
-#define HV_FLUSH_ALL -1
-
-#endif /* __ASSEMBLER__ */
-
-#ifndef __ASSEMBLER__
-
-/** Return a 64-bit value corresponding to the PTE if needed */
-#define hv_pte_val(pte) ((pte).val)
-
-/** Cast a 64-bit value to an HV_PTE */
-#define hv_pte(val) ((HV_PTE) { val })
-
-#endif /* !__ASSEMBLER__ */
-
-
-/** Bits in the size of an HV_PTE */
-#define HV_LOG2_PTE_SIZE 3
-
-/** Size of an HV_PTE */
-#define HV_PTE_SIZE (1 << HV_LOG2_PTE_SIZE)
-
-
-/* Bits in HV_PTE's low word. */
-#define HV_PTE_INDEX_PRESENT 0 /**< PTE is valid */
-#define HV_PTE_INDEX_MIGRATING 1 /**< Page is migrating */
-#define HV_PTE_INDEX_CLIENT0 2 /**< Page client state 0 */
-#define HV_PTE_INDEX_CLIENT1 3 /**< Page client state 1 */
-#define HV_PTE_INDEX_NC 4 /**< L1$/L2$ incoherent with L3$ */
-#define HV_PTE_INDEX_NO_ALLOC_L1 5 /**< Page is uncached in local L1$ */
-#define HV_PTE_INDEX_NO_ALLOC_L2 6 /**< Page is uncached in local L2$ */
-#define HV_PTE_INDEX_CACHED_PRIORITY 7 /**< Page is priority cached */
-#define HV_PTE_INDEX_PAGE 8 /**< PTE describes a page */
-#define HV_PTE_INDEX_GLOBAL 9 /**< Page is global */
-#define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */
-#define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */
-#define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */
- /* Bits 13-14 are reserved for
- future use. */
-#define HV_PTE_INDEX_SUPER 15 /**< Pages ganged together for TLB */
-#define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */
-#define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */
-#define HV_PTE_INDEX_CLIENT2 19 /**< Page client state 2 */
-#define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits
- of word */
-#define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */
-
-/* Bits in HV_PTE's high word. */
-#define HV_PTE_INDEX_READABLE 32 /**< Page is readable */
-#define HV_PTE_INDEX_WRITABLE 33 /**< Page is writable */
-#define HV_PTE_INDEX_EXECUTABLE 34 /**< Page is executable */
-#define HV_PTE_INDEX_PTFN 35 /**< Page's PTFN; must be high bits
- of word */
-#define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */
-
-/*
- * Legal values for the PTE's mode field
- */
-/** Data is not resident in any caches; loads and stores access memory
- * directly.
- */
-#define HV_PTE_MODE_UNCACHED 1
-
-/** Data is resident in the tile's local L1 and/or L2 caches; if a load
- * or store misses there, it goes to memory.
- *
- * The copy in the local L1$/L2$ is not invalidated when the copy in
- * memory is changed.
- */
-#define HV_PTE_MODE_CACHE_NO_L3 2
-
-/** Data is resident in the tile's local L1 and/or L2 caches. If a load
- * or store misses there, it goes to an L3 cache in a designated tile;
- * if it misses there, it goes to memory.
- *
- * If the NC bit is not set, the copy in the local L1$/L2$ is invalidated
- * when the copy in the remote L3$ is changed. Otherwise, such
- * invalidation will not occur.
- *
- * Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support
- * invalidation from an L3$ to another tile's L1$/L2$. If the NC bit is
- * clear on such a chip, no copy is kept in the local L1$/L2$ in this mode.
- */
-#define HV_PTE_MODE_CACHE_TILE_L3 3
-
-/** Data is resident in the tile's local L1 and/or L2 caches. If a load
- * or store misses there, it goes to an L3 cache in one of a set of
- * designated tiles; if it misses there, it goes to memory. Which tile
- * is chosen from the set depends upon a hash function applied to the
- * physical address. This mode is not supported on chips for which
- * CHIP_HAS_CBOX_HOME_MAP() is 0.
- *
- * If the NC bit is not set, the copy in the local L1$/L2$ is invalidated
- * when the copy in the remote L3$ is changed. Otherwise, such
- * invalidation will not occur.
- *
- * Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support
- * invalidation from an L3$ to another tile's L1$/L2$. If the NC bit is
- * clear on such a chip, no copy is kept in the local L1$/L2$ in this mode.
- */
-#define HV_PTE_MODE_CACHE_HASH_L3 4
-
-/** Data is not resident in memory; accesses are instead made to an I/O
- * device, whose tile coordinates are given by the PTE's LOTAR field.
- * This mode is only supported on chips for which CHIP_HAS_MMIO() is 1.
- * The EXECUTABLE bit may not be set in an MMIO PTE.
- */
-#define HV_PTE_MODE_MMIO 5
-
-
-/* C wants 1ULL so it is typed as __hv64, but the assembler needs just numbers.
- * The assembler can't handle shifts greater than 31, but treats them
- * as shifts mod 32, so assembler code must be aware of which word
- * the bit belongs in when using these macros.
- */
-#ifdef __ASSEMBLER__
-#define __HV_PTE_ONE 1 /**< One, for assembler */
-#else
-#define __HV_PTE_ONE 1ULL /**< One, for C */
-#endif
-
-/** Is this PTE present?
- *
- * If this bit is set, this PTE represents a valid translation or level-2
- * page table pointer. Otherwise, the page table does not contain a
- * translation for the subject virtual pages.
- *
- * If this bit is not set, the other bits in the PTE are not
- * interpreted by the hypervisor, and may contain any value.
- */
-#define HV_PTE_PRESENT (__HV_PTE_ONE << HV_PTE_INDEX_PRESENT)
-
-/** Does this PTE map a page?
- *
- * If this bit is set in a level-0 page table, the entry should be
- * interpreted as a level-2 page table entry mapping a jumbo page.
- *
- * If this bit is set in a level-1 page table, the entry should be
- * interpreted as a level-2 page table entry mapping a large page.
- *
- * This bit should not be modified by the client while PRESENT is set, as
- * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits.
- *
- * In a level-2 page table, this bit is ignored and must be zero.
- */
-#define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE)
-
-/** Does this PTE implicitly reference multiple pages?
- *
- * If this bit is set in the page table (either in the level-2 page table,
- * or in a higher level page table in conjunction with the PAGE bit)
- * then the PTE specifies a range of contiguous pages, not a single page.
- * The hv_set_pte_super_shift() service allows you to specify the count for
- * each level of the page table.
- *
- * Note: this bit is not supported on TILEPro systems.
- */
-#define HV_PTE_SUPER (__HV_PTE_ONE << HV_PTE_INDEX_SUPER)
-
-/** Is this a global (non-ASID) mapping?
- *
- * If this bit is set, the translations established by this PTE will
- * not be flushed from the TLB by the hv_flush_asid() service; they
- * will be flushed by the hv_flush_page() or hv_flush_pages() services.
- *
- * Setting this bit for translations which are identical in all page
- * tables (for instance, code and data belonging to a client OS) can
- * be very beneficial, as it will reduce the number of TLB misses.
- * Note that, while it is not an error which will be detected by the
- * hypervisor, it is an extremely bad idea to set this bit for
- * translations which are _not_ identical in all page tables.
- *
- * This bit should not be modified by the client while PRESENT is set, as
- * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_GLOBAL (__HV_PTE_ONE << HV_PTE_INDEX_GLOBAL)
-
-/** Is this mapping accessible to users?
- *
- * If this bit is set, code running at any PL will be permitted to
- * access the virtual addresses mapped by this PTE. Otherwise, only
- * code running at PL 1 or above will be allowed to do so.
- *
- * This bit should not be modified by the client while PRESENT is set, as
- * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_USER (__HV_PTE_ONE << HV_PTE_INDEX_USER)
-
-/** Has this mapping been accessed?
- *
- * This bit is set by the hypervisor when the memory described by the
- * translation is accessed for the first time. It is never cleared by
- * the hypervisor, but may be cleared by the client. After the bit
- * has been cleared, subsequent references are not guaranteed to set
- * it again until the translation has been flushed from the TLB.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_ACCESSED (__HV_PTE_ONE << HV_PTE_INDEX_ACCESSED)
-
-/** Is this mapping dirty?
- *
- * This bit is set by the hypervisor when the memory described by the
- * translation is written for the first time. It is never cleared by
- * the hypervisor, but may be cleared by the client. After the bit
- * has been cleared, subsequent references are not guaranteed to set
- * it again until the translation has been flushed from the TLB.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_DIRTY (__HV_PTE_ONE << HV_PTE_INDEX_DIRTY)
-
-/** Migrating bit in PTE.
- *
- * This bit is guaranteed not to be inspected or modified by the
- * hypervisor. The name is indicative of the suggested use by the client
- * to tag pages whose L3 cache is being migrated from one cpu to another.
- */
-#define HV_PTE_MIGRATING (__HV_PTE_ONE << HV_PTE_INDEX_MIGRATING)
-
-/** Client-private bit in PTE.
- *
- * This bit is guaranteed not to be inspected or modified by the
- * hypervisor.
- */
-#define HV_PTE_CLIENT0 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT0)
-
-/** Client-private bit in PTE.
- *
- * This bit is guaranteed not to be inspected or modified by the
- * hypervisor.
- */
-#define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
-
-/** Client-private bit in PTE.
- *
- * This bit is guaranteed not to be inspected or modified by the
- * hypervisor.
- */
-#define HV_PTE_CLIENT2 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
-
-/** Non-coherent (NC) bit in PTE.
- *
- * If this bit is set, the mapping that is set up will be non-coherent
- * (also known as non-inclusive). This means that changes to the L3
- * cache will not cause a local copy to be invalidated. It is generally
- * recommended only for read-only mappings.
- *
- * In level-1 PTEs, if the Page bit is clear, this bit determines how the
- * level-2 page table is accessed.
- */
-#define HV_PTE_NC (__HV_PTE_ONE << HV_PTE_INDEX_NC)
-
-/** Is this page prevented from filling the L1$?
- *
- * If this bit is set, the page described by the PTE will not be cached
- * in the local cpu's L1 cache.
- *
- * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip,
- * it is illegal to use this attribute, and may cause client termination.
- *
- * In level-1 PTEs, if the Page bit is clear, this bit
- * determines how the level-2 page table is accessed.
- */
-#define HV_PTE_NO_ALLOC_L1 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L1)
-
-/** Is this page prevented from filling the L2$?
- *
- * If this bit is set, the page described by the PTE will not be cached
- * in the local cpu's L2 cache.
- *
- * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip,
- * it is illegal to use this attribute, and may cause client termination.
- *
- * In level-1 PTEs, if the Page bit is clear, this bit determines how the
- * level-2 page table is accessed.
- */
-#define HV_PTE_NO_ALLOC_L2 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L2)
-
-/** Is this a priority page?
- *
- * If this bit is set, the page described by the PTE will be given
- * priority in the cache. Normally this translates into allowing the
- * page to use only the "red" half of the cache. The client may wish to
- * then use the hv_set_caching service to specify that other pages which
- * alias this page will use only the "black" half of the cache.
- *
- * If the Cached Priority bit is clear, the hypervisor uses the
- * current hv_set_caching() value to choose how to cache the page.
- *
- * It is illegal to set the Cached Priority bit if the Non-Cached bit
- * is set and the Cached Remotely bit is clear, i.e. if requests to
- * the page map directly to memory.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_CACHED_PRIORITY (__HV_PTE_ONE << \
- HV_PTE_INDEX_CACHED_PRIORITY)
-
-/** Is this a readable mapping?
- *
- * If this bit is set, code will be permitted to read from (e.g.,
- * issue load instructions against) the virtual addresses mapped by
- * this PTE.
- *
- * It is illegal for this bit to be clear if the Writable bit is set.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_READABLE (__HV_PTE_ONE << HV_PTE_INDEX_READABLE)
-
-/** Is this a writable mapping?
- *
- * If this bit is set, code will be permitted to write to (e.g., issue
- * store instructions against) the virtual addresses mapped by this
- * PTE.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_WRITABLE (__HV_PTE_ONE << HV_PTE_INDEX_WRITABLE)
-
-/** Is this an executable mapping?
- *
- * If this bit is set, code will be permitted to execute from
- * (e.g., jump to) the virtual addresses mapped by this PTE.
- *
- * This bit applies to any processor on the tile, if there is more
- * than one.
- *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
- */
-#define HV_PTE_EXECUTABLE (__HV_PTE_ONE << HV_PTE_INDEX_EXECUTABLE)
-
-/** The width of a LOTAR's x or y bitfield. */
-#define HV_LOTAR_WIDTH 11
-
-/** Converts an x,y pair to a LOTAR value. */
-#define HV_XY_TO_LOTAR(x, y) ((HV_LOTAR)(((x) << HV_LOTAR_WIDTH) | (y)))
-
-/** Extracts the X component of a lotar. */
-#define HV_LOTAR_X(lotar) ((lotar) >> HV_LOTAR_WIDTH)
-
-/** Extracts the Y component of a lotar. */
-#define HV_LOTAR_Y(lotar) ((lotar) & ((1 << HV_LOTAR_WIDTH) - 1))
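-
-/* Illustrative sketch: packing and unpacking a LOTAR with the macros
- * above; the coordinates are arbitrary example values.
- *
- *   HV_LOTAR lotar = HV_XY_TO_LOTAR(3, 5);
- *   unsigned int x = HV_LOTAR_X(lotar);   // 3
- *   unsigned int y = HV_LOTAR_Y(lotar);   // 5
- */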
-
-#ifndef __ASSEMBLER__
-
-/** Define accessor functions for a PTE bit. */
-#define _HV_BIT(name, bit) \
-static __inline int \
-hv_pte_get_##name(HV_PTE pte) \
-{ \
- return (pte.val >> HV_PTE_INDEX_##bit) & 1; \
-} \
- \
-static __inline HV_PTE \
-hv_pte_set_##name(HV_PTE pte) \
-{ \
- pte.val |= 1ULL << HV_PTE_INDEX_##bit; \
- return pte; \
-} \
- \
-static __inline HV_PTE \
-hv_pte_clear_##name(HV_PTE pte) \
-{ \
- pte.val &= ~(1ULL << HV_PTE_INDEX_##bit); \
- return pte; \
-}
-
-/* Generate accessors to get, set, and clear various PTE flags.
- */
-_HV_BIT(present, PRESENT)
-_HV_BIT(page, PAGE)
-_HV_BIT(super, SUPER)
-_HV_BIT(client0, CLIENT0)
-_HV_BIT(client1, CLIENT1)
-_HV_BIT(client2, CLIENT2)
-_HV_BIT(migrating, MIGRATING)
-_HV_BIT(nc, NC)
-_HV_BIT(readable, READABLE)
-_HV_BIT(writable, WRITABLE)
-_HV_BIT(executable, EXECUTABLE)
-_HV_BIT(accessed, ACCESSED)
-_HV_BIT(dirty, DIRTY)
-_HV_BIT(no_alloc_l1, NO_ALLOC_L1)
-_HV_BIT(no_alloc_l2, NO_ALLOC_L2)
-_HV_BIT(cached_priority, CACHED_PRIORITY)
-_HV_BIT(global, GLOBAL)
-_HV_BIT(user, USER)
-
-#undef _HV_BIT
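-
-/* Illustrative sketch: each generated accessor returns the updated
- * PTE by value, so the accessors compose naturally.  For example, to
- * build a present, readable, writable, user-accessible PTE:
- *
- *   HV_PTE pte = { 0 };
- *   pte = hv_pte_set_present(pte);
- *   pte = hv_pte_set_readable(pte);
- *   pte = hv_pte_set_writable(pte);
- *   pte = hv_pte_set_user(pte);
- */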
-
-/** Get the page mode from the PTE.
- *
- * This field generally determines whether and how accesses to the page
- * are cached; the HV_PTE_MODE_xxx symbols define the legal values for the
- * page mode. The NC, NO_ALLOC_L1, and NO_ALLOC_L2 bits modify this
- * general policy.
- */
-static __inline unsigned int
-hv_pte_get_mode(const HV_PTE pte)
-{
- return (((__hv32) pte.val) >> HV_PTE_INDEX_MODE) &
- ((1 << HV_PTE_MODE_BITS) - 1);
-}
-
-/** Set the page mode into a PTE. See hv_pte_get_mode. */
-static __inline HV_PTE
-hv_pte_set_mode(HV_PTE pte, unsigned int val)
-{
- pte.val &= ~(((1ULL << HV_PTE_MODE_BITS) - 1) << HV_PTE_INDEX_MODE);
- pte.val |= val << HV_PTE_INDEX_MODE;
- return pte;
-}
-
-/** Get the page frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
- *
- * For all PTEs in the lowest-level page table, and for all PTEs with
- * the Page bit set in all page tables, the CPA must be aligned modulo
- * the relevant page size.
- */
-static __inline unsigned long
-hv_pte_get_ptfn(const HV_PTE pte)
-{
- return pte.val >> HV_PTE_INDEX_PTFN;
-}
-
-/** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */
-static __inline HV_PTE
-hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
-{
- pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS)-1) << HV_PTE_INDEX_PTFN);
- pte.val |= (__hv64) val << HV_PTE_INDEX_PTFN;
- return pte;
-}
-
-/** Get the client physical address from the PTE. See hv_pte_set_ptfn. */
-static __inline HV_PhysAddr
-hv_pte_get_pa(const HV_PTE pte)
-{
- return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
-}
-
-/** Set the client physical address into a PTE. See hv_pte_get_ptfn. */
-static __inline HV_PTE
-hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
-{
- return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
-}
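-
-/* Illustrative sketch: composing a complete mapping from the pieces
- * above.  'cpa', 'x' and 'y' are assumed caller-supplied values;
- * HV_PTE_MODE_CACHE_TILE_L3 is one of the HV_PTE_MODE_xxx values,
- * and when it is used the LOTAR field (see below) selects the tile
- * providing the L3 cache.
- *
- *   HV_PTE pte = { 0 };
- *   pte = hv_pte_set_present(pte);
- *   pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
- *   pte = hv_pte_set_pa(pte, cpa);   // cpa must be page-aligned
- *   pte = hv_pte_set_lotar(pte, HV_XY_TO_LOTAR(x, y));
- */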
-
-
-/** Get the remote tile caching this page.
- *
- * Specifies the remote tile which is providing the L3 cache for this page.
- *
- * This field is ignored unless the page mode is HV_PTE_MODE_CACHE_TILE_L3.
- *
- * In level-1 PTEs, if the Page bit is clear, this field determines how the
- * level-2 page table is accessed.
- */
-static __inline unsigned int
-hv_pte_get_lotar(const HV_PTE pte)
-{
- unsigned int lotar = ((__hv32) pte.val) >> HV_PTE_INDEX_LOTAR;
-
- return HV_XY_TO_LOTAR( (lotar >> (HV_PTE_LOTAR_BITS / 2)),
- (lotar & ((1 << (HV_PTE_LOTAR_BITS / 2)) - 1)) );
-}
-
-
-/** Set the remote tile caching a page into a PTE. See hv_pte_get_lotar. */
-static __inline HV_PTE
-hv_pte_set_lotar(HV_PTE pte, unsigned int val)
-{
- unsigned int x = HV_LOTAR_X(val);
- unsigned int y = HV_LOTAR_Y(val);
-
- pte.val &= ~(((1ULL << HV_PTE_LOTAR_BITS)-1) << HV_PTE_INDEX_LOTAR);
- pte.val |= (x << (HV_PTE_INDEX_LOTAR + HV_PTE_LOTAR_BITS / 2)) |
- (y << HV_PTE_INDEX_LOTAR);
- return pte;
-}
-
-#endif /* !__ASSEMBLER__ */
-
-/** Converts a client physical address to a ptfn. */
-#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
-
-/** Converts a ptfn to a client physical address. */
-#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
-
-#if CHIP_VA_WIDTH() > 32
-
-/*
- * Note that we currently do not allow customizing the page size
- * of the L0 pages, but fix them at 4GB, so we do not use the
- * "_HV_xxx" nomenclature for the L0 macros.
- */
-
-/** Log number of HV_PTE entries in L0 page table */
-#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
-
-/** Number of HV_PTE entries in L0 page table */
-#define HV_L0_ENTRIES (1 << HV_LOG2_L0_ENTRIES)
-
-/** Log size of L0 page table in bytes */
-#define HV_LOG2_L0_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L0_ENTRIES)
-
-/** Size of L0 page table in bytes */
-#define HV_L0_SIZE (1 << HV_LOG2_L0_SIZE)
-
-#ifdef __ASSEMBLER__
-
-/** Index in L0 for a specific VA */
-#define HV_L0_INDEX(va) \
- (((va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1))
-
-#else
-
-/** Index in L0 for a specific VA */
-#define HV_L0_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1))
-
-#endif
-
-#endif /* CHIP_VA_WIDTH() > 32 */
-
-/** Log number of HV_PTE entries in L1 page table */
-#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
- (HV_LOG2_L1_SPAN - log2_page_size_large)
-
-/** Number of HV_PTE entries in L1 page table */
-#define _HV_L1_ENTRIES(log2_page_size_large) \
- (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
-
-/** Log size of L1 page table in bytes */
-#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
- (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
-
-/** Size of L1 page table in bytes */
-#define _HV_L1_SIZE(log2_page_size_large) \
- (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
-
-/** Log number of HV_PTE entries in level-2 page table */
-#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
- (log2_page_size_large - log2_page_size_small)
-
-/** Number of HV_PTE entries in level-2 page table */
-#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
- (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
-
-/** Log size of level-2 page table in bytes */
-#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
- (HV_LOG2_PTE_SIZE + \
- _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
-
-/** Size of level-2 page table in bytes */
-#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
- (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
-
-#ifdef __ASSEMBLER__
-
-#if CHIP_VA_WIDTH() > 32
-
-/** Index in L1 for a specific VA */
-#define _HV_L1_INDEX(va, log2_page_size_large) \
- (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
-
-#else /* CHIP_VA_WIDTH() > 32 */
-
-/** Index in L1 for a specific VA */
-#define _HV_L1_INDEX(va, log2_page_size_large) \
- (((va) >> log2_page_size_large))
-
-#endif /* CHIP_VA_WIDTH() > 32 */
-
-/** Index in level-2 page table for a specific VA */
-#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
- (((va) >> log2_page_size_small) & \
- (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
-
-#else /* __ASSEMBLER__ */
-
-#if CHIP_VA_WIDTH() > 32
-
-/** Index in L1 for a specific VA */
-#define _HV_L1_INDEX(va, log2_page_size_large) \
- (((HV_VirtAddr)(va) >> log2_page_size_large) & \
- (_HV_L1_ENTRIES(log2_page_size_large) - 1))
-
-#else /* CHIP_VA_WIDTH() > 32 */
-
-/** Index in L1 for a specific VA */
-#define _HV_L1_INDEX(va, log2_page_size_large) \
- (((HV_VirtAddr)(va) >> log2_page_size_large))
-
-#endif /* CHIP_VA_WIDTH() > 32 */
-
-/** Index in level-2 page table for a specific VA */
-#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
- (((HV_VirtAddr)(va) >> log2_page_size_small) & \
- (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
-
-#endif /* __ASSEMBLER__ */
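-
-/* Illustrative sketch: walking a VA down the tree with the index
- * macros above.  The log2 page sizes (24 for 16MB large pages, 16
- * for 64KB small pages) are example parameters, not values mandated
- * by this header; 'va' is an assumed caller-supplied address.
- *
- *   unsigned int l1 = _HV_L1_INDEX(va, 24);
- *   unsigned int l2 = _HV_L2_INDEX(va, 24, 16);
- */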
-
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define _HV_PTE_INDEX_PFN(log2_page_size) \
- (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
- (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a client physical address to a pfn. */
-#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
-
-/** Converts a pfn to a client physical address. */
-#define _HV_PFN_TO_CPA(p, log2_page_size) \
- (((HV_PhysAddr)(p)) << log2_page_size)
-
-/** Converts a ptfn to a pfn. */
-#define _HV_PTFN_TO_PFN(p, log2_page_size) \
- ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define _HV_PFN_TO_PTFN(p, log2_page_size) \
- ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
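-
-/* Illustrative sketch: for a page-aligned CPA the conversions above
- * round-trip, e.g. with an example log2 page size of 16 (64KB):
- *
- *   unsigned long pfn  = _HV_CPA_TO_PFN(cpa, 16);
- *   unsigned long ptfn = _HV_PFN_TO_PTFN(pfn, 16);
- *   // HV_PTFN_TO_CPA(ptfn) == cpa
- */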
-
-#endif /* _HV_HV_H */
diff --git a/arch/tile/include/hv/iorpc.h b/arch/tile/include/hv/iorpc.h
deleted file mode 100644
index ddf1604482b3..000000000000
--- a/arch/tile/include/hv/iorpc.h
+++ /dev/null
@@ -1,714 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-#ifndef _HV_IORPC_H_
-#define _HV_IORPC_H_
-
-/**
- *
- * Error codes and struct definitions for the IO RPC library.
- *
- * The hypervisor's IO RPC component provides a convenient way for
- * driver authors to proxy system calls between user space, linux, and
- * the hypervisor driver. The core of the system is a set of Python
- * files that take ".idl" files as input and generates the following
- * source code:
- *
- * - _rpc_call() routines for use in userspace IO libraries. These
- * routines take an argument list specified in the .idl file, pack the
- * arguments in to a buffer, and read or write that buffer via the
- * Linux iorpc driver.
- *
- * - dispatch_read() and dispatch_write() routines that hypervisor
- * drivers can use to implement most of their dev_pread() and
- * dev_pwrite() methods. These routines decode the incoming parameter
- * blob, permission-check and translate parameters where appropriate,
- * and then invoke a callback routine for whichever RPC call has
- * arrived. The driver simply implements the set of callback
- * routines.
- *
- * The IO RPC system also includes the Linux 'iorpc' driver, which
- * proxies calls between the userspace library and the hypervisor
- * driver. The Linux driver is almost entirely device agnostic; it
- * watches for special flags indicating cases where a memory buffer
- * address might need to be translated, etc. As a result, driver
- * writers can avoid many of the problem cases related to registering
- * hardware resources like memory pages or interrupts. However, the
- * drivers must be careful to obey the conventions documented below in
- * order to work properly with the generic Linux iorpc driver.
- *
- * @section iorpc_domains Service Domains
- *
- * All iorpc-based drivers must support a notion of service domains.
- * A service domain is basically an application context - state
- * indicating resources that are allocated to that particular app
- * which it may access and (perhaps) other applications may not
- * access. Drivers can support any number of service domains they
- * choose. In some cases the design is limited by a number of service
- * domains supported by the IO hardware; in other cases the service
- * domains are a purely software concept and the driver chooses a
- * maximum number of domains based on how much state memory it is
- * willing to preallocate.
- *
- * For example, the mPIPE driver only supports as many service domains
- * as are supported by the mPIPE hardware. This limitation is
- * required because the hardware implements its own MMIO protection
- * scheme to allow large MMIO mappings while still protecting small
- * register ranges within the page that should only be accessed by the
- * hypervisor.
- *
- * In contrast, drivers with no hardware service domain limitations
- * (for instance the TRIO shim) can implement an arbitrary number of
- * service domains. In these cases, each service domain is limited to
- * a carefully restricted set of legal MMIO addresses if necessary to
- * keep one application from corrupting another application's state.
- *
- * @section iorpc_conventions System Call Conventions
- *
- * The driver's open routine is responsible for allocating a new
- * service domain for each hv_dev_open() call. By convention, the
- * return value from open() should be the service domain number on
- * success, or GXIO_ERR_NO_SVC_DOM if no more service domains are
- * available.
- *
- * The implementations of hv_dev_pread() and hv_dev_pwrite() are
- * responsible for validating the devhdl value passed up by the
- * client. Since the device handle returned by hv_dev_open() should
- * embed the positive service domain number, drivers should make sure
- * that DRV_HDL2BITS(devhdl) is a legal service domain. If the client
- * passes an illegal service domain number, the routine should return
- * GXIO_ERR_INVAL_SVC_DOM. Once the service domain number has been
- * validated, the driver can copy to/from the client buffer and call
- * the dispatch_read() or dispatch_write() methods created by the RPC
- * generator.
- *
- * The hv_dev_close() implementation should reset all service domain
- * state and put the service domain back on a free list for
- * reallocation by a future application. In most cases, this will
- * require executing a hardware reset or drain flow and denying any
- * MMIO regions that were created for the service domain.
- *
- * @section iorpc_data Special Data Types
- *
- * The .idl file syntax allows the creation of syscalls with special
- * parameters that require permission checks or translations as part
- * of the system call path. Because of limitations in the code
- * generator, APIs are generally limited to just one of these special
- * parameters per system call, and they are sometimes required to be
- * the first or last parameter to the call. Special parameters
- * include:
- *
- * @subsection iorpc_mem_buffer MEM_BUFFER
- *
- * The MEM_BUFFER() datatype allows user space to "register" memory
- * buffers with a device. Registering memory accomplishes two tasks:
- * Linux keeps track of all buffers that might be modified by a
- * hardware device, and the hardware device drivers bind registered
- * buffers to particular hardware resources like ingress NotifRings.
- * The MEM_BUFFER() idl syntax can take extra flags like ALIGN_64KB,
- * ALIGN_SELF_SIZE, and FLAGS indicating that memory buffers must have
- * certain alignment or that the user should be able to pass a "memory
- * flags" word specifying attributes like nt_hint or IO cache pinning.
- * The parser will accept multiple MEM_BUFFER() flags.
- *
- * Implementations must obey the following conventions when
- * registering memory buffers via the iorpc flow. These rules are a
- * result of the Linux driver implementation, which needs to keep
- * track of how many times a particular page has been registered with
- * the hardware so that it can release the page when all those
- * registrations are cleared.
- *
- * - Memory registrations that refer to a resource which has already
- * been bound must return GXIO_ERR_ALREADY_INIT. Thus, it is an
- * error to register memory twice without resetting (i.e. closing) the
- * resource in between. This convention keeps the Linux driver from
- * having to track which particular devices a page is bound to.
- *
- * - At present, a memory registration is only cleared when the
- * service domain is reset. In this case, the Linux driver simply
- * closes the HV device file handle and then decrements the reference
- * counts of all pages that were previously registered with the
- * device.
- *
- * - In the future, we may add a mechanism for unregistering memory.
- * One possible implementation would require that the user specify
- * which buffer is currently registered. The HV would then verify
- * that that page was actually the one currently mapped and return
- * success or failure to Linux, which would then only decrement the
- * page reference count if the addresses were mapped. Another scheme
- * might allow Linux to pass a token to the HV to be returned when the
- * resource is unmapped.
- *
- * @subsection iorpc_interrupt INTERRUPT
- *
- * The INTERRUPT .idl datatype allows the client to bind hardware
- * interrupts to a particular combination of IPI parameters - CPU, IPI
- * PL, and event bit number. This data is passed via a special
- * datatype so that the Linux driver can validate the CPU and PL and
- * the HV generic iorpc code can translate client CPUs to real CPUs.
- *
- * @subsection iorpc_pollfd_setup POLLFD_SETUP
- *
- * The POLLFD_SETUP .idl datatype allows the client to set up hardware
- * interrupt bindings which are received by Linux but which are made
- * visible to user processes as state transitions on a file descriptor;
- * this allows user processes to use Linux primitives, such as poll(), to
- * await particular hardware events. This data is passed via a special
- * datatype so that the Linux driver may recognize the pollable file
- * descriptor and translate it to a set of interrupt target information,
- * and so that the HV generic iorpc code can translate client CPUs to real
- * CPUs.
- *
- * @subsection iorpc_pollfd POLLFD
- *
- * The POLLFD .idl datatype allows manipulation of hardware interrupt
- * bindings set up via the POLLFD_SETUP datatype; common operations are
- * resetting the state of the requested interrupt events, and unbinding any
- * bound interrupts. This data is passed via a special datatype so that
- * the Linux driver may recognize the pollable file descriptor and
- * translate it to an interrupt identifier previously supplied by the
- * hypervisor as the result of an earlier pollfd_setup operation.
- *
- * @subsection iorpc_blob BLOB
- *
- * The BLOB .idl datatype allows the client to write an arbitrary
- * length string of bytes up to the hypervisor driver. This can be
- * useful for passing up large, arbitrarily structured data like
- * classifier programs. The iorpc stack takes care of validating the
- * buffer VA and CPA as the data passes up to the hypervisor. Unlike
- * MEM_BUFFER(), the buffer is not registered - Linux does not bump
- * page refcounts and the HV driver should not reuse the buffer once
- * the system call is complete.
- *
- * @section iorpc_translation Translating User Space Calls
- *
- * The ::iorpc_offset structure describes the formatting of the offset
- * that is passed to pread() or pwrite() as part of the generated RPC code.
- * When the user calls up to Linux, the rpc code fills in all the fields of
- * the offset, including a 16-bit opcode, a 16-bit format indicator, and 32
- * bits of user-specified "sub-offset". The opcode indicates which syscall
- * is being requested. The format indicates whether there is a "prefix
- * struct" at the start of the memory buffer passed to pwrite(), and if so
- * what data is in that prefix struct. These prefix structs are used to
- * implement special datatypes like MEM_BUFFER() and INTERRUPT - we arrange
- * to put data that needs translation and permission checks at the start of
- * the buffer so that the Linux driver and generic portions of the HV iorpc
- * code can easily access the data. The 32 bits of user-specified
- * "sub-offset" are most useful for pread() calls where the user needs to
- * also pass in a few bits indicating which register to read, etc.
- *
- * The Linux iorpc driver watches for system calls that contain prefix
- * structs so that it can translate parameters and bump reference
- * counts as appropriate. It does not (currently) have any knowledge
- * of the per-device opcodes - it doesn't care what operation you're
- * doing to mPIPE, so long as it can do all the generic book-keeping.
- * The hv/iorpc.h header file defines all of the generic encoding bits
- * needed to translate iorpc calls without knowing which particular
- * opcode is being issued.
- *
- * @section iorpc_globals Global iorpc Calls
- *
- * Implementing mmap() required adding some special iorpc syscalls
- * that are only called by the Linux driver, never by userspace.
- * These include get_mmio_base() and check_mmio_offset(). These
- * routines are described in globals.idl and must be included in every
- * iorpc driver. By providing these routines in every driver, Linux's
- * mmap implementation can easily get the PTE bits it needs and
- * validate the PA offset without needing to know the per-device
- * opcodes to perform those tasks.
- *
- * @section iorpc_kernel Supporting gxio APIs in the Kernel
- *
- * The iorpc code generator also supports generation of kernel code
- * implementing the gxio APIs. This capability is currently used by
- * the mPIPE network driver, and will likely be used by the TRIO root
- * complex and endpoint drivers and perhaps an in-kernel crypto
- * driver. Each driver that wants to instantiate iorpc calls in the
- * kernel needs to generate a kernel version of the generated rpc code
- * and (probably) copy any related gxio source files into the kernel.
- * The mPIPE driver provides a good example of this pattern.
- */
-
-#ifdef __KERNEL__
-#include <linux/stddef.h>
-#else
-#include <stddef.h>
-#endif
-
-#if defined(__HV__)
-#include <hv/hypervisor.h>
-#elif defined(__KERNEL__)
-#include <hv/hypervisor.h>
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-
-/** Code indicating translation services required within the RPC path.
- * These indicate whether there is a translatable struct at the start
- * of the RPC buffer and what information that struct contains.
- */
-enum iorpc_format_e
-{
- /** No translation required, no prefix struct. */
- IORPC_FORMAT_NONE,
-
- /** No translation required, no prefix struct, no access to this
- * operation from user space. */
- IORPC_FORMAT_NONE_NOUSER,
-
- /** Prefix struct contains user VA and size. */
- IORPC_FORMAT_USER_MEM,
-
- /** Prefix struct contains CPA, size, and homing bits. */
- IORPC_FORMAT_KERNEL_MEM,
-
- /** Prefix struct contains interrupt. */
- IORPC_FORMAT_KERNEL_INTERRUPT,
-
- /** Prefix struct contains user-level interrupt. */
- IORPC_FORMAT_USER_INTERRUPT,
-
- /** Prefix struct contains pollfd_setup (interrupt information). */
- IORPC_FORMAT_KERNEL_POLLFD_SETUP,
-
- /** Prefix struct contains user-level pollfd_setup (file descriptor). */
- IORPC_FORMAT_USER_POLLFD_SETUP,
-
- /** Prefix struct contains pollfd (interrupt cookie). */
- IORPC_FORMAT_KERNEL_POLLFD,
-
- /** Prefix struct contains user-level pollfd (file descriptor). */
- IORPC_FORMAT_USER_POLLFD,
-};
-
-
-/** Generate an opcode given format and code. */
-#define IORPC_OPCODE(FORMAT, CODE) (((FORMAT) << 16) | (CODE))
-
-/** The offset passed through the read() and write() system calls
- combines an opcode with 32 bits of user-specified offset. */
-union iorpc_offset
-{
-#ifndef __BIG_ENDIAN__
- uint64_t offset; /**< All bits. */
-
- struct
- {
- uint16_t code; /**< RPC code. */
- uint16_t format; /**< iorpc_format_e */
- uint32_t sub_offset; /**< caller-specified offset. */
- };
-
- uint32_t opcode; /**< Opcode combines code & format. */
-#else
- uint64_t offset; /**< All bits. */
-
- struct
- {
- uint32_t sub_offset; /**< caller-specified offset. */
- uint16_t format; /**< iorpc_format_e */
- uint16_t code; /**< RPC code. */
- };
-
- struct
- {
- uint32_t padding;
- uint32_t opcode; /**< Opcode combines code & format. */
- };
-#endif
-};
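-
-/* Illustrative sketch: encoding the offset passed to pread()/pwrite().
- * The code number (7) and 'reg_offset' are arbitrary example values.
- *
- *   union iorpc_offset off = { 0 };
- *   off.opcode = IORPC_OPCODE(IORPC_FORMAT_USER_MEM, 7);
- *   off.sub_offset = reg_offset;
- *   // off.offset is the 64-bit value handed to pread()/pwrite()
- */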
-
-
-/** Homing and cache hinting bits that can be used by IO devices. */
-struct iorpc_mem_attr
-{
- unsigned int lotar_x:4; /**< lotar X bits (or Gx page_mask). */
- unsigned int lotar_y:4; /**< lotar Y bits (or Gx page_offset). */
- unsigned int hfh:1; /**< Uses hash-for-home. */
- unsigned int nt_hint:1; /**< Non-temporal hint. */
- unsigned int io_pin:1; /**< Only fill 'IO' cache ways. */
-};
-
-/** Set the nt_hint bit. */
-#define IORPC_MEM_BUFFER_FLAG_NT_HINT (1 << 0)
-
-/** Set the IO pin bit. */
-#define IORPC_MEM_BUFFER_FLAG_IO_PIN (1 << 1)
-
-
-/** A structure used to describe memory registration. Different
- protection levels describe memory differently, so this union
- contains all the different possible descriptions. As a request
- moves up the call chain, each layer translates from one
- description format to the next. In particular, the Linux iorpc
- driver translates user VAs into CPAs and homing parameters. */
-union iorpc_mem_buffer
-{
- struct
- {
- uint64_t va; /**< User virtual address. */
- uint64_t size; /**< Buffer size. */
- unsigned int flags; /**< nt_hint, IO pin. */
- }
- user; /**< Buffer as described by user apps. */
-
- struct
- {
- unsigned long long cpa; /**< Client physical address. */
-#if defined(__KERNEL__) || defined(__HV__)
- size_t size; /**< Buffer size. */
- HV_PTE pte; /**< PTE describing memory homing. */
-#else
- uint64_t size;
- uint64_t pte;
-#endif
- unsigned int flags; /**< nt_hint, IO pin. */
- }
- kernel; /**< Buffer as described by kernel. */
-
- struct
- {
- unsigned long long pa; /**< Physical address. */
- size_t size; /**< Buffer size. */
- struct iorpc_mem_attr attr; /**< Homing and locality hint bits. */
- }
- hv; /**< Buffer parameters for HV driver. */
-};
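-
-/* Illustrative sketch: how a user-space library might fill in the
- * 'user' form before the Linux iorpc driver translates it to the
- * 'kernel' form; 'buf' and 'len' are assumed caller variables.
- *
- *   union iorpc_mem_buffer mb;
- *   mb.user.va    = (uint64_t)(uintptr_t)buf;
- *   mb.user.size  = len;
- *   mb.user.flags = IORPC_MEM_BUFFER_FLAG_NT_HINT;
- */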
-
-
-/** A structure used to describe interrupts. The format differs slightly
- * for user and kernel interrupts. As with the mem_buffer_t, translation
- * between the formats is done at each level. */
-union iorpc_interrupt
-{
- struct
- {
- int cpu; /**< CPU. */
- int event; /**< evt_num */
- }
- user; /**< Interrupt as described by user applications. */
-
- struct
- {
- int x; /**< X coord. */
- int y; /**< Y coord. */
- int ipi; /**< int_num */
- int event; /**< evt_num */
- }
- kernel; /**< Interrupt as described by the kernel. */
-
-};
-
-
-/** A structure used to describe interrupts used with poll(). The format
- * differs significantly for requests from user to kernel, and kernel to
- * hypervisor. As with the mem_buffer_t, translation between the formats
- * is done at each level. */
-union iorpc_pollfd_setup
-{
- struct
- {
- int fd; /**< Pollable file descriptor. */
- }
- user; /**< pollfd_setup as described by user applications. */
-
- struct
- {
- int x; /**< X coord. */
- int y; /**< Y coord. */
- int ipi; /**< int_num */
- int event; /**< evt_num */
- }
- kernel; /**< pollfd_setup as described by the kernel. */
-
-};
-
-
-/** A structure used to describe previously set up interrupts used with
- * poll(). The format differs significantly for requests from user to
- * kernel, and kernel to hypervisor. As with the mem_buffer_t, translation
- * between the formats is done at each level. */
-union iorpc_pollfd
-{
- struct
- {
- int fd; /**< Pollable file descriptor. */
- }
- user; /**< pollfd as described by user applications. */
-
- struct
- {
- int cookie; /**< hv cookie returned by the pollfd_setup operation. */
- }
- kernel; /**< pollfd as described by the kernel. */
-
-};
-
-
-/** The various iorpc devices use error codes from -1100 to -1299.
- *
- * This range is distinct from netio (-700 to -799), the hypervisor
- * (-800 to -899), tilepci (-900 to -999), ilib (-1000 to -1099),
- * gxcr (-1300 to -1399) and gxpci (-1400 to -1499).
- */
-enum gxio_err_e {
-
- /** Largest iorpc error number. */
- GXIO_ERR_MAX = -1101,
-
-
- /********************************************************/
- /* Generic Error Codes */
- /********************************************************/
-
- /** Bad RPC opcode - possible version incompatibility. */
- GXIO_ERR_OPCODE = -1101,
-
- /** Invalid parameter. */
- GXIO_ERR_INVAL = -1102,
-
- /** Memory buffer did not meet alignment requirements. */
- GXIO_ERR_ALIGNMENT = -1103,
-
- /** Memory buffers must be coherent and cacheable. */
- GXIO_ERR_COHERENCE = -1104,
-
- /** Resource already initialized. */
- GXIO_ERR_ALREADY_INIT = -1105,
-
- /** No service domains available. */
- GXIO_ERR_NO_SVC_DOM = -1106,
-
- /** Illegal service domain number. */
- GXIO_ERR_INVAL_SVC_DOM = -1107,
-
- /** Illegal MMIO address. */
- GXIO_ERR_MMIO_ADDRESS = -1108,
-
- /** Illegal interrupt binding. */
- GXIO_ERR_INTERRUPT = -1109,
-
- /** Unreasonable client memory. */
- GXIO_ERR_CLIENT_MEMORY = -1110,
-
- /** No more IOTLB entries. */
- GXIO_ERR_IOTLB_ENTRY = -1111,
-
- /** Invalid memory size. */
- GXIO_ERR_INVAL_MEMORY_SIZE = -1112,
-
- /** Unsupported operation. */
- GXIO_ERR_UNSUPPORTED_OP = -1113,
-
- /** Insufficient DMA credits. */
- GXIO_ERR_DMA_CREDITS = -1114,
-
- /** Operation timed out. */
- GXIO_ERR_TIMEOUT = -1115,
-
- /** No such device or object. */
- GXIO_ERR_NO_DEVICE = -1116,
-
- /** Device or resource busy. */
- GXIO_ERR_BUSY = -1117,
-
- /** I/O error. */
- GXIO_ERR_IO = -1118,
-
- /** Permissions error. */
- GXIO_ERR_PERM = -1119,
-
-
-
- /********************************************************/
- /* Test Device Error Codes */
- /********************************************************/
-
- /** Illegal register number. */
- GXIO_TEST_ERR_REG_NUMBER = -1120,
-
- /** Illegal buffer slot. */
- GXIO_TEST_ERR_BUFFER_SLOT = -1121,
-
-
- /********************************************************/
- /* MPIPE Error Codes */
- /********************************************************/
-
-
- /** Invalid buffer size. */
- GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE = -1131,
-
- /** Cannot allocate buffer stack. */
- GXIO_MPIPE_ERR_NO_BUFFER_STACK = -1140,
-
- /** Invalid buffer stack number. */
- GXIO_MPIPE_ERR_BAD_BUFFER_STACK = -1141,
-
- /** Cannot allocate NotifRing. */
- GXIO_MPIPE_ERR_NO_NOTIF_RING = -1142,
-
- /** Invalid NotifRing number. */
- GXIO_MPIPE_ERR_BAD_NOTIF_RING = -1143,
-
- /** Cannot allocate NotifGroup. */
- GXIO_MPIPE_ERR_NO_NOTIF_GROUP = -1144,
-
- /** Invalid NotifGroup number. */
- GXIO_MPIPE_ERR_BAD_NOTIF_GROUP = -1145,
-
- /** Cannot allocate bucket. */
- GXIO_MPIPE_ERR_NO_BUCKET = -1146,
-
- /** Invalid bucket number. */
- GXIO_MPIPE_ERR_BAD_BUCKET = -1147,
-
- /** Cannot allocate eDMA ring. */
- GXIO_MPIPE_ERR_NO_EDMA_RING = -1148,
-
- /** Invalid eDMA ring number. */
- GXIO_MPIPE_ERR_BAD_EDMA_RING = -1149,
-
- /** Invalid channel number. */
- GXIO_MPIPE_ERR_BAD_CHANNEL = -1150,
-
- /** Bad configuration. */
- GXIO_MPIPE_ERR_BAD_CONFIG = -1151,
-
- /** Empty iqueue. */
- GXIO_MPIPE_ERR_IQUEUE_EMPTY = -1152,
-
- /** Empty rules. */
- GXIO_MPIPE_ERR_RULES_EMPTY = -1160,
-
- /** Full rules. */
- GXIO_MPIPE_ERR_RULES_FULL = -1161,
-
- /** Corrupt rules. */
- GXIO_MPIPE_ERR_RULES_CORRUPT = -1162,
-
- /** Invalid rules. */
- GXIO_MPIPE_ERR_RULES_INVALID = -1163,
-
- /** Classifier is too big. */
- GXIO_MPIPE_ERR_CLASSIFIER_TOO_BIG = -1170,
-
- /** Classifier is too complex. */
- GXIO_MPIPE_ERR_CLASSIFIER_TOO_COMPLEX = -1171,
-
- /** Classifier has bad header. */
- GXIO_MPIPE_ERR_CLASSIFIER_BAD_HEADER = -1172,
-
- /** Classifier has bad contents. */
- GXIO_MPIPE_ERR_CLASSIFIER_BAD_CONTENTS = -1173,
-
- /** Classifier encountered invalid symbol. */
- GXIO_MPIPE_ERR_CLASSIFIER_INVAL_SYMBOL = -1174,
-
- /** Classifier encountered invalid bounds. */
- GXIO_MPIPE_ERR_CLASSIFIER_INVAL_BOUNDS = -1175,
-
- /** Classifier encountered invalid relocation. */
- GXIO_MPIPE_ERR_CLASSIFIER_INVAL_RELOCATION = -1176,
-
- /** Classifier encountered undefined symbol. */
- GXIO_MPIPE_ERR_CLASSIFIER_UNDEF_SYMBOL = -1177,
-
-
- /********************************************************/
- /* TRIO Error Codes */
- /********************************************************/
-
- /** Cannot allocate memory map region. */
- GXIO_TRIO_ERR_NO_MEMORY_MAP = -1180,
-
- /** Invalid memory map region number. */
- GXIO_TRIO_ERR_BAD_MEMORY_MAP = -1181,
-
- /** Cannot allocate scatter queue. */
- GXIO_TRIO_ERR_NO_SCATTER_QUEUE = -1182,
-
- /** Invalid scatter queue number. */
- GXIO_TRIO_ERR_BAD_SCATTER_QUEUE = -1183,
-
- /** Cannot allocate push DMA ring. */
- GXIO_TRIO_ERR_NO_PUSH_DMA_RING = -1184,
-
- /** Invalid push DMA ring index. */
- GXIO_TRIO_ERR_BAD_PUSH_DMA_RING = -1185,
-
- /** Cannot allocate pull DMA ring. */
- GXIO_TRIO_ERR_NO_PULL_DMA_RING = -1186,
-
- /** Invalid pull DMA ring index. */
- GXIO_TRIO_ERR_BAD_PULL_DMA_RING = -1187,
-
- /** Cannot allocate PIO region. */
- GXIO_TRIO_ERR_NO_PIO = -1188,
-
- /** Invalid PIO region index. */
- GXIO_TRIO_ERR_BAD_PIO = -1189,
-
- /** Cannot allocate ASID. */
- GXIO_TRIO_ERR_NO_ASID = -1190,
-
- /** Invalid ASID. */
- GXIO_TRIO_ERR_BAD_ASID = -1191,
-
-
- /********************************************************/
- /* MICA Error Codes */
- /********************************************************/
-
- /** No such accelerator type. */
- GXIO_MICA_ERR_BAD_ACCEL_TYPE = -1220,
-
- /** Cannot allocate context. */
- GXIO_MICA_ERR_NO_CONTEXT = -1221,
-
- /** PKA command queue is full, can't add another command. */
- GXIO_MICA_ERR_PKA_CMD_QUEUE_FULL = -1222,
-
- /** PKA result queue is empty, can't get a result from the queue. */
- GXIO_MICA_ERR_PKA_RESULT_QUEUE_EMPTY = -1223,
-
- /********************************************************/
- /* GPIO Error Codes */
- /********************************************************/
-
- /** Pin not available. Either the physical pin does not exist, or
- * it is reserved by the hypervisor for system usage. */
- GXIO_GPIO_ERR_PIN_UNAVAILABLE = -1240,
-
- /** Pin busy. The pin exists, and is available for use via GXIO, but
- * it has been attached by some other process or driver. */
- GXIO_GPIO_ERR_PIN_BUSY = -1241,
-
- /** Cannot access unattached pin. One or more of the pins being
- * manipulated by this call are not attached to the requesting
- * context. */
- GXIO_GPIO_ERR_PIN_UNATTACHED = -1242,
-
- /** Invalid I/O mode for pin. The wiring of the pin in the system
- * is such that the I/O mode or electrical control parameters
- * requested could cause damage. */
- GXIO_GPIO_ERR_PIN_INVALID_MODE = -1243,
-
- /** Smallest iorpc error number. */
- GXIO_ERR_MIN = -1299
-};
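-
-/* Illustrative sketch: because all iorpc error codes occupy one
- * contiguous negative range, a return value can be classified with a
- * simple bounds check:
- *
- *   if (rc >= GXIO_ERR_MIN && rc <= GXIO_ERR_MAX)
- *       ; // rc is an iorpc error, not a netio/hv/tilepci/ilib code
- */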
-
-
-#endif /* !_HV_IORPC_H_ */
diff --git a/arch/tile/include/hv/netio_errors.h b/arch/tile/include/hv/netio_errors.h
deleted file mode 100644
index e1591bff61b5..000000000000
--- a/arch/tile/include/hv/netio_errors.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * Error codes returned from NetIO routines.
- */
-
-#ifndef __NETIO_ERRORS_H__
-#define __NETIO_ERRORS_H__
-
-/**
- * @addtogroup error
- *
- * @brief The error codes returned by NetIO functions.
- *
- * NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and
- * a negative value if an error occurs.
- *
- * In cases where a NetIO function failed due to an error reported by
- * system libraries, the error code will be the negation of the
- * system errno at the time of failure. The @ref netio_strerror()
- * function will deliver error strings for both NetIO and system error
- * codes.
- *
- * @{
- */
-
-/** The set of all NetIO errors. */
-typedef enum
-{
- /** Operation successfully completed. */
- NETIO_NO_ERROR = 0,
-
- /** A packet was successfully retrieved from an input queue. */
- NETIO_PKT = 0,
-
- /** Largest NetIO error number. */
- NETIO_ERR_MAX = -701,
-
- /** The tile is not registered with the IPP. */
- NETIO_NOT_REGISTERED = -701,
-
- /** No packet was available to retrieve from the input queue. */
- NETIO_NOPKT = -702,
-
- /** The requested function is not implemented. */
- NETIO_NOT_IMPLEMENTED = -703,
-
- /** On a registration operation, the target queue already has the maximum
- * number of tiles registered for it, and no more may be added. On a
- * packet send operation, the output queue is full and nothing more can
- * be queued until some of the queued packets are actually transmitted. */
- NETIO_QUEUE_FULL = -704,
-
- /** The calling process or thread is not bound to exactly one CPU. */
- NETIO_BAD_AFFINITY = -705,
-
- /** Cannot allocate memory on requested controllers. */
- NETIO_CANNOT_HOME = -706,
-
- /** On a registration operation, the IPP specified is not configured
- * to support the options requested; for instance, the application
- * wants a specific type of tagged headers which the configured IPP
- * doesn't support. Or, the supplied configuration information is
- * not self-consistent, or is out of range; for instance, specifying
- * both NETIO_RECV and NETIO_NO_RECV, or asking for more than
- * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
- * configure operation, the number of items, or the base item, was
- * out of range.
- */
- NETIO_BAD_CONFIG = -707,
-
- /** Too many tiles have registered to transmit packets. */
- NETIO_TOOMANY_XMIT = -708,
-
- /** Packet transmission was attempted on a queue which was registered
- with transmit disabled. */
- NETIO_UNREG_XMIT = -709,
-
- /** This tile is already registered with the IPP. */
- NETIO_ALREADY_REGISTERED = -710,
-
- /** The Ethernet link is down. The application should try again later. */
- NETIO_LINK_DOWN = -711,
-
- /** An invalid memory buffer has been specified. This may be an unmapped
- * virtual address, or one which does not meet alignment requirements.
- * For netio_input_register(), this error may be returned when multiple
- * processes specify different memory regions to be used for NetIO
- * buffers. That can happen if these processes specify explicit memory
- * regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init()
- * has not been called by a common ancestor of the processes.
- */
- NETIO_FAULT = -712,
-
- /** Cannot combine user-managed shared memory and cache coherence. */
- NETIO_BAD_CACHE_CONFIG = -713,
-
- /** Smallest NetIO error number. */
- NETIO_ERR_MIN = -713,
-
-#ifndef __DOXYGEN__
- /** Used internally to mean that no response is needed; never returned to
- * an application. */
- NETIO_NO_RESPONSE = 1
-#endif
-} netio_error_t;
-
-/** @} */
-
-#endif /* __NETIO_ERRORS_H__ */
diff --git a/arch/tile/include/hv/netio_intf.h b/arch/tile/include/hv/netio_intf.h
deleted file mode 100644
index 8d20972aba2c..000000000000
--- a/arch/tile/include/hv/netio_intf.h
+++ /dev/null
@@ -1,2975 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * NetIO interface structures and macros.
- */
-
-#ifndef __NETIO_INTF_H__
-#define __NETIO_INTF_H__
-
-#include <hv/netio_errors.h>
-
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__)
-#include <assert.h>
-#define netio_assert assert /**< Enable assertions from macros */
-#else
-#define netio_assert(...) ((void)(0)) /**< Disable assertions from macros */
-#endif
-
-/*
- * If none of these symbols are defined, we're building libnetio in an
- * environment where we have pthreads, so we'll enable locking.
- */
-#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) && \
- !defined(__NEWLIB__)
-#define _NETIO_PTHREAD /**< Include a mutex in netio_queue_t below */
-
-/*
- * If NETIO_UNLOCKED is defined, we don't use per-cpu locks on
- * per-packet NetIO operations. We still do pthread locking on things
- * like netio_input_register, though. This is used for building
- * libnetio_unlocked.
- */
-#ifndef NETIO_UNLOCKED
-
-/* Avoid PLT overhead by using our own inlined per-cpu lock. */
-#include <sched.h>
-typedef int _netio_percpu_mutex_t;
-
-static __inline int
-_netio_percpu_mutex_init(_netio_percpu_mutex_t* lock)
-{
- *lock = 0;
- return 0;
-}
-
-static __inline int
-_netio_percpu_mutex_lock(_netio_percpu_mutex_t* lock)
-{
- while (__builtin_expect(__insn_tns(lock), 0))
- sched_yield();
- return 0;
-}
-
-static __inline int
-_netio_percpu_mutex_unlock(_netio_percpu_mutex_t* lock)
-{
- *lock = 0;
- return 0;
-}
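-
-/* Illustrative sketch: the lock is a bare test-and-set word, so usage
- * is simply init once, then bracket each per-packet critical section:
- *
- *   _netio_percpu_mutex_t lock;
- *   _netio_percpu_mutex_init(&lock);
- *   _netio_percpu_mutex_lock(&lock);
- *   // ... per-packet critical section ...
- *   _netio_percpu_mutex_unlock(&lock);
- */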
-
-#else /* NETIO_UNLOCKED */
-
-/* Don't do any locking for per-packet NetIO operations. */
-typedef int _netio_percpu_mutex_t;
-#define _netio_percpu_mutex_init(L)
-#define _netio_percpu_mutex_lock(L)
-#define _netio_percpu_mutex_unlock(L)
-
-#endif /* NETIO_UNLOCKED */
-#endif /* !__HV__, !__BOGUX__, !__KERNEL__, !__NEWLIB__ */
-
-/** How many tiles can register for a given queue.
- * @ingroup setup */
-#define NETIO_MAX_TILES_PER_QUEUE 64
-
-
-/** Largest permissible queue identifier.
- * @ingroup setup */
-#define NETIO_MAX_QUEUE_ID 255
-
-
-#ifndef __DOXYGEN__
-
-/* Metadata packet checksum/ethertype flags. */
-
-/** The L4 checksum has not been calculated. */
-#define _NETIO_PKT_NO_L4_CSUM_SHIFT 0
-#define _NETIO_PKT_NO_L4_CSUM_RMASK 1
-#define _NETIO_PKT_NO_L4_CSUM_MASK \
- (_NETIO_PKT_NO_L4_CSUM_RMASK << _NETIO_PKT_NO_L4_CSUM_SHIFT)
-
-/** The L3 checksum has not been calculated. */
-#define _NETIO_PKT_NO_L3_CSUM_SHIFT 1
-#define _NETIO_PKT_NO_L3_CSUM_RMASK 1
-#define _NETIO_PKT_NO_L3_CSUM_MASK \
- (_NETIO_PKT_NO_L3_CSUM_RMASK << _NETIO_PKT_NO_L3_CSUM_SHIFT)
-
-/** The L3 checksum is incorrect (or perhaps has not been calculated). */
-#define _NETIO_PKT_BAD_L3_CSUM_SHIFT 2
-#define _NETIO_PKT_BAD_L3_CSUM_RMASK 1
-#define _NETIO_PKT_BAD_L3_CSUM_MASK \
- (_NETIO_PKT_BAD_L3_CSUM_RMASK << _NETIO_PKT_BAD_L3_CSUM_SHIFT)
-
-/** The Ethernet packet type is unrecognized. */
-#define _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT 3
-#define _NETIO_PKT_TYPE_UNRECOGNIZED_RMASK 1
-#define _NETIO_PKT_TYPE_UNRECOGNIZED_MASK \
- (_NETIO_PKT_TYPE_UNRECOGNIZED_RMASK << \
- _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT)
-
-/* Metadata packet type flags. */
-
-/** Where the packet type bits are; this field is the index into
- * _netio_pkt_info. */
-#define _NETIO_PKT_TYPE_SHIFT 4
-#define _NETIO_PKT_TYPE_RMASK 0x3F
-
-/** How many VLAN tags the packet has, and, if we have two, which one we
- * actually grouped on. A VLAN within a proprietary (Marvell or Broadcom)
- * tag is counted here. */
-#define _NETIO_PKT_VLAN_SHIFT 4
-#define _NETIO_PKT_VLAN_RMASK 0x3
-#define _NETIO_PKT_VLAN_MASK \
- (_NETIO_PKT_VLAN_RMASK << _NETIO_PKT_VLAN_SHIFT)
-#define _NETIO_PKT_VLAN_NONE 0 /* No VLAN tag. */
-#define _NETIO_PKT_VLAN_ONE 1 /* One VLAN tag. */
-#define _NETIO_PKT_VLAN_TWO_OUTER 2 /* Two VLAN tags, outer one used. */
-#define _NETIO_PKT_VLAN_TWO_INNER 3 /* Two VLAN tags, inner one used. */
-
-/** Which proprietary tags the packet has. */
-#define _NETIO_PKT_TAG_SHIFT 6
-#define _NETIO_PKT_TAG_RMASK 0x3
-#define _NETIO_PKT_TAG_MASK \
- (_NETIO_PKT_TAG_RMASK << _NETIO_PKT_TAG_SHIFT)
-#define _NETIO_PKT_TAG_NONE 0 /* No proprietary tags. */
-#define _NETIO_PKT_TAG_MRVL 1 /* Marvell HyperG.Stack tags. */
-#define _NETIO_PKT_TAG_MRVL_EXT 2 /* HyperG.Stack extended tags. */
-#define _NETIO_PKT_TAG_BRCM 3 /* Broadcom HiGig tags. */
-
-/** Whether a packet has an LLC + SNAP header. */
-#define _NETIO_PKT_SNAP_SHIFT 8
-#define _NETIO_PKT_SNAP_RMASK 0x1
-#define _NETIO_PKT_SNAP_MASK \
- (_NETIO_PKT_SNAP_RMASK << _NETIO_PKT_SNAP_SHIFT)
-
-/* NOTE: Bits 9 and 10 are unused. */
-
-/** Length of any custom data before the L2 header, in words. */
-#define _NETIO_PKT_CUSTOM_LEN_SHIFT 11
-#define _NETIO_PKT_CUSTOM_LEN_RMASK 0x1F
-#define _NETIO_PKT_CUSTOM_LEN_MASK \
- (_NETIO_PKT_CUSTOM_LEN_RMASK << _NETIO_PKT_CUSTOM_LEN_SHIFT)
-
-/** The L4 checksum is incorrect (or perhaps has not been calculated). */
-#define _NETIO_PKT_BAD_L4_CSUM_SHIFT 16
-#define _NETIO_PKT_BAD_L4_CSUM_RMASK 0x1
-#define _NETIO_PKT_BAD_L4_CSUM_MASK \
- (_NETIO_PKT_BAD_L4_CSUM_RMASK << _NETIO_PKT_BAD_L4_CSUM_SHIFT)
-
-/** Length of the L2 header, in words. */
-#define _NETIO_PKT_L2_LEN_SHIFT 17
-#define _NETIO_PKT_L2_LEN_RMASK 0x1F
-#define _NETIO_PKT_L2_LEN_MASK \
- (_NETIO_PKT_L2_LEN_RMASK << _NETIO_PKT_L2_LEN_SHIFT)
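-
-/* Illustrative sketch: every metadata field above follows the same
- * shift/rmask pattern.  For example, given a metadata '__flags' word
- * in 'flags' (an assumed local), the VLAN-tag count is:
- *
- *   unsigned int vlans =
- *       (flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK;
- *   // one of _NETIO_PKT_VLAN_{NONE,ONE,TWO_OUTER,TWO_INNER}
- */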
-
-
-/* Flags in minimal packet metadata. */
-
-/** We need an eDMA checksum on this packet. */
-#define _NETIO_PKT_NEED_EDMA_CSUM_SHIFT 0
-#define _NETIO_PKT_NEED_EDMA_CSUM_RMASK 1
-#define _NETIO_PKT_NEED_EDMA_CSUM_MASK \
- (_NETIO_PKT_NEED_EDMA_CSUM_RMASK << _NETIO_PKT_NEED_EDMA_CSUM_SHIFT)
-
-/* Data within the packet information table. */
-
-/* Note that, for efficiency, code which uses these fields assumes that none
- * of the shift values below are zero. See uses below for an explanation. */
-
-/** Offset within the L2 header of the innermost ethertype (in halfwords). */
-#define _NETIO_PKT_INFO_ETYPE_SHIFT 6
-#define _NETIO_PKT_INFO_ETYPE_RMASK 0x1F
-
-/** Offset within the L2 header of the VLAN tag (in halfwords). */
-#define _NETIO_PKT_INFO_VLAN_SHIFT 11
-#define _NETIO_PKT_INFO_VLAN_RMASK 0x1F
-
-#endif
-
-
-/** The size of a memory buffer representing a small packet.
- * @ingroup egress */
-#define SMALL_PACKET_SIZE 256
-
-/** The size of a memory buffer representing a large packet.
- * @ingroup egress */
-#define LARGE_PACKET_SIZE 2048
-
-/** The size of a memory buffer representing a jumbo packet.
- * @ingroup egress */
-#define JUMBO_PACKET_SIZE (12 * 1024)
-
-
-/* Common ethertypes.
- * @ingroup ingress */
-/** @{ */
-/** The ethertype of IPv4. */
-#define ETHERTYPE_IPv4 (0x0800)
-/** The ethertype of ARP. */
-#define ETHERTYPE_ARP (0x0806)
-/** The ethertype of VLANs. */
-#define ETHERTYPE_VLAN (0x8100)
-/** The ethertype of a Q-in-Q header. */
-#define ETHERTYPE_Q_IN_Q (0x9100)
-/** The ethertype of IPv6. */
-#define ETHERTYPE_IPv6 (0x86DD)
-/** The ethertype of MPLS. */
-#define ETHERTYPE_MPLS (0x8847)
-/** @} */
-
-
-/** The possible return values of NETIO_PKT_STATUS.
- * @ingroup ingress
- */
-typedef enum
-{
- /** No problems were detected with this packet. */
- NETIO_PKT_STATUS_OK,
- /** The packet is undersized; this is expected behavior if the packet's
- * ethertype is unrecognized, but otherwise the packet is likely corrupt. */
- NETIO_PKT_STATUS_UNDERSIZE,
- /** The packet is oversized and some trailing bytes have been discarded.
- This is expected behavior for short packets, since it's impossible to
- precisely determine the amount of padding which may have been added to
- them to make them meet the minimum Ethernet packet size. */
- NETIO_PKT_STATUS_OVERSIZE,
- /** The packet was judged to be corrupt by hardware (for instance, it had
- a bad CRC, or part of it was discarded due to lack of buffer space in
- the I/O shim) and should be discarded. */
- NETIO_PKT_STATUS_BAD
-} netio_pkt_status_t;
-
-
-/** Log2 of how many buckets we have. */
-#define NETIO_LOG2_NUM_BUCKETS (10)
-
-/** How many buckets we have.
- * @ingroup ingress */
-#define NETIO_NUM_BUCKETS (1 << NETIO_LOG2_NUM_BUCKETS)
-
-
-/**
- * @brief A group-to-bucket identifier.
- *
- * @ingroup setup
- *
- * This tells us what to do with a given group.
- */
-typedef union {
- /** The header broken down into bits. */
- struct {
- /** Whether we should balance on L4, if available */
- unsigned int __balance_on_l4:1;
- /** Whether we should balance on L3, if available */
- unsigned int __balance_on_l3:1;
- /** Whether we should balance on L2, if available */
- unsigned int __balance_on_l2:1;
- /** Reserved for future use */
- unsigned int __reserved:1;
- /** The base bucket to use to send traffic */
- unsigned int __bucket_base:NETIO_LOG2_NUM_BUCKETS;
- /** The mask to apply to the balancing value. This must be one less
- * than a power of two, e.g. 0x3 or 0xFF.
- */
- unsigned int __bucket_mask:NETIO_LOG2_NUM_BUCKETS;
- /** Pad to 32 bits */
- unsigned int __padding:(32 - 4 - 2 * NETIO_LOG2_NUM_BUCKETS);
- } bits;
- /** To send out the IDN. */
- unsigned int word;
-}
-netio_group_t;
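-
-/* Illustrative sketch: spreading a group's traffic across 16 buckets
- * starting at bucket 0, balancing on the L3/L4 headers when present.
- *
- *   netio_group_t g = { .word = 0 };
- *   g.bits.__balance_on_l4 = 1;
- *   g.bits.__balance_on_l3 = 1;
- *   g.bits.__bucket_base   = 0;
- *   g.bits.__bucket_mask   = 0xF;  // one less than a power of two
- */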
-
-
-/**
- * @brief A VLAN-to-bucket identifier.
- *
- * @ingroup setup
- *
- * This tells us what to do with a given VLAN.
- */
-typedef netio_group_t netio_vlan_t;
-
-
-/**
- * A bucket-to-queue mapping.
- * @ingroup setup
- */
-typedef unsigned char netio_bucket_t;
-
-
-/**
- * A packet size can always fit in a netio_size_t.
- * @ingroup setup
- */
-typedef unsigned int netio_size_t;
-
-
-/**
- * @brief Ethernet standard (ingress) packet metadata.
- *
- * @ingroup ingress
- *
- * This is additional data associated with each packet.
- * This structure is opaque and accessed through the @ref ingress.
- *
- * Also, the buffer population operation currently assumes that standard
- * metadata is at least as large as minimal metadata, and will need to be
- * modified if that is no longer the case.
- */
-typedef struct
-{
-#ifdef __DOXYGEN__
- /** This structure is opaque. */
- unsigned char opaque[24];
-#else
- /** The overall ordinal of the packet */
- unsigned int __packet_ordinal;
- /** The ordinal of the packet within the group */
- unsigned int __group_ordinal;
- /** The best flow hash IPP could compute. */
- unsigned int __flow_hash;
- /** Flags pertaining to checksum calculation, packet type, etc. */
- unsigned int __flags;
- /** The first word of "user data". */
- unsigned int __user_data_0;
- /** The second word of "user data". */
- unsigned int __user_data_1;
-#endif
-}
-netio_pkt_metadata_t;
-
-
-/** To ensure that the L3 header is aligned mod 4, the L2 header should be
- * aligned mod 4 plus 2, since every supported L2 header is 4n + 2 bytes
- * long. The standard way to do this is to simply add 2 bytes of padding
- * before the L2 header.
- */
-#define NETIO_PACKET_PADDING 2
-
-
-
-/**
- * @brief Ethernet minimal (egress) packet metadata.
- *
- * @ingroup egress
- *
- * This structure represents information about packets which have
- * been processed by @ref netio_populate_buffer() or
- * @ref netio_populate_prepend_buffer(). This structure is opaque
- * and accessed through the @ref egress.
- *
- * @internal This structure is actually copied into the memory used by
- * standard metadata, which is assumed to be large enough.
- */
-typedef struct
-{
-#ifdef __DOXYGEN__
- /** This structure is opaque. */
- unsigned char opaque[14];
-#else
- /** The offset of the L2 header from the start of the packet data. */
- unsigned short l2_offset;
- /** The offset of the L3 header from the start of the packet data. */
- unsigned short l3_offset;
- /** Where to write the checksum. */
- unsigned char csum_location;
- /** Where to start checksumming from. */
- unsigned char csum_start;
- /** Flags pertaining to checksum calculation etc. */
- unsigned short flags;
- /** The L2 length of the packet. */
- unsigned short l2_length;
- /** The checksum with which to seed the checksum generator. */
- unsigned short csum_seed;
- /** How much to checksum. */
- unsigned short csum_length;
-#endif
-}
-netio_pkt_minimal_metadata_t;
-
-
-#ifndef __DOXYGEN__
-
-/**
- * @brief An I/O notification header.
- *
- * This is the first word of data received from an I/O shim in a notification
- * packet. It contains framing and status information.
- */
-typedef union
-{
- unsigned int word; /**< The whole word. */
- /** The various fields. */
- struct
- {
- unsigned int __channel:7; /**< Resource channel. */
- unsigned int __type:4; /**< Type. */
- unsigned int __ack:1; /**< Whether an acknowledgement is needed. */
- unsigned int __reserved:1; /**< Reserved. */
- unsigned int __protocol:1; /**< A protocol-specific word is added. */
- unsigned int __status:2; /**< Status of the transfer. */
- unsigned int __framing:2; /**< Framing of the transfer. */
- unsigned int __transfer_size:14; /**< Transfer size in bytes (total). */
- } bits;
-}
-__netio_pkt_notif_t;
-
-
-/**
- * Returns the base address of the packet.
- */
-#define _NETIO_PKT_HANDLE_BASE(p) \
- ((unsigned char*)((p).word & 0xFFFFFFC0))
-
-/**
- * Returns the base address of the packet.
- */
-#define _NETIO_PKT_BASE(p) \
- _NETIO_PKT_HANDLE_BASE(p->__packet)
-
-/**
- * @brief An I/O notification packet (second word)
- *
- * This is the second word of data received from an I/O shim in a notification
- * packet. This is the virtual address of the packet buffer, plus some flag
- * bits. (The virtual address of the packet is always 256-byte aligned so we
- * have room for 8 bits' worth of flags in the low 8 bits.)
- *
- * @internal
- * NOTE: The low two bits must contain "__queue", so the "packet size"
- * (SIZE_SMALL, SIZE_LARGE, or SIZE_JUMBO) can be determined quickly.
- *
- * If __addr or __offset are moved, _NETIO_PKT_BASE
- * (defined just above) must be changed.
- */
-typedef union
-{
- unsigned int word; /**< The whole word. */
- /** The various fields. */
- struct
- {
- /** Which queue the packet will be returned to once it is sent back to
- the IPP. This is one of the SIZE_xxx values. */
- unsigned int __queue:2;
-
- /** The IPP handle of the sending IPP. */
- unsigned int __ipp_handle:2;
-
- /** Reserved for future use. */
- unsigned int __reserved:1;
-
- /** If 1, this packet has minimal (egress) metadata; otherwise, it
- has standard (ingress) metadata. */
- unsigned int __minimal:1;
-
- /** Offset of the metadata within the packet. This value is multiplied
- * by 64 and added to the base packet address to get the metadata
- * address. Note that this field is aligned within the word such that
- * you can easily extract the metadata address with a 26-bit mask. */
- unsigned int __offset:2;
-
- /** The top 24 bits of the packet's virtual address. */
- unsigned int __addr:24;
- } bits;
-}
-__netio_pkt_handle_t;
-
-#endif /* !__DOXYGEN__ */
-
-
-/**
- * @brief A handle for an I/O packet's storage.
- * @ingroup ingress
- *
- * netio_pkt_handle_t encodes the concept of a ::netio_pkt_t with its
- * packet metadata removed. It is a much smaller type that exists to
- * facilitate applications where the full ::netio_pkt_t type is too
- * large, such as those that cache enormous numbers of packets or wish
- * to transmit packet descriptors over the UDN.
- *
- * Because there is no metadata, most ::netio_pkt_t operations cannot be
- * performed on a netio_pkt_handle_t. It supports only
- * netio_free_handle() (to free the buffer) and
- * NETIO_PKT_CUSTOM_DATA_H() (to access a pointer to its contents).
- * The application must acquire any additional metadata it wants from the
- * original ::netio_pkt_t and record it separately.
- *
- * A netio_pkt_handle_t can be extracted from a ::netio_pkt_t by calling
- * NETIO_PKT_HANDLE(). An invalid handle (analogous to NULL) can be
- * created by assigning the value ::NETIO_PKT_HANDLE_NONE. A handle can
- * be tested for validity with NETIO_PKT_HANDLE_IS_VALID().
- */
-typedef struct
-{
- unsigned int word; /**< Opaque bits. */
-} netio_pkt_handle_t;
-
-/**
- * @brief A packet descriptor.
- *
- * @ingroup ingress
- * @ingroup egress
- *
- * This data structure represents a packet. The structure is manipulated
- * through the @ref ingress and the @ref egress.
- *
- * While the contents of a netio_pkt_t are opaque, the structure itself is
- * portable. This means that it may be shared between all tiles which have
- * done a netio_input_register() call for the interface on which the pkt_t
- * was initially received (via netio_get_packet()) or retrieved (via
- * netio_get_buffer()). The contents of a netio_pkt_t can be transmitted to
- * another tile via shared memory, or via a UDN message, or by other means.
- * The destination tile may then use the pkt_t as if it had originally been
- * received locally; it may read or write the packet's data, read its
- * metadata, free the packet, send the packet, transfer the netio_pkt_t to
- * yet another tile, and so forth.
- *
- * Once a netio_pkt_t has been transferred to a second tile, the first tile
- * should not reference the original copy; in particular, if more than one
- * tile frees or sends the same netio_pkt_t, the IPP's packet free lists will
- * become corrupted. Note also that each tile which reads or modifies
- * packet data must obey the memory coherency rules outlined in @ref input.
- */
-typedef struct
-{
-#ifdef __DOXYGEN__
- /** This structure is opaque. */
- unsigned char opaque[32];
-#else
- /** For an ingress packet (one with standard metadata), this is the
- * notification header we got from the I/O shim. For an egress packet
- * (one with minimal metadata), this word is zero if the packet has not
- * been populated, and nonzero if it has. */
- __netio_pkt_notif_t __notif_header;
-
- /** Virtual address of the packet buffer, plus state flags. */
- __netio_pkt_handle_t __packet;
-
- /** Metadata associated with the packet. */
- netio_pkt_metadata_t __metadata;
-#endif
-}
-netio_pkt_t;
-
-
-#ifndef __DOXYGEN__
-
-#define __NETIO_PKT_NOTIF_HEADER(pkt) ((pkt)->__notif_header)
-#define __NETIO_PKT_IPP_HANDLE(pkt) ((pkt)->__packet.bits.__ipp_handle)
-#define __NETIO_PKT_QUEUE(pkt) ((pkt)->__packet.bits.__queue)
-#define __NETIO_PKT_NOTIF_HEADER_M(mda, pkt) ((pkt)->__notif_header)
-#define __NETIO_PKT_IPP_HANDLE_M(mda, pkt) ((pkt)->__packet.bits.__ipp_handle)
-#define __NETIO_PKT_MINIMAL(pkt) ((pkt)->__packet.bits.__minimal)
-#define __NETIO_PKT_QUEUE_M(mda, pkt) ((pkt)->__packet.bits.__queue)
-#define __NETIO_PKT_FLAGS_M(mda, pkt) ((mda)->__flags)
-
-/* Packet information table, used by the attribute access functions below. */
-extern const uint16_t _netio_pkt_info[];
-
-#endif /* __DOXYGEN__ */
-
-
-#ifndef __DOXYGEN__
-/* These macros are deprecated and will disappear in a future MDE release. */
-#define NETIO_PKT_GOOD_CHECKSUM(pkt) \
- NETIO_PKT_L4_CSUM_CORRECT(pkt)
-#define NETIO_PKT_GOOD_CHECKSUM_M(mda, pkt) \
- NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt)
-#endif /* __DOXYGEN__ */
-
-
-/* Packet attribute access functions. */
-
-/** Return a pointer to the metadata for a packet.
- * @ingroup ingress
- *
- * Calling this function once and passing the result to other retrieval
- * functions with a "_M" suffix usually improves performance. This
- * function must be called on an 'ingress' packet (i.e. one retrieved
- * by @ref netio_get_packet(), on which @ref netio_populate_buffer() or
- * @ref netio_populate_prepend_buffer() have not been called). Use of this
- * function on an 'egress' packet will cause an assertion failure.
- *
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the packet's standard metadata.
- */
-static __inline netio_pkt_metadata_t*
-NETIO_PKT_METADATA(netio_pkt_t* pkt)
-{
- netio_assert(!pkt->__packet.bits.__minimal);
- return &pkt->__metadata;
-}
-
-
-/** Return a pointer to the minimal metadata for a packet.
- * @ingroup egress
- *
- * Calling this function once and passing the result to other retrieval
- * functions with a "_MM" suffix usually improves performance. This
- * function must be called on an 'egress' packet (i.e. one on which
- * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer()
- * have been called, or one retrieved by @ref netio_get_buffer()). Use of
- * this function on an 'ingress' packet will cause an assertion failure.
- *
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the packet's minimal metadata.
- */
-static __inline netio_pkt_minimal_metadata_t*
-NETIO_PKT_MINIMAL_METADATA(netio_pkt_t* pkt)
-{
- netio_assert(pkt->__packet.bits.__minimal);
- return (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
-}
-
-
-/** Determine whether a packet has 'minimal' metadata.
- * @ingroup pktfuncs
- *
- * This function will return nonzero if the packet is an 'egress'
- * packet (i.e. one on which @ref netio_populate_buffer() or
- * @ref netio_populate_prepend_buffer() have been called, or one
- * retrieved by @ref netio_get_buffer()), and zero if the packet
- * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(),
- * which has not been converted into an 'egress' packet).
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the packet has minimal metadata.
- */
-static __inline unsigned int
-NETIO_PKT_IS_MINIMAL(netio_pkt_t* pkt)
-{
- return pkt->__packet.bits.__minimal;
-}
-
-
-/** Return a handle for a packet's storage.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return A handle for the packet's storage.
- */
-static __inline netio_pkt_handle_t
-NETIO_PKT_HANDLE(netio_pkt_t* pkt)
-{
- netio_pkt_handle_t h;
- h.word = pkt->__packet.word;
- return h;
-}
-
-
-/** A special reserved value indicating the absence of a packet handle.
- *
- * @ingroup pktfuncs
- */
-#define NETIO_PKT_HANDLE_NONE ((netio_pkt_handle_t) { 0 })
-
-
-/** Test whether a packet handle is valid.
- *
- * Applications may wish to use the reserved value NETIO_PKT_HANDLE_NONE
- * to indicate no packet at all. This function tests to see if a packet
- * handle is a real handle, not this special reserved value.
- *
- * @ingroup pktfuncs
- *
- * @param[in] handle Handle on which to operate.
- * @return One if the packet handle is valid, else zero.
- */
-static __inline unsigned int
-NETIO_PKT_HANDLE_IS_VALID(netio_pkt_handle_t handle)
-{
- return handle.word != 0;
-}
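/*
 * Illustrative sketch (not part of the original header): caching compact
 * packet handles instead of full netio_pkt_t descriptors, then releasing
 * them later.  netio_free_handle() is the buffer-release routine referred
 * to above; its exact prototype is assumed here.
 */
static netio_pkt_handle_t example_cache[128];  /* zero == NETIO_PKT_HANDLE_NONE */

static __inline void
example_stash(netio_pkt_t* pkt, int slot)
{
  example_cache[slot] = NETIO_PKT_HANDLE(pkt);
}

static __inline void
example_release(int slot)
{
  if (NETIO_PKT_HANDLE_IS_VALID(example_cache[slot]))
  {
    netio_free_handle(example_cache[slot]);  /* assumed: frees the buffer */
    example_cache[slot] = NETIO_PKT_HANDLE_NONE;
  }
}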
-
-
-
-/** Return a pointer to the start of the packet's custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- * @ingroup ingress
- *
- * @param[in] handle Handle on which to operate.
- * @return A pointer to the start of the packet.
- */
-static __inline unsigned char*
-NETIO_PKT_CUSTOM_DATA_H(netio_pkt_handle_t handle)
-{
- return _NETIO_PKT_HANDLE_BASE(handle) + NETIO_PACKET_PADDING;
-}
-
-
-/** Return the length of the packet's custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- *
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet's custom header, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_CUSTOM_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- /*
- * Note that we effectively need to extract a quantity from the flags word
- * which is measured in words, and then turn it into bytes by shifting
- * it left by 2. We do this all at once by just shifting right two less
- * bits, and shifting the mask up two bits.
- */
- return ((mda->__flags >> (_NETIO_PKT_CUSTOM_LEN_SHIFT - 2)) &
- (_NETIO_PKT_CUSTOM_LEN_RMASK << 2));
-}
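/*
 * The identity behind the trick above, writing SHIFT for
 * _NETIO_PKT_CUSTOM_LEN_SHIFT and RMASK for _NETIO_PKT_CUSTOM_LEN_RMASK
 * (a contiguous, right-aligned mask):
 *
 *   ((flags >> (SHIFT - 2)) & (RMASK << 2))
 *     == (((flags >> SHIFT) & RMASK) << 2)
 *
 * i.e. the word count is extracted and multiplied by 4 in a single step.
 */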
-
-
-/** Return the length of the packet, starting with the custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_CUSTOM_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (__NETIO_PKT_NOTIF_HEADER(pkt).bits.__transfer_size -
- NETIO_PACKET_PADDING);
-}
-
-
-/** Return a pointer to the start of the packet's custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the start of the packet.
- */
-static __inline unsigned char*
-NETIO_PKT_CUSTOM_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return NETIO_PKT_CUSTOM_DATA_H(NETIO_PKT_HANDLE(pkt));
-}
-
-
-/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet's L2 header, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L2_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- /*
- * Note that we effectively need to extract a quantity from the flags word
- * which is measured in words, and then turn it into bytes by shifting
- * it left by 2. We do this all at once by just shifting right two less
- * bits, and shifting the mask up two bits. We then add two bytes.
- */
- return ((mda->__flags >> (_NETIO_PKT_L2_LEN_SHIFT - 2)) &
- (_NETIO_PKT_L2_LEN_RMASK << 2)) + 2;
-}
-
-
-/** Return the length of the packet, starting with the L2 (Ethernet) header.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L2_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt) -
- NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda,pkt));
-}
-
-
-/** Return a pointer to the start of the packet's L2 (Ethernet) header.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the start of the packet.
- */
-static __inline unsigned char*
-NETIO_PKT_L2_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (NETIO_PKT_CUSTOM_DATA_M(mda, pkt) +
- NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt));
-}
-
-
-/** Retrieve the length of the packet, starting with the L3 (generally,
- * the IP) header.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Length of the packet's L3 header and data, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L3_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (NETIO_PKT_L2_LENGTH_M(mda, pkt) -
- NETIO_PKT_L2_HEADER_LENGTH_M(mda,pkt));
-}
-
-
-/** Return a pointer to the packet's L3 (generally, the IP) header.
- * @ingroup ingress
- *
- * Note that we guarantee word alignment of the L3 header.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the packet's L3 header.
- */
-static __inline unsigned char*
-NETIO_PKT_L3_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (NETIO_PKT_L2_DATA_M(mda, pkt) +
- NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt));
-}
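/*
 * Illustrative sketch: fetching the metadata pointer once and reusing it
 * across several "_M" accessors, as recommended above, to locate the L3
 * payload of an ingress packet.
 */
static __inline unsigned char*
example_l3_m(netio_pkt_t* pkt, netio_size_t* l3_len)
{
  netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);

  *l3_len = NETIO_PKT_L3_LENGTH_M(mda, pkt);
  return NETIO_PKT_L3_DATA_M(mda, pkt);
}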
-
-
-/** Return the ordinal of the packet.
- * @ingroup ingress
- *
- * Each packet is given an ordinal number when it is delivered by the IPP.
- * In the medium term, the ordinal is unique and monotonically increasing,
- * being incremented by 1 for each packet; the ordinal of the first packet
- * delivered after the IPP starts is zero. (Since the ordinal is of finite
- * size, given enough input packets, it will eventually wrap around to zero;
- * in the long term, therefore, ordinals are not unique.) The ordinals
- * handed out by different IPPs are not disjoint, so two packets from
- * different IPPs may have identical ordinals. Packets dropped by the
- * IPP or by the I/O shim are not assigned ordinals.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's per-IPP packet ordinal.
- */
-static __inline unsigned int
-NETIO_PKT_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return mda->__packet_ordinal;
-}
-
-
-/** Return the per-group ordinal of the packet.
- * @ingroup ingress
- *
- * Each packet is given a per-group ordinal number when it is
- * delivered by the IPP. By default, the group is the packet's VLAN,
- * although the IPP can be recompiled to use different values. In
- * the medium term, the ordinal is unique and monotonically
- * increasing, being incremented by 1 for each packet; the ordinal of
- * the first packet distributed to a particular group is zero.
- * (Since the ordinal is of finite size, given enough input packets,
- * it will eventually wrap around to zero; in the long term,
- * therefore, ordinals are not unique.) The ordinals handed out by
- * different IPPs are not disjoint, so two packets from different IPPs
- * may have identical ordinals; similarly, packets distributed to
- * different groups may have identical ordinals. Packets dropped by
- * the IPP or by the I/O shim are not assigned ordinals.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's per-IPP, per-group ordinal.
- */
-static __inline unsigned int
-NETIO_PKT_GROUP_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return mda->__group_ordinal;
-}
-
-
-/** Return the VLAN ID assigned to the packet.
- * @ingroup ingress
- *
- * This value is usually contained within the packet header.
- *
- * This value will be zero if the packet does not have a VLAN tag, or if
- * this value was not extracted from the packet.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's VLAN ID.
- */
-static __inline unsigned short
-NETIO_PKT_VLAN_ID_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- int vl = (mda->__flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK;
- unsigned short* pkt_p;
- int index;
- unsigned short val;
-
- if (vl == _NETIO_PKT_VLAN_NONE)
- return 0;
-
- pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
- index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
-
- val = pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_VLAN_SHIFT) &
- _NETIO_PKT_INFO_VLAN_RMASK];
-
-#ifdef __TILECC__
- return (__insn_bytex(val) >> 16) & 0xFFF;
-#else
- return (__builtin_bswap32(val) >> 16) & 0xFFF;
-#endif
-}
-
-
-/** Return the ethertype of the packet.
- * @ingroup ingress
- *
- * This value is usually contained within the packet header.
- *
- * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M()
- * returns true; otherwise, it may not be well defined.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's ethertype.
- */
-static __inline unsigned short
-NETIO_PKT_ETHERTYPE_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- unsigned short* pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
- int index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
-
- unsigned short val =
- pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_ETYPE_SHIFT) &
- _NETIO_PKT_INFO_ETYPE_RMASK];
-
- return __builtin_bswap32(val) >> 16;
-}
-
-
-/** Return the flow hash computed on the packet.
- * @ingroup ingress
- *
- * For TCP and UDP packets, this hash is calculated by hashing together
- * the "5-tuple" values, specifically the source IP address, destination
- * IP address, protocol type, source port and destination port.
- * The hash value is intended to be helpful for millions of distinct
- * flows.
- *
- * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
- * derived by hashing together the source and destination IP addresses.
- *
- * For MPLS-encapsulated packets, the flow hash is derived by hashing
- * the first MPLS label.
- *
- * For all other packets the flow hash is computed from the source
- * and destination Ethernet addresses.
- *
- * The hash is symmetric, meaning it produces the same value if the
- * source and destination are swapped. The only exceptions are
- * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
- * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
- * (Encap Security Payload), which use only the destination address
- * since the source address is not meaningful.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's 32-bit flow hash.
- */
-static __inline unsigned int
-NETIO_PKT_FLOW_HASH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return mda->__flow_hash;
-}
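/*
 * Illustrative sketch: steering packets to one of num_workers worker
 * tiles by flow hash, so every packet of a given flow reaches the same
 * worker (num_workers is assumed nonzero).
 */
static __inline unsigned int
example_pick_worker(netio_pkt_metadata_t* mda, netio_pkt_t* pkt,
                    unsigned int num_workers)
{
  return NETIO_PKT_FLOW_HASH_M(mda, pkt) % num_workers;
}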
-
-
-/** Return the first word of "user data" for the packet.
- *
- * The contents of the user data words depend on the IPP.
- *
- * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
- * word of user data contains the least significant bits of the 64-bit
- * arrival cycle count (see @c get_cycle_count_low()).
- *
- * See the <em>System Programmer's Guide</em> for details.
- *
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's first word of "user data".
- */
-static __inline unsigned int
-NETIO_PKT_USER_DATA_0_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return mda->__user_data_0;
-}
-
-
-/** Return the second word of "user data" for the packet.
- *
- * The contents of the user data words depend on the IPP.
- *
- * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
- * word of user data contains the most significant bits of the 64-bit
- * arrival cycle count (see @c get_cycle_count_high()).
- *
- * See the <em>System Programmer's Guide</em> for details.
- *
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's second word of "user data".
- */
-static __inline unsigned int
-NETIO_PKT_USER_DATA_1_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return mda->__user_data_1;
-}
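/*
 * Illustrative sketch (assumes the standard ipp1/ipp2/ipp4 sub-drivers
 * described above): reassembling the 64-bit arrival cycle count from the
 * two user data words.
 */
static __inline uint64_t
example_arrival_cycles(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  return ((uint64_t) NETIO_PKT_USER_DATA_1_M(mda, pkt) << 32) |
         NETIO_PKT_USER_DATA_0_M(mda, pkt);
}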
-
-
-/** Determine whether the L4 (TCP/UDP) checksum was calculated.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the L4 checksum was calculated.
- */
-static __inline unsigned int
-NETIO_PKT_L4_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return !(mda->__flags & _NETIO_PKT_NO_L4_CSUM_MASK);
-}
-
-
-/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
- * be correct.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the checksum was calculated and is correct.
- */
-static __inline unsigned int
-NETIO_PKT_L4_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return !(mda->__flags &
- (_NETIO_PKT_BAD_L4_CSUM_MASK | _NETIO_PKT_NO_L4_CSUM_MASK));
-}
-
-
-/** Determine whether the L3 (IP) checksum was calculated.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the L3 (IP) checksum was calculated.
-*/
-static __inline unsigned int
-NETIO_PKT_L3_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return !(mda->__flags & _NETIO_PKT_NO_L3_CSUM_MASK);
-}
-
-
-/** Determine whether the L3 (IP) checksum was calculated and found to be
- * correct.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the checksum was calculated and is correct.
- */
-static __inline unsigned int
-NETIO_PKT_L3_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return !(mda->__flags &
- (_NETIO_PKT_BAD_L3_CSUM_MASK | _NETIO_PKT_NO_L3_CSUM_MASK));
-}
-
-
-/** Determine whether the ethertype was recognized and L3 packet data was
- * processed.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the ethertype was recognized and L3 packet data was
- * processed.
- */
-static __inline unsigned int
-NETIO_PKT_ETHERTYPE_RECOGNIZED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return !(mda->__flags & _NETIO_PKT_TYPE_UNRECOGNIZED_MASK);
-}
-
-
-/** Retrieve the status of a packet and any errors that may have occurred
- * during ingress processing (length mismatches, CRC errors, etc.).
- * @ingroup ingress
- *
- * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
- * returns zero are always reported as underlength, as there is no a priori
- * means to determine their length. Normally, applications should use
- * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this
- * function.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The packet's status.
- */
-static __inline netio_pkt_status_t
-NETIO_PKT_STATUS_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
-}
-
-
-/** Report whether a packet is bad (i.e., was shorter than expected based on
- * its headers, or had a bad CRC).
- * @ingroup ingress
- *
- * Note that this function does not verify L3 or L4 checksums.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the packet is bad and should be discarded.
- */
-static __inline unsigned int
-NETIO_PKT_BAD_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return ((NETIO_PKT_STATUS_M(mda, pkt) & 1) &&
- (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt) ||
- NETIO_PKT_STATUS_M(mda, pkt) == NETIO_PKT_STATUS_BAD));
-}
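/*
 * Illustrative sketch: a typical ingress acceptance test, dropping bad
 * packets as well as TCP/UDP packets whose L4 checksum was computed but
 * found to be incorrect.
 */
static __inline unsigned int
example_accept(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  if (NETIO_PKT_BAD_M(mda, pkt))
    return 0;
  if (NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt) &&
      !NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt))
    return 0;
  return 1;
}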
-
-
-/** Return the length of the packet, starting with the L2 (Ethernet) header.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
-{
- return mmd->l2_length;
-}
-
-
-/** Return the length of the L2 (Ethernet) header.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet's L2 header, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
- netio_pkt_t* pkt)
-{
- return mmd->l3_offset - mmd->l2_offset;
-}
-
-
-/** Return the length of the packet, starting with the L3 (IP) header.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @return Length of the packet's L3 header and data, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L3_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
-{
- return (NETIO_PKT_L2_LENGTH_MM(mmd, pkt) -
- NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt));
-}
-
-
-/** Return a pointer to the packet's L3 (generally, the IP) header.
- * @ingroup egress
- *
- * Note that we guarantee word alignment of the L3 header.
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the packet's L3 header.
- */
-static __inline unsigned char*
-NETIO_PKT_L3_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
-{
- return _NETIO_PKT_BASE(pkt) + mmd->l3_offset;
-}
-
-
-/** Return a pointer to the packet's L2 (Ethernet) header.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the start of the packet.
- */
-static __inline unsigned char*
-NETIO_PKT_L2_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
-{
- return _NETIO_PKT_BASE(pkt) + mmd->l2_offset;
-}
-
-
-/** Retrieve the status of a packet and any errors that may have occurred
- * during ingress processing (length mismatches, CRC errors, etc.).
- * @ingroup ingress
- *
- * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
- * returns zero are always reported as underlength, as there is no a priori
- * means to determine their length. Normally, applications should use
- * @ref NETIO_PKT_BAD() instead of explicitly checking status with this
- * function.
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's status.
- */
-static __inline netio_pkt_status_t
-NETIO_PKT_STATUS(netio_pkt_t* pkt)
-{
- netio_assert(!pkt->__packet.bits.__minimal);
-
- return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
-}
-
-
-/** Report whether a packet is bad (i.e., was shorter than expected based on
- * its headers, or had a bad CRC).
- * @ingroup ingress
- *
- * Note that this function does not verify L3 or L4 checksums.
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the packet is bad and should be discarded.
- */
-static __inline unsigned int
-NETIO_PKT_BAD(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_BAD_M(mda, pkt);
-}
-
-
-/** Return the length of the packet's custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet's custom header, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_CUSTOM_HEADER_LENGTH(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
-}
-
-
-/** Return the length of the packet, starting with the custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_CUSTOM_LENGTH(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt);
-}
-
-
-/** Return a pointer to the packet's custom header.
- * A custom header may or may not be present, depending upon the IPP; its
- * contents and alignment are also IPP-dependent. Currently, none of the
- * standard IPPs supplied by Tilera produce a custom header. If present,
- * the custom header precedes the L2 header in the packet buffer.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the start of the packet.
- */
-static __inline unsigned char*
-NETIO_PKT_CUSTOM_DATA(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_CUSTOM_DATA_M(mda, pkt);
-}
-
-
-/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet's L2 header, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L2_HEADER_LENGTH(netio_pkt_t* pkt)
-{
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- return NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt);
- }
-}
-
-
-/** Return the length of the packet, starting with the L2 (Ethernet) header.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return The length of the packet, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L2_LENGTH(netio_pkt_t* pkt)
-{
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- return NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L2_LENGTH_M(mda, pkt);
- }
-}
-
-
-/** Return a pointer to the packet's L2 (Ethernet) header.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the start of the packet.
- */
-static __inline unsigned char*
-NETIO_PKT_L2_DATA(netio_pkt_t* pkt)
-{
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- return NETIO_PKT_L2_DATA_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L2_DATA_M(mda, pkt);
- }
-}
-
-
-/** Retrieve the length of the packet, starting with the L3 (generally, the IP)
- * header.
- * @ingroup pktfuncs
- *
- * @param[in] pkt Packet on which to operate.
- * @return Length of the packet's L3 header and data, in bytes.
- */
-static __inline netio_size_t
-NETIO_PKT_L3_LENGTH(netio_pkt_t* pkt)
-{
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- return NETIO_PKT_L3_LENGTH_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L3_LENGTH_M(mda, pkt);
- }
-}
-
-
-/** Return a pointer to the packet's L3 (generally, the IP) header.
- * @ingroup pktfuncs
- *
- * Note that we guarantee word alignment of the L3 header.
- *
- * @param[in] pkt Packet on which to operate.
- * @return A pointer to the packet's L3 header.
- */
-static __inline unsigned char*
-NETIO_PKT_L3_DATA(netio_pkt_t* pkt)
-{
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- return NETIO_PKT_L3_DATA_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L3_DATA_M(mda, pkt);
- }
-}
-
-
-/** Return the ordinal of the packet.
- * @ingroup ingress
- *
- * Each packet is given an ordinal number when it is delivered by the IPP.
- * In the medium term, the ordinal is unique and monotonically increasing,
- * being incremented by 1 for each packet; the ordinal of the first packet
- * delivered after the IPP starts is zero. (Since the ordinal is of finite
- * size, given enough input packets, it will eventually wrap around to zero;
- * in the long term, therefore, ordinals are not unique.) The ordinals
- * handed out by different IPPs are not disjoint, so two packets from
- * different IPPs may have identical ordinals. Packets dropped by the
- * IPP or by the I/O shim are not assigned ordinals.
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's per-IPP packet ordinal.
- */
-static __inline unsigned int
-NETIO_PKT_ORDINAL(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_ORDINAL_M(mda, pkt);
-}
-
-
-/** Return the per-group ordinal of the packet.
- * @ingroup ingress
- *
- * Each packet is given a per-group ordinal number when it is
- * delivered by the IPP. By default, the group is the packet's VLAN,
- * although the IPP can be recompiled to use different values. In
- * the medium term, the ordinal is unique and monotonically
- * increasing, being incremented by 1 for each packet; the ordinal of
- * the first packet distributed to a particular group is zero.
- * (Since the ordinal is of finite size, given enough input packets,
- * it will eventually wrap around to zero; in the long term,
- * therefore, ordinals are not unique.) The ordinals handed out by
- * different IPPs are not disjoint, so two packets from different IPPs
- * may have identical ordinals; similarly, packets distributed to
- * different groups may have identical ordinals. Packets dropped by
- * the IPP or by the I/O shim are not assigned ordinals.
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's per-IPP, per-group ordinal.
- */
-static __inline unsigned int
-NETIO_PKT_GROUP_ORDINAL(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_GROUP_ORDINAL_M(mda, pkt);
-}
-
-
-/** Return the VLAN ID assigned to the packet.
- * @ingroup ingress
- *
- * This is usually also contained within the packet header. If the packet
- * does not have a VLAN tag, the VLAN ID returned by this function is zero.
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's VLAN ID.
- */
-static __inline unsigned short
-NETIO_PKT_VLAN_ID(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_VLAN_ID_M(mda, pkt);
-}
-
-
-/** Return the ethertype of the packet.
- * @ingroup ingress
- *
- * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
- * returns true; otherwise, it may not be well defined.
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's ethertype.
- */
-static __inline unsigned short
-NETIO_PKT_ETHERTYPE(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_ETHERTYPE_M(mda, pkt);
-}
-
-
-/** Return the flow hash computed on the packet.
- * @ingroup ingress
- *
- * For TCP and UDP packets, this hash is calculated by hashing together
- * the "5-tuple" values, specifically the source IP address, destination
- * IP address, protocol type, source port and destination port.
- * The hash value is intended to be helpful for millions of distinct
- * flows.
- *
- * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
- * derived by hashing together the source and destination IP addresses.
- *
- * For MPLS-encapsulated packets, the flow hash is derived by hashing
- * the first MPLS label.
- *
- * For all other packets the flow hash is computed from the source
- * and destination Ethernet addresses.
- *
- * The hash is symmetric, meaning it produces the same value if the
- * source and destination are swapped. The only exceptions are
- * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
- * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
- * (Encap Security Payload), which use only the destination address
- * since the source address is not meaningful.
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's 32-bit flow hash.
- */
-static __inline unsigned int
-NETIO_PKT_FLOW_HASH(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_FLOW_HASH_M(mda, pkt);
-}
-
-
-/** Return the first word of "user data" for the packet.
- *
- * The contents of the user data words depend on the IPP.
- *
- * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
- * word of user data contains the least significant bits of the 64-bit
- * arrival cycle count (see @c get_cycle_count_low()).
- *
- * See the <em>System Programmer's Guide</em> for details.
- *
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's first word of "user data".
- */
-static __inline unsigned int
-NETIO_PKT_USER_DATA_0(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_USER_DATA_0_M(mda, pkt);
-}
-
-
-/** Return the second word of "user data" for the packet.
- *
- * The contents of the user data words depend on the IPP.
- *
- * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
- * word of user data contains the most significant bits of the 64-bit
- * arrival cycle count (see @c get_cycle_count_high()).
- *
- * See the <em>System Programmer's Guide</em> for details.
- *
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return The packet's second word of "user data".
- */
-static __inline unsigned int
-NETIO_PKT_USER_DATA_1(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_USER_DATA_1_M(mda, pkt);
-}
-
-
-/** Determine whether the L4 (TCP/UDP) checksum was calculated.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the L4 checksum was calculated.
- */
-static __inline unsigned int
-NETIO_PKT_L4_CSUM_CALCULATED(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt);
-}
-
-
-/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
- * be correct.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the checksum was calculated and is correct.
- */
-static __inline unsigned int
-NETIO_PKT_L4_CSUM_CORRECT(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt);
-}
-
-
-/** Determine whether the L3 (IP) checksum was calculated.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the L3 (IP) checksum was calculated.
-*/
-static __inline unsigned int
-NETIO_PKT_L3_CSUM_CALCULATED(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L3_CSUM_CALCULATED_M(mda, pkt);
-}
-
-
-/** Determine whether the L3 (IP) checksum was calculated and found to be
- * correct.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the checksum was calculated and is correct.
- */
-static __inline unsigned int
-NETIO_PKT_L3_CSUM_CORRECT(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_L3_CSUM_CORRECT_M(mda, pkt);
-}
-
-
-/** Determine whether the Ethertype was recognized and L3 packet data was
- * processed.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- * @return Nonzero if the Ethertype was recognized and L3 packet data was
- * processed.
- */
-static __inline unsigned int
-NETIO_PKT_ETHERTYPE_RECOGNIZED(netio_pkt_t* pkt)
-{
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt);
-}
-
-
-/** Set an egress packet's L2 length, using a metadata pointer to speed the
- * computation.
- * @ingroup egress
- *
- * @param[in,out] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @param[in] len Packet L2 length, in bytes.
- */
-static __inline void
-NETIO_PKT_SET_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt,
- int len)
-{
- mmd->l2_length = len;
-}
-
-
-/** Set an egress packet's L2 length.
- * @ingroup egress
- *
- * @param[in,out] pkt Packet on which to operate.
- * @param[in] len Packet L2 length, in bytes.
- */
-static __inline void
-NETIO_PKT_SET_L2_LENGTH(netio_pkt_t* pkt, int len)
-{
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, len);
-}
-
-
-/** Set an egress packet's L2 header length, using a metadata pointer to
- * speed the computation.
- * @ingroup egress
- *
- * It is not normally necessary to call this routine; only the L2 length,
- * not the header length, is needed to transmit a packet. It may be useful if
- * the egress packet will later be processed by code which expects to use
- * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
- *
- * @param[in,out] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @param[in] len Packet L2 header length, in bytes.
- */
-static __inline void
-NETIO_PKT_SET_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
- netio_pkt_t* pkt, int len)
-{
- mmd->l3_offset = mmd->l2_offset + len;
-}
-
-
-/** Set an egress packet's L2 header length.
- * @ingroup egress
- *
- * It is not normally necessary to call this routine; only the L2 length,
- * not the header length, is needed to transmit a packet. It may be useful if
- * the egress packet will later be processed by code which expects to use
- * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
- *
- * @param[in,out] pkt Packet on which to operate.
- * @param[in] len Packet L2 header length, in bytes.
- */
-static __inline void
-NETIO_PKT_SET_L2_HEADER_LENGTH(netio_pkt_t* pkt, int len)
-{
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, len);
-}
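/*
 * Illustrative sketch: finishing an egress packet once its headers and
 * payload have been written, recording the total L2 length and, for the
 * benefit of later NETIO_PKT_L3_DATA() callers, the L2 header length.
 */
static __inline void
example_finish_egress(netio_pkt_t* pkt, int l2_header_len, int l2_len)
{
  netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);

  NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, l2_len);
  NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, l2_header_len);
}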
-
-
-/** Set up an egress packet for hardware checksum computation, using a
- * metadata pointer to speed the operation.
- * @ingroup egress
- *
- * NetIO provides the ability to automatically calculate a standard
- * 16-bit Internet checksum on transmitted packets. The application
- * may specify the point in the packet where the checksum starts, the
- * number of bytes to be checksummed, and the two bytes in the packet
- * which will be replaced with the completed checksum. (If the range
- * of bytes to be checksummed includes the bytes to be replaced, the
- * initial values of those bytes will be included in the checksum.)
- *
- * For some protocols, the packet checksum covers data which is not present
- * in the packet, or is at least not contiguous to the main data payload.
- * For instance, the TCP checksum includes a "pseudo-header" which includes
- * the source and destination IP addresses of the packet. To accommodate
- * this, the checksum engine may be "seeded" with an initial value, which
- * the application would need to compute based on the specific protocol's
- * requirements. Note that the seed is given in host byte order (little-
- * endian), not network byte order (big-endian); code written to compute a
- * pseudo-header checksum in network byte order will need to byte-swap it
- * before use as the seed.
- *
- * Note that the checksum is computed as part of the transmission process,
- * so it will not be present in the packet upon completion of this routine.
- *
- * @param[in,out] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- * @param[in] start Offset within L2 packet of the first byte to include in
- * the checksum.
- * @param[in] length Number of bytes to include in the checksum.
- * @param[in] location Offset within L2 packet of the first of the two bytes
- * to be replaced with the calculated checksum.
- * @param[in] seed Initial value of the running checksum before any of the
- * packet data is added.
- */
-static __inline void
-NETIO_PKT_DO_EGRESS_CSUM_MM(netio_pkt_minimal_metadata_t* mmd,
- netio_pkt_t* pkt, int start, int length,
- int location, uint16_t seed)
-{
- mmd->csum_start = start;
- mmd->csum_length = length;
- mmd->csum_location = location;
- mmd->csum_seed = seed;
- mmd->flags |= _NETIO_PKT_NEED_EDMA_CSUM_MASK;
-}
-
-
-/** Set up an egress packet for hardware checksum computation.
- * @ingroup egress
- *
- * NetIO provides the ability to automatically calculate a standard
- * 16-bit Internet checksum on transmitted packets. The application
- * may specify the point in the packet where the checksum starts, the
- * number of bytes to be checksummed, and the two bytes in the packet
- * which will be replaced with the completed checksum. (If the range
- * of bytes to be checksummed includes the bytes to be replaced, the
- * initial values of those bytes will be included in the checksum.)
- *
- * For some protocols, the packet checksum covers data which is not present
- * in the packet, or is at least not contiguous to the main data payload.
- * For instance, the TCP checksum includes a "pseudo-header" which includes
- * the source and destination IP addresses of the packet. To accommodate
- * this, the checksum engine may be "seeded" with an initial value, which
- * the application would need to compute based on the specific protocol's
- * requirements. Note that the seed is given in host byte order (little-
- * endian), not network byte order (big-endian); code written to compute a
- * pseudo-header checksum in network byte order will need to byte-swap it
- * before use as the seed.
- *
- * Note that the checksum is computed as part of the transmission process,
- * so it will not be present in the packet upon completion of this routine.
- *
- * @param[in,out] pkt Packet on which to operate.
- * @param[in] start Offset within L2 packet of the first byte to include in
- * the checksum.
- * @param[in] length Number of bytes to include in the checksum.
- * @param[in] location Offset within L2 packet of the first of the two bytes
- * to be replaced with the calculated checksum.
- * @param[in] seed Initial value of the running checksum before any of the
- * packet data is added.
- */
-static __inline void
-NETIO_PKT_DO_EGRESS_CSUM(netio_pkt_t* pkt, int start, int length,
- int location, uint16_t seed)
-{
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- NETIO_PKT_DO_EGRESS_CSUM_MM(mmd, pkt, start, length, location, seed);
-}
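/*
 * Illustrative sketch: requesting a hardware UDP checksum on an egress
 * IPv4 packet with a plain 14-byte Ethernet header and a 20-byte IP
 * header.  The caller supplies the UDP header-plus-payload length and the
 * pseudo-header sum, computed per the note above and byte-swapped into
 * host order.
 */
static __inline void
example_udp_csum(netio_pkt_t* pkt, int udp_len, uint16_t pseudo_hdr_seed)
{
  int udp_off = 14 + 20;  /* Ethernet header + minimal IPv4 header */

  NETIO_PKT_DO_EGRESS_CSUM(pkt, udp_off, udp_len,
                           udp_off + 6,  /* UDP checksum field offset */
                           pseudo_hdr_seed);
}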
-
-
-/** Return the number of bytes which could be prepended to a packet, using a
- * metadata pointer to speed the operation.
- * See @ref netio_populate_prepend_buffer() to get a full description of
- * prepending.
- *
- * @param[in] mda Pointer to packet's standard metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline int
-NETIO_PKT_PREPEND_AVAIL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
- return (pkt->__packet.bits.__offset << 6) +
- NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
-}
-
-
-/** Return the number of bytes which could be prepended to a packet, using a
- * metadata pointer to speed the operation.
- * See @ref netio_populate_prepend_buffer() to get a full description of
- * prepending.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline int
-NETIO_PKT_PREPEND_AVAIL_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
-{
- return (pkt->__packet.bits.__offset << 6) + mmd->l2_offset;
-}
-
-
-/** Return the number of bytes which could be prepended to a packet.
- * See @ref netio_populate_prepend_buffer() to get a full description of
- * prepending.
- * @ingroup egress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline int
-NETIO_PKT_PREPEND_AVAIL(netio_pkt_t* pkt)
-{
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
-
- return NETIO_PKT_PREPEND_AVAIL_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
-
- return NETIO_PKT_PREPEND_AVAIL_M(mda, pkt);
- }
-}
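/*
 * Illustrative sketch: verifying that a buffer has room to prepend an
 * encapsulation header of the given size before committing to it.
 */
static __inline int
example_can_prepend(netio_pkt_t* pkt, int header_bytes)
{
  return NETIO_PKT_PREPEND_AVAIL(pkt) >= header_bytes;
}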
-
-
-/** Flush a packet's minimal metadata from the cache, using a metadata pointer
- * to speed the operation.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
- netio_pkt_t* pkt)
-{
-}
-
-
-/** Invalidate a packet's minimal metadata from the cache, using a metadata
- * pointer to speed the operation.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
- netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush and then invalidate a packet's minimal metadata from the cache,
- * using a metadata pointer to speed the operation.
- * @ingroup egress
- *
- * @param[in] mmd Pointer to packet's minimal metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
- netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush a packet's metadata from the cache, using a metadata pointer
- * to speed the operation.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
-}
-
-
-/** Invalidate a packet's metadata from the cache, using a metadata
- * pointer to speed the operation.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush and then invalidate a packet's metadata from the cache,
- * using a metadata pointer to speed the operation.
- * @ingroup ingress
- *
- * @param[in] mda Pointer to packet's metadata.
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush a packet's minimal metadata from the cache.
- * @ingroup egress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_MINIMAL_METADATA(netio_pkt_t* pkt)
-{
-}
-
-
-/** Invalidate a packet's minimal metadata from the cache.
- * @ingroup egress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush and then invalidate a packet's minimal metadata from the cache.
- * @ingroup egress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush a packet's metadata from the cache.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_METADATA(netio_pkt_t* pkt)
-{
-}
-
-
-/** Invalidate a packet's metadata from the cache.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_INV_METADATA(netio_pkt_t* pkt)
-{
-}
-
-
-/** Flush and then invalidate a packet's metadata from the cache.
- * @ingroup ingress
- *
- * @param[in] pkt Packet on which to operate.
- */
-static __inline void
-NETIO_PKT_FLUSH_INV_METADATA(netio_pkt_t* pkt)
-{
-}
-
-/** Number of NUMA nodes we can distribute buffers to.
- * @ingroup setup */
-#define NETIO_NUM_NODE_WEIGHTS 16
-
-/**
- * @brief An object for specifying the characteristics of a NetIO
- * communication endpoint.
- *
- * @ingroup setup
- *
- * The @ref netio_input_register() function uses this structure to define
- * how an application tile will communicate with an IPP.
- *
- * Future updates to NetIO may add new members to this structure,
- * which can affect the success of the registration operation. Thus,
- * if dynamically initializing the structure, applications are urged to
- * zero it out first, for example:
- *
- * @code
- * netio_input_config_t config;
- * memset(&config, 0, sizeof (config));
- * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE;
- * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS;
- * config.queue_id = 0;
- * .
- * .
- * .
- * @endcode
- *
- * since that guarantees that any unused structure members, including
- * members which did not exist when the application was first developed,
- * will not have unexpected values.
- *
- * If statically initializing the structure, we strongly recommend use of
- * C99-style named initializers, for example:
- *
- * @code
- * netio_input_config_t config = {
- * .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
- * .num_receive_packets = NETIO_MAX_RECEIVE_PKTS,
- * .queue_id = 0,
- * };
- * @endcode
- *
- * instead of the old-style structure initialization:
- *
- * @code
- * // Bad example! Currently equivalent to the above, but don't do this.
- * netio_input_config_t config = {
- * NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, NETIO_MAX_RECEIVE_PKTS, 0
- * },
- * @endcode
- *
- * since the C99 style requires no changes to the code if elements of the
- * config structure are rearranged. (It also makes the initialization much
- * easier to understand.)
- *
- * Except for items which address a particular tile's transmit or receive
- * characteristics, such as the ::NETIO_RECV flag, applications are advised
- * to specify the same set of configuration data on all registrations.
- * This prevents differing results if multiple tiles happen to do their
- * registration operations in a different order on different invocations of
- * the application. This is particularly important for things like link
- * management flags, and buffer size and homing specifications.
- *
- * Unless the ::NETIO_FIXED_BUFFER_VA flag is specified in flags, the NetIO
- * buffer pool is automatically created and mapped into the application's
- * virtual address space at an address chosen by the operating system,
- * using the common memory (cmem) facility in the Tilera Multicore
- * Components library. The cmem facility allows multiple processes to gain
- * access to shared memory which is mapped into each process at an
- * identical virtual address. In order for this to work, the processes
- * must have a common ancestor, which must create the common memory using
- * tmc_cmem_init().
- *
- * In programs using the iLib process creation API, or in programs which use
- * only one process (which include programs using the pthreads library),
- * tmc_cmem_init() is called automatically. All other applications
- * must call it explicitly, before any child processes which might call
- * netio_input_register() are created.
- */
-typedef struct
-{
- /** Registration characteristics.
-
- This value determines several characteristics of the registration;
- flags for different types of behavior are ORed together to make the
- final flag value. Generally applications should specify exactly
- one flag from each of the following categories:
-
- - Whether the application will be receiving packets on this queue
- (::NETIO_RECV or ::NETIO_NO_RECV).
-
- - Whether the application will be transmitting packets on this queue,
- and if so, whether it will request egress checksum calculation
- (::NETIO_XMIT, ::NETIO_XMIT_CSUM, or ::NETIO_NO_XMIT). It is
- legal to call netio_get_buffer() without one of the XMIT flags,
- as long as ::NETIO_RECV is specified; in this case, the retrieved
- buffers must be passed to another tile for transmission.
-
- - Whether the application expects any vendor-specific tags in
- its packets' L2 headers (::NETIO_TAG_NONE, ::NETIO_TAG_BRCM,
- or ::NETIO_TAG_MRVL). This must match the configuration of the
- target IPP.
-
- To accommodate applications written to previous versions of the NetIO
- interface, none of the flags above are currently required; if omitted,
- NetIO behaves more or less as if ::NETIO_RECV | ::NETIO_XMIT_CSUM |
- ::NETIO_TAG_NONE were used. However, explicit specification of
- the relevant flags allows NetIO to do a better job of resource
- allocation, allows earlier detection of certain configuration errors,
- and may enable advanced features or higher performance in the future,
- so their use is strongly recommended.
-
- Note that specifying ::NETIO_NO_RECV along with ::NETIO_NO_XMIT
- is a special case, intended primarily for use by programs which
- retrieve network statistics or do link management operations.
- When these flags are both specified, the resulting queue may not
- be used with NetIO routines other than netio_get(), netio_set(),
- and netio_input_unregister(). See @ref link for more information
- on link management.
-
- Other flags are optional; their use is described below.
- */
- int flags;
-
- /** Interface name. This is a string which identifies the specific
- Ethernet controller hardware to be used. The format of the string
- is a device type and a device index, separated by a slash; so,
- the first 10 Gigabit Ethernet controller is named "xgbe/0", while
- the second 10/100/1000 Megabit Ethernet controller is named "gbe/1".
- */
- const char* interface;
-
- /** Receive packet queue size. This specifies the maximum number
- of ingress packets that can be received on this queue without
- being retrieved by @ref netio_get_packet(). If the IPP's distribution
- algorithm calls for a packet to be sent to this queue, and this
- number of packets are already pending there, the new packet
- will either be discarded, or sent to another tile registered
- for the same queue_id (see @ref drops). This value must
-      be at least ::NETIO_MIN_RECEIVE_PKTS, may always be as large as
- ::NETIO_MAX_RECEIVE_PKTS, and may be larger than that on certain
- interfaces.
- */
- int num_receive_packets;
-
- /** The queue ID being requested. Legal values for this range from 0
- to ::NETIO_MAX_QUEUE_ID, inclusive. ::NETIO_MAX_QUEUE_ID is always
- greater than or equal to the number of tiles; this allows one queue
- for each tile, plus at least one additional queue. Some applications
- may wish to use the additional queue as a destination for unwanted
- packets, since packets delivered to queues for which no tiles have
- registered are discarded.
- */
- unsigned int queue_id;
-
- /** Maximum number of small send buffers to be held in the local empty
- buffer cache. This specifies the size of the area which holds
- empty small egress buffers requested from the IPP but not yet
- retrieved via @ref netio_get_buffer(). This value must be greater
- than zero if the application will ever use @ref netio_get_buffer()
- to allocate empty small egress buffers; it may be no larger than
- ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty
- buffer caching.
- */
- int num_send_buffers_small_total;
-
- /** Number of small send buffers to be preallocated at registration.
- If this value is nonzero, the specified number of empty small egress
- buffers will be requested from the IPP during the netio_input_register
- operation; this may speed the execution of @ref netio_get_buffer().
- This may be no larger than @ref num_send_buffers_small_total. See @ref
- epp for more details on empty buffer caching.
- */
- int num_send_buffers_small_prealloc;
-
- /** Maximum number of large send buffers to be held in the local empty
- buffer cache. This specifies the size of the area which holds empty
- large egress buffers requested from the IPP but not yet retrieved via
- @ref netio_get_buffer(). This value must be greater than zero if the
- application will ever use @ref netio_get_buffer() to allocate empty
- large egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
- See @ref epp for more details on empty buffer caching.
- */
- int num_send_buffers_large_total;
-
- /** Number of large send buffers to be preallocated at registration.
- If this value is nonzero, the specified number of empty large egress
- buffers will be requested from the IPP during the netio_input_register
- operation; this may speed the execution of @ref netio_get_buffer().
- This may be no larger than @ref num_send_buffers_large_total. See @ref
- epp for more details on empty buffer caching.
- */
- int num_send_buffers_large_prealloc;
-
- /** Maximum number of jumbo send buffers to be held in the local empty
- buffer cache. This specifies the size of the area which holds empty
- jumbo egress buffers requested from the IPP but not yet retrieved via
- @ref netio_get_buffer(). This value must be greater than zero if the
- application will ever use @ref netio_get_buffer() to allocate empty
- jumbo egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
- See @ref epp for more details on empty buffer caching.
- */
- int num_send_buffers_jumbo_total;
-
- /** Number of jumbo send buffers to be preallocated at registration.
- If this value is nonzero, the specified number of empty jumbo egress
- buffers will be requested from the IPP during the netio_input_register
- operation; this may speed the execution of @ref netio_get_buffer().
- This may be no larger than @ref num_send_buffers_jumbo_total. See @ref
- epp for more details on empty buffer caching.
- */
- int num_send_buffers_jumbo_prealloc;
-
- /** Total packet buffer size. This determines the total size, in bytes,
- of the NetIO buffer pool. Note that the maximum number of available
- buffers of each size is determined during hypervisor configuration
- (see the <em>System Programmer's Guide</em> for details); this just
- influences how much host memory is allocated for those buffers.
-
- The buffer pool is allocated from common memory, which will be
- automatically initialized if needed. If your buffer pool is larger
- than 240 MB, you might need to explicitly call @c tmc_cmem_init(),
- as described in the Application Libraries Reference Manual (UG227).
-
- Packet buffers are currently allocated in chunks of 16 MB; this
- value will be rounded up to the next larger multiple of 16 MB.
- If this value is zero, a default of 32 MB will be used; this was
- the value used by previous versions of NetIO. Note that taking this
- default also affects the placement of buffers on Linux NUMA nodes.
- See @ref buffer_node_weights for an explanation of buffer placement.
-
- In order to successfully allocate packet buffers, Linux must have
- available huge pages on the relevant Linux NUMA nodes. See the
- <em>System Programmer's Guide</em> for information on configuring
- huge page support in Linux.
- */
- uint64_t total_buffer_size;
-
- /** Buffer placement weighting factors.
-
- This array specifies the relative amount of buffering to place
- on each of the available Linux NUMA nodes. This array is
- indexed by the NUMA node, and the values in the array are
- proportional to the amount of buffer space to allocate on that
- node.
-
- If memory striping is enabled in the Hypervisor, then there is
- only one logical NUMA node (node 0). In that case, NetIO will by
- default ignore the suggested buffer node weights, and buffers
- will be striped across the physical memory controllers. See
- UG209 System Programmer's Guide for a description of the
- hypervisor option that controls memory striping.
-
- If memory striping is disabled, then there are up to four NUMA
- nodes, corresponding to the four DDRAM controllers in the TILE
- processor architecture. See UG100 Tile Processor Architecture
- Overview for a diagram showing the location of each of the DDRAM
- controllers relative to the tile array.
-
- For instance, if memory striping is disabled, the following
-      configuration structure:
-
- @code
- netio_input_config_t config = {
- .
- .
- .
-        .total_buffer_size = 4 * 16 * 1024 * 1024,
- .buffer_node_weights = { 1, 0, 1, 0 },
-      };
- @endcode
-
- would result in 32 MB of buffers being placed on controller 0, and
- 32 MB on controller 2. (Since buffers are allocated in units of
-      16 MB, some sets of weights cannot be matched exactly.)
-
- For the weights to be effective, @ref total_buffer_size must be
- nonzero. If @ref total_buffer_size is zero, causing the default
- 32 MB of buffer space to be used, then any specified weights will
-      be ignored, and buffers will be positioned as they were in previous
- versions of NetIO:
-
- - For xgbe/0 and gbe/0, 16 MB of buffers will be placed on controller 1,
- and the other 16 MB will be placed on controller 2.
-
- - For xgbe/1 and gbe/1, 16 MB of buffers will be placed on controller 2,
- and the other 16 MB will be placed on controller 3.
-
- If @ref total_buffer_size is nonzero, but all weights are zero,
- then all buffer space will be allocated on Linux NUMA node zero.
-
- By default, the specified buffer placement is treated as a hint;
- if sufficient free memory is not available on the specified
- controllers, the buffers will be allocated elsewhere. However,
- if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a
- failure to allocate buffer space exactly as requested will cause the
- registration operation to fail with an error of ::NETIO_CANNOT_HOME.
-
- Note that maximal network performance cannot be achieved with
- only one memory controller.
- */
- uint8_t buffer_node_weights[NETIO_NUM_NODE_WEIGHTS];
-
- /** Fixed virtual address for packet buffers. Only valid when
- ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the
- description of that flag for details.
- */
- void* fixed_buffer_va;
-
- /**
- Maximum number of outstanding send packet requests. This value is
- only relevant when an EPP is in use; it determines the number of
- slots in the EPP's outgoing packet queue which this tile is allowed
- to consume, and thus the number of packets which may be sent before
- the sending tile must wait for an acknowledgment from the EPP.
- Modifying this value is generally only helpful when using @ref
- netio_send_packet_vector(), where it can help improve performance by
- allowing a single vector send operation to process more packets.
- Typically it is not specified, and the default, which divides the
- outgoing packet slots evenly between all tiles on the chip, is used.
-
- If a registration asks for more outgoing packet queue slots than are
- available, ::NETIO_TOOMANY_XMIT will be returned. The total number
- of packet queue slots which are available for all tiles for each EPP
- is subject to change, but is currently ::NETIO_TOTAL_SENDS_OUTSTANDING.
-
- This value is ignored if ::NETIO_XMIT is not specified in flags.
- If you want to specify a large value here for a specific tile, you are
- advised to specify NETIO_NO_XMIT on other, non-transmitting tiles so
- that they do not consume a default number of packet slots. Any tile
- transmitting is required to have at least ::NETIO_MIN_SENDS_OUTSTANDING
- slots allocated to it; values less than that will be silently
- increased by the NetIO library.
- */
- int num_sends_outstanding;
-}
-netio_input_config_t;
-
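-/*
- * An illustrative registration sketch, combining the zero-initialization
- * advice above with a registration call.  The two-argument
- * netio_input_register() shape and the NETIO_NO_ERROR success code are
- * assumed here; neither is declared in this part of the header.
- *
- *   netio_input_config_t config;
- *   netio_queue_t queue;
- *
- *   memset(&config, 0, sizeof (config));
- *   config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE;
- *   config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS;
- *   config.interface = "xgbe/0";
- *
- *   if (netio_input_register(&config, &queue) != NETIO_NO_ERROR)
- *     ... handle the error ...
- */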
-
-/** Registration flags; used in the @ref netio_input_config_t structure.
- * @addtogroup setup
- */
-/** @{ */
-
-/** Fail a registration request if we can't put packet buffers
- on the specified memory controllers. */
-#define NETIO_STRICT_HOMING 0x00000002
-
-/** This application expects no tags on its L2 headers. */
-#define NETIO_TAG_NONE 0x00000004
-
-/** This application expects Marvell extended tags on its L2 headers. */
-#define NETIO_TAG_MRVL 0x00000008
-
-/** This application expects Broadcom tags on its L2 headers. */
-#define NETIO_TAG_BRCM 0x00000010
-
-/** This registration may call routines which receive packets. */
-#define NETIO_RECV 0x00000020
-
-/** This registration may not call routines which receive packets. */
-#define NETIO_NO_RECV 0x00000040
-
-/** This registration may call routines which transmit packets. */
-#define NETIO_XMIT 0x00000080
-
-/** This registration may call routines which transmit packets with
- checksum acceleration. */
-#define NETIO_XMIT_CSUM 0x00000100
-
-/** This registration may not call routines which transmit packets. */
-#define NETIO_NO_XMIT 0x00000200
-
-/** This registration wants NetIO buffers mapped at an application-specified
- virtual address.
-
- NetIO buffers are by default created by the TMC common memory facility,
- which must be configured by a common ancestor of all processes sharing
- a network interface. When this flag is specified, NetIO buffers are
- instead mapped at an address chosen by the application (and specified
- in @ref netio_input_config_t::fixed_buffer_va). This allows multiple
- unrelated but cooperating processes to share a NetIO interface.
- All processes sharing the same interface must specify this flag,
- and all must specify the same fixed virtual address.
-
- @ref netio_input_config_t::fixed_buffer_va must be a
- multiple of 16 MB, and the packet buffers will occupy @ref
- netio_input_config_t::total_buffer_size bytes of virtual address
- space, beginning at that address. If any of those virtual addresses
- are currently occupied by other memory objects, like application or
- shared library code or data, @ref netio_input_register() will return
- ::NETIO_FAULT. While it is impossible to provide a fixed_buffer_va
- which will work for all applications, a good first guess might be to
- use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size.
- If that fails, it might be helpful to consult the running application's
- virtual address description file (/proc/<em>pid</em>/maps) to see
- which regions of virtual address space are available.
- */
-#define NETIO_FIXED_BUFFER_VA 0x00000400
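-
-/*
- * An illustrative sketch of the first-guess placement suggested above;
- * every process sharing the interface would specify the same flag and the
- * same address:
- *
- *   config.flags |= NETIO_FIXED_BUFFER_VA;
- *   config.fixed_buffer_va =
- *     (void*) (0xb0000000 - config.total_buffer_size);
- */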
-
-/** This registration call will not complete unless the network link
- is up. The process will wait several seconds for this to happen (the
- precise interval is link-dependent), but if the link does not come up,
- ::NETIO_LINK_DOWN will be returned. This flag is the default if
- ::NETIO_NOREQUIRE_LINK_UP is not specified. Note that this flag by
- itself does not request that the link be brought up; that can be done
- with the ::NETIO_AUTO_LINK_UPDN or ::NETIO_AUTO_LINK_UP flags (the
- latter is the default if no NETIO_AUTO_LINK_xxx flags are specified),
- or by explicitly setting the link's desired state via netio_set().
- If the link is not brought up by one of those methods, and this flag
- is specified, the registration operation will return ::NETIO_LINK_DOWN.
- This flag is ignored if it is specified along with ::NETIO_NO_XMIT and
- ::NETIO_NO_RECV. See @ref link for more information on link
- management.
- */
-#define NETIO_REQUIRE_LINK_UP 0x00000800
-
-/** This registration call will complete even if the network link is not up.
- Whenever the link is not up, packets will not be sent or received:
- netio_get_packet() will return ::NETIO_NOPKT once all queued packets
- have been drained, and netio_send_packet() and similar routines will
- return NETIO_QUEUE_FULL once the outgoing packet queue in the EPP
- or the I/O shim is full. See @ref link for more information on link
- management.
- */
-#define NETIO_NOREQUIRE_LINK_UP 0x00001000
-
-#ifndef __DOXYGEN__
-/*
- * These are part of the implementation of the NETIO_AUTO_LINK_xxx flags,
- * but should not be used directly by applications, and are thus not
- * documented.
- */
-#define _NETIO_AUTO_UP 0x00002000
-#define _NETIO_AUTO_DN 0x00004000
-#define _NETIO_AUTO_PRESENT 0x00008000
-#endif
-
-/** Set the desired state of the link to up, allowing any speeds which are
- supported by the link hardware, as part of this registration operation.
- Do not take down the link automatically. This is the default if
- no other NETIO_AUTO_LINK_xxx flags are specified. This flag is ignored
- if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
- See @ref link for more information on link management.
- */
-#define NETIO_AUTO_LINK_UP (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP)
-
-/** Set the desired state of the link to up, allowing any speeds which are
- supported by the link hardware, as part of this registration operation.
- Set the desired state of the link to down the next time no tiles are
- registered for packet reception or transmission. This flag is ignored
- if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
- See @ref link for more information on link management.
- */
-#define NETIO_AUTO_LINK_UPDN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP | \
- _NETIO_AUTO_DN)
-
-/** Set the desired state of the link to down the next time no tiles are
- registered for packet reception or transmission. This flag is ignored
- if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
- See @ref link for more information on link management.
- */
-#define NETIO_AUTO_LINK_DN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_DN)
-
-/** Do not bring up the link automatically as part of this registration
- operation. Do not take down the link automatically. This flag
- is ignored if it is specified along with ::NETIO_NO_XMIT and
- ::NETIO_NO_RECV. See @ref link for more information on link management.
- */
-#define NETIO_AUTO_LINK_NONE _NETIO_AUTO_PRESENT
-
-
-/** Minimum number of receive packets. */
-#define NETIO_MIN_RECEIVE_PKTS 16
-
-/** Lower bound on the maximum number of receive packets; may be higher
- than this on some interfaces. */
-#define NETIO_MAX_RECEIVE_PKTS 128
-
-/** Maximum number of send buffers, per packet size. */
-#define NETIO_MAX_SEND_BUFFERS 16
-
-/** Number of EPP queue slots, and thus outstanding sends, per EPP. */
-#define NETIO_TOTAL_SENDS_OUTSTANDING 2015
-
-/** Minimum number of EPP queue slots, and thus outstanding sends, per
- * transmitting tile. */
-#define NETIO_MIN_SENDS_OUTSTANDING 16
-
-
-/**@}*/
-
-#ifndef __DOXYGEN__
-
-/**
- * An object for providing Ethernet packets to a process.
- */
-struct __netio_queue_impl_t;
-
-/**
- * An object for managing the user end of a NetIO queue.
- */
-struct __netio_queue_user_impl_t;
-
-#endif /* !__DOXYGEN__ */
-
-
-/** A netio_queue_t describes a NetIO communications endpoint.
- * @ingroup setup
- */
-typedef struct
-{
-#ifdef __DOXYGEN__
- uint8_t opaque[8]; /**< This is an opaque structure. */
-#else
- struct __netio_queue_impl_t* __system_part; /**< The system part. */
- struct __netio_queue_user_impl_t* __user_part; /**< The user part. */
-#ifdef _NETIO_PTHREAD
- _netio_percpu_mutex_t lock; /**< Queue lock. */
-#endif
-#endif
-}
-netio_queue_t;
-
-
-/**
- * @brief Packet send context.
- *
- * @ingroup egress
- *
- * Packet send context for use with netio_send_packet_prepare and _commit.
- */
-typedef struct
-{
-#ifdef __DOXYGEN__
- uint8_t opaque[44]; /**< This is an opaque structure. */
-#else
- uint8_t flags; /**< Defined below */
- uint8_t datalen; /**< Number of valid words pointed to by data. */
- uint32_t request[9]; /**< Request to be sent to the EPP or shim. Note
- that this is smaller than the 11-word maximum
- request size, since some constant values are
- not saved in the context. */
- uint32_t *data; /**< Data to be sent to the EPP or shim via IDN. */
-#endif
-}
-netio_send_pkt_context_t;
-
-
-#ifndef __DOXYGEN__
-#define SEND_PKT_CTX_USE_EPP 1 /**< We're sending to an EPP. */
-#define SEND_PKT_CTX_SEND_CSUM 2 /**< Request includes a checksum. */
-#endif
-
-/**
- * @brief Packet vector entry.
- *
- * @ingroup egress
- *
- * This data structure is used with netio_send_packet_vector() to send multiple
- * packets with one NetIO call. The structure should be initialized by
- * calling netio_pkt_vector_set(), rather than by setting the fields
- * directly.
- *
- * This structure is guaranteed to be a power of two in size, no
- * bigger than one L2 cache line, and to be aligned modulo its size.
- */
-typedef struct
-#ifndef __DOXYGEN__
-__attribute__((aligned(8)))
-#endif
-{
- /** Reserved for use by the user application. When initialized with
- * the netio_pkt_vector_set() function, this field is guaranteed
- * to be visible to readers only after all other fields are already
- * visible. This way it can be used as a valid flag or generation
- * counter. */
- uint8_t user_data;
-
- /* Structure members below this point should not be accessed directly by
- * applications, as they may change in the future. */
-
- /** Low 8 bits of the packet address to send. The high bits are
- * acquired from the 'handle' field. */
- uint8_t buffer_address_low;
-
- /** Number of bytes to transmit. */
- uint16_t size;
-
- /** The raw handle from a netio_pkt_t. If this is NETIO_PKT_HANDLE_NONE,
- * this vector entry will be skipped and no packet will be transmitted. */
- netio_pkt_handle_t handle;
-}
-netio_pkt_vector_entry_t;
-
-
-/**
- * @brief Initialize fields in a packet vector entry.
- *
- * @ingroup egress
- *
- * @param[out] v Pointer to the vector entry to be initialized.
- * @param[in] pkt Packet to be transmitted when the vector entry is passed to
- * netio_send_packet_vector(). Note that the packet's attributes
- * (e.g., its L2 offset and length) are captured at the time this
- * routine is called; subsequent changes in those attributes will not
- * be reflected in the packet which is actually transmitted.
- * Changes in the packet's contents, however, will be so reflected.
- * If this is NULL, no packet will be transmitted.
- * @param[in] user_data User data to be set in the vector entry.
- * This function guarantees that the "user_data" field will become
- * visible to a reader only after all other fields have become visible.
- * This allows a structure in a ring buffer to be written and read
- * by a polling reader without any locks or other synchronization.
- */
-static __inline void
-netio_pkt_vector_set(volatile netio_pkt_vector_entry_t* v, netio_pkt_t* pkt,
- uint8_t user_data)
-{
- if (pkt)
- {
- if (NETIO_PKT_IS_MINIMAL(pkt))
- {
- netio_pkt_minimal_metadata_t* mmd =
- (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
- v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_MM(mmd, pkt) & 0xFF;
- v->size = NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
- }
- else
- {
- netio_pkt_metadata_t* mda = &pkt->__metadata;
- v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_M(mda, pkt) & 0xFF;
- v->size = NETIO_PKT_L2_LENGTH_M(mda, pkt);
- }
- v->handle.word = pkt->__packet.word;
- }
- else
- {
- v->handle.word = 0; /* Set handle to NETIO_PKT_HANDLE_NONE. */
- }
-
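-  /* Compiler barrier: force the stores above to be emitted before the
-     store to user_data below. */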
- __asm__("" : : : "memory");
-
- v->user_data = user_data;
-}
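-
-/*
- * An illustrative sketch of filling a small vector for a later
- * netio_send_packet_vector() call; 'pkts' is assumed to be an array of
- * previously obtained netio_pkt_t pointers, and user_data serves as the
- * valid flag described above:
- *
- *   netio_pkt_vector_entry_t vec[4];
- *   int i;
- *
- *   for (i = 0; i < 4; i++)
- *     netio_pkt_vector_set(&vec[i], pkts[i], 1);
- */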
-
-
-/**
- * Flags and structures for @ref netio_get() and @ref netio_set().
- * @ingroup config
- */
-
-/** @{ */
-/** Parameter class; addr is a NETIO_PARAM_xxx value. */
-#define NETIO_PARAM 0
-/** Interface MAC address. This address is only valid with @ref netio_get().
- * The value is a 6-byte MAC address. Depending upon the overall system
- * design, a MAC address may or may not be available for each interface. */
-#define NETIO_PARAM_MAC 0
-
-/** Determine whether to suspend output on the receipt of pause frames.
- * If the value is nonzero, the I/O shim will suspend output when a pause
- * frame is received. If the value is zero, pause frames will be ignored. */
-#define NETIO_PARAM_PAUSE_IN 1
-
-/** Determine whether to send pause frames if the I/O shim packet FIFOs are
- * nearly full. If the value is zero, pause frames are not sent. If
- * the value is nonzero, it is the delay value which will be sent in any
- * pause frames which are output, in units of 512 bit times. */
-#define NETIO_PARAM_PAUSE_OUT 2
-
-/** Jumbo frame support. The value is a 4-byte integer. If the value is
- * nonzero, the MAC will accept frames of up to 10240 bytes. If the value
- * is zero, the MAC will only accept frames of up to 1544 bytes. */
-#define NETIO_PARAM_JUMBO 3
-
-/** I/O shim's overflow statistics register. The value is two 16-bit integers.
- * The first 16-bit value (or the low 16 bits, if the value is treated as a
- * 32-bit number) is the count of packets which were completely dropped and
- * not delivered by the shim. The second 16-bit value (or the high 16 bits,
- * if the value is treated as a 32-bit number) is the count of packets
- * which were truncated and thus only partially delivered by the shim. This
- * register is automatically reset to zero after it has been read.
- */
-#define NETIO_PARAM_OVERFLOW 4
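-
-/*
- * An illustrative decode of a 32-bit value read back for
- * ::NETIO_PARAM_OVERFLOW, where 'overflow_value' stands for the retrieved
- * word; the low half counts fully dropped packets, the high half counts
- * truncated ones:
- *
- *   uint16_t dropped   = overflow_value & 0xFFFF;
- *   uint16_t truncated = overflow_value >> 16;
- */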
-
-/** IPP statistics. This address is only valid with @ref netio_get(). The
- * value is a netio_stat_t structure. Unlike the I/O shim statistics, the
- * IPP statistics are not all reset to zero on read; see the description
- * of the netio_stat_t for details. */
-#define NETIO_PARAM_STAT 5
-
-/** Possible link state. The value is a combination of "NETIO_LINK_xxx"
- * flags. With @ref netio_get(), this will indicate which flags are
- * actually supported by the hardware.
- *
- * For historical reasons, specifying this value to netio_set() will have
- * the same behavior as using ::NETIO_PARAM_LINK_CONFIG, but this usage is
- * discouraged.
- */
-#define NETIO_PARAM_LINK_POSSIBLE_STATE 6
-
-/** Link configuration. The value is a combination of "NETIO_LINK_xxx" flags.
- * With @ref netio_set(), this will attempt to immediately bring up the
- * link using whichever of the requested flags are supported by the
- * hardware, or take down the link if the flags are zero; if this is
- * not possible, an error will be returned. Many programs will want
- * to use ::NETIO_PARAM_LINK_DESIRED_STATE instead.
- *
- * For historical reasons, specifying this value to netio_get() will
- * have the same behavior as using ::NETIO_PARAM_LINK_POSSIBLE_STATE,
- * but this usage is discouraged.
- */
-#define NETIO_PARAM_LINK_CONFIG NETIO_PARAM_LINK_POSSIBLE_STATE
-
-/** Current link state. This address is only valid with @ref netio_get().
- * The value is zero or more of the "NETIO_LINK_xxx" flags, ORed together.
- * If the link is down, the value ANDed with NETIO_LINK_SPEED will be
- * zero; if the link is up, the value ANDed with NETIO_LINK_SPEED will
- * result in exactly one of the NETIO_LINK_xxx values, indicating the
- * current speed. */
-#define NETIO_PARAM_LINK_CURRENT_STATE 7
-
-/** Variant symbol for current state, retained for compatibility with
- * pre-MDE-2.1 programs. */
-#define NETIO_PARAM_LINK_STATUS NETIO_PARAM_LINK_CURRENT_STATE
-
-/** Packet coherence protocol. This address is only valid with @ref netio_get().
- * The value is nonzero if the interface is configured for cache-coherent DMA.
- */
-#define NETIO_PARAM_COHERENT 8
-
-/** Desired link state. The value is a combination of "NETIO_LINK_xxx"
- * flags, which specify the desired state for the link. With @ref
- * netio_set(), this will, in the background, attempt to bring up the link
- * using whichever of the requested flags are reasonable, or take down the
- * link if the flags are zero. The actual link up or down operation may
- * happen after this call completes. If the link state changes in the
- * future, the system will continue to try to get back to the desired link
- * state; for instance, if the link is brought up successfully, and then
- * the network cable is disconnected, the link will go down. However, the
- * desired state of the link is still up, so if the cable is reconnected,
- * the link will be brought up again.
- *
- * With @ref netio_get(), this will indicate the desired state for the
- * link, as set with a previous netio_set() call, or implicitly by a
- * netio_input_register() or netio_input_unregister() operation. This may
- * not reflect the current state of the link; to get that, use
- * ::NETIO_PARAM_LINK_CURRENT_STATE. */
-#define NETIO_PARAM_LINK_DESIRED_STATE 9
-
-/** NetIO statistics structure. Retrieved using the ::NETIO_PARAM_STAT
- * address passed to @ref netio_get(). */
-typedef struct
-{
- /** Number of packets which have been received by the IPP and forwarded
- * to a tile's receive queue for processing. This value wraps at its
- * maximum, and is not cleared upon read. */
- uint32_t packets_received;
-
- /** Number of packets which have been dropped by the IPP, because they could
- * not be received, or could not be forwarded to a tile. The former happens
- * when the IPP does not have a free packet buffer of suitable size for an
- * incoming frame. The latter happens when all potential destination tiles
- * for a packet, as defined by the group, bucket, and queue configuration,
- * have full receive queues. This value wraps at its maximum, and is not
- * cleared upon read. */
- uint32_t packets_dropped;
-
- /*
- * Note: the #defines after each of the following four one-byte values
- * denote their location within the third word of the netio_stat_t. They
- * are intended for use only by the IPP implementation and are thus omitted
- * from the Doxygen output.
- */
-
- /** Number of packets dropped because no worker was able to accept a new
- * packet. This value saturates at its maximum, and is cleared upon
- * read. */
- uint8_t drops_no_worker;
-#ifndef __DOXYGEN__
-#define NETIO_STAT_DROPS_NO_WORKER 0
-#endif
-
- /** Number of packets dropped because no small buffers were available.
- * This value saturates at its maximum, and is cleared upon read. */
- uint8_t drops_no_smallbuf;
-#ifndef __DOXYGEN__
-#define NETIO_STAT_DROPS_NO_SMALLBUF 1
-#endif
-
- /** Number of packets dropped because no large buffers were available.
- * This value saturates at its maximum, and is cleared upon read. */
- uint8_t drops_no_largebuf;
-#ifndef __DOXYGEN__
-#define NETIO_STAT_DROPS_NO_LARGEBUF 2
-#endif
-
- /** Number of packets dropped because no jumbo buffers were available.
- * This value saturates at its maximum, and is cleared upon read. */
- uint8_t drops_no_jumbobuf;
-#ifndef __DOXYGEN__
-#define NETIO_STAT_DROPS_NO_JUMBOBUF 3
-#endif
-}
-netio_stat_t;
-
-
-/** Link can run, should run, or is running at 10 Mbps. */
-#define NETIO_LINK_10M 0x01
-
-/** Link can run, should run, or is running at 100 Mbps. */
-#define NETIO_LINK_100M 0x02
-
-/** Link can run, should run, or is running at 1 Gbps. */
-#define NETIO_LINK_1G 0x04
-
-/** Link can run, should run, or is running at 10 Gbps. */
-#define NETIO_LINK_10G 0x08
-
-/** Link should run at the highest speed supported by the link and by
- * the device connected to the link. Only usable as a value for
- * the link's desired state; never returned as a value for the current
- * or possible states. */
-#define NETIO_LINK_ANYSPEED 0x10
-
-/** All legal link speeds. */
-#define NETIO_LINK_SPEED (NETIO_LINK_10M | \
- NETIO_LINK_100M | \
- NETIO_LINK_1G | \
- NETIO_LINK_10G | \
- NETIO_LINK_ANYSPEED)
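-
-/*
- * An illustrative decode of a value 'state' read back for
- * ::NETIO_PARAM_LINK_CURRENT_STATE:
- *
- *   if ((state & NETIO_LINK_SPEED) == 0)
- *     ... the link is down ...
- *   else if (state & NETIO_LINK_10G)
- *     ... the link is up at 10 Gbps ...
- */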
-
-
-/** MAC register class. Addr is a register offset within the MAC.
- * Registers within the XGbE and GbE MACs are documented in the Tile
- * Processor I/O Device Guide (UG104). MAC registers start at address
- * 0x4000, and do not include the MAC_INTERFACE registers. */
-#define NETIO_MAC 1
-
-/** MDIO register class (IEEE 802.3 clause 22 format). Addr is the "addr"
- * member of a netio_mdio_addr_t structure. */
-#define NETIO_MDIO 2
-
-/** MDIO register class (IEEE 802.3 clause 45 format). Addr is the "addr"
- * member of a netio_mdio_addr_t structure. */
-#define NETIO_MDIO_CLAUSE45 3
-
-/** NetIO MDIO address type. Retrieved or provided using the ::NETIO_MDIO
- * address passed to @ref netio_get() or @ref netio_set(). */
-typedef union
-{
- struct
- {
- unsigned int reg:16; /**< MDIO register offset. For clause 22 access,
- must be less than 32. */
- unsigned int phy:5; /**< Which MDIO PHY to access. */
- unsigned int dev:5; /**< Which MDIO device to access within that PHY.
- Applicable for clause 45 access only; ignored
- for clause 22 access. */
- }
- bits; /**< Container for bitfields. */
- uint64_t addr; /**< Value to pass to @ref netio_get() or
- * @ref netio_set(). */
-}
-netio_mdio_addr_t;
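-
-/*
- * An illustrative sketch: composing a clause-22 address for register 1 of
- * PHY 3, then passing the 64-bit 'addr' member as the address with the
- * ::NETIO_MDIO class:
- *
- *   netio_mdio_addr_t mdio = { .bits = { .reg = 1, .phy = 3, .dev = 0 } };
- *   ... pass mdio.addr to netio_get() or netio_set() ...
- */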
-
-/** @} */
-
-#endif /* __NETIO_INTF_H__ */
diff --git a/arch/tile/include/hv/syscall_public.h b/arch/tile/include/hv/syscall_public.h
deleted file mode 100644
index 9cc0837e69fd..000000000000
--- a/arch/tile/include/hv/syscall_public.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file syscall_public.h
- * Indices for the hypervisor system calls that are intended to be called
- * directly, rather than only through hypervisor-generated "glue" code.
- */
-
-#ifndef _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H
-#define _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H
-
-/** Fast syscall flag bit location. When this bit is set, the hypervisor
- * handles the syscall specially.
- */
-#define HV_SYS_FAST_SHIFT 14
-
-/** Fast syscall flag bit mask. */
-#define HV_SYS_FAST_MASK (1 << HV_SYS_FAST_SHIFT)
-
-/** Bit location for flagging fast syscalls that can be called from PL0. */
-#define HV_SYS_FAST_PL0_SHIFT 13
-
-/** Fast syscall allowing PL0 bit mask. */
-#define HV_SYS_FAST_PL0_MASK (1 << HV_SYS_FAST_PL0_SHIFT)
-
-/** Perform an MF that waits for all victims to reach DRAM. */
-#define HV_SYS_fence_incoherent (51 | HV_SYS_FAST_MASK \
- | HV_SYS_FAST_PL0_MASK)
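-
-/* Note: with the flag bits above, this evaluates to
-   51 | 0x4000 | 0x2000 == 0x6033. */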
-
-#endif /* !_SYS_HV_INCLUDE_SYSCALL_PUBLIC_H */
diff --git a/arch/tile/include/uapi/arch/abi.h b/arch/tile/include/uapi/arch/abi.h
deleted file mode 100644
index df161a484730..000000000000
--- a/arch/tile/include/uapi/arch/abi.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file
- *
- * ABI-related register definitions.
- */
-
-#ifndef __ARCH_ABI_H__
-
-#ifndef __tile__ /* support uncommon use of arch headers in non-tile builds */
-# include <arch/chip.h>
-# define __INT_REG_BITS CHIP_WORD_SIZE()
-#endif
-
-#include <arch/intreg.h>
-
-/* __need_int_reg_t is deprecated: just include <arch/intreg.h> */
-#ifndef __need_int_reg_t
-
-#define __ARCH_ABI_H__
-
-#ifndef __ASSEMBLER__
-/** Unsigned type that can hold a register. */
-typedef __uint_reg_t uint_reg_t;
-
-/** Signed type that can hold a register. */
-typedef __int_reg_t int_reg_t;
-#endif
-
-/** String prefix to use for printf(). */
-#define INT_REG_FMT __INT_REG_FMT
-
-/** Number of bits in a register. */
-#define INT_REG_BITS __INT_REG_BITS
-
-
-/* Registers 0 - 55 are "normal", but some perform special roles. */
-
-#define TREG_FP 52 /**< Frame pointer. */
-#define TREG_TP 53 /**< Thread pointer. */
-#define TREG_SP 54 /**< Stack pointer. */
-#define TREG_LR 55 /**< Link to calling function PC. */
-
-/** Index of last normal general-purpose register. */
-#define TREG_LAST_GPR 55
-
-/* Registers 56 - 62 are "special" network registers. */
-
-#define TREG_SN 56 /**< Static network access. */
-#define TREG_IDN0 57 /**< IDN demux 0 access. */
-#define TREG_IDN1 58 /**< IDN demux 1 access. */
-#define TREG_UDN0 59 /**< UDN demux 0 access. */
-#define TREG_UDN1 60 /**< UDN demux 1 access. */
-#define TREG_UDN2 61 /**< UDN demux 2 access. */
-#define TREG_UDN3 62 /**< UDN demux 3 access. */
-
-/* Register 63 is the "special" zero register. */
-
-#define TREG_ZERO 63 /**< "Zero" register; always reads as "0". */
-
-
-/** By convention, this register is used to hold the syscall number. */
-#define TREG_SYSCALL_NR 10
-
-/** Name of register that holds the syscall number, for use in assembly. */
-#define TREG_SYSCALL_NR_NAME r10
-
-
-/**
- * The ABI requires callers to allocate a caller state save area of
- * this many bytes at the bottom of each stack frame.
- */
-#define C_ABI_SAVE_AREA_SIZE (2 * (INT_REG_BITS / 8))
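-
-/* Note: this works out to 8 bytes when INT_REG_BITS is 32, and 16 bytes
-   when INT_REG_BITS is 64. */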
-
-/**
- * The operand to an 'info' opcode directing the backtracer to not
- * try to find the calling frame.
- */
-#define INFO_OP_CANNOT_BACKTRACE 2
-
-
-#endif /* !__need_int_reg_t */
-
-/* Make sure we later can get all the definitions and declarations. */
-#undef __need_int_reg_t
-
-#endif /* !__ARCH_ABI_H__ */
diff --git a/arch/tile/include/uapi/arch/chip.h b/arch/tile/include/uapi/arch/chip.h
deleted file mode 100644
index 7f55c6856c89..000000000000
--- a/arch/tile/include/uapi/arch/chip.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#if __tile_chip__ == 1
-#include <arch/chip_tilepro.h>
-#elif defined(__tilegx__)
-#include <arch/chip_tilegx.h>
-#else
-#error Unexpected Tilera chip type
-#endif
diff --git a/arch/tile/include/uapi/arch/chip_tilegx.h b/arch/tile/include/uapi/arch/chip_tilegx.h
deleted file mode 100644
index c2a71a43b21c..000000000000
--- a/arch/tile/include/uapi/arch/chip_tilegx.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * @file
- * Global header file.
- * This header file specifies defines for TILE-Gx.
- */
-
-#ifndef __ARCH_CHIP_H__
-#define __ARCH_CHIP_H__
-
-/** Specify chip version.
- * When possible, prefer the CHIP_xxx symbols below for future-proofing.
- * This is intended for cross-compiling; native compilation should
- * use the predefined __tile_chip__ symbol.
- */
-#define TILE_CHIP 10
-
-/** Specify chip revision.
- * This provides for the case of a respin of a particular chip type;
- * the normal value for this symbol is "0".
- * This is intended for cross-compiling; native compilation should
- * use the predefined __tile_chip_rev__ symbol.
- */
-#define TILE_CHIP_REV 0
-
-/** The name of this architecture. */
-#define CHIP_ARCH_NAME "tilegx"
-
-/** The ELF e_machine type for binaries for this chip. */
-#define CHIP_ELF_TYPE() EM_TILEGX
-
-/** The alternate ELF e_machine type for binaries for this chip. */
-#define CHIP_COMPAT_ELF_TYPE() 0x2597
-
-/** What is the native word size of the machine? */
-#define CHIP_WORD_SIZE() 64
-
-/** How many bits of a virtual address are used. Extra bits must be
- * the sign extension of the low bits.
- */
-#define CHIP_VA_WIDTH() 42
-
-/** How many bits are in a physical address? */
-#define CHIP_PA_WIDTH() 40
-
-/** Size of the L2 cache, in bytes. */
-#define CHIP_L2_CACHE_SIZE() 262144
-
-/** Log size of an L2 cache line in bytes. */
-#define CHIP_L2_LOG_LINE_SIZE() 6
-
-/** Size of an L2 cache line, in bytes. */
-#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
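-/* Note: with CHIP_L2_LOG_LINE_SIZE() == 6, this is 64 bytes. */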
-
-/** Associativity of the L2 cache. */
-#define CHIP_L2_ASSOC() 8
-
-/** Size of the L1 data cache, in bytes. */
-#define CHIP_L1D_CACHE_SIZE() 32768
-
-/** Log size of an L1 data cache line in bytes. */
-#define CHIP_L1D_LOG_LINE_SIZE() 6
-
-/** Size of an L1 data cache line, in bytes. */
-#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
-
-/** Associativity of the L1 data cache. */
-#define CHIP_L1D_ASSOC() 2
-
-/** Size of the L1 instruction cache, in bytes. */
-#define CHIP_L1I_CACHE_SIZE() 32768
-
-/** Log size of an L1 instruction cache line in bytes. */
-#define CHIP_L1I_LOG_LINE_SIZE() 6
-
-/** Size of an L1 instruction cache line, in bytes. */
-#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
-
-/** Associativity of the L1 instruction cache. */
-#define CHIP_L1I_ASSOC() 2
-
-/** Stride with which flush instructions must be issued. */
-#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Stride with which inv instructions must be issued. */
-#define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Stride with which finv instructions must be issued. */
-#define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Can the local cache coherently cache data that is homed elsewhere? */
-#define CHIP_HAS_COHERENT_LOCAL_CACHE() 1
-
-/** How many simultaneous outstanding victims can the L2 cache have? */
-#define CHIP_MAX_OUTSTANDING_VICTIMS() 128
-
-/** Does the TLB support the NC and NOALLOC bits? */
-#define CHIP_HAS_NC_AND_NOALLOC_BITS() 1
-
-/** Does the chip support hash-for-home caching? */
-#define CHIP_HAS_CBOX_HOME_MAP() 1
-
-/** Number of entries in the chip's home map tables. */
-#define CHIP_CBOX_HOME_MAP_SIZE() 128
-
-/** Do uncacheable requests miss in the cache regardless of whether
- * there is matching data? */
-#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1
-
-/** Does the mf instruction wait for victims? */
-#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0
-
-/** Does the chip have an "inv" instruction that doesn't also flush? */
-#define CHIP_HAS_INV() 1
-
-/** Does the chip have a "wh64" instruction? */
-#define CHIP_HAS_WH64() 1
-
-/** Does this chip have a 'dword_align' instruction? */
-#define CHIP_HAS_DWORD_ALIGN() 0
-
-/** Number of performance counters. */
-#define CHIP_PERFORMANCE_COUNTERS() 4
-
-/** Does this chip have auxiliary performance counters? */
-#define CHIP_HAS_AUX_PERF_COUNTERS() 1
-
-/** Is the CBOX_MSR1 SPR supported? */
-#define CHIP_HAS_CBOX_MSR1() 0
-
-/** Is the TILE_RTF_HWM SPR supported? */
-#define CHIP_HAS_TILE_RTF_HWM() 1
-
-/** Is the TILE_WRITE_PENDING SPR supported? */
-#define CHIP_HAS_TILE_WRITE_PENDING() 0
-
-/** Is the PROC_STATUS SPR supported? */
-#define CHIP_HAS_PROC_STATUS_SPR() 1
-
-/** Is the DSTREAM_PF SPR supported? */
-#define CHIP_HAS_DSTREAM_PF() 1
-
-/** Log of the number of mshims we have. */
-#define CHIP_LOG_NUM_MSHIMS() 2
-
-/** Are the bases of the interrupt vector areas fixed? */
-#define CHIP_HAS_FIXED_INTVEC_BASE() 0
-
-/** Are the interrupt masks split up into 2 SPRs? */
-#define CHIP_HAS_SPLIT_INTR_MASK() 0
-
-/** Is the cycle count split up into 2 SPRs? */
-#define CHIP_HAS_SPLIT_CYCLE() 0
-
-/** Does the chip have a static network? */
-#define CHIP_HAS_SN() 0
-
-/** Does the chip have a static network processor? */
-#define CHIP_HAS_SN_PROC() 0
-
-/** Size of the L1 static network processor instruction cache, in bytes. */
-/* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 10 */
-
-/** Does the chip have DMA support in each tile? */
-#define CHIP_HAS_TILE_DMA() 0
-
-/** Does the chip have the second revision of the directly accessible
- * dynamic networks? This encapsulates a number of characteristics,
- * including the absence of the catch-all, the absence of inline message
- * tags, the absence of support for network context-switching, and so on.
- */
-#define CHIP_HAS_REV1_XDN() 1
-
-/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
-#define CHIP_HAS_CMPEXCH() 1
-
-/** Does the chip have memory-mapped I/O support? */
-#define CHIP_HAS_MMIO() 1
-
-/** Does the chip have post-completion interrupts? */
-#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 1
-
-/** Does the chip have native single step support? */
-#define CHIP_HAS_SINGLE_STEP() 1
-
-#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
-
-/** How many entries are present in the instruction TLB? */
-#define CHIP_ITLB_ENTRIES() 16
-
-/** How many entries are present in the data TLB? */
-#define CHIP_DTLB_ENTRIES() 32
-
-/** How many MAF entries does the XAUI shim have? */
-#define CHIP_XAUI_MAF_ENTRIES() 32
-
-/** Does the memory shim have a source-id table? */
-#define CHIP_HAS_MSHIM_SRCID_TABLE() 0
-
-/** Does the L1 instruction cache clear on reset? */
-#define CHIP_HAS_L1I_CLEAR_ON_RESET() 1
-
-/** Does the chip come out of reset with valid coordinates on all tiles?
- * Note that if defined, this also implies that the upper left is 1,1.
- */
-#define CHIP_HAS_VALID_TILE_COORD_RESET() 1
-
-/** Does the chip have unified packet formats? */
-#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1
-
-/** Does the chip support write reordering? */
-#define CHIP_HAS_WRITE_REORDERING() 1
-
-/** Does the chip support Y-X routing as well as X-Y? */
-#define CHIP_HAS_Y_X_ROUTING() 1
-
-/** Is INTCTRL_3 managed with the correct MPL? */
-#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1
-
-/** Is it possible to configure the chip to be big-endian? */
-#define CHIP_HAS_BIG_ENDIAN_CONFIG() 1
-
-/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
-#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
-
-/** Is the DIAG_TRACE_WAY SPR supported? */
-#define CHIP_HAS_DIAG_TRACE_WAY() 0
-
-/** Is the MEM_STRIPE_CONFIG SPR supported? */
-#define CHIP_HAS_MEM_STRIPE_CONFIG() 1
-
-/** Are the TLB_PERF SPRs supported? */
-#define CHIP_HAS_TLB_PERF() 1
-
-/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
-#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
-
-/** Does the chip support rev1 DMA packets? */
-#define CHIP_HAS_REV1_DMA_PACKETS() 1
-
-/** Does the chip have an IPI shim? */
-#define CHIP_HAS_IPI() 1
-
-#endif /* !__OPEN_SOURCE__ */
-#endif /* __ARCH_CHIP_H__ */
diff --git a/arch/tile/include/uapi/arch/chip_tilepro.h b/arch/tile/include/uapi/arch/chip_tilepro.h
deleted file mode 100644
index a8a3ed144dfe..000000000000
--- a/arch/tile/include/uapi/arch/chip_tilepro.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * @file
- * Global header file.
- * This header file specifies defines for TILEPro.
- */
-
-#ifndef __ARCH_CHIP_H__
-#define __ARCH_CHIP_H__
-
-/** Specify chip version.
- * When possible, prefer the CHIP_xxx symbols below for future-proofing.
- * This is intended for cross-compiling; native compilation should
- * use the predefined __tile_chip__ symbol.
- */
-#define TILE_CHIP 1
-
-/** Specify chip revision.
- * This provides for the case of a respin of a particular chip type;
- * the normal value for this symbol is "0".
- * This is intended for cross-compiling; native compilation should
- * use the predefined __tile_chip_rev__ symbol.
- */
-#define TILE_CHIP_REV 0
-
-/** The name of this architecture. */
-#define CHIP_ARCH_NAME "tilepro"
-
-/** The ELF e_machine type for binaries for this chip. */
-#define CHIP_ELF_TYPE() EM_TILEPRO
-
-/** The alternate ELF e_machine type for binaries for this chip. */
-#define CHIP_COMPAT_ELF_TYPE() 0x2507
-
-/** What is the native word size of the machine? */
-#define CHIP_WORD_SIZE() 32
-
-/** How many bits of a virtual address are used. Extra bits must be
- * the sign extension of the low bits.
- */
-#define CHIP_VA_WIDTH() 32
-
-/** How many bits are in a physical address? */
-#define CHIP_PA_WIDTH() 36
-
-/** Size of the L2 cache, in bytes. */
-#define CHIP_L2_CACHE_SIZE() 65536
-
-/** Log size of an L2 cache line in bytes. */
-#define CHIP_L2_LOG_LINE_SIZE() 6
-
-/** Size of an L2 cache line, in bytes. */
-#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
-
-/** Associativity of the L2 cache. */
-#define CHIP_L2_ASSOC() 4
-
-/** Size of the L1 data cache, in bytes. */
-#define CHIP_L1D_CACHE_SIZE() 8192
-
-/** Log size of an L1 data cache line in bytes. */
-#define CHIP_L1D_LOG_LINE_SIZE() 4
-
-/** Size of an L1 data cache line, in bytes. */
-#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
-
-/** Associativity of the L1 data cache. */
-#define CHIP_L1D_ASSOC() 2
-
-/** Size of the L1 instruction cache, in bytes. */
-#define CHIP_L1I_CACHE_SIZE() 16384
-
-/** Log size of an L1 instruction cache line in bytes. */
-#define CHIP_L1I_LOG_LINE_SIZE() 6
-
-/** Size of an L1 instruction cache line, in bytes. */
-#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
-
-/** Associativity of the L1 instruction cache. */
-#define CHIP_L1I_ASSOC() 1
-
-/** Stride with which flush instructions must be issued. */
-#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Stride with which inv instructions must be issued. */
-#define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Stride with which finv instructions must be issued. */
-#define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Can the local cache coherently cache data that is homed elsewhere? */
-#define CHIP_HAS_COHERENT_LOCAL_CACHE() 1
-
-/** How many simultaneous outstanding victims can the L2 cache have? */
-#define CHIP_MAX_OUTSTANDING_VICTIMS() 4
-
-/** Does the TLB support the NC and NOALLOC bits? */
-#define CHIP_HAS_NC_AND_NOALLOC_BITS() 1
-
-/** Does the chip support hash-for-home caching? */
-#define CHIP_HAS_CBOX_HOME_MAP() 1
-
-/** Number of entries in the chip's home map tables. */
-#define CHIP_CBOX_HOME_MAP_SIZE() 64
-
-/** Do uncacheable requests miss in the cache regardless of whether
- * there is matching data? */
-#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1
-
-/** Does the mf instruction wait for victims? */
-#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0
-
-/** Does the chip have an "inv" instruction that doesn't also flush? */
-#define CHIP_HAS_INV() 1
-
-/** Does the chip have a "wh64" instruction? */
-#define CHIP_HAS_WH64() 1
-
-/** Does this chip have a 'dword_align' instruction? */
-#define CHIP_HAS_DWORD_ALIGN() 1
-
-/** Number of performance counters. */
-#define CHIP_PERFORMANCE_COUNTERS() 4
-
-/** Does this chip have auxiliary performance counters? */
-#define CHIP_HAS_AUX_PERF_COUNTERS() 1
-
-/** Is the CBOX_MSR1 SPR supported? */
-#define CHIP_HAS_CBOX_MSR1() 1
-
-/** Is the TILE_RTF_HWM SPR supported? */
-#define CHIP_HAS_TILE_RTF_HWM() 1
-
-/** Is the TILE_WRITE_PENDING SPR supported? */
-#define CHIP_HAS_TILE_WRITE_PENDING() 1
-
-/** Is the PROC_STATUS SPR supported? */
-#define CHIP_HAS_PROC_STATUS_SPR() 1
-
-/** Is the DSTREAM_PF SPR supported? */
-#define CHIP_HAS_DSTREAM_PF() 0
-
-/** Log of the number of mshims we have. */
-#define CHIP_LOG_NUM_MSHIMS() 2
-
-/** Are the bases of the interrupt vector areas fixed? */
-#define CHIP_HAS_FIXED_INTVEC_BASE() 1
-
-/** Are the interrupt masks split up into 2 SPRs? */
-#define CHIP_HAS_SPLIT_INTR_MASK() 1
-
-/** Is the cycle count split up into 2 SPRs? */
-#define CHIP_HAS_SPLIT_CYCLE() 1
-
-/** Does the chip have a static network? */
-#define CHIP_HAS_SN() 1
-
-/** Does the chip have a static network processor? */
-#define CHIP_HAS_SN_PROC() 0
-
-/** Size of the L1 static network processor instruction cache, in bytes. */
-/* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 1 */
-
-/** Does the chip have DMA support in each tile? */
-#define CHIP_HAS_TILE_DMA() 1
-
-/** Does the chip have the second revision of the directly accessible
- * dynamic networks? This encapsulates a number of characteristics,
- * including the absence of the catch-all, the absence of inline message
- * tags, the absence of support for network context-switching, and so on.
- */
-#define CHIP_HAS_REV1_XDN() 0
-
-/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
-#define CHIP_HAS_CMPEXCH() 0
-
-/** Does the chip have memory-mapped I/O support? */
-#define CHIP_HAS_MMIO() 0
-
-/** Does the chip have post-completion interrupts? */
-#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
-
-/** Does the chip have native single step support? */
-#define CHIP_HAS_SINGLE_STEP() 0
-
-#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
-
-/** How many entries are present in the instruction TLB? */
-#define CHIP_ITLB_ENTRIES() 16
-
-/** How many entries are present in the data TLB? */
-#define CHIP_DTLB_ENTRIES() 16
-
-/** How many MAF entries does the XAUI shim have? */
-#define CHIP_XAUI_MAF_ENTRIES() 32
-
-/** Does the memory shim have a source-id table? */
-#define CHIP_HAS_MSHIM_SRCID_TABLE() 0
-
-/** Does the L1 instruction cache clear on reset? */
-#define CHIP_HAS_L1I_CLEAR_ON_RESET() 1
-
-/** Does the chip come out of reset with valid coordinates on all tiles?
- * Note that if defined, this also implies that the upper left is 1,1.
- */
-#define CHIP_HAS_VALID_TILE_COORD_RESET() 1
-
-/** Does the chip have unified packet formats? */
-#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1
-
-/** Does the chip support write reordering? */
-#define CHIP_HAS_WRITE_REORDERING() 1
-
-/** Does the chip support Y-X routing as well as X-Y? */
-#define CHIP_HAS_Y_X_ROUTING() 1
-
-/** Is INTCTRL_3 managed with the correct MPL? */
-#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1
-
-/** Is it possible to configure the chip to be big-endian? */
-#define CHIP_HAS_BIG_ENDIAN_CONFIG() 1
-
-/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
-#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 1
-
-/** Is the DIAG_TRACE_WAY SPR supported? */
-#define CHIP_HAS_DIAG_TRACE_WAY() 1
-
-/** Is the MEM_STRIPE_CONFIG SPR supported? */
-#define CHIP_HAS_MEM_STRIPE_CONFIG() 1
-
-/** Are the TLB_PERF SPRs supported? */
-#define CHIP_HAS_TLB_PERF() 1
-
-/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
-#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 1
-
-/** Does the chip support rev1 DMA packets? */
-#define CHIP_HAS_REV1_DMA_PACKETS() 1
-
-/** Does the chip have an IPI shim? */
-#define CHIP_HAS_IPI() 0
-
-#endif /* !__OPEN_SOURCE__ */
-#endif /* __ARCH_CHIP_H__ */
diff --git a/arch/tile/include/uapi/arch/icache.h b/arch/tile/include/uapi/arch/icache.h
deleted file mode 100644
index ff85a5d77f16..000000000000
--- a/arch/tile/include/uapi/arch/icache.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-/**
- * @file
- *
- * Support for invalidating bytes in the instruction cache.
- */
-
-#ifndef __ARCH_ICACHE_H__
-#define __ARCH_ICACHE_H__
-
-#include <arch/chip.h>
-
-
-/**
- * Invalidate the instruction cache for the given range of memory.
- *
- * @param addr The start of memory to be invalidated.
- * @param size The number of bytes to be invalidated.
- * @param page_size The system's page size, e.g. getpagesize() in userspace.
- * This value must be a power of two no larger than the page containing
- * the code to be invalidated. If the value is smaller than the actual page
- * size, this function will still work, but may run slower than necessary.
- */
-static __inline void
-invalidate_icache(const void* addr, unsigned long size,
- unsigned long page_size)
-{
- const unsigned long cache_way_size =
- CHIP_L1I_CACHE_SIZE() / CHIP_L1I_ASSOC();
- unsigned long max_useful_size;
- const char* start, *end;
- long num_passes;
-
- if (__builtin_expect(size == 0, 0))
- return;
-
-#ifdef __tilegx__
- /* Limit the number of bytes visited to avoid redundant iterations. */
- max_useful_size = (page_size < cache_way_size) ? page_size : cache_way_size;
-
- /* No PA aliasing is possible, so one pass always suffices. */
- num_passes = 1;
-#else
- /* Limit the number of bytes visited to avoid redundant iterations. */
- max_useful_size = cache_way_size;
-
- /*
- * Compute how many passes we need (we'll treat 0 as if it were 1).
- * This works because we know the page size is a power of two.
- */
- num_passes = cache_way_size >> __builtin_ctzl(page_size);
-#endif
-
- if (__builtin_expect(size > max_useful_size, 0))
- size = max_useful_size;
-
- /* Locate the first and last bytes to be invalidated. */
- start = (const char *)((unsigned long)addr & -CHIP_L1I_LINE_SIZE());
- end = (const char*)addr + size - 1;
-
- __insn_mf();
-
- do
- {
- const char* p;
-
- for (p = start; p <= end; p += CHIP_L1I_LINE_SIZE())
- __insn_icoh(p);
-
- start += page_size;
- end += page_size;
- }
- while (--num_passes > 0);
-
- __insn_drain();
-}
-
-
-#endif /* __ARCH_ICACHE_H__ */
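
For historical reference, a minimal sketch of how userspace might have used the invalidate_icache() helper removed above, e.g. for JIT-style code generation. This assumes a Tile toolchain with the removed <arch/icache.h> still on the include path; the executable mapping and the bundle contents are hypothetical, and error handling is omitted.

/*
 * Sketch only: copy instruction bundles into an executable mapping,
 * then invalidate the icache before jumping to the new code, per the
 * contract documented in invalidate_icache() above.
 */
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <arch/icache.h>

typedef void (*code_fn)(void);

code_fn install(const unsigned long long *bundles, size_t n)
{
	size_t len = n * sizeof(*bundles);
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	memcpy(buf, bundles, len);                   /* write the new code */
	invalidate_icache(buf, len, getpagesize());  /* drop stale icache lines */
	return (code_fn)buf;
}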
diff --git a/arch/tile/include/uapi/arch/interrupts.h b/arch/tile/include/uapi/arch/interrupts.h
deleted file mode 100644
index c288b5d82b4d..000000000000
--- a/arch/tile/include/uapi/arch/interrupts.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifdef __tilegx__
-#include <arch/interrupts_64.h>
-#else
-#include <arch/interrupts_32.h>
-#endif
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h
deleted file mode 100644
index a748752cec16..000000000000
--- a/arch/tile/include/uapi/arch/interrupts_32.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ARCH_INTERRUPTS_H__
-#define __ARCH_INTERRUPTS_H__
-
-#ifndef __KERNEL__
-/** Mask for an interrupt. */
-/* Note: must handle breaking interrupts into high and low words manually. */
-#define INT_MASK_LO(intno) (1 << (intno))
-#define INT_MASK_HI(intno) (1 << ((intno) - 32))
-
-#ifndef __ASSEMBLER__
-#define INT_MASK(intno) (1ULL << (intno))
-#endif
-#endif
-
-
-/** Where a given interrupt executes */
-#define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8))
-
-/** Where to store a vector for a given interrupt. */
-#define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0)
-
-/** The base address of user-level interrupts. */
-#define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0)
-
-
-/** Additional synthetic interrupt. */
-#define INT_BREAKPOINT (63)
-
-#define INT_ITLB_MISS 0
-#define INT_MEM_ERROR 1
-#define INT_ILL 2
-#define INT_GPV 3
-#define INT_SN_ACCESS 4
-#define INT_IDN_ACCESS 5
-#define INT_UDN_ACCESS 6
-#define INT_IDN_REFILL 7
-#define INT_UDN_REFILL 8
-#define INT_IDN_COMPLETE 9
-#define INT_UDN_COMPLETE 10
-#define INT_SWINT_3 11
-#define INT_SWINT_2 12
-#define INT_SWINT_1 13
-#define INT_SWINT_0 14
-#define INT_UNALIGN_DATA 15
-#define INT_DTLB_MISS 16
-#define INT_DTLB_ACCESS 17
-#define INT_DMATLB_MISS 18
-#define INT_DMATLB_ACCESS 19
-#define INT_SNITLB_MISS 20
-#define INT_SN_NOTIFY 21
-#define INT_SN_FIREWALL 22
-#define INT_IDN_FIREWALL 23
-#define INT_UDN_FIREWALL 24
-#define INT_TILE_TIMER 25
-#define INT_IDN_TIMER 26
-#define INT_UDN_TIMER 27
-#define INT_DMA_NOTIFY 28
-#define INT_IDN_CA 29
-#define INT_UDN_CA 30
-#define INT_IDN_AVAIL 31
-#define INT_UDN_AVAIL 32
-#define INT_PERF_COUNT 33
-#define INT_INTCTRL_3 34
-#define INT_INTCTRL_2 35
-#define INT_INTCTRL_1 36
-#define INT_INTCTRL_0 37
-#define INT_BOOT_ACCESS 38
-#define INT_WORLD_ACCESS 39
-#define INT_I_ASID 40
-#define INT_D_ASID 41
-#define INT_DMA_ASID 42
-#define INT_SNI_ASID 43
-#define INT_DMA_CPL 44
-#define INT_SN_CPL 45
-#define INT_DOUBLE_FAULT 46
-#define INT_SN_STATIC_ACCESS 47
-#define INT_AUX_PERF_COUNT 48
-
-#define NUM_INTERRUPTS 49
-
-#ifndef __ASSEMBLER__
-#define QUEUED_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_DMATLB_MISS) | \
- (1ULL << INT_DMATLB_ACCESS) | \
- (1ULL << INT_SNITLB_MISS) | \
- (1ULL << INT_SN_NOTIFY) | \
- (1ULL << INT_SN_FIREWALL) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_DMA_NOTIFY) | \
- (1ULL << INT_IDN_CA) | \
- (1ULL << INT_UDN_CA) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DMA_ASID) | \
- (1ULL << INT_SNI_ASID) | \
- (1ULL << INT_DMA_CPL) | \
- (1ULL << INT_SN_CPL) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- 0)
-#define NONQUEUED_INTERRUPTS ( \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_SN_ACCESS) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_IDN_REFILL) | \
- (1ULL << INT_UDN_REFILL) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- (1ULL << INT_SN_STATIC_ACCESS) | \
- 0)
-#define CRITICAL_MASKED_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_DMATLB_MISS) | \
- (1ULL << INT_DMATLB_ACCESS) | \
- (1ULL << INT_SNITLB_MISS) | \
- (1ULL << INT_SN_NOTIFY) | \
- (1ULL << INT_SN_FIREWALL) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_DMA_NOTIFY) | \
- (1ULL << INT_IDN_CA) | \
- (1ULL << INT_UDN_CA) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- 0)
-#define CRITICAL_UNMASKED_INTERRUPTS ( \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_SN_ACCESS) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_IDN_REFILL) | \
- (1ULL << INT_UDN_REFILL) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DMA_ASID) | \
- (1ULL << INT_SNI_ASID) | \
- (1ULL << INT_DMA_CPL) | \
- (1ULL << INT_SN_CPL) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- (1ULL << INT_SN_STATIC_ACCESS) | \
- 0)
-#define MASKABLE_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_IDN_REFILL) | \
- (1ULL << INT_UDN_REFILL) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_DMATLB_MISS) | \
- (1ULL << INT_DMATLB_ACCESS) | \
- (1ULL << INT_SNITLB_MISS) | \
- (1ULL << INT_SN_NOTIFY) | \
- (1ULL << INT_SN_FIREWALL) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_DMA_NOTIFY) | \
- (1ULL << INT_IDN_CA) | \
- (1ULL << INT_UDN_CA) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- 0)
-#define UNMASKABLE_INTERRUPTS ( \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_SN_ACCESS) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DMA_ASID) | \
- (1ULL << INT_SNI_ASID) | \
- (1ULL << INT_DMA_CPL) | \
- (1ULL << INT_SN_CPL) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- (1ULL << INT_SN_STATIC_ACCESS) | \
- 0)
-#define SYNC_INTERRUPTS ( \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_SN_ACCESS) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_IDN_REFILL) | \
- (1ULL << INT_UDN_REFILL) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- (1ULL << INT_SN_STATIC_ACCESS) | \
- 0)
-#define NON_SYNC_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_DMATLB_MISS) | \
- (1ULL << INT_DMATLB_ACCESS) | \
- (1ULL << INT_SNITLB_MISS) | \
- (1ULL << INT_SN_NOTIFY) | \
- (1ULL << INT_SN_FIREWALL) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_DMA_NOTIFY) | \
- (1ULL << INT_IDN_CA) | \
- (1ULL << INT_UDN_CA) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DMA_ASID) | \
- (1ULL << INT_SNI_ASID) | \
- (1ULL << INT_DMA_CPL) | \
- (1ULL << INT_SN_CPL) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- 0)
-#endif /* !__ASSEMBLER__ */
-#endif /* !__ARCH_INTERRUPTS_H__ */
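
The header above defines both a 64-bit INT_MASK() and the split INT_MASK_LO()/INT_MASK_HI() forms because TILEPro keeps its interrupt mask in a pair of 32-bit SPRs (cf. CHIP_HAS_SPLIT_INTR_MASK() earlier in this patch). A purely illustrative sketch of that decomposition, using two interrupt numbers from the header:

/*
 * Illustrative only: split a 64-bit interrupt mask into the low and
 * high words that the 32-bit SPR pair would hold, matching the
 * INT_MASK_LO()/INT_MASK_HI() convention above.
 */
#include <stdio.h>

#define INT_TILE_TIMER 25
#define INT_UDN_AVAIL  32

int main(void)
{
	unsigned long long mask =
		(1ULL << INT_TILE_TIMER) | (1ULL << INT_UDN_AVAIL);
	unsigned int lo = (unsigned int)mask;         /* interrupts 0..31 */
	unsigned int hi = (unsigned int)(mask >> 32); /* interrupts 32..63 */

	printf("lo=%#x hi=%#x\n", lo, hi);  /* prints lo=0x2000000 hi=0x1 */
	return 0;
}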
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h
deleted file mode 100644
index 142eaff3c244..000000000000
--- a/arch/tile/include/uapi/arch/interrupts_64.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ARCH_INTERRUPTS_H__
-#define __ARCH_INTERRUPTS_H__
-
-#ifndef __KERNEL__
-/** Mask for an interrupt. */
-#ifdef __ASSEMBLER__
-/* Note: must handle breaking interrupts into high and low words manually. */
-#define INT_MASK(intno) (1 << (intno))
-#else
-#define INT_MASK(intno) (1ULL << (intno))
-#endif
-#endif
-
-
-/** Where a given interrupt executes */
-#define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8))
-
-/** Where to store a vector for a given interrupt. */
-#define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0)
-
-/** The base address of user-level interrupts. */
-#define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0)
-
-
-/** Additional synthetic interrupt. */
-#define INT_BREAKPOINT (63)
-
-#define INT_MEM_ERROR 0
-#define INT_SINGLE_STEP_3 1
-#define INT_SINGLE_STEP_2 2
-#define INT_SINGLE_STEP_1 3
-#define INT_SINGLE_STEP_0 4
-#define INT_IDN_COMPLETE 5
-#define INT_UDN_COMPLETE 6
-#define INT_ITLB_MISS 7
-#define INT_ILL 8
-#define INT_GPV 9
-#define INT_IDN_ACCESS 10
-#define INT_UDN_ACCESS 11
-#define INT_SWINT_3 12
-#define INT_SWINT_2 13
-#define INT_SWINT_1 14
-#define INT_SWINT_0 15
-#define INT_ILL_TRANS 16
-#define INT_UNALIGN_DATA 17
-#define INT_DTLB_MISS 18
-#define INT_DTLB_ACCESS 19
-#define INT_IDN_FIREWALL 20
-#define INT_UDN_FIREWALL 21
-#define INT_TILE_TIMER 22
-#define INT_AUX_TILE_TIMER 23
-#define INT_IDN_TIMER 24
-#define INT_UDN_TIMER 25
-#define INT_IDN_AVAIL 26
-#define INT_UDN_AVAIL 27
-#define INT_IPI_3 28
-#define INT_IPI_2 29
-#define INT_IPI_1 30
-#define INT_IPI_0 31
-#define INT_PERF_COUNT 32
-#define INT_AUX_PERF_COUNT 33
-#define INT_INTCTRL_3 34
-#define INT_INTCTRL_2 35
-#define INT_INTCTRL_1 36
-#define INT_INTCTRL_0 37
-#define INT_BOOT_ACCESS 38
-#define INT_WORLD_ACCESS 39
-#define INT_I_ASID 40
-#define INT_D_ASID 41
-#define INT_DOUBLE_FAULT 42
-
-#define NUM_INTERRUPTS 43
-
-#ifndef __ASSEMBLER__
-#define QUEUED_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_AUX_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_IPI_3) | \
- (1ULL << INT_IPI_2) | \
- (1ULL << INT_IPI_1) | \
- (1ULL << INT_IPI_0) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- 0)
-#define NONQUEUED_INTERRUPTS ( \
- (1ULL << INT_SINGLE_STEP_3) | \
- (1ULL << INT_SINGLE_STEP_2) | \
- (1ULL << INT_SINGLE_STEP_1) | \
- (1ULL << INT_SINGLE_STEP_0) | \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_ILL_TRANS) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- 0)
-#define CRITICAL_MASKED_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_SINGLE_STEP_3) | \
- (1ULL << INT_SINGLE_STEP_2) | \
- (1ULL << INT_SINGLE_STEP_1) | \
- (1ULL << INT_SINGLE_STEP_0) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_AUX_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_IPI_3) | \
- (1ULL << INT_IPI_2) | \
- (1ULL << INT_IPI_1) | \
- (1ULL << INT_IPI_0) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- 0)
-#define CRITICAL_UNMASKED_INTERRUPTS ( \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_ILL_TRANS) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- 0)
-#define MASKABLE_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_SINGLE_STEP_3) | \
- (1ULL << INT_SINGLE_STEP_2) | \
- (1ULL << INT_SINGLE_STEP_1) | \
- (1ULL << INT_SINGLE_STEP_0) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_AUX_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_IPI_3) | \
- (1ULL << INT_IPI_2) | \
- (1ULL << INT_IPI_1) | \
- (1ULL << INT_IPI_0) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- 0)
-#define UNMASKABLE_INTERRUPTS ( \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_ILL_TRANS) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- 0)
-#define SYNC_INTERRUPTS ( \
- (1ULL << INT_SINGLE_STEP_3) | \
- (1ULL << INT_SINGLE_STEP_2) | \
- (1ULL << INT_SINGLE_STEP_1) | \
- (1ULL << INT_SINGLE_STEP_0) | \
- (1ULL << INT_IDN_COMPLETE) | \
- (1ULL << INT_UDN_COMPLETE) | \
- (1ULL << INT_ITLB_MISS) | \
- (1ULL << INT_ILL) | \
- (1ULL << INT_GPV) | \
- (1ULL << INT_IDN_ACCESS) | \
- (1ULL << INT_UDN_ACCESS) | \
- (1ULL << INT_SWINT_3) | \
- (1ULL << INT_SWINT_2) | \
- (1ULL << INT_SWINT_1) | \
- (1ULL << INT_SWINT_0) | \
- (1ULL << INT_ILL_TRANS) | \
- (1ULL << INT_UNALIGN_DATA) | \
- (1ULL << INT_DTLB_MISS) | \
- (1ULL << INT_DTLB_ACCESS) | \
- 0)
-#define NON_SYNC_INTERRUPTS ( \
- (1ULL << INT_MEM_ERROR) | \
- (1ULL << INT_IDN_FIREWALL) | \
- (1ULL << INT_UDN_FIREWALL) | \
- (1ULL << INT_TILE_TIMER) | \
- (1ULL << INT_AUX_TILE_TIMER) | \
- (1ULL << INT_IDN_TIMER) | \
- (1ULL << INT_UDN_TIMER) | \
- (1ULL << INT_IDN_AVAIL) | \
- (1ULL << INT_UDN_AVAIL) | \
- (1ULL << INT_IPI_3) | \
- (1ULL << INT_IPI_2) | \
- (1ULL << INT_IPI_1) | \
- (1ULL << INT_IPI_0) | \
- (1ULL << INT_PERF_COUNT) | \
- (1ULL << INT_AUX_PERF_COUNT) | \
- (1ULL << INT_INTCTRL_3) | \
- (1ULL << INT_INTCTRL_2) | \
- (1ULL << INT_INTCTRL_1) | \
- (1ULL << INT_INTCTRL_0) | \
- (1ULL << INT_BOOT_ACCESS) | \
- (1ULL << INT_WORLD_ACCESS) | \
- (1ULL << INT_I_ASID) | \
- (1ULL << INT_D_ASID) | \
- (1ULL << INT_DOUBLE_FAULT) | \
- 0)
-#endif /* !__ASSEMBLER__ */
-#endif /* !__ARCH_INTERRUPTS_H__ */
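
The QUEUED_INTERRUPTS and NONQUEUED_INTERRUPTS sets above partition the interrupt numbers: every interrupt below NUM_INTERRUPTS belongs to exactly one of them (this holds for both the tilegx header above and the tilepro header before it). A hedged compile-time sanity check, assuming the removed <arch/interrupts.h> and a C11 compiler:

/*
 * Sketch: verify the queued/nonqueued partition of the interrupt space.
 */
#include <arch/interrupts.h>

_Static_assert((QUEUED_INTERRUPTS & NONQUEUED_INTERRUPTS) == 0,
	       "queued/nonqueued sets overlap");
_Static_assert((QUEUED_INTERRUPTS | NONQUEUED_INTERRUPTS) ==
	       (1ULL << NUM_INTERRUPTS) - 1,
	       "some interrupt is in neither set");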
diff --git a/arch/tile/include/uapi/arch/intreg.h b/arch/tile/include/uapi/arch/intreg.h
deleted file mode 100644
index 5387fb645bb8..000000000000
--- a/arch/tile/include/uapi/arch/intreg.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2017 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file
- *
- * Provide types and defines for the type that can hold a register,
- * in the implementation namespace.
- */
-
-#ifndef __ARCH_INTREG_H__
-#define __ARCH_INTREG_H__
-
-/*
- * Get number of bits in a register. __INT_REG_BITS may be defined
- * prior to including this header to force a particular bit width.
- */
-
-#ifndef __INT_REG_BITS
-# if defined __tilegx__
-# define __INT_REG_BITS 64
-# elif defined __tilepro__
-# define __INT_REG_BITS 32
-# else
-# error Unrecognized architecture
-# endif
-#endif
-
-#if __INT_REG_BITS == 64
-
-# ifndef __ASSEMBLER__
-/** Unsigned type that can hold a register. */
-typedef unsigned long long __uint_reg_t;
-
-/** Signed type that can hold a register. */
-typedef long long __int_reg_t;
-# endif
-
-/** String prefix to use for printf(). */
-# define __INT_REG_FMT "ll"
-
-#elif __INT_REG_BITS == 32
-
-# ifndef __ASSEMBLER__
-/** Unsigned type that can hold a register. */
-typedef unsigned long __uint_reg_t;
-
-/** Signed type that can hold a register. */
-typedef long __int_reg_t;
-# endif
-
-/** String prefix to use for printf(). */
-# define __INT_REG_FMT "l"
-
-#else
-# error Unrecognized value of __INT_REG_BITS
-#endif
-
-#endif /* !__ARCH_INTREG_H__ */
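
The point of __INT_REG_FMT in the header above is to let format strings stay agnostic to the register width. A small usage sketch, assuming the removed <arch/intreg.h>: the same printf() works on both tilepro (32-bit registers, "%lx") and tilegx (64-bit, "%llx").

/*
 * Sketch: width-agnostic register dump using the types and format
 * prefix defined above.
 */
#include <stdio.h>
#include <arch/intreg.h>

void dump_reg(const char *name, __uint_reg_t val)
{
	printf("%s = 0x%" __INT_REG_FMT "x\n", name, val);
}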
diff --git a/arch/tile/include/uapi/arch/opcode.h b/arch/tile/include/uapi/arch/opcode.h
deleted file mode 100644
index a9ce5961a028..000000000000
--- a/arch/tile/include/uapi/arch/opcode.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#if defined(__tilepro__)
-#include <arch/opcode_tilepro.h>
-#elif defined(__tilegx__)
-#include <arch/opcode_tilegx.h>
-#else
-#error Unexpected Tilera chip type
-#endif
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
deleted file mode 100644
index 948ea544567f..000000000000
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ /dev/null
@@ -1,1407 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* TILE-Gx opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- *
- *
- *
- */
-
-#ifndef __ARCH_OPCODE_H__
-#define __ARCH_OPCODE_H__
-
-#ifndef __ASSEMBLER__
-
-typedef unsigned long long tilegx_bundle_bits;
-
-/* These are the bits that determine if a bundle is in the X encoding. */
-#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
-
-enum
-{
- /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
- TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
-
- /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
- TILEGX_NUM_PIPELINE_ENCODINGS = 5,
-
- /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
- TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
-
- /* Instructions take this many bytes. */
- TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
-
- /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
- TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
-
- /* Bundles should be aligned modulo this number of bytes. */
- TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
- (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
-
- /* Number of registers (some are magic, such as network I/O). */
- TILEGX_NUM_REGISTERS = 64,
-};
-
-/* Make a few "tile_" variables to simplify common code between
- architectures. */
-
-typedef tilegx_bundle_bits tile_bundle_bits;
-#define TILE_BUNDLE_SIZE_IN_BYTES TILEGX_BUNDLE_SIZE_IN_BYTES
-#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
-#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
- TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
-#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
-
-/* 64-bit pattern for a { bpt ; nop } bundle. */
-#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
-
-static __inline unsigned int
-get_BFEnd_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 24)) & 0xf);
-}
-
-static __inline unsigned int
-get_BFStart_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3f);
-}
-
-static __inline unsigned int
-get_BrOff_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 37)) & 0x0001ffc0);
-}
-
-static __inline unsigned int
-get_BrType_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 54)) & 0x1f);
-}
-
-static __inline unsigned int
-get_Dest_Imm8_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 43)) & 0x000000c0);
-}
-
-static __inline unsigned int
-get_Dest_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Dest_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Dest_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Dest_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Imm16_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xffff);
-}
-
-static __inline unsigned int
-get_Imm16_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xffff);
-}
-
-static __inline unsigned int
-get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 20)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 51)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xff);
-}
-
-static __inline unsigned int
-get_JumpOff_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x7ffffff);
-}
-
-static __inline unsigned int
-get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 58)) & 0x1);
-}
-
-static __inline unsigned int
-get_MF_Imm14_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3fff);
-}
-
-static __inline unsigned int
-get_MT_Imm14_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 37)) & 0x00003fc0);
-}
-
-static __inline unsigned int
-get_Mode(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 62)) & 0x3);
-}
-
-static __inline unsigned int
-get_Opcode_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 28)) & 0x7);
-}
-
-static __inline unsigned int
-get_Opcode_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 59)) & 0x7);
-}
-
-static __inline unsigned int
-get_Opcode_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 27)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 58)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_Y2(tilegx_bundle_bits n)
-{
- return (((n >> 26)) & 0x00000001) |
- (((unsigned int)(n >> 56)) & 0x00000002);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3);
-}
-
-static __inline unsigned int
-get_ShAmt_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_ShAmt_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_ShAmt_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_ShAmt_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3);
-}
-
-static __inline unsigned int
-get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3);
-}
-
-static __inline unsigned int
-get_SrcA_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 6)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 6)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_Y2(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 20)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcBDest_Y2(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 51)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-
-static __inline int
-sign_extend(int n, int num_bits)
-{
- int shift = (int)(sizeof(int) * 8 - num_bits);
- return (n << shift) >> shift;
-}
-
-
-
-static __inline tilegx_bundle_bits
-create_BFEnd_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_BFOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 24);
-}
-
-static __inline tilegx_bundle_bits
-create_BFStart_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 18);
-}
-
-static __inline tilegx_bundle_bits
-create_BrOff_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
-}
-
-static __inline tilegx_bundle_bits
-create_BrType_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
-}
-
-static __inline tilegx_bundle_bits
-create_Dest_Imm8_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_Dest_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 0);
-}
-
-static __inline tilegx_bundle_bits
-create_Dest_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
-}
-
-static __inline tilegx_bundle_bits
-create_Dest_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 0);
-}
-
-static __inline tilegx_bundle_bits
-create_Dest_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm16_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xffff) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm16_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm8OpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 20);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm8OpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xff)) << 51);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm8_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm8_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xff)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm8_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_Imm8_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xff)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_JumpOff_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
-}
-
-static __inline tilegx_bundle_bits
-create_JumpOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x1)) << 58);
-}
-
-static __inline tilegx_bundle_bits
-create_MF_Imm14_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
-}
-
-static __inline tilegx_bundle_bits
-create_MT_Imm14_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
-}
-
-static __inline tilegx_bundle_bits
-create_Mode(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3)) << 62);
-}
-
-static __inline tilegx_bundle_bits
-create_Opcode_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x7) << 28);
-}
-
-static __inline tilegx_bundle_bits
-create_Opcode_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x7)) << 59);
-}
-
-static __inline tilegx_bundle_bits
-create_Opcode_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 27);
-}
-
-static __inline tilegx_bundle_bits
-create_Opcode_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0xf)) << 58);
-}
-
-static __inline tilegx_bundle_bits
-create_Opcode_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x00000001) << 26) |
- (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
-}
-
-static __inline tilegx_bundle_bits
-create_RRROpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 18);
-}
-
-static __inline tilegx_bundle_bits
-create_RRROpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
-}
-
-static __inline tilegx_bundle_bits
-create_RRROpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 18);
-}
-
-static __inline tilegx_bundle_bits
-create_RRROpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3)) << 49);
-}
-
-static __inline tilegx_bundle_bits
-create_ShAmt_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_ShAmt_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_ShAmt_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_ShAmt_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 18);
-}
-
-static __inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
-}
-
-static __inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 18);
-}
-
-static __inline tilegx_bundle_bits
-create_ShiftOpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3)) << 49);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcA_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 6);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcA_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcA_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 6);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcA_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcA_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 20);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcBDest_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcB_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcB_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcB_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_SrcB_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilegx_bundle_bits
-create_UnaryOpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
-}
-
-
-enum
-{
- ADDI_IMM8_OPCODE_X0 = 1,
- ADDI_IMM8_OPCODE_X1 = 1,
- ADDI_OPCODE_Y0 = 0,
- ADDI_OPCODE_Y1 = 1,
- ADDLI_OPCODE_X0 = 1,
- ADDLI_OPCODE_X1 = 0,
- ADDXI_IMM8_OPCODE_X0 = 2,
- ADDXI_IMM8_OPCODE_X1 = 2,
- ADDXI_OPCODE_Y0 = 1,
- ADDXI_OPCODE_Y1 = 2,
- ADDXLI_OPCODE_X0 = 2,
- ADDXLI_OPCODE_X1 = 1,
- ADDXSC_RRR_0_OPCODE_X0 = 1,
- ADDXSC_RRR_0_OPCODE_X1 = 1,
- ADDX_RRR_0_OPCODE_X0 = 2,
- ADDX_RRR_0_OPCODE_X1 = 2,
- ADDX_RRR_0_OPCODE_Y0 = 0,
- ADDX_RRR_0_OPCODE_Y1 = 0,
- ADD_RRR_0_OPCODE_X0 = 3,
- ADD_RRR_0_OPCODE_X1 = 3,
- ADD_RRR_0_OPCODE_Y0 = 1,
- ADD_RRR_0_OPCODE_Y1 = 1,
- ANDI_IMM8_OPCODE_X0 = 3,
- ANDI_IMM8_OPCODE_X1 = 3,
- ANDI_OPCODE_Y0 = 2,
- ANDI_OPCODE_Y1 = 3,
- AND_RRR_0_OPCODE_X0 = 4,
- AND_RRR_0_OPCODE_X1 = 4,
- AND_RRR_5_OPCODE_Y0 = 0,
- AND_RRR_5_OPCODE_Y1 = 0,
- BEQZT_BRANCH_OPCODE_X1 = 16,
- BEQZ_BRANCH_OPCODE_X1 = 17,
- BFEXTS_BF_OPCODE_X0 = 4,
- BFEXTU_BF_OPCODE_X0 = 5,
- BFINS_BF_OPCODE_X0 = 6,
- BF_OPCODE_X0 = 3,
- BGEZT_BRANCH_OPCODE_X1 = 18,
- BGEZ_BRANCH_OPCODE_X1 = 19,
- BGTZT_BRANCH_OPCODE_X1 = 20,
- BGTZ_BRANCH_OPCODE_X1 = 21,
- BLBCT_BRANCH_OPCODE_X1 = 22,
- BLBC_BRANCH_OPCODE_X1 = 23,
- BLBST_BRANCH_OPCODE_X1 = 24,
- BLBS_BRANCH_OPCODE_X1 = 25,
- BLEZT_BRANCH_OPCODE_X1 = 26,
- BLEZ_BRANCH_OPCODE_X1 = 27,
- BLTZT_BRANCH_OPCODE_X1 = 28,
- BLTZ_BRANCH_OPCODE_X1 = 29,
- BNEZT_BRANCH_OPCODE_X1 = 30,
- BNEZ_BRANCH_OPCODE_X1 = 31,
- BRANCH_OPCODE_X1 = 2,
- CMOVEQZ_RRR_0_OPCODE_X0 = 5,
- CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
- CMOVNEZ_RRR_0_OPCODE_X0 = 6,
- CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
- CMPEQI_IMM8_OPCODE_X0 = 4,
- CMPEQI_IMM8_OPCODE_X1 = 4,
- CMPEQI_OPCODE_Y0 = 3,
- CMPEQI_OPCODE_Y1 = 4,
- CMPEQ_RRR_0_OPCODE_X0 = 7,
- CMPEQ_RRR_0_OPCODE_X1 = 5,
- CMPEQ_RRR_3_OPCODE_Y0 = 0,
- CMPEQ_RRR_3_OPCODE_Y1 = 2,
- CMPEXCH4_RRR_0_OPCODE_X1 = 6,
- CMPEXCH_RRR_0_OPCODE_X1 = 7,
- CMPLES_RRR_0_OPCODE_X0 = 8,
- CMPLES_RRR_0_OPCODE_X1 = 8,
- CMPLES_RRR_2_OPCODE_Y0 = 0,
- CMPLES_RRR_2_OPCODE_Y1 = 0,
- CMPLEU_RRR_0_OPCODE_X0 = 9,
- CMPLEU_RRR_0_OPCODE_X1 = 9,
- CMPLEU_RRR_2_OPCODE_Y0 = 1,
- CMPLEU_RRR_2_OPCODE_Y1 = 1,
- CMPLTSI_IMM8_OPCODE_X0 = 5,
- CMPLTSI_IMM8_OPCODE_X1 = 5,
- CMPLTSI_OPCODE_Y0 = 4,
- CMPLTSI_OPCODE_Y1 = 5,
- CMPLTS_RRR_0_OPCODE_X0 = 10,
- CMPLTS_RRR_0_OPCODE_X1 = 10,
- CMPLTS_RRR_2_OPCODE_Y0 = 2,
- CMPLTS_RRR_2_OPCODE_Y1 = 2,
- CMPLTUI_IMM8_OPCODE_X0 = 6,
- CMPLTUI_IMM8_OPCODE_X1 = 6,
- CMPLTU_RRR_0_OPCODE_X0 = 11,
- CMPLTU_RRR_0_OPCODE_X1 = 11,
- CMPLTU_RRR_2_OPCODE_Y0 = 3,
- CMPLTU_RRR_2_OPCODE_Y1 = 3,
- CMPNE_RRR_0_OPCODE_X0 = 12,
- CMPNE_RRR_0_OPCODE_X1 = 12,
- CMPNE_RRR_3_OPCODE_Y0 = 1,
- CMPNE_RRR_3_OPCODE_Y1 = 3,
- CMULAF_RRR_0_OPCODE_X0 = 13,
- CMULA_RRR_0_OPCODE_X0 = 14,
- CMULFR_RRR_0_OPCODE_X0 = 15,
- CMULF_RRR_0_OPCODE_X0 = 16,
- CMULHR_RRR_0_OPCODE_X0 = 17,
- CMULH_RRR_0_OPCODE_X0 = 18,
- CMUL_RRR_0_OPCODE_X0 = 19,
- CNTLZ_UNARY_OPCODE_X0 = 1,
- CNTLZ_UNARY_OPCODE_Y0 = 1,
- CNTTZ_UNARY_OPCODE_X0 = 2,
- CNTTZ_UNARY_OPCODE_Y0 = 2,
- CRC32_32_RRR_0_OPCODE_X0 = 20,
- CRC32_8_RRR_0_OPCODE_X0 = 21,
- DBLALIGN2_RRR_0_OPCODE_X0 = 22,
- DBLALIGN2_RRR_0_OPCODE_X1 = 13,
- DBLALIGN4_RRR_0_OPCODE_X0 = 23,
- DBLALIGN4_RRR_0_OPCODE_X1 = 14,
- DBLALIGN6_RRR_0_OPCODE_X0 = 24,
- DBLALIGN6_RRR_0_OPCODE_X1 = 15,
- DBLALIGN_RRR_0_OPCODE_X0 = 25,
- DRAIN_UNARY_OPCODE_X1 = 1,
- DTLBPR_UNARY_OPCODE_X1 = 2,
- EXCH4_RRR_0_OPCODE_X1 = 16,
- EXCH_RRR_0_OPCODE_X1 = 17,
- FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
- FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
- FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
- FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
- FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
- FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
- FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
- FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
- FETCHADD4_RRR_0_OPCODE_X1 = 18,
- FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
- FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
- FETCHADD_RRR_0_OPCODE_X1 = 21,
- FETCHAND4_RRR_0_OPCODE_X1 = 22,
- FETCHAND_RRR_0_OPCODE_X1 = 23,
- FETCHOR4_RRR_0_OPCODE_X1 = 24,
- FETCHOR_RRR_0_OPCODE_X1 = 25,
- FINV_UNARY_OPCODE_X1 = 3,
- FLUSHWB_UNARY_OPCODE_X1 = 4,
- FLUSH_UNARY_OPCODE_X1 = 5,
- FNOP_UNARY_OPCODE_X0 = 3,
- FNOP_UNARY_OPCODE_X1 = 6,
- FNOP_UNARY_OPCODE_Y0 = 3,
- FNOP_UNARY_OPCODE_Y1 = 8,
- FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
- FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
- FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
- FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
- FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
- FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
- FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
- FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
- ICOH_UNARY_OPCODE_X1 = 7,
- ILL_UNARY_OPCODE_X1 = 8,
- ILL_UNARY_OPCODE_Y1 = 9,
- IMM8_OPCODE_X0 = 4,
- IMM8_OPCODE_X1 = 3,
- INV_UNARY_OPCODE_X1 = 9,
- IRET_UNARY_OPCODE_X1 = 10,
- JALRP_UNARY_OPCODE_X1 = 11,
- JALRP_UNARY_OPCODE_Y1 = 10,
- JALR_UNARY_OPCODE_X1 = 12,
- JALR_UNARY_OPCODE_Y1 = 11,
- JAL_JUMP_OPCODE_X1 = 0,
- JRP_UNARY_OPCODE_X1 = 13,
- JRP_UNARY_OPCODE_Y1 = 12,
- JR_UNARY_OPCODE_X1 = 14,
- JR_UNARY_OPCODE_Y1 = 13,
- JUMP_OPCODE_X1 = 4,
- J_JUMP_OPCODE_X1 = 1,
- LD1S_ADD_IMM8_OPCODE_X1 = 7,
- LD1S_OPCODE_Y2 = 0,
- LD1S_UNARY_OPCODE_X1 = 15,
- LD1U_ADD_IMM8_OPCODE_X1 = 8,
- LD1U_OPCODE_Y2 = 1,
- LD1U_UNARY_OPCODE_X1 = 16,
- LD2S_ADD_IMM8_OPCODE_X1 = 9,
- LD2S_OPCODE_Y2 = 2,
- LD2S_UNARY_OPCODE_X1 = 17,
- LD2U_ADD_IMM8_OPCODE_X1 = 10,
- LD2U_OPCODE_Y2 = 3,
- LD2U_UNARY_OPCODE_X1 = 18,
- LD4S_ADD_IMM8_OPCODE_X1 = 11,
- LD4S_OPCODE_Y2 = 1,
- LD4S_UNARY_OPCODE_X1 = 19,
- LD4U_ADD_IMM8_OPCODE_X1 = 12,
- LD4U_OPCODE_Y2 = 2,
- LD4U_UNARY_OPCODE_X1 = 20,
- LDNA_ADD_IMM8_OPCODE_X1 = 21,
- LDNA_UNARY_OPCODE_X1 = 21,
- LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
- LDNT1S_UNARY_OPCODE_X1 = 22,
- LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
- LDNT1U_UNARY_OPCODE_X1 = 23,
- LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
- LDNT2S_UNARY_OPCODE_X1 = 24,
- LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
- LDNT2U_UNARY_OPCODE_X1 = 25,
- LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
- LDNT4S_UNARY_OPCODE_X1 = 26,
- LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
- LDNT4U_UNARY_OPCODE_X1 = 27,
- LDNT_ADD_IMM8_OPCODE_X1 = 19,
- LDNT_UNARY_OPCODE_X1 = 28,
- LD_ADD_IMM8_OPCODE_X1 = 20,
- LD_OPCODE_Y2 = 3,
- LD_UNARY_OPCODE_X1 = 29,
- LNK_UNARY_OPCODE_X1 = 30,
- LNK_UNARY_OPCODE_Y1 = 14,
- MFSPR_IMM8_OPCODE_X1 = 22,
- MF_UNARY_OPCODE_X1 = 31,
- MM_BF_OPCODE_X0 = 7,
- MNZ_RRR_0_OPCODE_X0 = 40,
- MNZ_RRR_0_OPCODE_X1 = 26,
- MNZ_RRR_4_OPCODE_Y0 = 2,
- MNZ_RRR_4_OPCODE_Y1 = 2,
- MODE_OPCODE_YA2 = 1,
- MODE_OPCODE_YB2 = 2,
- MODE_OPCODE_YC2 = 3,
- MTSPR_IMM8_OPCODE_X1 = 23,
- MULAX_RRR_0_OPCODE_X0 = 41,
- MULAX_RRR_3_OPCODE_Y0 = 2,
- MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
- MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
- MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
- MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
- MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
- MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
- MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
- MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
- MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
- MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
- MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
- MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
- MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
- MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
- MULX_RRR_0_OPCODE_X0 = 52,
- MULX_RRR_3_OPCODE_Y0 = 3,
- MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
- MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
- MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
- MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
- MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
- MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
- MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
- MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
- MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
- MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
- MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
- MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
- MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
- MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
- MZ_RRR_0_OPCODE_X0 = 63,
- MZ_RRR_0_OPCODE_X1 = 27,
- MZ_RRR_4_OPCODE_Y0 = 3,
- MZ_RRR_4_OPCODE_Y1 = 3,
- NAP_UNARY_OPCODE_X1 = 32,
- NOP_UNARY_OPCODE_X0 = 5,
- NOP_UNARY_OPCODE_X1 = 33,
- NOP_UNARY_OPCODE_Y0 = 5,
- NOP_UNARY_OPCODE_Y1 = 15,
- NOR_RRR_0_OPCODE_X0 = 64,
- NOR_RRR_0_OPCODE_X1 = 28,
- NOR_RRR_5_OPCODE_Y0 = 1,
- NOR_RRR_5_OPCODE_Y1 = 1,
- ORI_IMM8_OPCODE_X0 = 7,
- ORI_IMM8_OPCODE_X1 = 24,
- OR_RRR_0_OPCODE_X0 = 65,
- OR_RRR_0_OPCODE_X1 = 29,
- OR_RRR_5_OPCODE_Y0 = 2,
- OR_RRR_5_OPCODE_Y1 = 2,
- PCNT_UNARY_OPCODE_X0 = 6,
- PCNT_UNARY_OPCODE_Y0 = 6,
- REVBITS_UNARY_OPCODE_X0 = 7,
- REVBITS_UNARY_OPCODE_Y0 = 7,
- REVBYTES_UNARY_OPCODE_X0 = 8,
- REVBYTES_UNARY_OPCODE_Y0 = 8,
- ROTLI_SHIFT_OPCODE_X0 = 1,
- ROTLI_SHIFT_OPCODE_X1 = 1,
- ROTLI_SHIFT_OPCODE_Y0 = 0,
- ROTLI_SHIFT_OPCODE_Y1 = 0,
- ROTL_RRR_0_OPCODE_X0 = 66,
- ROTL_RRR_0_OPCODE_X1 = 30,
- ROTL_RRR_6_OPCODE_Y0 = 0,
- ROTL_RRR_6_OPCODE_Y1 = 0,
- RRR_0_OPCODE_X0 = 5,
- RRR_0_OPCODE_X1 = 5,
- RRR_0_OPCODE_Y0 = 5,
- RRR_0_OPCODE_Y1 = 6,
- RRR_1_OPCODE_Y0 = 6,
- RRR_1_OPCODE_Y1 = 7,
- RRR_2_OPCODE_Y0 = 7,
- RRR_2_OPCODE_Y1 = 8,
- RRR_3_OPCODE_Y0 = 8,
- RRR_3_OPCODE_Y1 = 9,
- RRR_4_OPCODE_Y0 = 9,
- RRR_4_OPCODE_Y1 = 10,
- RRR_5_OPCODE_Y0 = 10,
- RRR_5_OPCODE_Y1 = 11,
- RRR_6_OPCODE_Y0 = 11,
- RRR_6_OPCODE_Y1 = 12,
- RRR_7_OPCODE_Y0 = 12,
- RRR_7_OPCODE_Y1 = 13,
- RRR_8_OPCODE_Y0 = 13,
- RRR_9_OPCODE_Y0 = 14,
- SHIFT_OPCODE_X0 = 6,
- SHIFT_OPCODE_X1 = 6,
- SHIFT_OPCODE_Y0 = 15,
- SHIFT_OPCODE_Y1 = 14,
- SHL16INSLI_OPCODE_X0 = 7,
- SHL16INSLI_OPCODE_X1 = 7,
- SHL1ADDX_RRR_0_OPCODE_X0 = 67,
- SHL1ADDX_RRR_0_OPCODE_X1 = 31,
- SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
- SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
- SHL1ADD_RRR_0_OPCODE_X0 = 68,
- SHL1ADD_RRR_0_OPCODE_X1 = 32,
- SHL1ADD_RRR_1_OPCODE_Y0 = 0,
- SHL1ADD_RRR_1_OPCODE_Y1 = 0,
- SHL2ADDX_RRR_0_OPCODE_X0 = 69,
- SHL2ADDX_RRR_0_OPCODE_X1 = 33,
- SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
- SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
- SHL2ADD_RRR_0_OPCODE_X0 = 70,
- SHL2ADD_RRR_0_OPCODE_X1 = 34,
- SHL2ADD_RRR_1_OPCODE_Y0 = 1,
- SHL2ADD_RRR_1_OPCODE_Y1 = 1,
- SHL3ADDX_RRR_0_OPCODE_X0 = 71,
- SHL3ADDX_RRR_0_OPCODE_X1 = 35,
- SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
- SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
- SHL3ADD_RRR_0_OPCODE_X0 = 72,
- SHL3ADD_RRR_0_OPCODE_X1 = 36,
- SHL3ADD_RRR_1_OPCODE_Y0 = 2,
- SHL3ADD_RRR_1_OPCODE_Y1 = 2,
- SHLI_SHIFT_OPCODE_X0 = 2,
- SHLI_SHIFT_OPCODE_X1 = 2,
- SHLI_SHIFT_OPCODE_Y0 = 1,
- SHLI_SHIFT_OPCODE_Y1 = 1,
- SHLXI_SHIFT_OPCODE_X0 = 3,
- SHLXI_SHIFT_OPCODE_X1 = 3,
- SHLX_RRR_0_OPCODE_X0 = 73,
- SHLX_RRR_0_OPCODE_X1 = 37,
- SHL_RRR_0_OPCODE_X0 = 74,
- SHL_RRR_0_OPCODE_X1 = 38,
- SHL_RRR_6_OPCODE_Y0 = 1,
- SHL_RRR_6_OPCODE_Y1 = 1,
- SHRSI_SHIFT_OPCODE_X0 = 4,
- SHRSI_SHIFT_OPCODE_X1 = 4,
- SHRSI_SHIFT_OPCODE_Y0 = 2,
- SHRSI_SHIFT_OPCODE_Y1 = 2,
- SHRS_RRR_0_OPCODE_X0 = 75,
- SHRS_RRR_0_OPCODE_X1 = 39,
- SHRS_RRR_6_OPCODE_Y0 = 2,
- SHRS_RRR_6_OPCODE_Y1 = 2,
- SHRUI_SHIFT_OPCODE_X0 = 5,
- SHRUI_SHIFT_OPCODE_X1 = 5,
- SHRUI_SHIFT_OPCODE_Y0 = 3,
- SHRUI_SHIFT_OPCODE_Y1 = 3,
- SHRUXI_SHIFT_OPCODE_X0 = 6,
- SHRUXI_SHIFT_OPCODE_X1 = 6,
- SHRUX_RRR_0_OPCODE_X0 = 76,
- SHRUX_RRR_0_OPCODE_X1 = 40,
- SHRU_RRR_0_OPCODE_X0 = 77,
- SHRU_RRR_0_OPCODE_X1 = 41,
- SHRU_RRR_6_OPCODE_Y0 = 3,
- SHRU_RRR_6_OPCODE_Y1 = 3,
- SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
- ST1_ADD_IMM8_OPCODE_X1 = 25,
- ST1_OPCODE_Y2 = 0,
- ST1_RRR_0_OPCODE_X1 = 42,
- ST2_ADD_IMM8_OPCODE_X1 = 26,
- ST2_OPCODE_Y2 = 1,
- ST2_RRR_0_OPCODE_X1 = 43,
- ST4_ADD_IMM8_OPCODE_X1 = 27,
- ST4_OPCODE_Y2 = 2,
- ST4_RRR_0_OPCODE_X1 = 44,
- STNT1_ADD_IMM8_OPCODE_X1 = 28,
- STNT1_RRR_0_OPCODE_X1 = 45,
- STNT2_ADD_IMM8_OPCODE_X1 = 29,
- STNT2_RRR_0_OPCODE_X1 = 46,
- STNT4_ADD_IMM8_OPCODE_X1 = 30,
- STNT4_RRR_0_OPCODE_X1 = 47,
- STNT_ADD_IMM8_OPCODE_X1 = 31,
- STNT_RRR_0_OPCODE_X1 = 48,
- ST_ADD_IMM8_OPCODE_X1 = 32,
- ST_OPCODE_Y2 = 3,
- ST_RRR_0_OPCODE_X1 = 49,
- SUBXSC_RRR_0_OPCODE_X0 = 79,
- SUBXSC_RRR_0_OPCODE_X1 = 50,
- SUBX_RRR_0_OPCODE_X0 = 80,
- SUBX_RRR_0_OPCODE_X1 = 51,
- SUBX_RRR_0_OPCODE_Y0 = 2,
- SUBX_RRR_0_OPCODE_Y1 = 2,
- SUB_RRR_0_OPCODE_X0 = 81,
- SUB_RRR_0_OPCODE_X1 = 52,
- SUB_RRR_0_OPCODE_Y0 = 3,
- SUB_RRR_0_OPCODE_Y1 = 3,
- SWINT0_UNARY_OPCODE_X1 = 34,
- SWINT1_UNARY_OPCODE_X1 = 35,
- SWINT2_UNARY_OPCODE_X1 = 36,
- SWINT3_UNARY_OPCODE_X1 = 37,
- TBLIDXB0_UNARY_OPCODE_X0 = 9,
- TBLIDXB0_UNARY_OPCODE_Y0 = 9,
- TBLIDXB1_UNARY_OPCODE_X0 = 10,
- TBLIDXB1_UNARY_OPCODE_Y0 = 10,
- TBLIDXB2_UNARY_OPCODE_X0 = 11,
- TBLIDXB2_UNARY_OPCODE_Y0 = 11,
- TBLIDXB3_UNARY_OPCODE_X0 = 12,
- TBLIDXB3_UNARY_OPCODE_Y0 = 12,
- UNARY_RRR_0_OPCODE_X0 = 82,
- UNARY_RRR_0_OPCODE_X1 = 53,
- UNARY_RRR_1_OPCODE_Y0 = 3,
- UNARY_RRR_1_OPCODE_Y1 = 3,
- V1ADDI_IMM8_OPCODE_X0 = 8,
- V1ADDI_IMM8_OPCODE_X1 = 33,
- V1ADDUC_RRR_0_OPCODE_X0 = 83,
- V1ADDUC_RRR_0_OPCODE_X1 = 54,
- V1ADD_RRR_0_OPCODE_X0 = 84,
- V1ADD_RRR_0_OPCODE_X1 = 55,
- V1ADIFFU_RRR_0_OPCODE_X0 = 85,
- V1AVGU_RRR_0_OPCODE_X0 = 86,
- V1CMPEQI_IMM8_OPCODE_X0 = 9,
- V1CMPEQI_IMM8_OPCODE_X1 = 34,
- V1CMPEQ_RRR_0_OPCODE_X0 = 87,
- V1CMPEQ_RRR_0_OPCODE_X1 = 56,
- V1CMPLES_RRR_0_OPCODE_X0 = 88,
- V1CMPLES_RRR_0_OPCODE_X1 = 57,
- V1CMPLEU_RRR_0_OPCODE_X0 = 89,
- V1CMPLEU_RRR_0_OPCODE_X1 = 58,
- V1CMPLTSI_IMM8_OPCODE_X0 = 10,
- V1CMPLTSI_IMM8_OPCODE_X1 = 35,
- V1CMPLTS_RRR_0_OPCODE_X0 = 90,
- V1CMPLTS_RRR_0_OPCODE_X1 = 59,
- V1CMPLTUI_IMM8_OPCODE_X0 = 11,
- V1CMPLTUI_IMM8_OPCODE_X1 = 36,
- V1CMPLTU_RRR_0_OPCODE_X0 = 91,
- V1CMPLTU_RRR_0_OPCODE_X1 = 60,
- V1CMPNE_RRR_0_OPCODE_X0 = 92,
- V1CMPNE_RRR_0_OPCODE_X1 = 61,
- V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
- V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
- V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
- V1DDOTPU_RRR_0_OPCODE_X0 = 162,
- V1DOTPA_RRR_0_OPCODE_X0 = 95,
- V1DOTPUA_RRR_0_OPCODE_X0 = 163,
- V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
- V1DOTPUS_RRR_0_OPCODE_X0 = 97,
- V1DOTPU_RRR_0_OPCODE_X0 = 164,
- V1DOTP_RRR_0_OPCODE_X0 = 98,
- V1INT_H_RRR_0_OPCODE_X0 = 99,
- V1INT_H_RRR_0_OPCODE_X1 = 62,
- V1INT_L_RRR_0_OPCODE_X0 = 100,
- V1INT_L_RRR_0_OPCODE_X1 = 63,
- V1MAXUI_IMM8_OPCODE_X0 = 12,
- V1MAXUI_IMM8_OPCODE_X1 = 37,
- V1MAXU_RRR_0_OPCODE_X0 = 101,
- V1MAXU_RRR_0_OPCODE_X1 = 64,
- V1MINUI_IMM8_OPCODE_X0 = 13,
- V1MINUI_IMM8_OPCODE_X1 = 38,
- V1MINU_RRR_0_OPCODE_X0 = 102,
- V1MINU_RRR_0_OPCODE_X1 = 65,
- V1MNZ_RRR_0_OPCODE_X0 = 103,
- V1MNZ_RRR_0_OPCODE_X1 = 66,
- V1MULTU_RRR_0_OPCODE_X0 = 104,
- V1MULUS_RRR_0_OPCODE_X0 = 105,
- V1MULU_RRR_0_OPCODE_X0 = 106,
- V1MZ_RRR_0_OPCODE_X0 = 107,
- V1MZ_RRR_0_OPCODE_X1 = 67,
- V1SADAU_RRR_0_OPCODE_X0 = 108,
- V1SADU_RRR_0_OPCODE_X0 = 109,
- V1SHLI_SHIFT_OPCODE_X0 = 7,
- V1SHLI_SHIFT_OPCODE_X1 = 7,
- V1SHL_RRR_0_OPCODE_X0 = 110,
- V1SHL_RRR_0_OPCODE_X1 = 68,
- V1SHRSI_SHIFT_OPCODE_X0 = 8,
- V1SHRSI_SHIFT_OPCODE_X1 = 8,
- V1SHRS_RRR_0_OPCODE_X0 = 111,
- V1SHRS_RRR_0_OPCODE_X1 = 69,
- V1SHRUI_SHIFT_OPCODE_X0 = 9,
- V1SHRUI_SHIFT_OPCODE_X1 = 9,
- V1SHRU_RRR_0_OPCODE_X0 = 112,
- V1SHRU_RRR_0_OPCODE_X1 = 70,
- V1SUBUC_RRR_0_OPCODE_X0 = 113,
- V1SUBUC_RRR_0_OPCODE_X1 = 71,
- V1SUB_RRR_0_OPCODE_X0 = 114,
- V1SUB_RRR_0_OPCODE_X1 = 72,
- V2ADDI_IMM8_OPCODE_X0 = 14,
- V2ADDI_IMM8_OPCODE_X1 = 39,
- V2ADDSC_RRR_0_OPCODE_X0 = 115,
- V2ADDSC_RRR_0_OPCODE_X1 = 73,
- V2ADD_RRR_0_OPCODE_X0 = 116,
- V2ADD_RRR_0_OPCODE_X1 = 74,
- V2ADIFFS_RRR_0_OPCODE_X0 = 117,
- V2AVGS_RRR_0_OPCODE_X0 = 118,
- V2CMPEQI_IMM8_OPCODE_X0 = 15,
- V2CMPEQI_IMM8_OPCODE_X1 = 40,
- V2CMPEQ_RRR_0_OPCODE_X0 = 119,
- V2CMPEQ_RRR_0_OPCODE_X1 = 75,
- V2CMPLES_RRR_0_OPCODE_X0 = 120,
- V2CMPLES_RRR_0_OPCODE_X1 = 76,
- V2CMPLEU_RRR_0_OPCODE_X0 = 121,
- V2CMPLEU_RRR_0_OPCODE_X1 = 77,
- V2CMPLTSI_IMM8_OPCODE_X0 = 16,
- V2CMPLTSI_IMM8_OPCODE_X1 = 41,
- V2CMPLTS_RRR_0_OPCODE_X0 = 122,
- V2CMPLTS_RRR_0_OPCODE_X1 = 78,
- V2CMPLTUI_IMM8_OPCODE_X0 = 17,
- V2CMPLTUI_IMM8_OPCODE_X1 = 42,
- V2CMPLTU_RRR_0_OPCODE_X0 = 123,
- V2CMPLTU_RRR_0_OPCODE_X1 = 79,
- V2CMPNE_RRR_0_OPCODE_X0 = 124,
- V2CMPNE_RRR_0_OPCODE_X1 = 80,
- V2DOTPA_RRR_0_OPCODE_X0 = 125,
- V2DOTP_RRR_0_OPCODE_X0 = 126,
- V2INT_H_RRR_0_OPCODE_X0 = 127,
- V2INT_H_RRR_0_OPCODE_X1 = 81,
- V2INT_L_RRR_0_OPCODE_X0 = 128,
- V2INT_L_RRR_0_OPCODE_X1 = 82,
- V2MAXSI_IMM8_OPCODE_X0 = 18,
- V2MAXSI_IMM8_OPCODE_X1 = 43,
- V2MAXS_RRR_0_OPCODE_X0 = 129,
- V2MAXS_RRR_0_OPCODE_X1 = 83,
- V2MINSI_IMM8_OPCODE_X0 = 19,
- V2MINSI_IMM8_OPCODE_X1 = 44,
- V2MINS_RRR_0_OPCODE_X0 = 130,
- V2MINS_RRR_0_OPCODE_X1 = 84,
- V2MNZ_RRR_0_OPCODE_X0 = 131,
- V2MNZ_RRR_0_OPCODE_X1 = 85,
- V2MULFSC_RRR_0_OPCODE_X0 = 132,
- V2MULS_RRR_0_OPCODE_X0 = 133,
- V2MULTS_RRR_0_OPCODE_X0 = 134,
- V2MZ_RRR_0_OPCODE_X0 = 135,
- V2MZ_RRR_0_OPCODE_X1 = 86,
- V2PACKH_RRR_0_OPCODE_X0 = 136,
- V2PACKH_RRR_0_OPCODE_X1 = 87,
- V2PACKL_RRR_0_OPCODE_X0 = 137,
- V2PACKL_RRR_0_OPCODE_X1 = 88,
- V2PACKUC_RRR_0_OPCODE_X0 = 138,
- V2PACKUC_RRR_0_OPCODE_X1 = 89,
- V2SADAS_RRR_0_OPCODE_X0 = 139,
- V2SADAU_RRR_0_OPCODE_X0 = 140,
- V2SADS_RRR_0_OPCODE_X0 = 141,
- V2SADU_RRR_0_OPCODE_X0 = 142,
- V2SHLI_SHIFT_OPCODE_X0 = 10,
- V2SHLI_SHIFT_OPCODE_X1 = 10,
- V2SHLSC_RRR_0_OPCODE_X0 = 143,
- V2SHLSC_RRR_0_OPCODE_X1 = 90,
- V2SHL_RRR_0_OPCODE_X0 = 144,
- V2SHL_RRR_0_OPCODE_X1 = 91,
- V2SHRSI_SHIFT_OPCODE_X0 = 11,
- V2SHRSI_SHIFT_OPCODE_X1 = 11,
- V2SHRS_RRR_0_OPCODE_X0 = 145,
- V2SHRS_RRR_0_OPCODE_X1 = 92,
- V2SHRUI_SHIFT_OPCODE_X0 = 12,
- V2SHRUI_SHIFT_OPCODE_X1 = 12,
- V2SHRU_RRR_0_OPCODE_X0 = 146,
- V2SHRU_RRR_0_OPCODE_X1 = 93,
- V2SUBSC_RRR_0_OPCODE_X0 = 147,
- V2SUBSC_RRR_0_OPCODE_X1 = 94,
- V2SUB_RRR_0_OPCODE_X0 = 148,
- V2SUB_RRR_0_OPCODE_X1 = 95,
- V4ADDSC_RRR_0_OPCODE_X0 = 149,
- V4ADDSC_RRR_0_OPCODE_X1 = 96,
- V4ADD_RRR_0_OPCODE_X0 = 150,
- V4ADD_RRR_0_OPCODE_X1 = 97,
- V4INT_H_RRR_0_OPCODE_X0 = 151,
- V4INT_H_RRR_0_OPCODE_X1 = 98,
- V4INT_L_RRR_0_OPCODE_X0 = 152,
- V4INT_L_RRR_0_OPCODE_X1 = 99,
- V4PACKSC_RRR_0_OPCODE_X0 = 153,
- V4PACKSC_RRR_0_OPCODE_X1 = 100,
- V4SHLSC_RRR_0_OPCODE_X0 = 154,
- V4SHLSC_RRR_0_OPCODE_X1 = 101,
- V4SHL_RRR_0_OPCODE_X0 = 155,
- V4SHL_RRR_0_OPCODE_X1 = 102,
- V4SHRS_RRR_0_OPCODE_X0 = 156,
- V4SHRS_RRR_0_OPCODE_X1 = 103,
- V4SHRU_RRR_0_OPCODE_X0 = 157,
- V4SHRU_RRR_0_OPCODE_X1 = 104,
- V4SUBSC_RRR_0_OPCODE_X0 = 158,
- V4SUBSC_RRR_0_OPCODE_X1 = 105,
- V4SUB_RRR_0_OPCODE_X0 = 159,
- V4SUB_RRR_0_OPCODE_X1 = 106,
- WH64_UNARY_OPCODE_X1 = 38,
- XORI_IMM8_OPCODE_X0 = 20,
- XORI_IMM8_OPCODE_X1 = 45,
- XOR_RRR_0_OPCODE_X0 = 160,
- XOR_RRR_0_OPCODE_X1 = 107,
- XOR_RRR_5_OPCODE_Y0 = 3,
- XOR_RRR_5_OPCODE_Y1 = 3
-};
-
-
-#endif /* __ASSEMBLER__ */
-
-#endif /* __ARCH_OPCODE_H__ */
diff --git a/arch/tile/include/uapi/arch/opcode_tilepro.h b/arch/tile/include/uapi/arch/opcode_tilepro.h
deleted file mode 100644
index 0d633688de63..000000000000
--- a/arch/tile/include/uapi/arch/opcode_tilepro.h
+++ /dev/null
@@ -1,1473 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* TILEPro opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef __ARCH_OPCODE_H__
-#define __ARCH_OPCODE_H__
-
-#ifndef __ASSEMBLER__
-
-typedef unsigned long long tilepro_bundle_bits;
-
-/* This is the bit that determines if a bundle is in the Y encoding. */
-#define TILEPRO_BUNDLE_Y_ENCODING_MASK ((tilepro_bundle_bits)1 << 63)
-
-enum
-{
- /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
- TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
-
- /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
- TILEPRO_NUM_PIPELINE_ENCODINGS = 5,
-
- /* Log base 2 of TILEPRO_BUNDLE_SIZE_IN_BYTES. */
- TILEPRO_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
-
- /* Instructions take this many bytes. */
- TILEPRO_BUNDLE_SIZE_IN_BYTES = 1 << TILEPRO_LOG2_BUNDLE_SIZE_IN_BYTES,
-
- /* Log base 2 of TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES. */
- TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
-
- /* Bundles should be aligned modulo this number of bytes. */
- TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES =
- (1 << TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
-
- /* Log base 2 of TILEPRO_SN_INSTRUCTION_SIZE_IN_BYTES. */
- TILEPRO_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1,
-
- /* Static network instructions take this many bytes. */
- TILEPRO_SN_INSTRUCTION_SIZE_IN_BYTES =
- (1 << TILEPRO_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES),
-
- /* Number of registers (some are magic, such as network I/O). */
- TILEPRO_NUM_REGISTERS = 64,
-
- /* Number of static network registers. */
- TILEPRO_NUM_SN_REGISTERS = 4
-};
-
-/* Make a few "tile_" variables to simplify common code between
- architectures. */
-
-typedef tilepro_bundle_bits tile_bundle_bits;
-#define TILE_BUNDLE_SIZE_IN_BYTES TILEPRO_BUNDLE_SIZE_IN_BYTES
-#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES
-#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
- TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
-#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE
-
-/* 64-bit pattern for a { bpt ; nop } bundle. */
-#define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL
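/* Editor's note: an illustrative sketch, not part of the deleted file,
 * showing how the mask defined above classifies a bundle's encoding.
 * The function name is hypothetical. */
static __inline int
example_is_y_bundle(tilepro_bundle_bits bundle)
{
  /* Bit 63 selects the Y encoding (three short instructions per bundle). */
  return (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) != 0;
}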
-
-static __inline unsigned int
-get_BrOff_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_BrOff_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x00007fff) |
- (((unsigned int)(n >> 20)) & 0x00018000);
-}
-
-static __inline unsigned int
-get_BrType_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0xf);
-}
-
-static __inline unsigned int
-get_Dest_Imm8_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 43)) & 0x000000c0);
-}
-
-static __inline unsigned int
-get_Dest_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 2)) & 0x3);
-}
-
-static __inline unsigned int
-get_Dest_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Dest_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Dest_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Dest_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Imm16_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xffff);
-}
-
-static __inline unsigned int
-get_Imm16_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xffff);
-}
-
-static __inline unsigned int
-get_Imm8_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0xff);
-}
-
-static __inline unsigned int
-get_Imm8_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0xff);
-}
-
-static __inline unsigned int
-get_ImmOpcodeExtension_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 20)) & 0x7f);
-}
-
-static __inline unsigned int
-get_ImmOpcodeExtension_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 51)) & 0x7f);
-}
-
-static __inline unsigned int
-get_ImmRROpcodeExtension_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 8)) & 0x3);
-}
-
-static __inline unsigned int
-get_JOffLong_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x00007fff) |
- (((unsigned int)(n >> 20)) & 0x00018000) |
- (((unsigned int)(n >> 14)) & 0x001e0000) |
- (((unsigned int)(n >> 16)) & 0x07e00000) |
- (((unsigned int)(n >> 31)) & 0x18000000);
-}
-
-static __inline unsigned int
-get_JOff_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x00007fff) |
- (((unsigned int)(n >> 20)) & 0x00018000) |
- (((unsigned int)(n >> 14)) & 0x001e0000) |
- (((unsigned int)(n >> 16)) & 0x07e00000) |
- (((unsigned int)(n >> 31)) & 0x08000000);
-}
-
-static __inline unsigned int
-get_MF_Imm15_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x00003fff) |
- (((unsigned int)(n >> 44)) & 0x00004000);
-}
-
-static __inline unsigned int
-get_MMEnd_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MMEnd_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MMStart_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 23)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MMStart_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 54)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MT_Imm15_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 31)) & 0x0000003f) |
- (((unsigned int)(n >> 37)) & 0x00003fc0) |
- (((unsigned int)(n >> 44)) & 0x00004000);
-}
-
-static __inline unsigned int
-get_Mode(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 63)) & 0x1);
-}
-
-static __inline unsigned int
-get_NoRegOpcodeExtension_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 10)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Opcode_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 28)) & 0x7);
-}
-
-static __inline unsigned int
-get_Opcode_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 59)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 27)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 59)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_Y2(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 56)) & 0x7);
-}
-
-static __inline unsigned int
-get_RROpcodeExtension_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 4)) & 0xf);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x1ff);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x1ff);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 18)) & 0x3);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 49)) & 0x3);
-}
-
-static __inline unsigned int
-get_RouteOpcodeExtension_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_S_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 27)) & 0x1);
-}
-
-static __inline unsigned int
-get_S_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 58)) & 0x1);
-}
-
-static __inline unsigned int
-get_ShAmt_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x1f);
-}
-
-static __inline unsigned int
-get_ShAmt_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x1f);
-}
-
-static __inline unsigned int
-get_ShAmt_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x1f);
-}
-
-static __inline unsigned int
-get_ShAmt_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x1f);
-}
-
-static __inline unsigned int
-get_SrcA_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 6)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 6)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 37)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcA_Y2(tilepro_bundle_bits n)
-{
- return (((n >> 26)) & 0x00000001) |
- (((unsigned int)(n >> 50)) & 0x0000003e);
-}
-
-static __inline unsigned int
-get_SrcBDest_Y2(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 20)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x3f);
-}
-
-static __inline unsigned int
-get_SrcB_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Src_SN(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 0)) & 0x3);
-}
-
-static __inline unsigned int
-get_UnOpcodeExtension_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnOpcodeExtension_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnOpcodeExtension_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 12)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnOpcodeExtension_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 43)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_X0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 17)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_X1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 48)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_Y0(tilepro_bundle_bits num)
-{
- const unsigned int n = (unsigned int)num;
- return (((n >> 17)) & 0x7);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_Y1(tilepro_bundle_bits n)
-{
- return (((unsigned int)(n >> 48)) & 0x7);
-}
-
-
-static __inline int
-sign_extend(int n, int num_bits)
-{
- int shift = (int)(sizeof(int) * 8 - num_bits);
- return (n << shift) >> shift;
-}
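/* Editor's note: an illustrative sketch (not from the deleted file) of
 * how the raw field extractors above pair with sign_extend(). The field
 * getters return zero-extended bits; sign_extend() recovers the signed
 * value from a fixed-width immediate. */
static __inline int
example_decode_signed_imm8_x1(tilepro_bundle_bits bundle)
{
  /* The X1 Imm8 field is 8 bits wide, so sign-extend from bit 7. */
  return sign_extend((int)get_Imm8_X1(bundle), 8);
}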
-
-
-
-static __inline tilepro_bundle_bits
-create_BrOff_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_BrOff_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x00007fff)) << 43) |
- (((tilepro_bundle_bits)(n & 0x00018000)) << 20);
-}
-
-static __inline tilepro_bundle_bits
-create_BrType_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0xf)) << 31);
-}
-
-static __inline tilepro_bundle_bits
-create_Dest_Imm8_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilepro_bundle_bits)(n & 0x000000c0)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_Dest_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 2);
-}
-
-static __inline tilepro_bundle_bits
-create_Dest_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_Dest_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3f)) << 31);
-}
-
-static __inline tilepro_bundle_bits
-create_Dest_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_Dest_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3f)) << 31);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm16_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xffff) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm16_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0xffff)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm8_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm8_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm8_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0xff)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm8_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xff) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_Imm8_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0xff)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_ImmOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x7f) << 20);
-}
-
-static __inline tilepro_bundle_bits
-create_ImmOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x7f)) << 51);
-}
-
-static __inline tilepro_bundle_bits
-create_ImmRROpcodeExtension_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 8);
-}
-
-static __inline tilepro_bundle_bits
-create_JOffLong_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x00007fff)) << 43) |
- (((tilepro_bundle_bits)(n & 0x00018000)) << 20) |
- (((tilepro_bundle_bits)(n & 0x001e0000)) << 14) |
- (((tilepro_bundle_bits)(n & 0x07e00000)) << 16) |
- (((tilepro_bundle_bits)(n & 0x18000000)) << 31);
-}
-
-static __inline tilepro_bundle_bits
-create_JOff_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x00007fff)) << 43) |
- (((tilepro_bundle_bits)(n & 0x00018000)) << 20) |
- (((tilepro_bundle_bits)(n & 0x001e0000)) << 14) |
- (((tilepro_bundle_bits)(n & 0x07e00000)) << 16) |
- (((tilepro_bundle_bits)(n & 0x08000000)) << 31);
-}
-
-static __inline tilepro_bundle_bits
-create_MF_Imm15_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x00003fff)) << 37) |
- (((tilepro_bundle_bits)(n & 0x00004000)) << 44);
-}
-
-static __inline tilepro_bundle_bits
-create_MMEnd_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1f) << 18);
-}
-
-static __inline tilepro_bundle_bits
-create_MMEnd_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1f)) << 49);
-}
-
-static __inline tilepro_bundle_bits
-create_MMStart_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1f) << 23);
-}
-
-static __inline tilepro_bundle_bits
-create_MMStart_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1f)) << 54);
-}
-
-static __inline tilepro_bundle_bits
-create_MT_Imm15_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x0000003f)) << 31) |
- (((tilepro_bundle_bits)(n & 0x00003fc0)) << 37) |
- (((tilepro_bundle_bits)(n & 0x00004000)) << 44);
-}
-
-static __inline tilepro_bundle_bits
-create_Mode(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1)) << 63);
-}
-
-static __inline tilepro_bundle_bits
-create_NoRegOpcodeExtension_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_Opcode_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 10);
-}
-
-static __inline tilepro_bundle_bits
-create_Opcode_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x7) << 28);
-}
-
-static __inline tilepro_bundle_bits
-create_Opcode_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0xf)) << 59);
-}
-
-static __inline tilepro_bundle_bits
-create_Opcode_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 27);
-}
-
-static __inline tilepro_bundle_bits
-create_Opcode_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0xf)) << 59);
-}
-
-static __inline tilepro_bundle_bits
-create_Opcode_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x7)) << 56);
-}
-
-static __inline tilepro_bundle_bits
-create_RROpcodeExtension_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0xf) << 4);
-}
-
-static __inline tilepro_bundle_bits
-create_RRROpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1ff) << 18);
-}
-
-static __inline tilepro_bundle_bits
-create_RRROpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1ff)) << 49);
-}
-
-static __inline tilepro_bundle_bits
-create_RRROpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 18);
-}
-
-static __inline tilepro_bundle_bits
-create_RRROpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3)) << 49);
-}
-
-static __inline tilepro_bundle_bits
-create_RouteOpcodeExtension_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_S_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1) << 27);
-}
-
-static __inline tilepro_bundle_bits
-create_S_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1)) << 58);
-}
-
-static __inline tilepro_bundle_bits
-create_ShAmt_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1f) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_ShAmt_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1f)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_ShAmt_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1f) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_ShAmt_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1f)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcA_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 6);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcA_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3f)) << 37);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcA_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 6);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcA_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3f)) << 37);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcA_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x00000001) << 26) |
- (((tilepro_bundle_bits)(n & 0x0000003e)) << 50);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcBDest_Y2(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 20);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcB_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcB_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcB_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3f) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_SrcB_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3f)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_Src_SN(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3) << 0);
-}
-
-static __inline tilepro_bundle_bits
-create_UnOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1f) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_UnOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1f)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_UnOpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x1f) << 12);
-}
-
-static __inline tilepro_bundle_bits
-create_UnOpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x1f)) << 43);
-}
-
-static __inline tilepro_bundle_bits
-create_UnShOpcodeExtension_X0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x3ff) << 17);
-}
-
-static __inline tilepro_bundle_bits
-create_UnShOpcodeExtension_X1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x3ff)) << 48);
-}
-
-static __inline tilepro_bundle_bits
-create_UnShOpcodeExtension_Y0(int num)
-{
- const unsigned int n = (unsigned int)num;
- return ((n & 0x7) << 17);
-}
-
-static __inline tilepro_bundle_bits
-create_UnShOpcodeExtension_Y1(int num)
-{
- const unsigned int n = (unsigned int)num;
- return (((tilepro_bundle_bits)(n & 0x7)) << 48);
-}
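/* Editor's note: an illustrative sketch (not from the deleted file) of
 * how the create_* helpers compose: each helper places one field at its
 * bit position, and the results are OR'd into a single bundle word.
 * The function name and parameters are hypothetical. */
static __inline tilepro_bundle_bits
example_compose_x1_fields(int opcode, int dest, int srca, int srcb)
{
  return create_Opcode_X1(opcode) |
         create_Dest_X1(dest) |
         create_SrcA_X1(srca) |
         create_SrcB_X1(srcb);
}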
-
-
-enum
-{
- ADDBS_U_SPECIAL_0_OPCODE_X0 = 98,
- ADDBS_U_SPECIAL_0_OPCODE_X1 = 68,
- ADDB_SPECIAL_0_OPCODE_X0 = 1,
- ADDB_SPECIAL_0_OPCODE_X1 = 1,
- ADDHS_SPECIAL_0_OPCODE_X0 = 99,
- ADDHS_SPECIAL_0_OPCODE_X1 = 69,
- ADDH_SPECIAL_0_OPCODE_X0 = 2,
- ADDH_SPECIAL_0_OPCODE_X1 = 2,
- ADDIB_IMM_0_OPCODE_X0 = 1,
- ADDIB_IMM_0_OPCODE_X1 = 1,
- ADDIH_IMM_0_OPCODE_X0 = 2,
- ADDIH_IMM_0_OPCODE_X1 = 2,
- ADDI_IMM_0_OPCODE_X0 = 3,
- ADDI_IMM_0_OPCODE_X1 = 3,
- ADDI_IMM_1_OPCODE_SN = 1,
- ADDI_OPCODE_Y0 = 9,
- ADDI_OPCODE_Y1 = 7,
- ADDLIS_OPCODE_X0 = 1,
- ADDLIS_OPCODE_X1 = 2,
- ADDLI_OPCODE_X0 = 2,
- ADDLI_OPCODE_X1 = 3,
- ADDS_SPECIAL_0_OPCODE_X0 = 96,
- ADDS_SPECIAL_0_OPCODE_X1 = 66,
- ADD_SPECIAL_0_OPCODE_X0 = 3,
- ADD_SPECIAL_0_OPCODE_X1 = 3,
- ADD_SPECIAL_0_OPCODE_Y0 = 0,
- ADD_SPECIAL_0_OPCODE_Y1 = 0,
- ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4,
- ADIFFH_SPECIAL_0_OPCODE_X0 = 5,
- ANDI_IMM_0_OPCODE_X0 = 1,
- ANDI_IMM_0_OPCODE_X1 = 4,
- ANDI_OPCODE_Y0 = 10,
- ANDI_OPCODE_Y1 = 8,
- AND_SPECIAL_0_OPCODE_X0 = 6,
- AND_SPECIAL_0_OPCODE_X1 = 4,
- AND_SPECIAL_2_OPCODE_Y0 = 0,
- AND_SPECIAL_2_OPCODE_Y1 = 0,
- AULI_OPCODE_X0 = 3,
- AULI_OPCODE_X1 = 4,
- AVGB_U_SPECIAL_0_OPCODE_X0 = 7,
- AVGH_SPECIAL_0_OPCODE_X0 = 8,
- BBNST_BRANCH_OPCODE_X1 = 15,
- BBNS_BRANCH_OPCODE_X1 = 14,
- BBNS_OPCODE_SN = 63,
- BBST_BRANCH_OPCODE_X1 = 13,
- BBS_BRANCH_OPCODE_X1 = 12,
- BBS_OPCODE_SN = 62,
- BGEZT_BRANCH_OPCODE_X1 = 7,
- BGEZ_BRANCH_OPCODE_X1 = 6,
- BGEZ_OPCODE_SN = 61,
- BGZT_BRANCH_OPCODE_X1 = 5,
- BGZ_BRANCH_OPCODE_X1 = 4,
- BGZ_OPCODE_SN = 58,
- BITX_UN_0_SHUN_0_OPCODE_X0 = 1,
- BITX_UN_0_SHUN_0_OPCODE_Y0 = 1,
- BLEZT_BRANCH_OPCODE_X1 = 11,
- BLEZ_BRANCH_OPCODE_X1 = 10,
- BLEZ_OPCODE_SN = 59,
- BLZT_BRANCH_OPCODE_X1 = 9,
- BLZ_BRANCH_OPCODE_X1 = 8,
- BLZ_OPCODE_SN = 60,
- BNZT_BRANCH_OPCODE_X1 = 3,
- BNZ_BRANCH_OPCODE_X1 = 2,
- BNZ_OPCODE_SN = 57,
- BPT_NOREG_RR_IMM_0_OPCODE_SN = 1,
- BRANCH_OPCODE_X1 = 5,
- BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2,
- BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2,
- BZT_BRANCH_OPCODE_X1 = 1,
- BZ_BRANCH_OPCODE_X1 = 0,
- BZ_OPCODE_SN = 56,
- CLZ_UN_0_SHUN_0_OPCODE_X0 = 3,
- CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3,
- CRC32_32_SPECIAL_0_OPCODE_X0 = 9,
- CRC32_8_SPECIAL_0_OPCODE_X0 = 10,
- CTZ_UN_0_SHUN_0_OPCODE_X0 = 4,
- CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4,
- DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1,
- DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2,
- DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95,
- FINV_UN_0_SHUN_0_OPCODE_X1 = 3,
- FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4,
- FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3,
- FNOP_UN_0_SHUN_0_OPCODE_X0 = 5,
- FNOP_UN_0_SHUN_0_OPCODE_X1 = 5,
- FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5,
- FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1,
- HALT_NOREG_RR_IMM_0_OPCODE_SN = 0,
- ICOH_UN_0_SHUN_0_OPCODE_X1 = 6,
- ILL_UN_0_SHUN_0_OPCODE_X1 = 7,
- ILL_UN_0_SHUN_0_OPCODE_Y1 = 2,
- IMM_0_OPCODE_SN = 0,
- IMM_0_OPCODE_X0 = 4,
- IMM_0_OPCODE_X1 = 6,
- IMM_1_OPCODE_SN = 1,
- IMM_OPCODE_0_X0 = 5,
- INTHB_SPECIAL_0_OPCODE_X0 = 11,
- INTHB_SPECIAL_0_OPCODE_X1 = 5,
- INTHH_SPECIAL_0_OPCODE_X0 = 12,
- INTHH_SPECIAL_0_OPCODE_X1 = 6,
- INTLB_SPECIAL_0_OPCODE_X0 = 13,
- INTLB_SPECIAL_0_OPCODE_X1 = 7,
- INTLH_SPECIAL_0_OPCODE_X0 = 14,
- INTLH_SPECIAL_0_OPCODE_X1 = 8,
- INV_UN_0_SHUN_0_OPCODE_X1 = 8,
- IRET_UN_0_SHUN_0_OPCODE_X1 = 9,
- JALB_OPCODE_X1 = 13,
- JALF_OPCODE_X1 = 12,
- JALRP_SPECIAL_0_OPCODE_X1 = 9,
- JALRR_IMM_1_OPCODE_SN = 3,
- JALR_RR_IMM_0_OPCODE_SN = 5,
- JALR_SPECIAL_0_OPCODE_X1 = 10,
- JB_OPCODE_X1 = 11,
- JF_OPCODE_X1 = 10,
- JRP_SPECIAL_0_OPCODE_X1 = 11,
- JRR_IMM_1_OPCODE_SN = 2,
- JR_RR_IMM_0_OPCODE_SN = 4,
- JR_SPECIAL_0_OPCODE_X1 = 12,
- LBADD_IMM_0_OPCODE_X1 = 22,
- LBADD_U_IMM_0_OPCODE_X1 = 23,
- LB_OPCODE_Y2 = 0,
- LB_UN_0_SHUN_0_OPCODE_X1 = 10,
- LB_U_OPCODE_Y2 = 1,
- LB_U_UN_0_SHUN_0_OPCODE_X1 = 11,
- LHADD_IMM_0_OPCODE_X1 = 24,
- LHADD_U_IMM_0_OPCODE_X1 = 25,
- LH_OPCODE_Y2 = 2,
- LH_UN_0_SHUN_0_OPCODE_X1 = 12,
- LH_U_OPCODE_Y2 = 3,
- LH_U_UN_0_SHUN_0_OPCODE_X1 = 13,
- LNK_SPECIAL_0_OPCODE_X1 = 13,
- LWADD_IMM_0_OPCODE_X1 = 26,
- LWADD_NA_IMM_0_OPCODE_X1 = 27,
- LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24,
- LW_OPCODE_Y2 = 4,
- LW_UN_0_SHUN_0_OPCODE_X1 = 14,
- MAXB_U_SPECIAL_0_OPCODE_X0 = 15,
- MAXB_U_SPECIAL_0_OPCODE_X1 = 14,
- MAXH_SPECIAL_0_OPCODE_X0 = 16,
- MAXH_SPECIAL_0_OPCODE_X1 = 15,
- MAXIB_U_IMM_0_OPCODE_X0 = 4,
- MAXIB_U_IMM_0_OPCODE_X1 = 5,
- MAXIH_IMM_0_OPCODE_X0 = 5,
- MAXIH_IMM_0_OPCODE_X1 = 6,
- MFSPR_IMM_0_OPCODE_X1 = 7,
- MF_UN_0_SHUN_0_OPCODE_X1 = 15,
- MINB_U_SPECIAL_0_OPCODE_X0 = 17,
- MINB_U_SPECIAL_0_OPCODE_X1 = 16,
- MINH_SPECIAL_0_OPCODE_X0 = 18,
- MINH_SPECIAL_0_OPCODE_X1 = 17,
- MINIB_U_IMM_0_OPCODE_X0 = 6,
- MINIB_U_IMM_0_OPCODE_X1 = 8,
- MINIH_IMM_0_OPCODE_X0 = 7,
- MINIH_IMM_0_OPCODE_X1 = 9,
- MM_OPCODE_X0 = 6,
- MM_OPCODE_X1 = 7,
- MNZB_SPECIAL_0_OPCODE_X0 = 19,
- MNZB_SPECIAL_0_OPCODE_X1 = 18,
- MNZH_SPECIAL_0_OPCODE_X0 = 20,
- MNZH_SPECIAL_0_OPCODE_X1 = 19,
- MNZ_SPECIAL_0_OPCODE_X0 = 21,
- MNZ_SPECIAL_0_OPCODE_X1 = 20,
- MNZ_SPECIAL_1_OPCODE_Y0 = 0,
- MNZ_SPECIAL_1_OPCODE_Y1 = 1,
- MOVEI_IMM_1_OPCODE_SN = 0,
- MOVE_RR_IMM_0_OPCODE_SN = 8,
- MTSPR_IMM_0_OPCODE_X1 = 10,
- MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22,
- MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0,
- MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23,
- MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24,
- MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1,
- MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25,
- MULHH_SS_SPECIAL_0_OPCODE_X0 = 26,
- MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0,
- MULHH_SU_SPECIAL_0_OPCODE_X0 = 27,
- MULHH_UU_SPECIAL_0_OPCODE_X0 = 28,
- MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1,
- MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29,
- MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30,
- MULHLA_US_SPECIAL_0_OPCODE_X0 = 31,
- MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32,
- MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33,
- MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0,
- MULHL_SS_SPECIAL_0_OPCODE_X0 = 34,
- MULHL_SU_SPECIAL_0_OPCODE_X0 = 35,
- MULHL_US_SPECIAL_0_OPCODE_X0 = 36,
- MULHL_UU_SPECIAL_0_OPCODE_X0 = 37,
- MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38,
- MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2,
- MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39,
- MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40,
- MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3,
- MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41,
- MULLL_SS_SPECIAL_0_OPCODE_X0 = 42,
- MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2,
- MULLL_SU_SPECIAL_0_OPCODE_X0 = 43,
- MULLL_UU_SPECIAL_0_OPCODE_X0 = 44,
- MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3,
- MVNZ_SPECIAL_0_OPCODE_X0 = 45,
- MVNZ_SPECIAL_1_OPCODE_Y0 = 1,
- MVZ_SPECIAL_0_OPCODE_X0 = 46,
- MVZ_SPECIAL_1_OPCODE_Y0 = 2,
- MZB_SPECIAL_0_OPCODE_X0 = 47,
- MZB_SPECIAL_0_OPCODE_X1 = 21,
- MZH_SPECIAL_0_OPCODE_X0 = 48,
- MZH_SPECIAL_0_OPCODE_X1 = 22,
- MZ_SPECIAL_0_OPCODE_X0 = 49,
- MZ_SPECIAL_0_OPCODE_X1 = 23,
- MZ_SPECIAL_1_OPCODE_Y0 = 3,
- MZ_SPECIAL_1_OPCODE_Y1 = 2,
- NAP_UN_0_SHUN_0_OPCODE_X1 = 16,
- NOP_NOREG_RR_IMM_0_OPCODE_SN = 2,
- NOP_UN_0_SHUN_0_OPCODE_X0 = 6,
- NOP_UN_0_SHUN_0_OPCODE_X1 = 17,
- NOP_UN_0_SHUN_0_OPCODE_Y0 = 6,
- NOP_UN_0_SHUN_0_OPCODE_Y1 = 3,
- NOREG_RR_IMM_0_OPCODE_SN = 0,
- NOR_SPECIAL_0_OPCODE_X0 = 50,
- NOR_SPECIAL_0_OPCODE_X1 = 24,
- NOR_SPECIAL_2_OPCODE_Y0 = 1,
- NOR_SPECIAL_2_OPCODE_Y1 = 1,
- ORI_IMM_0_OPCODE_X0 = 8,
- ORI_IMM_0_OPCODE_X1 = 11,
- ORI_OPCODE_Y0 = 11,
- ORI_OPCODE_Y1 = 9,
- OR_SPECIAL_0_OPCODE_X0 = 51,
- OR_SPECIAL_0_OPCODE_X1 = 25,
- OR_SPECIAL_2_OPCODE_Y0 = 2,
- OR_SPECIAL_2_OPCODE_Y1 = 2,
- PACKBS_U_SPECIAL_0_OPCODE_X0 = 103,
- PACKBS_U_SPECIAL_0_OPCODE_X1 = 73,
- PACKHB_SPECIAL_0_OPCODE_X0 = 52,
- PACKHB_SPECIAL_0_OPCODE_X1 = 26,
- PACKHS_SPECIAL_0_OPCODE_X0 = 102,
- PACKHS_SPECIAL_0_OPCODE_X1 = 72,
- PACKLB_SPECIAL_0_OPCODE_X0 = 53,
- PACKLB_SPECIAL_0_OPCODE_X1 = 27,
- PCNT_UN_0_SHUN_0_OPCODE_X0 = 7,
- PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7,
- RLI_SHUN_0_OPCODE_X0 = 1,
- RLI_SHUN_0_OPCODE_X1 = 1,
- RLI_SHUN_0_OPCODE_Y0 = 1,
- RLI_SHUN_0_OPCODE_Y1 = 1,
- RL_SPECIAL_0_OPCODE_X0 = 54,
- RL_SPECIAL_0_OPCODE_X1 = 28,
- RL_SPECIAL_3_OPCODE_Y0 = 0,
- RL_SPECIAL_3_OPCODE_Y1 = 0,
- RR_IMM_0_OPCODE_SN = 0,
- S1A_SPECIAL_0_OPCODE_X0 = 55,
- S1A_SPECIAL_0_OPCODE_X1 = 29,
- S1A_SPECIAL_0_OPCODE_Y0 = 1,
- S1A_SPECIAL_0_OPCODE_Y1 = 1,
- S2A_SPECIAL_0_OPCODE_X0 = 56,
- S2A_SPECIAL_0_OPCODE_X1 = 30,
- S2A_SPECIAL_0_OPCODE_Y0 = 2,
- S2A_SPECIAL_0_OPCODE_Y1 = 2,
- S3A_SPECIAL_0_OPCODE_X0 = 57,
- S3A_SPECIAL_0_OPCODE_X1 = 31,
- S3A_SPECIAL_5_OPCODE_Y0 = 1,
- S3A_SPECIAL_5_OPCODE_Y1 = 1,
- SADAB_U_SPECIAL_0_OPCODE_X0 = 58,
- SADAH_SPECIAL_0_OPCODE_X0 = 59,
- SADAH_U_SPECIAL_0_OPCODE_X0 = 60,
- SADB_U_SPECIAL_0_OPCODE_X0 = 61,
- SADH_SPECIAL_0_OPCODE_X0 = 62,
- SADH_U_SPECIAL_0_OPCODE_X0 = 63,
- SBADD_IMM_0_OPCODE_X1 = 28,
- SB_OPCODE_Y2 = 5,
- SB_SPECIAL_0_OPCODE_X1 = 32,
- SEQB_SPECIAL_0_OPCODE_X0 = 64,
- SEQB_SPECIAL_0_OPCODE_X1 = 33,
- SEQH_SPECIAL_0_OPCODE_X0 = 65,
- SEQH_SPECIAL_0_OPCODE_X1 = 34,
- SEQIB_IMM_0_OPCODE_X0 = 9,
- SEQIB_IMM_0_OPCODE_X1 = 12,
- SEQIH_IMM_0_OPCODE_X0 = 10,
- SEQIH_IMM_0_OPCODE_X1 = 13,
- SEQI_IMM_0_OPCODE_X0 = 11,
- SEQI_IMM_0_OPCODE_X1 = 14,
- SEQI_OPCODE_Y0 = 12,
- SEQI_OPCODE_Y1 = 10,
- SEQ_SPECIAL_0_OPCODE_X0 = 66,
- SEQ_SPECIAL_0_OPCODE_X1 = 35,
- SEQ_SPECIAL_5_OPCODE_Y0 = 2,
- SEQ_SPECIAL_5_OPCODE_Y1 = 2,
- SHADD_IMM_0_OPCODE_X1 = 29,
- SHL8II_IMM_0_OPCODE_SN = 3,
- SHLB_SPECIAL_0_OPCODE_X0 = 67,
- SHLB_SPECIAL_0_OPCODE_X1 = 36,
- SHLH_SPECIAL_0_OPCODE_X0 = 68,
- SHLH_SPECIAL_0_OPCODE_X1 = 37,
- SHLIB_SHUN_0_OPCODE_X0 = 2,
- SHLIB_SHUN_0_OPCODE_X1 = 2,
- SHLIH_SHUN_0_OPCODE_X0 = 3,
- SHLIH_SHUN_0_OPCODE_X1 = 3,
- SHLI_SHUN_0_OPCODE_X0 = 4,
- SHLI_SHUN_0_OPCODE_X1 = 4,
- SHLI_SHUN_0_OPCODE_Y0 = 2,
- SHLI_SHUN_0_OPCODE_Y1 = 2,
- SHL_SPECIAL_0_OPCODE_X0 = 69,
- SHL_SPECIAL_0_OPCODE_X1 = 38,
- SHL_SPECIAL_3_OPCODE_Y0 = 1,
- SHL_SPECIAL_3_OPCODE_Y1 = 1,
- SHR1_RR_IMM_0_OPCODE_SN = 9,
- SHRB_SPECIAL_0_OPCODE_X0 = 70,
- SHRB_SPECIAL_0_OPCODE_X1 = 39,
- SHRH_SPECIAL_0_OPCODE_X0 = 71,
- SHRH_SPECIAL_0_OPCODE_X1 = 40,
- SHRIB_SHUN_0_OPCODE_X0 = 5,
- SHRIB_SHUN_0_OPCODE_X1 = 5,
- SHRIH_SHUN_0_OPCODE_X0 = 6,
- SHRIH_SHUN_0_OPCODE_X1 = 6,
- SHRI_SHUN_0_OPCODE_X0 = 7,
- SHRI_SHUN_0_OPCODE_X1 = 7,
- SHRI_SHUN_0_OPCODE_Y0 = 3,
- SHRI_SHUN_0_OPCODE_Y1 = 3,
- SHR_SPECIAL_0_OPCODE_X0 = 72,
- SHR_SPECIAL_0_OPCODE_X1 = 41,
- SHR_SPECIAL_3_OPCODE_Y0 = 2,
- SHR_SPECIAL_3_OPCODE_Y1 = 2,
- SHUN_0_OPCODE_X0 = 7,
- SHUN_0_OPCODE_X1 = 8,
- SHUN_0_OPCODE_Y0 = 13,
- SHUN_0_OPCODE_Y1 = 11,
- SH_OPCODE_Y2 = 6,
- SH_SPECIAL_0_OPCODE_X1 = 42,
- SLTB_SPECIAL_0_OPCODE_X0 = 73,
- SLTB_SPECIAL_0_OPCODE_X1 = 43,
- SLTB_U_SPECIAL_0_OPCODE_X0 = 74,
- SLTB_U_SPECIAL_0_OPCODE_X1 = 44,
- SLTEB_SPECIAL_0_OPCODE_X0 = 75,
- SLTEB_SPECIAL_0_OPCODE_X1 = 45,
- SLTEB_U_SPECIAL_0_OPCODE_X0 = 76,
- SLTEB_U_SPECIAL_0_OPCODE_X1 = 46,
- SLTEH_SPECIAL_0_OPCODE_X0 = 77,
- SLTEH_SPECIAL_0_OPCODE_X1 = 47,
- SLTEH_U_SPECIAL_0_OPCODE_X0 = 78,
- SLTEH_U_SPECIAL_0_OPCODE_X1 = 48,
- SLTE_SPECIAL_0_OPCODE_X0 = 79,
- SLTE_SPECIAL_0_OPCODE_X1 = 49,
- SLTE_SPECIAL_4_OPCODE_Y0 = 0,
- SLTE_SPECIAL_4_OPCODE_Y1 = 0,
- SLTE_U_SPECIAL_0_OPCODE_X0 = 80,
- SLTE_U_SPECIAL_0_OPCODE_X1 = 50,
- SLTE_U_SPECIAL_4_OPCODE_Y0 = 1,
- SLTE_U_SPECIAL_4_OPCODE_Y1 = 1,
- SLTH_SPECIAL_0_OPCODE_X0 = 81,
- SLTH_SPECIAL_0_OPCODE_X1 = 51,
- SLTH_U_SPECIAL_0_OPCODE_X0 = 82,
- SLTH_U_SPECIAL_0_OPCODE_X1 = 52,
- SLTIB_IMM_0_OPCODE_X0 = 12,
- SLTIB_IMM_0_OPCODE_X1 = 15,
- SLTIB_U_IMM_0_OPCODE_X0 = 13,
- SLTIB_U_IMM_0_OPCODE_X1 = 16,
- SLTIH_IMM_0_OPCODE_X0 = 14,
- SLTIH_IMM_0_OPCODE_X1 = 17,
- SLTIH_U_IMM_0_OPCODE_X0 = 15,
- SLTIH_U_IMM_0_OPCODE_X1 = 18,
- SLTI_IMM_0_OPCODE_X0 = 16,
- SLTI_IMM_0_OPCODE_X1 = 19,
- SLTI_OPCODE_Y0 = 14,
- SLTI_OPCODE_Y1 = 12,
- SLTI_U_IMM_0_OPCODE_X0 = 17,
- SLTI_U_IMM_0_OPCODE_X1 = 20,
- SLTI_U_OPCODE_Y0 = 15,
- SLTI_U_OPCODE_Y1 = 13,
- SLT_SPECIAL_0_OPCODE_X0 = 83,
- SLT_SPECIAL_0_OPCODE_X1 = 53,
- SLT_SPECIAL_4_OPCODE_Y0 = 2,
- SLT_SPECIAL_4_OPCODE_Y1 = 2,
- SLT_U_SPECIAL_0_OPCODE_X0 = 84,
- SLT_U_SPECIAL_0_OPCODE_X1 = 54,
- SLT_U_SPECIAL_4_OPCODE_Y0 = 3,
- SLT_U_SPECIAL_4_OPCODE_Y1 = 3,
- SNEB_SPECIAL_0_OPCODE_X0 = 85,
- SNEB_SPECIAL_0_OPCODE_X1 = 55,
- SNEH_SPECIAL_0_OPCODE_X0 = 86,
- SNEH_SPECIAL_0_OPCODE_X1 = 56,
- SNE_SPECIAL_0_OPCODE_X0 = 87,
- SNE_SPECIAL_0_OPCODE_X1 = 57,
- SNE_SPECIAL_5_OPCODE_Y0 = 3,
- SNE_SPECIAL_5_OPCODE_Y1 = 3,
- SPECIAL_0_OPCODE_X0 = 0,
- SPECIAL_0_OPCODE_X1 = 1,
- SPECIAL_0_OPCODE_Y0 = 1,
- SPECIAL_0_OPCODE_Y1 = 1,
- SPECIAL_1_OPCODE_Y0 = 2,
- SPECIAL_1_OPCODE_Y1 = 2,
- SPECIAL_2_OPCODE_Y0 = 3,
- SPECIAL_2_OPCODE_Y1 = 3,
- SPECIAL_3_OPCODE_Y0 = 4,
- SPECIAL_3_OPCODE_Y1 = 4,
- SPECIAL_4_OPCODE_Y0 = 5,
- SPECIAL_4_OPCODE_Y1 = 5,
- SPECIAL_5_OPCODE_Y0 = 6,
- SPECIAL_5_OPCODE_Y1 = 6,
- SPECIAL_6_OPCODE_Y0 = 7,
- SPECIAL_7_OPCODE_Y0 = 8,
- SRAB_SPECIAL_0_OPCODE_X0 = 88,
- SRAB_SPECIAL_0_OPCODE_X1 = 58,
- SRAH_SPECIAL_0_OPCODE_X0 = 89,
- SRAH_SPECIAL_0_OPCODE_X1 = 59,
- SRAIB_SHUN_0_OPCODE_X0 = 8,
- SRAIB_SHUN_0_OPCODE_X1 = 8,
- SRAIH_SHUN_0_OPCODE_X0 = 9,
- SRAIH_SHUN_0_OPCODE_X1 = 9,
- SRAI_SHUN_0_OPCODE_X0 = 10,
- SRAI_SHUN_0_OPCODE_X1 = 10,
- SRAI_SHUN_0_OPCODE_Y0 = 4,
- SRAI_SHUN_0_OPCODE_Y1 = 4,
- SRA_SPECIAL_0_OPCODE_X0 = 90,
- SRA_SPECIAL_0_OPCODE_X1 = 60,
- SRA_SPECIAL_3_OPCODE_Y0 = 3,
- SRA_SPECIAL_3_OPCODE_Y1 = 3,
- SUBBS_U_SPECIAL_0_OPCODE_X0 = 100,
- SUBBS_U_SPECIAL_0_OPCODE_X1 = 70,
- SUBB_SPECIAL_0_OPCODE_X0 = 91,
- SUBB_SPECIAL_0_OPCODE_X1 = 61,
- SUBHS_SPECIAL_0_OPCODE_X0 = 101,
- SUBHS_SPECIAL_0_OPCODE_X1 = 71,
- SUBH_SPECIAL_0_OPCODE_X0 = 92,
- SUBH_SPECIAL_0_OPCODE_X1 = 62,
- SUBS_SPECIAL_0_OPCODE_X0 = 97,
- SUBS_SPECIAL_0_OPCODE_X1 = 67,
- SUB_SPECIAL_0_OPCODE_X0 = 93,
- SUB_SPECIAL_0_OPCODE_X1 = 63,
- SUB_SPECIAL_0_OPCODE_Y0 = 3,
- SUB_SPECIAL_0_OPCODE_Y1 = 3,
- SWADD_IMM_0_OPCODE_X1 = 30,
- SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18,
- SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19,
- SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20,
- SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21,
- SW_OPCODE_Y2 = 7,
- SW_SPECIAL_0_OPCODE_X1 = 64,
- TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8,
- TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8,
- TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9,
- TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9,
- TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10,
- TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10,
- TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11,
- TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11,
- TNS_UN_0_SHUN_0_OPCODE_X1 = 22,
- UN_0_SHUN_0_OPCODE_X0 = 11,
- UN_0_SHUN_0_OPCODE_X1 = 11,
- UN_0_SHUN_0_OPCODE_Y0 = 5,
- UN_0_SHUN_0_OPCODE_Y1 = 5,
- WH64_UN_0_SHUN_0_OPCODE_X1 = 23,
- XORI_IMM_0_OPCODE_X0 = 2,
- XORI_IMM_0_OPCODE_X1 = 21,
- XOR_SPECIAL_0_OPCODE_X0 = 94,
- XOR_SPECIAL_0_OPCODE_X1 = 65,
- XOR_SPECIAL_2_OPCODE_Y0 = 3,
- XOR_SPECIAL_2_OPCODE_Y1 = 3
-};
-
-
-#endif /* __ASSEMBLER__ */
-
-#endif /* __ARCH_OPCODE_H__ */
diff --git a/arch/tile/include/uapi/arch/sim.h b/arch/tile/include/uapi/arch/sim.h
deleted file mode 100644
index c4183dcd2ea7..000000000000
--- a/arch/tile/include/uapi/arch/sim.h
+++ /dev/null
@@ -1,644 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file
- *
- * Provides an API for controlling the simulator at runtime.
- */
-
-/**
- * @addtogroup arch_sim
- * @{
- *
- * An API for controlling the simulator at runtime.
- *
- * The simulator's behavior can be modified while it is running.
- * For example, human-readable trace output can be enabled and disabled
- * around code of interest.
- *
- * There are two ways to modify simulator behavior:
- * programmatically, by calling various sim_* functions, and
- * interactively, by entering commands like "sim set functional true"
- * at the tile-monitor prompt. Typing "sim help" at that prompt provides
- * a list of interactive commands.
- *
- * All interactive commands can also be executed programmatically by
- * passing a string to the sim_command function.
- */
-
-#ifndef __ARCH_SIM_H__
-#define __ARCH_SIM_H__
-
-#include <arch/sim_def.h>
-#include <arch/abi.h>
-
-#ifndef __ASSEMBLER__
-
-#include <arch/spr_def.h>
-
-
-/**
- * Return true if the current program is running under a simulator,
- * rather than on real hardware. If running on hardware, other "sim_xxx()"
- * calls have no useful effect.
- */
-static __inline int
-sim_is_simulator(void)
-{
- return __insn_mfspr(SPR_SIM_CONTROL) != 0;
-}
-
-
-/**
- * Checkpoint the simulator state to a checkpoint file.
- *
- * The checkpoint file name is either the default or the name specified
- * on the command line with "--checkpoint-file".
- */
-static __inline void
-sim_checkpoint(void)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_CHECKPOINT);
-}
-
-
-/**
- * Report whether or not various kinds of simulator tracing are enabled.
- *
- * @return The bitwise OR of these values:
- *
- * SIM_TRACE_CYCLES (--trace-cycles),
- * SIM_TRACE_ROUTER (--trace-router),
- * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
- * SIM_TRACE_DISASM (--trace-disasm),
- * SIM_TRACE_STALL_INFO (--trace-stall-info)
- * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
- * SIM_TRACE_L2_CACHE (--trace-l2)
- * SIM_TRACE_LINES (--trace-lines)
- */
-static __inline unsigned int
-sim_get_tracing(void)
-{
- return __insn_mfspr(SPR_SIM_CONTROL) & SIM_TRACE_FLAG_MASK;
-}
-
-
-/**
- * Turn on or off different kinds of simulator tracing.
- *
- * @param mask Either one of these special values:
- *
- * SIM_TRACE_NONE (turns off tracing),
- * SIM_TRACE_ALL (turns on all possible tracing).
- *
- * or the bitwise OR of these values:
- *
- * SIM_TRACE_CYCLES (--trace-cycles),
- * SIM_TRACE_ROUTER (--trace-router),
- * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
- * SIM_TRACE_DISASM (--trace-disasm),
- * SIM_TRACE_STALL_INFO (--trace-stall-info)
- * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
- * SIM_TRACE_L2_CACHE (--trace-l2)
- * SIM_TRACE_LINES (--trace-lines)
- */
-static __inline void
-sim_set_tracing(unsigned int mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_TRACE_SPR_ARG(mask));
-}
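/* Editor's note: a minimal usage sketch, not part of the deleted file.
 * It enables disassembly tracing around a region of interest, then
 * restores the previous tracing mask, as the comments above describe.
 * The function name is hypothetical. */
static __inline void
example_trace_region(void (*region)(void))
{
  unsigned int old_mask = sim_get_tracing();

  sim_set_tracing(old_mask | SIM_TRACE_DISASM);
  region();                     /* the code of interest */
  sim_set_tracing(old_mask);    /* restore the previous mask */
}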
-
-
-/**
- * Request dumping of different kinds of simulator state.
- *
- * @param mask Either this special value:
- *
- * SIM_DUMP_ALL (dump all known state)
- *
- * or the bitwise OR of these values:
- *
- * SIM_DUMP_REGS (the register file),
- * SIM_DUMP_SPRS (the SPRs),
- * SIM_DUMP_ITLB (the iTLB),
- * SIM_DUMP_DTLB (the dTLB),
- * SIM_DUMP_L1I (the L1 I-cache),
- * SIM_DUMP_L1D (the L1 D-cache),
- * SIM_DUMP_L2 (the L2 cache),
- * SIM_DUMP_SNREGS (the switch register file),
- * SIM_DUMP_SNITLB (the switch iTLB),
- * SIM_DUMP_SNL1I (the switch L1 I-cache),
- * SIM_DUMP_BACKTRACE (the current backtrace)
- */
-static __inline void
-sim_dump(unsigned int mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_DUMP_SPR_ARG(mask));
-}
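/* Editor's note: an illustrative call only (not from the deleted file):
 * dump the register file plus a backtrace when debugging a hang. */
static __inline void
example_dump_regs_and_backtrace(void)
{
  sim_dump(SIM_DUMP_REGS | SIM_DUMP_BACKTRACE);
}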
-
-
-/**
- * Print a string to the simulator stdout.
- *
- * @param str The string to be written.
- */
-static __inline void
-sim_print(const char* str)
-{
- for ( ; *str != '\0'; str++)
- {
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
- (*str << _SIM_CONTROL_OPERATOR_BITS));
- }
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
- (SIM_PUTC_FLUSH_BINARY << _SIM_CONTROL_OPERATOR_BITS));
-}
-
-
-/**
- * Print a string to the simulator stdout.
- *
- * @param str The string to be written (a newline is automatically added).
- */
-static __inline void
-sim_print_string(const char* str)
-{
- for ( ; *str != '\0'; str++)
- {
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
- (*str << _SIM_CONTROL_OPERATOR_BITS));
- }
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
- (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
-}
-
-
-/**
- * Execute a simulator command string.
- *
- * Type 'sim help' at the tile-monitor prompt to learn what commands
- * are available. Note the use of the tile-monitor "sim" command to
- * pass commands to the simulator.
- *
- * The argument to sim_command() does not include the leading "sim"
- * prefix used at the tile-monitor prompt; for example, you might call
- * sim_command("trace disasm").
- */
-static __inline void
-sim_command(const char* str)
-{
- int c;
- do
- {
- c = *str++;
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_COMMAND |
- (c << _SIM_CONTROL_OPERATOR_BITS));
- }
- while (c);
-}
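/* Editor's note: illustrative usage only, mirroring the interactive
 * tile-monitor commands described above with the "sim" prefix omitted.
 * The function name is hypothetical. */
static __inline void
example_sim_commands(void)
{
  sim_command("trace disasm");         /* like "sim trace disasm" */
  sim_command("set functional true");  /* like "sim set functional true" */
}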
-
-
-
-#ifndef __DOXYGEN__
-
-/**
- * The underlying implementation of "_sim_syscall()".
- *
- * We use extra "and" instructions to ensure that all the values
- * we are passing to the simulator are actually valid in the registers
- * (i.e. returned from memory) prior to the SIM_CONTROL spr.
- */
-static __inline long _sim_syscall0(int val)
-{
- long result;
- __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
- : "=R00" (result) : "R00" (val));
- return result;
-}
-
-static __inline long _sim_syscall1(int val, long arg1)
-{
- long result;
- __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
- : "=R00" (result) : "R00" (val), "R01" (arg1));
- return result;
-}
-
-static __inline long _sim_syscall2(int val, long arg1, long arg2)
-{
- long result;
- __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
- : "=R00" (result)
- : "R00" (val), "R01" (arg1), "R02" (arg2));
- return result;
-}
-
-/* Note that _sim_syscall3() and higher are technically at risk of
- receiving an interrupt right before the mtspr bundle, in which case
- the register values for arguments 3 and up may still be in flight
- to the core from a stack frame reload. */
-
-static __inline long _sim_syscall3(int val, long arg1, long arg2, long arg3)
-{
- long result;
- __asm__ __volatile__ ("{ and zero, r3, r3 };"
- "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
- : "=R00" (result)
- : "R00" (val), "R01" (arg1), "R02" (arg2),
- "R03" (arg3));
- return result;
-}
-
-static __inline long _sim_syscall4(int val, long arg1, long arg2, long arg3,
- long arg4)
-{
- long result;
- __asm__ __volatile__ ("{ and zero, r3, r4 };"
- "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
- : "=R00" (result)
- : "R00" (val), "R01" (arg1), "R02" (arg2),
- "R03" (arg3), "R04" (arg4));
- return result;
-}
-
-static __inline long _sim_syscall5(int val, long arg1, long arg2, long arg3,
- long arg4, long arg5)
-{
- long result;
- __asm__ __volatile__ ("{ and zero, r3, r4; and zero, r5, r5 };"
- "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
- : "=R00" (result)
- : "R00" (val), "R01" (arg1), "R02" (arg2),
- "R03" (arg3), "R04" (arg4), "R05" (arg5));
- return result;
-}
-
-/**
- * Make a special syscall to the simulator itself, if running under
- * simulation. This is used as the implementation of other functions
- * and should not be used outside this file.
- *
- * @param syscall_num The simulator syscall number.
- * @param nr The number of additional arguments provided.
- *
- * @return Varies by syscall.
- */
-#define _sim_syscall(syscall_num, nr, args...) \
- _sim_syscall##nr( \
- ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, \
- ##args)
-
-
-/* Values for the "access_mask" parameters below. */
-#define SIM_WATCHPOINT_READ 1
-#define SIM_WATCHPOINT_WRITE 2
-#define SIM_WATCHPOINT_EXECUTE 4
-
-
-static __inline int
-sim_add_watchpoint(unsigned int process_id,
- unsigned long address,
- unsigned long size,
- unsigned int access_mask,
- unsigned long user_data)
-{
- return _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, process_id,
- address, size, access_mask, user_data);
-}
-
-
-static __inline int
-sim_remove_watchpoint(unsigned int process_id,
- unsigned long address,
- unsigned long size,
- unsigned int access_mask,
- unsigned long user_data)
-{
- return _sim_syscall(SIM_SYSCALL_REMOVE_WATCHPOINT, 5, process_id,
- address, size, access_mask, user_data);
-}
-
-
-/**
- * Return value from sim_query_watchpoint.
- */
-struct SimQueryWatchpointStatus
-{
- /**
- * 0 if a watchpoint fired, 1 if no watchpoint fired, or -1 for
- * error (meaning a bad process_id).
- */
- int syscall_status;
-
- /**
- * The address of the watchpoint that fired (this is the address
- * passed to sim_add_watchpoint, not an address within that range
- * that actually triggered the watchpoint).
- */
- unsigned long address;
-
- /** The arbitrary user_data installed by sim_add_watchpoint. */
- unsigned long user_data;
-};
-
-
-static __inline struct SimQueryWatchpointStatus
-sim_query_watchpoint(unsigned int process_id)
-{
- struct SimQueryWatchpointStatus status;
- long val = SIM_CONTROL_SYSCALL |
- (SIM_SYSCALL_QUERY_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS);
- __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
- : "=R00" (status.syscall_status),
- "=R01" (status.address),
- "=R02" (status.user_data)
- : "R00" (val), "R01" (process_id));
- return status;
-}
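/* Editor's note: an illustrative sketch (hypothetical name and values)
 * showing how the watchpoint calls above compose: install a write
 * watchpoint, later poll for a hit, then remove it. */
static __inline void
example_watch_writes(unsigned int process_id, unsigned long address,
                     unsigned long size)
{
  struct SimQueryWatchpointStatus status;

  sim_add_watchpoint(process_id, address, size,
                     SIM_WATCHPOINT_WRITE, 0x1234 /* arbitrary tag */);
  /* ... run the code under suspicion ... */
  status = sim_query_watchpoint(process_id);
  if (status.syscall_status == 0)
    sim_print_string("watchpoint fired");
  sim_remove_watchpoint(process_id, address, size,
                        SIM_WATCHPOINT_WRITE, 0x1234);
}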
-
-
-/* On the simulator, confirm lines have been evicted everywhere. */
-static __inline void
-sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
-{
-#ifdef __LP64__
- _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 2, pa, length);
-#else
- _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 4,
- 0 /* dummy */, (long)(pa), (long)(pa >> 32), length);
-#endif
-}
-
-
-/* Return the current CPU speed in cycles per second. */
-static __inline long
-sim_query_cpu_speed(void)
-{
- return _sim_syscall(SIM_SYSCALL_QUERY_CPU_SPEED, 0);
-}
-
-#endif /* !__DOXYGEN__ */
-
-
-
-
-/**
- * Modify the shaping parameters of a shim.
- *
- * @param shim The shim to modify. One of:
- * SIM_CONTROL_SHAPING_GBE_0
- * SIM_CONTROL_SHAPING_GBE_1
- * SIM_CONTROL_SHAPING_GBE_2
- * SIM_CONTROL_SHAPING_GBE_3
- * SIM_CONTROL_SHAPING_XGBE_0
- * SIM_CONTROL_SHAPING_XGBE_1
- *
- * @param type The type of shaping. This should be the same type of
- * shaping that is already in place on the shim. One of:
- * SIM_CONTROL_SHAPING_MULTIPLIER
- * SIM_CONTROL_SHAPING_PPS
- * SIM_CONTROL_SHAPING_BPS
- *
- * @param units The magnitude of the rate. One of:
- * SIM_CONTROL_SHAPING_UNITS_SINGLE
- * SIM_CONTROL_SHAPING_UNITS_KILO
- * SIM_CONTROL_SHAPING_UNITS_MEGA
- * SIM_CONTROL_SHAPING_UNITS_GIGA
- *
- * @param rate The new shaping rate. This must fit in
- * SIM_CONTROL_SHAPING_RATE_BITS bits or a warning is issued and
- * the shaping is not changed.
- *
- * @return 0 if no problems were detected in the arguments to sim_set_shaping
- * or 1 if problems were detected (for example, rate does not fit in 17 bits).
- */
-static __inline int
-sim_set_shaping(unsigned shim,
- unsigned type,
- unsigned units,
- unsigned rate)
-{
- if ((rate & ~((1 << SIM_CONTROL_SHAPING_RATE_BITS) - 1)) != 0)
- return 1;
-
- __insn_mtspr(SPR_SIM_CONTROL, SIM_SHAPING_SPR_ARG(shim, type, units, rate));
- return 0;
-}
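/* Editor's note: an illustrative call only (not from the deleted file):
 * shape the first GbE shim to 100 * KILO packets per second, assuming
 * PPS shaping is already in effect on that shim. Returns 1 if the rate
 * is too wide for the rate field. */
static __inline int
example_shape_gbe0(void)
{
  return sim_set_shaping(SIM_CONTROL_SHAPING_GBE_0,
                         SIM_CONTROL_SHAPING_PPS,
                         SIM_CONTROL_SHAPING_UNITS_KILO, 100);
}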
-
-#ifdef __tilegx__
-
-/** Enable a set of mPIPE links. Pass a -1 link_mask to enable all links. */
-static __inline void
-sim_enable_mpipe_links(unsigned mpipe, unsigned long link_mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL,
- (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
- (mpipe << 8) | (1 << 16) | ((uint_reg_t)link_mask << 32)));
-}
-
-/** Disable a set of mPIPE links. Pass a -1 link_mask to disable all links. */
-static __inline void
-sim_disable_mpipe_links(unsigned mpipe, unsigned long link_mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL,
- (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
- (mpipe << 8) | (0 << 16) | ((uint_reg_t)link_mask << 32)));
-}
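/* Editor's note: illustrative only (not from the deleted file): a -1
 * link_mask enables every link on the first mPIPE instance, per the
 * comment above. */
static __inline void
example_enable_all_links(void)
{
  sim_enable_mpipe_links(0 /* first mPIPE instance */, -1UL);
}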
-
-#endif /* __tilegx__ */
-
-
-/*
- * An API for changing "functional" mode.
- */
-
-#ifndef __DOXYGEN__
-
-#define sim_enable_functional() \
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_ENABLE_FUNCTIONAL)
-
-#define sim_disable_functional() \
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_DISABLE_FUNCTIONAL)
-
-#endif /* __DOXYGEN__ */
-
-
-/*
- * Profiler support.
- */
-
-/**
- * Turn profiling on for the current task.
- *
- * Note that this has no effect if run in an environment without
- * profiling support (i.e., the simulator must be invoked with the
- * proper profiling flags).
- */
-static __inline void
-sim_profiler_enable(void)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_ENABLE);
-}
-
-
-/** Turn profiling off for the current task. */
-static __inline void
-sim_profiler_disable(void)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_DISABLE);
-}
-
-
-/**
- * Turn profiling on or off for the current task.
- *
- * @param enabled If true, turns on profiling. If false, turns it off.
- *
- * Note that this has no effect if run in an environment without
- * profiling support (i.e., the simulator must be invoked with the
- * proper profiling flags).
- */
-static __inline void
-sim_profiler_set_enabled(int enabled)
-{
- int val =
- enabled ? SIM_CONTROL_PROFILER_ENABLE : SIM_CONTROL_PROFILER_DISABLE;
- __insn_mtspr(SPR_SIM_CONTROL, val);
-}
-
-
-/**
- * Return true if and only if profiling is currently enabled
- * for the current task.
- *
- * This returns false even if sim_profiler_enable() was called
- * if the current execution environment does not support profiling.
- */
-static __inline int
-sim_profiler_is_enabled(void)
-{
- return ((__insn_mfspr(SPR_SIM_CONTROL) & SIM_PROFILER_ENABLED_MASK) != 0);
-}
-
-
-/**
- * Reset profiling counters to zero for the current task.
- *
- * Resetting can be done while profiling is enabled. It does not affect
- * the chip-wide profiling counters.
- */
-static __inline void
-sim_profiler_clear(void)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_CLEAR);
-}
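
Taken together, the per-task helpers above support a simple measure-one-region pattern (a sketch; assumes the simulator was started with profiling enabled, and run_workload() is a placeholder for the code under test):

    sim_profiler_clear();      /* start the region from zero counters */
    sim_profiler_enable();
    run_workload();
    sim_profiler_disable();
    /* The simulator itself reports the counter values; this header
       exposes no in-process read-back beyond sim_profiler_is_enabled(). */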
-
-
-/**
- * Enable specified chip-level profiling counters.
- *
- * Does not affect the per-task profiling counters.
- *
- * @param mask Either this special value:
- *
- * SIM_CHIP_ALL (enables all chip-level components).
- *
- * or the bitwise OR of these values:
- *
- * SIM_CHIP_MEMCTL (enable all memory controllers)
- * SIM_CHIP_XAUI (enable all XAUI controllers)
- * SIM_CHIP_MPIPE (enable all MPIPE controllers)
- */
-static __inline void
-sim_profiler_chip_enable(unsigned int mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask));
-}
-
-
-/**
- * Disable specified chip-level profiling counters.
- *
- * Does not affect the per-task profiling counters.
- *
- * @param mask Either this special value:
- *
- * SIM_CHIP_ALL (disables all chip-level components).
- *
- * or the bitwise OR of these values:
- *
- * SIM_CHIP_MEMCTL (disable all memory controllers)
- * SIM_CHIP_XAUI (disable all XAUI controllers)
- * SIM_CHIP_MPIPE (disable all MPIPE controllers)
- */
-static __inline void
-sim_profiler_chip_disable(unsigned int mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask));
-}
-
-
-/**
- * Reset specified chip-level profiling counters to zero.
- *
- * Does not affect the per-task profiling counters.
- *
- * @param mask Either this special value:
- *
- * SIM_CHIP_ALL (clears all chip-level components).
- *
- * or the bitwise OR of these values:
- *
- * SIM_CHIP_MEMCTL (clear all memory controllers)
- * SIM_CHIP_XAUI (clear all XAUI controllers)
- * SIM_CHIP_MPIPE (clear all MPIPE controllers)
- */
-static __inline void
-sim_profiler_chip_clear(unsigned int mask)
-{
- __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
-}
-
-
-/*
- * Event support.
- */
-
-#ifndef __DOXYGEN__
-
-static __inline void
-sim_event_begin(unsigned int x)
-{
-#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
- __insn_mtspr(SPR_EVENT_BEGIN, x);
-#endif
-}
-
-static __inline void
-sim_event_end(unsigned int x)
-{
-#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
- __insn_mtspr(SPR_EVENT_END, x);
-#endif
-}
-
-#endif /* !__DOXYGEN__ */
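
A sketch of pairing the event markers above around a region of interest (the numeric id is arbitrary and user-chosen; do_interesting_work() is a placeholder):

    sim_event_begin(42);
    do_interesting_work();
    sim_event_end(42);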
-
-#endif /* !__ASSEMBLER__ */
-
-#endif /* !__ARCH_SIM_H__ */
-
-/** @} */
diff --git a/arch/tile/include/uapi/arch/sim_def.h b/arch/tile/include/uapi/arch/sim_def.h
deleted file mode 100644
index f74f9943770d..000000000000
--- a/arch/tile/include/uapi/arch/sim_def.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/**
- * @file
- *
- * Some low-level simulator definitions.
- */
-
-#ifndef __ARCH_SIM_DEF_H__
-#define __ARCH_SIM_DEF_H__
-
-
-/**
- * Internal: the low bits of the SIM_CONTROL_* SPR values specify
- * the operation to perform, and the remaining bits are
- * an operation-specific parameter (often unused).
- */
-#define _SIM_CONTROL_OPERATOR_BITS 8
-
-
-/*
- * Values which can be written to SPR_SIM_CONTROL.
- */
-
-/** If written to SPR_SIM_CONTROL, stops profiling. */
-#define SIM_CONTROL_PROFILER_DISABLE 0
-
-/** If written to SPR_SIM_CONTROL, starts profiling. */
-#define SIM_CONTROL_PROFILER_ENABLE 1
-
-/** If written to SPR_SIM_CONTROL, clears profiling counters. */
-#define SIM_CONTROL_PROFILER_CLEAR 2
-
-/** If written to SPR_SIM_CONTROL, checkpoints the simulator. */
-#define SIM_CONTROL_CHECKPOINT 3
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
- * sets the tracing mask to the given mask. See "sim_set_tracing()".
- */
-#define SIM_CONTROL_SET_TRACING 4
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
- * dumps the requested items of machine state to the log.
- */
-#define SIM_CONTROL_DUMP 5
-
-/** If written to SPR_SIM_CONTROL, clears chip-level profiling counters. */
-#define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
-
-/** If written to SPR_SIM_CONTROL, disables chip-level profiling. */
-#define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
-
-/** If written to SPR_SIM_CONTROL, enables chip-level profiling. */
-#define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
-
-/** If written to SPR_SIM_CONTROL, enables chip-level functional mode. */
-#define SIM_CONTROL_ENABLE_FUNCTIONAL 9
-
-/** If written to SPR_SIM_CONTROL, disables chip-level functional mode. */
-#define SIM_CONTROL_DISABLE_FUNCTIONAL 10
-
-/**
- * If written to SPR_SIM_CONTROL, enables chip-level functional mode.
- * All tiles must perform this write for functional mode to be enabled.
- * Ignored in naked boot mode unless --functional is specified.
- * WARNING: Only the hypervisor startup code should use this!
- */
-#define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
- * writes a string directly to the simulator output. Written to once for
- * each character in the string, plus a final NUL. Instead of NUL,
- * you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
- */
-/* ISSUE: Document the meaning of "newline", and the handling of NUL. */
-#define SIM_CONTROL_PUTC 12
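
A minimal sketch of the character-at-a-time protocol described above (assumes a tile compiler providing __insn_mtspr(), plus the SPR_SIM_CONTROL and SIM_PUTC_FLUSH_STRING definitions found elsewhere in these headers):

    static void sim_puts(const char *s)
    {
            for (; *s != '\0'; s++)
                    __insn_mtspr(SPR_SIM_CONTROL,
                                 SIM_CONTROL_PUTC | ((unsigned)*s << 8));
            /* Flush with coordinate/cycle prefix and newline. */
            __insn_mtspr(SPR_SIM_CONTROL,
                         SIM_CONTROL_PUTC | (SIM_PUTC_FLUSH_STRING << 8));
    }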
-
-/**
- * If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
- * this core. This is intended to be used before a loop that will
- * invalidate the cache by loading new data and evicting all current data.
- * Generally speaking, this API should only be used by system code.
- */
-#define SIM_CONTROL_GRINDER_CLEAR 13
-
-/** If written to SPR_SIM_CONTROL, shuts down the simulator. */
-#define SIM_CONTROL_SHUTDOWN 14
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
- * indicates that a fork syscall just created the given process.
- */
-#define SIM_CONTROL_OS_FORK 15
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
- * indicates that an exit syscall was just executed by the given process.
- */
-#define SIM_CONTROL_OS_EXIT 16
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
- * indicates that the OS just switched to the given process.
- */
-#define SIM_CONTROL_OS_SWITCH 17
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
- * indicates that an exec syscall was just executed. Written to once for
- * each character in the executable name, plus a final NUL.
- */
-#define SIM_CONTROL_OS_EXEC 18
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
- * indicates that an interpreter (PT_INTERP) was loaded. Written to once
- * for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
- * hex load address starting with "0x", and "PATH" is the executable name.
- */
-#define SIM_CONTROL_OS_INTERP 19
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
- * indicates that a dll was loaded. Written to once for each character
- * in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
- * address starting with "0x", and "PATH" is the path to the loaded dll.
- */
-#define SIM_CONTROL_DLOPEN 20
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
- * indicates that a dll was unloaded. Written to once for each character
- * in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
- * address starting with "0x".
- */
-#define SIM_CONTROL_DLCLOSE 21
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
- * indicates whether to allow data reads to remotely-cached
- * dirty cache lines to be cached locally without grinder warnings or
- * assertions (used by Linux kernel fast memcpy).
- */
-#define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
-
-/** If written to SPR_SIM_CONTROL, enables memory tracing. */
-#define SIM_CONTROL_ENABLE_MEM_LOGGING 23
-
-/** If written to SPR_SIM_CONTROL, disables memory tracing. */
-#define SIM_CONTROL_DISABLE_MEM_LOGGING 24
-
-/**
- * If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
- * the gbe or xgbe shims. Must specify the shim id, the type, the units, and
- * the rate, as defined in SIM_SHAPING_SPR_ARG.
- */
-#define SIM_CONTROL_SHAPING 25
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
- * requests that a simulator command be executed. Written to once for each
- * character in the command, plus a final NUL.
- */
-#define SIM_CONTROL_COMMAND 26
-
-/**
- * If written to SPR_SIM_CONTROL, indicates that the simulated system
- * is panicking, to allow debugging via --debug-on-panic.
- */
-#define SIM_CONTROL_PANIC 27
-
-/**
- * If written to SPR_SIM_CONTROL, triggers a simulator syscall.
- * See "sim_syscall()" for more info.
- */
-#define SIM_CONTROL_SYSCALL 32
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
- * provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
- * use, rather than the pid most recently supplied via
- * SIM_CONTROL_OS_SWITCH (the default).
- */
-#define SIM_CONTROL_OS_FORK_PARENT 33
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
- * (shifted by 8), clears the pending magic data section. The cleared
- * pending magic data section and any subsequently appended magic bytes
- * will only take effect when the classifier blast programmer is run.
- */
-#define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
- * (shifted by 8) and a byte of data (shifted by 16), appends that byte
- * to the shim's pending magic data section. The pending magic data
- * section takes effect when the classifier blast programmer is run.
- */
-#define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
-
-/**
- * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
- * (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
- * mask of links (shifted by 32), enable or disable the corresponding
- * mPIPE links.
- */
-#define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
-
-
-/*
- * Syscall numbers for use with "sim_syscall()".
- */
-
-/** Syscall number for sim_add_watchpoint(). */
-#define SIM_SYSCALL_ADD_WATCHPOINT 2
-
-/** Syscall number for sim_remove_watchpoint(). */
-#define SIM_SYSCALL_REMOVE_WATCHPOINT 3
-
-/** Syscall number for sim_query_watchpoint(). */
-#define SIM_SYSCALL_QUERY_WATCHPOINT 4
-
-/**
- * Syscall number that asserts that the cache lines starting at the
- * 64-bit PA passed as the second argument to sim_syscall(), and
- * spanning the byte range passed as the third argument, are no
- * longer in cache.
- * The simulator raises an error if this is not the case.
- */
-#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
-
-/** Syscall number for sim_query_cpu_speed(). */
-#define SIM_SYSCALL_QUERY_CPU_SPEED 6
-
-
-/*
- * Bit masks which can be shifted by 8, combined with
- * SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
- */
-
-/**
- * @addtogroup arch_sim
- * @{
- */
-
-/** Enable --trace-cycle when passed to simulator_set_tracing(). */
-#define SIM_TRACE_CYCLES 0x01
-
-/** Enable --trace-router when passed to simulator_set_tracing(). */
-#define SIM_TRACE_ROUTER 0x02
-
-/** Enable --trace-register-writes when passed to simulator_set_tracing(). */
-#define SIM_TRACE_REGISTER_WRITES 0x04
-
-/** Enable --trace-disasm when passed to simulator_set_tracing(). */
-#define SIM_TRACE_DISASM 0x08
-
-/** Enable --trace-stall-info when passed to simulator_set_tracing(). */
-#define SIM_TRACE_STALL_INFO 0x10
-
-/** Enable --trace-memory-controller when passed to simulator_set_tracing(). */
-#define SIM_TRACE_MEMORY_CONTROLLER 0x20
-
-/** Enable --trace-l2 when passed to simulator_set_tracing(). */
-#define SIM_TRACE_L2_CACHE 0x40
-
-/** Enable --trace-lines when passed to simulator_set_tracing(). */
-#define SIM_TRACE_LINES 0x80
-
-/** Turn off all tracing when passed to simulator_set_tracing(). */
-#define SIM_TRACE_NONE 0
-
-/** Turn on all tracing when passed to simulator_set_tracing(). */
-#define SIM_TRACE_ALL (-1)
-
-/** @} */
-
-/** Computes the value to write to SPR_SIM_CONTROL to set tracing flags. */
-#define SIM_TRACE_SPR_ARG(mask) \
- (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
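
For instance (a sketch), turning on disassembly and stall tracing from the running program:

    __insn_mtspr(SPR_SIM_CONTROL,
                 SIM_TRACE_SPR_ARG(SIM_TRACE_DISASM | SIM_TRACE_STALL_INFO));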
-
-
-/*
- * Bit masks which can be shifted by 8, combined with
- * SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
- */
-
-/**
- * @addtogroup arch_sim
- * @{
- */
-
-/** Dump the general-purpose registers. */
-#define SIM_DUMP_REGS 0x001
-
-/** Dump the SPRs. */
-#define SIM_DUMP_SPRS 0x002
-
-/** Dump the ITLB. */
-#define SIM_DUMP_ITLB 0x004
-
-/** Dump the DTLB. */
-#define SIM_DUMP_DTLB 0x008
-
-/** Dump the L1 I-cache. */
-#define SIM_DUMP_L1I 0x010
-
-/** Dump the L1 D-cache. */
-#define SIM_DUMP_L1D 0x020
-
-/** Dump the L2 cache. */
-#define SIM_DUMP_L2 0x040
-
-/** Dump the switch registers. */
-#define SIM_DUMP_SNREGS 0x080
-
-/** Dump the switch ITLB. */
-#define SIM_DUMP_SNITLB 0x100
-
-/** Dump the switch L1 I-cache. */
-#define SIM_DUMP_SNL1I 0x200
-
-/** Dump the current backtrace. */
-#define SIM_DUMP_BACKTRACE 0x400
-
-/** Only dump valid lines in caches. */
-#define SIM_DUMP_VALID_LINES 0x800
-
-/** Dump everything that is dumpable. */
-#define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
-
-/** @} */
-
-/** Computes the value to write to SPR_SIM_CONTROL to dump machine state. */
-#define SIM_DUMP_SPR_ARG(mask) \
- (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
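
For instance (a sketch), dumping the registers together with a backtrace:

    __insn_mtspr(SPR_SIM_CONTROL,
                 SIM_DUMP_SPR_ARG(SIM_DUMP_REGS | SIM_DUMP_BACKTRACE));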
-
-
-/*
- * Bit masks which can be shifted by 8, combined with
- * SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
- */
-
-/**
- * @addtogroup arch_sim
- * @{
- */
-
-/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
-#define SIM_CHIP_MEMCTL 0x001
-
-/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
-#define SIM_CHIP_XAUI 0x002
-
-/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
-#define SIM_CHIP_PCIE 0x004
-
-/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
-#define SIM_CHIP_MPIPE 0x008
-
-/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
-#define SIM_CHIP_TRIO 0x010
-
-/** Reference all chip devices. */
-#define SIM_CHIP_ALL (-1)
-
-/** @} */
-
-/** Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. */
-#define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
- (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-
-/** Computes the value to write to SPR_SIM_CONTROL to disable chip statistics. */
-#define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
- (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-
-/** Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. */
-#define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
- (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-
-
-
-/* Shim bitrate controls. */
-
-/** The number of bits used to store the shim id. */
-#define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
-
-/**
- * @addtogroup arch_sim
- * @{
- */
-
-/** Change the gbe 0 bitrate. */
-#define SIM_CONTROL_SHAPING_GBE_0 0x0
-
-/** Change the gbe 1 bitrate. */
-#define SIM_CONTROL_SHAPING_GBE_1 0x1
-
-/** Change the gbe 2 bitrate. */
-#define SIM_CONTROL_SHAPING_GBE_2 0x2
-
-/** Change the gbe 3 bitrate. */
-#define SIM_CONTROL_SHAPING_GBE_3 0x3
-
-/** Change the xgbe 0 bitrate. */
-#define SIM_CONTROL_SHAPING_XGBE_0 0x4
-
-/** Change the xgbe 1 bitrate. */
-#define SIM_CONTROL_SHAPING_XGBE_1 0x5
-
-/** The number of bits used to store the type of shaping. */
-#define SIM_CONTROL_SHAPING_TYPE_BITS 2
-
-/** Control the multiplier. */
-#define SIM_CONTROL_SHAPING_MULTIPLIER 0
-
-/** Control the PPS. */
-#define SIM_CONTROL_SHAPING_PPS 1
-
-/** Control the BPS. */
-#define SIM_CONTROL_SHAPING_BPS 2
-
-/** The number of bits for the units for the shaping parameter. */
-#define SIM_CONTROL_SHAPING_UNITS_BITS 2
-
-/** Provide a number in single units. */
-#define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
-
-/** Provide a number in kilo units. */
-#define SIM_CONTROL_SHAPING_UNITS_KILO 1
-
-/** Provide a number in mega units. */
-#define SIM_CONTROL_SHAPING_UNITS_MEGA 2
-
-/** Provide a number in giga units. */
-#define SIM_CONTROL_SHAPING_UNITS_GIGA 3
-
-/** @} */
-
-/** How many bits are available for the rate. */
-#define SIM_CONTROL_SHAPING_RATE_BITS \
- (32 - (_SIM_CONTROL_OPERATOR_BITS + \
- SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
- SIM_CONTROL_SHAPING_TYPE_BITS + \
- SIM_CONTROL_SHAPING_UNITS_BITS))
-
-/** Computes the value to write to SPR_SIM_CONTROL to change a bitrate. */
-#define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
- (SIM_CONTROL_SHAPING | \
- ((shim) | \
- ((type) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS)) | \
- ((units) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
- SIM_CONTROL_SHAPING_TYPE_BITS)) | \
- ((rate) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
- SIM_CONTROL_SHAPING_TYPE_BITS + \
- SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
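
Working the field widths out: with 8 operator bits and 3 + 2 + 2 bits for shim id, type and units, SIM_CONTROL_SHAPING_RATE_BITS is 32 - (8 + 3 + 2 + 2) = 17, which is where the "17 bits" limit quoted in the sim_set_shaping() documentation above comes from. Note that the whole (shim | type | units | rate) group is shifted left by _SIM_CONTROL_OPERATOR_BITS before being OR'd with SIM_CONTROL_SHAPING.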
-
-
-/*
- * Values returned when reading SPR_SIM_CONTROL.
- * ISSUE: These names should share a longer common prefix.
- */
-
-/**
- * When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
- * (SIM_TRACE_xxx values).
- */
-#define SIM_TRACE_FLAG_MASK 0xFFFF
-
-/** When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. */
-#define SIM_PROFILER_ENABLED_MASK 0x10000
-
-
-/*
- * Special arguments for "SIM_CONTROL_PUTC".
- */
-
-/**
- * Flag value for forcing a PUTC string-flush, including
- * coordinate/cycle prefix and newline.
- */
-#define SIM_PUTC_FLUSH_STRING 0x100
-
-/**
- * Flag value for forcing a PUTC binary-data-flush, which skips the
- * prefix and does not append a newline.
- */
-#define SIM_PUTC_FLUSH_BINARY 0x101
-
-
-#endif /* __ARCH_SIM_DEF_H__ */
diff --git a/arch/tile/include/uapi/arch/spr_def.h b/arch/tile/include/uapi/arch/spr_def.h
deleted file mode 100644
index 743428615cda..000000000000
--- a/arch/tile/include/uapi/arch/spr_def.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _UAPI__ARCH_SPR_DEF_H__
-#define _UAPI__ARCH_SPR_DEF_H__
-
-/* Include the proper base SPR definition file. */
-#ifdef __tilegx__
-#include <arch/spr_def_64.h>
-#else
-#include <arch/spr_def_32.h>
-#endif
-
-
-#endif /* _UAPI__ARCH_SPR_DEF_H__ */
diff --git a/arch/tile/include/uapi/arch/spr_def_32.h b/arch/tile/include/uapi/arch/spr_def_32.h
deleted file mode 100644
index 64122d6160e1..000000000000
--- a/arch/tile/include/uapi/arch/spr_def_32.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __DOXYGEN__
-
-#ifndef __ARCH_SPR_DEF_32_H__
-#define __ARCH_SPR_DEF_32_H__
-
-#define SPR_AUX_PERF_COUNT_0 0x6005
-#define SPR_AUX_PERF_COUNT_1 0x6006
-#define SPR_AUX_PERF_COUNT_CTL 0x6007
-#define SPR_AUX_PERF_COUNT_STS 0x6008
-#define SPR_CYCLE_HIGH 0x4e06
-#define SPR_CYCLE_LOW 0x4e07
-#define SPR_DMA_BYTE 0x3900
-#define SPR_DMA_CHUNK_SIZE 0x3901
-#define SPR_DMA_CTR 0x3902
-#define SPR_DMA_CTR__REQUEST_MASK 0x1
-#define SPR_DMA_CTR__SUSPEND_MASK 0x2
-#define SPR_DMA_DST_ADDR 0x3903
-#define SPR_DMA_DST_CHUNK_ADDR 0x3904
-#define SPR_DMA_SRC_ADDR 0x3905
-#define SPR_DMA_SRC_CHUNK_ADDR 0x3906
-#define SPR_DMA_STATUS__DONE_MASK 0x1
-#define SPR_DMA_STATUS__BUSY_MASK 0x2
-#define SPR_DMA_STATUS__RUNNING_MASK 0x10
-#define SPR_DMA_STRIDE 0x3907
-#define SPR_DMA_USER_STATUS 0x3908
-#define SPR_DONE 0x4e08
-#define SPR_EVENT_BEGIN 0x4e0d
-#define SPR_EVENT_END 0x4e0e
-#define SPR_EX_CONTEXT_0_0 0x4a05
-#define SPR_EX_CONTEXT_0_1 0x4a06
-#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
-#define SPR_EX_CONTEXT_1_0 0x4805
-#define SPR_EX_CONTEXT_1_1 0x4806
-#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
-#define SPR_EX_CONTEXT_2_0 0x4605
-#define SPR_EX_CONTEXT_2_1 0x4606
-#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
-#define SPR_FAIL 0x4e09
-#define SPR_IDN_AVAIL_EN 0x3e05
-#define SPR_IDN_CA_DATA 0x0b00
-#define SPR_IDN_DATA_AVAIL 0x0b03
-#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
-#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
-#define SPR_IDN_DEMUX_COUNT_0 0x0a06
-#define SPR_IDN_DEMUX_COUNT_1 0x0a07
-#define SPR_IDN_DEMUX_CTL 0x0a08
-#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
-#define SPR_IDN_DEMUX_STATUS 0x0a0b
-#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
-#define SPR_IDN_DIRECTION_PROTECT 0x2e05
-#define SPR_IDN_PENDING 0x0a0e
-#define SPR_IDN_REFILL_EN 0x0e05
-#define SPR_IDN_SP_FIFO_DATA 0x0a0f
-#define SPR_IDN_SP_FIFO_SEL 0x0a10
-#define SPR_IDN_SP_FREEZE 0x0a11
-#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK 0x1
-#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
-#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
-#define SPR_IDN_SP_STATE 0x0a12
-#define SPR_IDN_TAG_0 0x0a13
-#define SPR_IDN_TAG_1 0x0a14
-#define SPR_IDN_TAG_VALID 0x0a15
-#define SPR_IDN_TILE_COORD 0x0a16
-#define SPR_INTCTRL_0_STATUS 0x4a07
-#define SPR_INTCTRL_1_STATUS 0x4807
-#define SPR_INTCTRL_2_STATUS 0x4607
-#define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
-#define SPR_INTERRUPT_MASK_0_0 0x4a08
-#define SPR_INTERRUPT_MASK_0_1 0x4a09
-#define SPR_INTERRUPT_MASK_1_0 0x4809
-#define SPR_INTERRUPT_MASK_1_1 0x480a
-#define SPR_INTERRUPT_MASK_2_0 0x4608
-#define SPR_INTERRUPT_MASK_2_1 0x4609
-#define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
-#define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
-#define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
-#define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
-#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
-#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
-#define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
-#define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
-#define SPR_INTERRUPT_MASK_SET_1_0 0x480d
-#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
-#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
-#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
-#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
-#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
-#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
-#define SPR_MPL_DMA_CPL_SET_0 0x5800
-#define SPR_MPL_DMA_CPL_SET_1 0x5801
-#define SPR_MPL_DMA_CPL_SET_2 0x5802
-#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
-#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
-#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
-#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
-#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
-#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
-#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
-#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
-#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
-#define SPR_MPL_IDN_CA_SET_0 0x3a00
-#define SPR_MPL_IDN_CA_SET_1 0x3a01
-#define SPR_MPL_IDN_CA_SET_2 0x3a02
-#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
-#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
-#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
-#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
-#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
-#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
-#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
-#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
-#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
-#define SPR_MPL_IDN_TIMER_SET_0 0x3400
-#define SPR_MPL_IDN_TIMER_SET_1 0x3401
-#define SPR_MPL_IDN_TIMER_SET_2 0x3402
-#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
-#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
-#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
-#define SPR_MPL_INTCTRL_1_SET_0 0x4800
-#define SPR_MPL_INTCTRL_1_SET_1 0x4801
-#define SPR_MPL_INTCTRL_1_SET_2 0x4802
-#define SPR_MPL_INTCTRL_2_SET_0 0x4600
-#define SPR_MPL_INTCTRL_2_SET_1 0x4601
-#define SPR_MPL_INTCTRL_2_SET_2 0x4602
-#define SPR_MPL_PERF_COUNT_SET_0 0x4200
-#define SPR_MPL_PERF_COUNT_SET_1 0x4201
-#define SPR_MPL_PERF_COUNT_SET_2 0x4202
-#define SPR_MPL_SN_ACCESS_SET_0 0x0800
-#define SPR_MPL_SN_ACCESS_SET_1 0x0801
-#define SPR_MPL_SN_ACCESS_SET_2 0x0802
-#define SPR_MPL_SN_CPL_SET_0 0x5a00
-#define SPR_MPL_SN_CPL_SET_1 0x5a01
-#define SPR_MPL_SN_CPL_SET_2 0x5a02
-#define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
-#define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
-#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
-#define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
-#define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
-#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
-#define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
-#define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
-#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
-#define SPR_MPL_UDN_AVAIL_SET_0 0x4000
-#define SPR_MPL_UDN_AVAIL_SET_1 0x4001
-#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
-#define SPR_MPL_UDN_CA_SET_0 0x3c00
-#define SPR_MPL_UDN_CA_SET_1 0x3c01
-#define SPR_MPL_UDN_CA_SET_2 0x3c02
-#define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
-#define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
-#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
-#define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
-#define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
-#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
-#define SPR_MPL_UDN_REFILL_SET_0 0x1000
-#define SPR_MPL_UDN_REFILL_SET_1 0x1001
-#define SPR_MPL_UDN_REFILL_SET_2 0x1002
-#define SPR_MPL_UDN_TIMER_SET_0 0x3600
-#define SPR_MPL_UDN_TIMER_SET_1 0x3601
-#define SPR_MPL_UDN_TIMER_SET_2 0x3602
-#define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
-#define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
-#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
-#define SPR_PASS 0x4e0b
-#define SPR_PERF_COUNT_0 0x4205
-#define SPR_PERF_COUNT_1 0x4206
-#define SPR_PERF_COUNT_CTL 0x4207
-#define SPR_PERF_COUNT_DN_CTL 0x4210
-#define SPR_PERF_COUNT_STS 0x4208
-#define SPR_PROC_STATUS 0x4f00
-#define SPR_SIM_CONTROL 0x4e0c
-#define SPR_SNCTL 0x0805
-#define SPR_SNCTL__FRZFABRIC_MASK 0x1
-#define SPR_SNSTATIC 0x080c
-#define SPR_SYSTEM_SAVE_0_0 0x4b00
-#define SPR_SYSTEM_SAVE_0_1 0x4b01
-#define SPR_SYSTEM_SAVE_0_2 0x4b02
-#define SPR_SYSTEM_SAVE_0_3 0x4b03
-#define SPR_SYSTEM_SAVE_1_0 0x4900
-#define SPR_SYSTEM_SAVE_1_1 0x4901
-#define SPR_SYSTEM_SAVE_1_2 0x4902
-#define SPR_SYSTEM_SAVE_1_3 0x4903
-#define SPR_SYSTEM_SAVE_2_0 0x4700
-#define SPR_SYSTEM_SAVE_2_1 0x4701
-#define SPR_SYSTEM_SAVE_2_2 0x4702
-#define SPR_SYSTEM_SAVE_2_3 0x4703
-#define SPR_TILE_COORD 0x4c17
-#define SPR_TILE_RTF_HWM 0x4e10
-#define SPR_TILE_TIMER_CONTROL 0x3205
-#define SPR_TILE_WRITE_PENDING 0x4e0f
-#define SPR_UDN_AVAIL_EN 0x4005
-#define SPR_UDN_CA_DATA 0x0d00
-#define SPR_UDN_DATA_AVAIL 0x0d03
-#define SPR_UDN_DEADLOCK_TIMEOUT 0x3606
-#define SPR_UDN_DEMUX_CA_COUNT 0x0c05
-#define SPR_UDN_DEMUX_COUNT_0 0x0c06
-#define SPR_UDN_DEMUX_COUNT_1 0x0c07
-#define SPR_UDN_DEMUX_COUNT_2 0x0c08
-#define SPR_UDN_DEMUX_COUNT_3 0x0c09
-#define SPR_UDN_DEMUX_CTL 0x0c0a
-#define SPR_UDN_DEMUX_QUEUE_SEL 0x0c0c
-#define SPR_UDN_DEMUX_STATUS 0x0c0d
-#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
-#define SPR_UDN_DIRECTION_PROTECT 0x3005
-#define SPR_UDN_PENDING 0x0c10
-#define SPR_UDN_REFILL_EN 0x1005
-#define SPR_UDN_SP_FIFO_DATA 0x0c11
-#define SPR_UDN_SP_FIFO_SEL 0x0c12
-#define SPR_UDN_SP_FREEZE 0x0c13
-#define SPR_UDN_SP_FREEZE__SP_FRZ_MASK 0x1
-#define SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
-#define SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
-#define SPR_UDN_SP_STATE 0x0c14
-#define SPR_UDN_TAG_0 0x0c15
-#define SPR_UDN_TAG_1 0x0c16
-#define SPR_UDN_TAG_2 0x0c17
-#define SPR_UDN_TAG_3 0x0c18
-#define SPR_UDN_TAG_VALID 0x0c19
-#define SPR_UDN_TILE_COORD 0x0c1a
-#define SPR_WATCH_CTL 0x4209
-#define SPR_WATCH_MASK 0x420a
-#define SPR_WATCH_VAL 0x420b
-
-#endif /* !defined(__ARCH_SPR_DEF_32_H__) */
-
-#endif /* !defined(__DOXYGEN__) */
diff --git a/arch/tile/include/uapi/arch/spr_def_64.h b/arch/tile/include/uapi/arch/spr_def_64.h
deleted file mode 100644
index d183cbb31aa7..000000000000
--- a/arch/tile/include/uapi/arch/spr_def_64.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __DOXYGEN__
-
-#ifndef __ARCH_SPR_DEF_64_H__
-#define __ARCH_SPR_DEF_64_H__
-
-#define SPR_AUX_PERF_COUNT_0 0x2105
-#define SPR_AUX_PERF_COUNT_1 0x2106
-#define SPR_AUX_PERF_COUNT_CTL 0x2107
-#define SPR_AUX_PERF_COUNT_STS 0x2108
-#define SPR_CMPEXCH_VALUE 0x2780
-#define SPR_CYCLE 0x2781
-#define SPR_DONE 0x2705
-#define SPR_DSTREAM_PF 0x2706
-#define SPR_EVENT_BEGIN 0x2782
-#define SPR_EVENT_END 0x2783
-#define SPR_EX_CONTEXT_0_0 0x2580
-#define SPR_EX_CONTEXT_0_1 0x2581
-#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
-#define SPR_EX_CONTEXT_1_0 0x2480
-#define SPR_EX_CONTEXT_1_1 0x2481
-#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
-#define SPR_EX_CONTEXT_2_0 0x2380
-#define SPR_EX_CONTEXT_2_1 0x2381
-#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
-#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
-#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
-#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
-#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
-#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
-#define SPR_FAIL 0x2707
-#define SPR_IDN_AVAIL_EN 0x1a05
-#define SPR_IDN_DATA_AVAIL 0x0a80
-#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
-#define SPR_IDN_DEMUX_COUNT_0 0x0a05
-#define SPR_IDN_DEMUX_COUNT_1 0x0a06
-#define SPR_IDN_DIRECTION_PROTECT 0x1405
-#define SPR_IDN_PENDING 0x0a08
-#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
-#define SPR_INTCTRL_0_STATUS 0x2505
-#define SPR_INTCTRL_1_STATUS 0x2405
-#define SPR_INTCTRL_2_STATUS 0x2305
-#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708
-#define SPR_INTERRUPT_MASK_0 0x2506
-#define SPR_INTERRUPT_MASK_1 0x2406
-#define SPR_INTERRUPT_MASK_2 0x2306
-#define SPR_INTERRUPT_MASK_RESET_0 0x2507
-#define SPR_INTERRUPT_MASK_RESET_1 0x2407
-#define SPR_INTERRUPT_MASK_RESET_2 0x2307
-#define SPR_INTERRUPT_MASK_SET_0 0x2508
-#define SPR_INTERRUPT_MASK_SET_1 0x2408
-#define SPR_INTERRUPT_MASK_SET_2 0x2308
-#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509
-#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409
-#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309
-#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209
-#define SPR_IPI_EVENT_0 0x1f05
-#define SPR_IPI_EVENT_1 0x1e05
-#define SPR_IPI_EVENT_2 0x1d05
-#define SPR_IPI_EVENT_RESET_0 0x1f06
-#define SPR_IPI_EVENT_RESET_1 0x1e06
-#define SPR_IPI_EVENT_RESET_2 0x1d06
-#define SPR_IPI_EVENT_SET_0 0x1f07
-#define SPR_IPI_EVENT_SET_1 0x1e07
-#define SPR_IPI_EVENT_SET_2 0x1d07
-#define SPR_IPI_MASK_0 0x1f08
-#define SPR_IPI_MASK_1 0x1e08
-#define SPR_IPI_MASK_2 0x1d08
-#define SPR_IPI_MASK_RESET_0 0x1f09
-#define SPR_IPI_MASK_RESET_1 0x1e09
-#define SPR_IPI_MASK_RESET_2 0x1d09
-#define SPR_IPI_MASK_SET_0 0x1f0a
-#define SPR_IPI_MASK_SET_1 0x1e0a
-#define SPR_IPI_MASK_SET_2 0x1d0a
-#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
-#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
-#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
-#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
-#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
-#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
-#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
-#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
-#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
-#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
-#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
-#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
-#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
-#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
-#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
-#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
-#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
-#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
-#define SPR_MPL_IDN_TIMER_SET_0 0x1800
-#define SPR_MPL_IDN_TIMER_SET_1 0x1801
-#define SPR_MPL_IDN_TIMER_SET_2 0x1802
-#define SPR_MPL_INTCTRL_0_SET_0 0x2500
-#define SPR_MPL_INTCTRL_0_SET_1 0x2501
-#define SPR_MPL_INTCTRL_0_SET_2 0x2502
-#define SPR_MPL_INTCTRL_1_SET_0 0x2400
-#define SPR_MPL_INTCTRL_1_SET_1 0x2401
-#define SPR_MPL_INTCTRL_1_SET_2 0x2402
-#define SPR_MPL_INTCTRL_2_SET_0 0x2300
-#define SPR_MPL_INTCTRL_2_SET_1 0x2301
-#define SPR_MPL_INTCTRL_2_SET_2 0x2302
-#define SPR_MPL_IPI_0 0x1f04
-#define SPR_MPL_IPI_0_SET_0 0x1f00
-#define SPR_MPL_IPI_0_SET_1 0x1f01
-#define SPR_MPL_IPI_0_SET_2 0x1f02
-#define SPR_MPL_IPI_1 0x1e04
-#define SPR_MPL_IPI_1_SET_0 0x1e00
-#define SPR_MPL_IPI_1_SET_1 0x1e01
-#define SPR_MPL_IPI_1_SET_2 0x1e02
-#define SPR_MPL_IPI_2 0x1d04
-#define SPR_MPL_IPI_2_SET_0 0x1d00
-#define SPR_MPL_IPI_2_SET_1 0x1d01
-#define SPR_MPL_IPI_2_SET_2 0x1d02
-#define SPR_MPL_PERF_COUNT_SET_0 0x2000
-#define SPR_MPL_PERF_COUNT_SET_1 0x2001
-#define SPR_MPL_PERF_COUNT_SET_2 0x2002
-#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
-#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
-#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
-#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00
-#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01
-#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02
-#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600
-#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601
-#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602
-#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500
-#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501
-#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502
-#define SPR_MPL_UDN_TIMER_SET_0 0x1900
-#define SPR_MPL_UDN_TIMER_SET_1 0x1901
-#define SPR_MPL_UDN_TIMER_SET_2 0x1902
-#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
-#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
-#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
-#define SPR_PASS 0x2709
-#define SPR_PERF_COUNT_0 0x2005
-#define SPR_PERF_COUNT_1 0x2006
-#define SPR_PERF_COUNT_CTL 0x2007
-#define SPR_PERF_COUNT_DN_CTL 0x2008
-#define SPR_PERF_COUNT_STS 0x2009
-#define SPR_PROC_STATUS 0x2784
-#define SPR_SIM_CONTROL 0x2785
-#define SPR_SINGLE_STEP_CONTROL_0 0x0405
-#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1
-#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2
-#define SPR_SINGLE_STEP_CONTROL_1 0x0305
-#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1
-#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2
-#define SPR_SINGLE_STEP_CONTROL_2 0x0205
-#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1
-#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2
-#define SPR_SINGLE_STEP_EN_0_0 0x250a
-#define SPR_SINGLE_STEP_EN_0_1 0x240a
-#define SPR_SINGLE_STEP_EN_0_2 0x230a
-#define SPR_SINGLE_STEP_EN_1_0 0x250b
-#define SPR_SINGLE_STEP_EN_1_1 0x240b
-#define SPR_SINGLE_STEP_EN_1_2 0x230b
-#define SPR_SINGLE_STEP_EN_2_0 0x250c
-#define SPR_SINGLE_STEP_EN_2_1 0x240c
-#define SPR_SINGLE_STEP_EN_2_2 0x230c
-#define SPR_SYSTEM_SAVE_0_0 0x2582
-#define SPR_SYSTEM_SAVE_0_1 0x2583
-#define SPR_SYSTEM_SAVE_0_2 0x2584
-#define SPR_SYSTEM_SAVE_0_3 0x2585
-#define SPR_SYSTEM_SAVE_1_0 0x2482
-#define SPR_SYSTEM_SAVE_1_1 0x2483
-#define SPR_SYSTEM_SAVE_1_2 0x2484
-#define SPR_SYSTEM_SAVE_1_3 0x2485
-#define SPR_SYSTEM_SAVE_2_0 0x2382
-#define SPR_SYSTEM_SAVE_2_1 0x2383
-#define SPR_SYSTEM_SAVE_2_2 0x2384
-#define SPR_SYSTEM_SAVE_2_3 0x2385
-#define SPR_TILE_COORD 0x270b
-#define SPR_TILE_RTF_HWM 0x270c
-#define SPR_TILE_TIMER_CONTROL 0x1605
-#define SPR_UDN_AVAIL_EN 0x1b05
-#define SPR_UDN_DATA_AVAIL 0x0b80
-#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
-#define SPR_UDN_DEMUX_COUNT_0 0x0b05
-#define SPR_UDN_DEMUX_COUNT_1 0x0b06
-#define SPR_UDN_DEMUX_COUNT_2 0x0b07
-#define SPR_UDN_DEMUX_COUNT_3 0x0b08
-#define SPR_UDN_DIRECTION_PROTECT 0x1505
-#define SPR_UDN_PENDING 0x0b0a
-#define SPR_WATCH_MASK 0x200a
-#define SPR_WATCH_VAL 0x200b
-
-#endif /* !defined(__ARCH_SPR_DEF_64_H__) */
-
-#endif /* !defined(__DOXYGEN__) */
diff --git a/arch/tile/include/uapi/asm/Kbuild b/arch/tile/include/uapi/asm/Kbuild
deleted file mode 100644
index cc439612bcd5..000000000000
--- a/arch/tile/include/uapi/asm/Kbuild
+++ /dev/null
@@ -1,24 +0,0 @@
-# UAPI Header export list
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += bpf_perf_event.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += shmparam.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += types.h
-generic-y += ucontext.h
diff --git a/arch/tile/include/uapi/asm/auxvec.h b/arch/tile/include/uapi/asm/auxvec.h
deleted file mode 100644
index 922383ce8f4f..000000000000
--- a/arch/tile/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_AUXVEC_H
-#define _ASM_TILE_AUXVEC_H
-
-/* The vDSO location. */
-#define AT_SYSINFO_EHDR 33
-
-#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
-
-#endif /* _ASM_TILE_AUXVEC_H */
diff --git a/arch/tile/include/uapi/asm/bitsperlong.h b/arch/tile/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 57cca78c0fbb..000000000000
--- a/arch/tile/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BITSPERLONG_H
-#define _ASM_TILE_BITSPERLONG_H
-
-#ifdef __LP64__
-# define __BITS_PER_LONG 64
-#else
-# define __BITS_PER_LONG 32
-#endif
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* _ASM_TILE_BITSPERLONG_H */
diff --git a/arch/tile/include/uapi/asm/byteorder.h b/arch/tile/include/uapi/asm/byteorder.h
deleted file mode 100644
index d508e61c1e56..000000000000
--- a/arch/tile/include/uapi/asm/byteorder.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#if defined (__BIG_ENDIAN__)
-#include <linux/byteorder/big_endian.h>
-#else
-#include <linux/byteorder/little_endian.h>
-#endif
diff --git a/arch/tile/include/uapi/asm/cachectl.h b/arch/tile/include/uapi/asm/cachectl.h
deleted file mode 100644
index ed8bac28a1b9..000000000000
--- a/arch/tile/include/uapi/asm/cachectl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_CACHECTL_H
-#define _ASM_TILE_CACHECTL_H
-
-/*
- * Options for cacheflush system call.
- *
- * The ICACHE flush is performed on all cores currently running the
- * current process's address space. The intent is for user
- * applications to be able to modify code, invoke the system call,
- * then allow arbitrary other threads in the same address space to see
- * the newly-modified code. Passing a length of CHIP_L1I_CACHE_SIZE()
- * or more invalidates the entire icache on all cores in the address
- * space. (Note: currently this option invalidates the entire icache
- * regardless of the requested address and length, but we may choose
- * to honor the arguments at some point.)
- *
- * Flush and invalidation of memory can normally be performed with the
- * __insn_flush() and __insn_finv() instructions from userspace.
- * The DCACHE option to the system call allows userspace
- * to flush the entire L1+L2 data cache from the core. In this case,
- * the address and length arguments are not used. The DCACHE flush is
- * restricted to the current core, not all cores in the address space.
- */
-#define ICACHE (1<<0) /* invalidate L1 instruction cache */
-#define DCACHE (1<<1) /* flush and invalidate data cache */
-#define BCACHE (ICACHE|DCACHE) /* flush both caches */
-
-#endif /* _ASM_TILE_CACHECTL_H */
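
A sketch of the self-modifying-code pattern the ICACHE comment describes (the cacheflush syscall number is wired up in uapi/asm/unistd.h further down; raw syscall(2) is used here for illustration, and the address/length/flags argument order is an assumption):

    #include <unistd.h>
    #include <sys/syscall.h>

    /* After patching instructions in [buf, buf + len): */
    syscall(__NR_cacheflush, buf, len, ICACHE);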
diff --git a/arch/tile/include/uapi/asm/hardwall.h b/arch/tile/include/uapi/asm/hardwall.h
deleted file mode 100644
index f02e9132ae71..000000000000
--- a/arch/tile/include/uapi/asm/hardwall.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Provide methods for access control of per-cpu resources like
- * UDN, IDN, or IPI.
- */
-
-#ifndef _UAPI_ASM_TILE_HARDWALL_H
-#define _UAPI_ASM_TILE_HARDWALL_H
-
-#include <arch/chip.h>
-#include <linux/ioctl.h>
-
-#define HARDWALL_IOCTL_BASE 0xa2
-
-/*
- * The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
- * The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a standard kernel bitmask of cpus.
- * For network resources (UDN or IDN) the bitmask must physically
- * represent a rectangular configuration on the chip.
- * The "size" is the number of bytes of cpu mask data.
- */
-#define _HARDWALL_CREATE 1
-#define HARDWALL_CREATE(size) \
- _IOC(_IOC_READ, HARDWALL_IOCTL_BASE, _HARDWALL_CREATE, (size))
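
A sketch of the intended call pattern (fd is assumed to be open on the hardwall device; MASK_WORDS is a hypothetical size constant, since the bitmask layout is caller-defined):

    unsigned long mask[MASK_WORDS];           /* rectangle of cpus */
    ioctl(fd, HARDWALL_CREATE(sizeof(mask)), mask);
    ioctl(fd, HARDWALL_ACTIVATE);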
-
-#define _HARDWALL_ACTIVATE 2
-#define HARDWALL_ACTIVATE \
- _IO(HARDWALL_IOCTL_BASE, _HARDWALL_ACTIVATE)
-
-#define _HARDWALL_DEACTIVATE 3
-#define HARDWALL_DEACTIVATE \
- _IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE)
-
-#define _HARDWALL_GET_ID 4
-#define HARDWALL_GET_ID \
- _IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
-
-
-#endif /* _UAPI_ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/uapi/asm/kvm_para.h b/arch/tile/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/tile/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
diff --git a/arch/tile/include/uapi/asm/mman.h b/arch/tile/include/uapi/asm/mman.h
deleted file mode 100644
index 9b7add95926b..000000000000
--- a/arch/tile/include/uapi/asm/mman.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_MMAN_H
-#define _ASM_TILE_MMAN_H
-
-#include <asm-generic/mman-common.h>
-#include <arch/chip.h>
-
-/* Standard Linux flags */
-
-#define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x0080 /* do not block on IO */
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_STACK MAP_GROWSDOWN /* provide convenience alias */
-#define MAP_LOCKED 0x0200 /* pages are locked */
-#define MAP_NORESERVE 0x0400 /* don't check for reservations */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_HUGETLB 0x4000 /* create a huge page mapping */
-
-
-/*
- * Flags for mlockall
- */
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
-
-
-#endif /* _ASM_TILE_MMAN_H */
diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h
deleted file mode 100644
index 667ed742f4dd..000000000000
--- a/arch/tile/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _UAPI_ASM_TILE_PTRACE_H
-#define _UAPI_ASM_TILE_PTRACE_H
-
-#include <arch/chip.h>
-#include <arch/abi.h>
-
-/* These must match struct pt_regs, below. */
-#if CHIP_WORD_SIZE() == 32
-#define PTREGS_OFFSET_REG(n) ((n)*4)
-#else
-#define PTREGS_OFFSET_REG(n) ((n)*8)
-#endif
-#define PTREGS_OFFSET_BASE 0
-#define PTREGS_OFFSET_TP PTREGS_OFFSET_REG(53)
-#define PTREGS_OFFSET_SP PTREGS_OFFSET_REG(54)
-#define PTREGS_OFFSET_LR PTREGS_OFFSET_REG(55)
-#define PTREGS_NR_GPRS 56
-#define PTREGS_OFFSET_PC PTREGS_OFFSET_REG(56)
-#define PTREGS_OFFSET_EX1 PTREGS_OFFSET_REG(57)
-#define PTREGS_OFFSET_FAULTNUM PTREGS_OFFSET_REG(58)
-#define PTREGS_OFFSET_ORIG_R0 PTREGS_OFFSET_REG(59)
-#define PTREGS_OFFSET_FLAGS PTREGS_OFFSET_REG(60)
-#if CHIP_HAS_CMPEXCH()
-#define PTREGS_OFFSET_CMPEXCH PTREGS_OFFSET_REG(61)
-#endif
-#define PTREGS_SIZE PTREGS_OFFSET_REG(64)
-
-
-#ifndef __ASSEMBLY__
-
-#ifndef __KERNEL__
-/* Provide an appropriately sized type to userspace regardless of -m32/-m64. */
-typedef uint_reg_t pt_reg_t;
-#endif
-
-/*
- * This struct defines the way the registers are stored on the stack during a
- * system call or exception. "struct sigcontext" has the same shape.
- */
-struct pt_regs {
- union {
- /* Saved main processor registers; 56..63 are special. */
- pt_reg_t regs[56];
- struct {
- pt_reg_t __regs[53];
- pt_reg_t tp; /* aliases regs[TREG_TP] */
- pt_reg_t sp; /* aliases regs[TREG_SP] */
- pt_reg_t lr; /* aliases regs[TREG_LR] */
- };
- };
-
- /* Saved special registers. */
- pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */
- pt_reg_t ex1; /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
- pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */
- pt_reg_t orig_r0; /* r0 at syscall entry, else zero */
- pt_reg_t flags; /* flags (see below) */
-#if !CHIP_HAS_CMPEXCH()
- pt_reg_t pad[3];
-#else
- pt_reg_t cmpexch; /* value of CMPEXCH_VALUE SPR at interrupt */
- pt_reg_t pad[2];
-#endif
-};
-
-#endif /* __ASSEMBLY__ */
-
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-
-/* Support TILE-specific ptrace options, with events starting at 16. */
-#define PTRACE_EVENT_MIGRATE 16
-#define PTRACE_O_TRACEMIGRATE (1 << PTRACE_EVENT_MIGRATE)
-
-/*
- * Flag bits in pt_regs.flags that are part of the ptrace API.
- * We start our numbering higher up to avoid confusion with the
- * non-ABI kernel-internal values that use the low 16 bits.
- */
-#define PT_FLAGS_COMPAT 0x10000 /* process is an -m32 compat process */
-
-#endif /* _UAPI_ASM_TILE_PTRACE_H */
diff --git a/arch/tile/include/uapi/asm/setup.h b/arch/tile/include/uapi/asm/setup.h
deleted file mode 100644
index 6d1dfdddad6c..000000000000
--- a/arch/tile/include/uapi/asm/setup.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _UAPI_ASM_TILE_SETUP_H
-#define _UAPI_ASM_TILE_SETUP_H
-
-#define COMMAND_LINE_SIZE 2048
-
-
-#endif /* _UAPI_ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/uapi/asm/sigcontext.h b/arch/tile/include/uapi/asm/sigcontext.h
deleted file mode 100644
index 4003d5cc9202..000000000000
--- a/arch/tile/include/uapi/asm/sigcontext.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SIGCONTEXT_H
-#define _ASM_TILE_SIGCONTEXT_H
-
-/* Don't pollute the namespace since <signal.h> includes this file. */
-#define __need_int_reg_t
-#include <arch/abi.h>
-
-/*
- * struct sigcontext has the same shape as struct pt_regs,
- * but is simplified since we know the fault is from userspace.
- */
-struct sigcontext {
- __extension__ union {
- /* General-purpose registers. */
- __uint_reg_t gregs[56];
- __extension__ struct {
- __uint_reg_t __gregs[53];
- __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
- __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
- __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
- };
- };
- __uint_reg_t pc; /* Program counter. */
- __uint_reg_t ics; /* In Interrupt Critical Section? */
- __uint_reg_t faultnum; /* Fault number. */
- __uint_reg_t pad[5];
-};
-
-#endif /* _ASM_TILE_SIGCONTEXT_H */
diff --git a/arch/tile/include/uapi/asm/siginfo.h b/arch/tile/include/uapi/asm/siginfo.h
deleted file mode 100644
index a812fcbf4267..000000000000
--- a/arch/tile/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SIGINFO_H
-#define _ASM_TILE_SIGINFO_H
-
-#define __ARCH_SI_TRAPNO
-
-#ifdef __LP64__
-# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#endif
-
-#include <asm-generic/siginfo.h>
-
-#endif /* _ASM_TILE_SIGINFO_H */
diff --git a/arch/tile/include/uapi/asm/signal.h b/arch/tile/include/uapi/asm/signal.h
deleted file mode 100644
index 7b3c814e00f0..000000000000
--- a/arch/tile/include/uapi/asm/signal.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _UAPI_ASM_TILE_SIGNAL_H
-#define _UAPI_ASM_TILE_SIGNAL_H
-
-/* Do not notify a ptracer when this signal is handled. */
-#define SA_NOPTRACE 0x02000000u
-
-/* Used in earlier Tilera releases, so keeping for binary compatibility. */
-#define SA_RESTORER 0x04000000u
-
-#include <asm-generic/signal.h>
-
-
-#endif /* _UAPI_ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/uapi/asm/stat.h b/arch/tile/include/uapi/asm/stat.h
deleted file mode 100644
index ea03de7d67aa..000000000000
--- a/arch/tile/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
-#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
-#endif
-#include <asm-generic/stat.h>
diff --git a/arch/tile/include/uapi/asm/swab.h b/arch/tile/include/uapi/asm/swab.h
deleted file mode 100644
index 36952353a31d..000000000000
--- a/arch/tile/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SWAB_H
-#define _ASM_TILE_SWAB_H
-
-/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */
-#define __arch_swab32(x) __builtin_bswap32(x)
-#define __arch_swab64(x) __builtin_bswap64(x)
-#define __arch_swab16(x) (__builtin_bswap32(x) >> 16)
-
-#endif /* _ASM_TILE_SWAB_H */
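
The 16-bit swap above leans on the 32-bit builtin: byte-swapping a zero-extended 16-bit value leaves its two bytes in the top half of the word, so shifting right by 16 recovers the swapped halfword. A small standalone check, not part of the original header:

#include <assert.h>
#include <stdint.h>

static uint16_t swab16_via_bswap32(uint16_t x)
{
	/* 0x1234 -> bswap32(0x00001234) == 0x34120000 -> >> 16 == 0x3412 */
	return (uint16_t)(__builtin_bswap32(x) >> 16);
}

int main(void)
{
	assert(swab16_via_bswap32(0x1234) == 0x3412);
	return 0;
}
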
diff --git a/arch/tile/include/uapi/asm/unistd.h b/arch/tile/include/uapi/asm/unistd.h
deleted file mode 100644
index 1a169ec92ef8..000000000000
--- a/arch/tile/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#define __ARCH_WANT_RENAMEAT
-#if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
-/* Use the flavor of this syscall that matches the 32-bit API better. */
-#define __ARCH_WANT_SYNC_FILE_RANGE2
-#endif
-
-/* Use the standard ABI for syscalls. */
-#include <asm-generic/unistd.h>
-
-#define NR_syscalls __NR_syscalls
-
-/* Additional Tilera-specific syscalls. */
-#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
-
-#ifndef __tilegx__
-/* "Fast" syscalls provide atomic support for 32-bit chips. */
-#define __NR_FAST_cmpxchg -1
-#define __NR_FAST_atomic_update -2
-#define __NR_FAST_cmpxchg64 -3
-#define __NR_cmpxchg_badaddr (__NR_arch_specific_syscall + 0)
-__SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
-#endif
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
deleted file mode 100644
index 3e43d78731a8..000000000000
--- a/arch/tile/kernel/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Linux/TILE kernel.
-#
-
-extra-y := vmlinux.lds head_$(BITS).o
-obj-y := backtrace.o entry.o hvglue.o irq.o messaging.o \
- pci-dma.o proc.o process.o ptrace.o reboot.o \
- setup.o signal.o single_step.o stack.o sys.o \
- sysfs.o time.o traps.o unaligned.o vdso.o \
- intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
-
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_early_printk.o = -pg
-endif
-
-obj-$(CONFIG_HARDWALL) += hardwall.o
-obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
-obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
-obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
-ifdef CONFIG_TILEGX
-obj-$(CONFIG_PCI) += pci_gx.o
-else
-obj-$(CONFIG_PCI) += pci.o
-endif
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
-obj-$(CONFIG_USE_PMC) += pmc.o
-obj-$(CONFIG_TILE_USB) += usb.o
-obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o
-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o
-obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-
-obj-y += vdso/
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c
deleted file mode 100644
index 375e7c321eef..000000000000
--- a/arch/tile/kernel/asm-offsets.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Generates definitions from c-type structures used by assembly sources.
- */
-
-/* Check for compatible compiler early in the build. */
-#ifdef CONFIG_TILEGX
-# ifndef __tilegx__
-# error Can only build TILE-Gx configurations with tilegx compiler
-# endif
-# ifndef __LP64__
-# error Must not specify -m32 when building the TILE-Gx kernel
-# endif
-#else
-# ifdef __tilegx__
-# error Can not build TILEPro configurations with tilegx compiler
-# endif
-#endif
-
-#include <linux/kbuild.h>
-#include <linux/thread_info.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/ptrace.h>
-#include <hv/hypervisor.h>
-
-void foo(void)
-{
- DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET,
- offsetof(struct single_step_state, buffer));
- DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET,
- offsetof(struct single_step_state, flags));
- DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET,
- offsetof(struct single_step_state, orig_pc));
- DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET,
- offsetof(struct single_step_state, next_pc));
- DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET,
- offsetof(struct single_step_state, branch_next_pc));
- DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET,
- offsetof(struct single_step_state, update_value));
-
- DEFINE(THREAD_INFO_TASK_OFFSET,
- offsetof(struct thread_info, task));
- DEFINE(THREAD_INFO_FLAGS_OFFSET,
- offsetof(struct thread_info, flags));
- DEFINE(THREAD_INFO_STATUS_OFFSET,
- offsetof(struct thread_info, status));
- DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
- offsetof(struct thread_info, homecache_cpu));
- DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
- offsetof(struct thread_info, preempt_count));
- DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
- offsetof(struct thread_info, step_state));
-#ifdef __tilegx__
- DEFINE(THREAD_INFO_UNALIGN_JIT_BASE_OFFSET,
- offsetof(struct thread_info, unalign_jit_base));
- DEFINE(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET,
- offsetof(struct thread_info, unalign_jit_tmp));
-#endif
-
- DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
- offsetof(struct task_struct, thread.ksp));
- DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
- offsetof(struct task_struct, thread.pc));
-
- DEFINE(HV_TOPOLOGY_WIDTH_OFFSET,
- offsetof(HV_Topology, width));
- DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET,
- offsetof(HV_Topology, height));
-
- DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET,
- offsetof(irq_cpustat_t, irq_syscall_count));
-}
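
The DEFINE() macro used above comes from <linux/kbuild.h>: it forces each constant into the compiler's assembly output as literal text, and the kbuild scripts scrape the "->NAME value" lines into the generated asm-offsets.h. A minimal sketch of that mechanism (the macro body mirrors include/linux/kbuild.h; the demo struct is hypothetical) — compile with `gcc -S` and look at the .ascii lines; no main() is needed since only the assembly text matters:

#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

struct demo {
	char pad[12];
	long field;		/* lands at offset 16 on LP64 */
};

void foo(void)
{
	/* Emits something like: .ascii "->DEMO_FIELD_OFFSET $16 ..." */
	DEFINE(DEMO_FIELD_OFFSET, offsetof(struct demo, field));
}
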
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
deleted file mode 100644
index f8b74ca83b92..000000000000
--- a/arch/tile/kernel/backtrace.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <asm/byteorder.h>
-#include <asm/backtrace.h>
-#include <asm/tile-desc.h>
-#include <arch/abi.h>
-
-#ifdef __tilegx__
-#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
-#define tile_decoded_instruction tilegx_decoded_instruction
-#define tile_mnemonic tilegx_mnemonic
-#define parse_insn_tile parse_insn_tilegx
-#define TILE_OPC_IRET TILEGX_OPC_IRET
-#define TILE_OPC_ADDI TILEGX_OPC_ADDI
-#define TILE_OPC_ADDLI TILEGX_OPC_ADDLI
-#define TILE_OPC_INFO TILEGX_OPC_INFO
-#define TILE_OPC_INFOL TILEGX_OPC_INFOL
-#define TILE_OPC_JRP TILEGX_OPC_JRP
-#define TILE_OPC_MOVE TILEGX_OPC_MOVE
-#define OPCODE_STORE TILEGX_OPC_ST
-typedef long long bt_int_reg_t;
-#else
-#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE
-#define tile_decoded_instruction tilepro_decoded_instruction
-#define tile_mnemonic tilepro_mnemonic
-#define parse_insn_tile parse_insn_tilepro
-#define TILE_OPC_IRET TILEPRO_OPC_IRET
-#define TILE_OPC_ADDI TILEPRO_OPC_ADDI
-#define TILE_OPC_ADDLI TILEPRO_OPC_ADDLI
-#define TILE_OPC_INFO TILEPRO_OPC_INFO
-#define TILE_OPC_INFOL TILEPRO_OPC_INFOL
-#define TILE_OPC_JRP TILEPRO_OPC_JRP
-#define TILE_OPC_MOVE TILEPRO_OPC_MOVE
-#define OPCODE_STORE TILEPRO_OPC_SW
-typedef int bt_int_reg_t;
-#endif
-
-/* A decoded bundle used for backtracer analysis. */
-struct BacktraceBundle {
- tile_bundle_bits bits;
- int num_insns;
- struct tile_decoded_instruction
- insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
-};
-
-
-/* Locates an instruction inside the given bundle that
- * has the specified mnemonic, and whose first 'num_operands_to_match'
- * operands exactly match those in 'operand_values'.
- */
-static const struct tile_decoded_instruction *find_matching_insn(
- const struct BacktraceBundle *bundle,
- tile_mnemonic mnemonic,
- const int *operand_values,
- int num_operands_to_match)
-{
- int i, j;
- bool match;
-
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->mnemonic != mnemonic)
- continue;
-
- match = true;
- for (j = 0; j < num_operands_to_match; j++) {
- if (operand_values[j] != insn->operand_values[j]) {
- match = false;
- break;
- }
- }
-
- if (match)
- return insn;
- }
-
- return NULL;
-}
-
-/* Does this bundle contain an 'iret' instruction? */
-static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
-{
- return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
-}
-
-/* Does this bundle contain an 'addi sp, sp, OFFSET' or
- * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
- */
-static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
-{
- static const int vals[2] = { TREG_SP, TREG_SP };
-
- const struct tile_decoded_instruction *insn =
- find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
- if (insn == NULL)
- insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
-#ifdef __tilegx__
- if (insn == NULL)
- insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
- if (insn == NULL)
- insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2);
-#endif
- if (insn == NULL)
- return false;
-
- *adjust = insn->operand_values[2];
- return true;
-}
-
-/* Does this bundle contain any 'info OP' or 'infol OP'
- * instructions, and if so, what are their OP values? Note that OP is
- * interpreted as an unsigned value by this code since that's what the
- * caller wants. Returns the number of info ops found.
- */
-static int bt_get_info_ops(const struct BacktraceBundle *bundle,
- int operands[MAX_INFO_OPS_PER_BUNDLE])
-{
- int num_ops = 0;
- int i;
-
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->mnemonic == TILE_OPC_INFO ||
- insn->opcode->mnemonic == TILE_OPC_INFOL) {
- operands[num_ops++] = insn->operand_values[0];
- }
- }
-
- return num_ops;
-}
-
-/* Does this bundle contain a jrp instruction, and if so, to which
- * register is it jumping?
- */
-static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
-{
- const struct tile_decoded_instruction *insn =
- find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
- if (insn == NULL)
- return false;
-
- *target_reg = insn->operand_values[0];
- return true;
-}
-
-/* Does this bundle modify the specified register in any way? */
-static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
-{
- int i, j;
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->implicitly_written_register == reg)
- return true;
-
- for (j = 0; j < insn->opcode->num_operands; j++)
- if (insn->operands[j]->is_dest_reg &&
- insn->operand_values[j] == reg)
- return true;
- }
-
- return false;
-}
-
-/* Does this bundle modify sp? */
-static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
-{
- return bt_modifies_reg(bundle, TREG_SP);
-}
-
-/* Does this bundle modify lr? */
-static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
-{
- return bt_modifies_reg(bundle, TREG_LR);
-}
-
-/* Does this bundle contain the instruction 'move fp, sp'? */
-static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
-{
- static const int vals[2] = { 52, TREG_SP };
- return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
-}
-
-/* Does this bundle contain a store of lr to sp? */
-static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
-{
- static const int vals[2] = { TREG_SP, TREG_LR };
- return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
-}
-
-#ifdef __tilegx__
-/* Track moveli values placed into registers. */
-static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
- int moveli_args[])
-{
- int i;
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) {
- int reg = insn->operand_values[0];
- moveli_args[reg] = insn->operand_values[1];
- }
- }
-}
-
-/* Does this bundle contain an 'add sp, sp, reg' instruction
- * using a register that we saw a moveli into, and if so, what
- * is the value in that register?
- */
-static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
- int moveli_args[])
-{
- static const int vals[2] = { TREG_SP, TREG_SP };
-
- const struct tile_decoded_instruction *insn =
- find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2);
- if (insn) {
- int reg = insn->operand_values[2];
- if (moveli_args[reg]) {
- *adjust = moveli_args[reg];
- return true;
- }
- }
- return false;
-}
-#endif
-
-/* Locates the caller's PC and SP for a function starting at the
- * given address.
- */
-static void find_caller_pc_and_caller_sp(CallerLocation *location,
- const unsigned long start_pc,
- BacktraceMemoryReader read_memory_func,
- void *read_memory_func_extra)
-{
- /* Have we explicitly decided what the sp is,
- * rather than just the default?
- */
- bool sp_determined = false;
-
- /* Has any bundle seen so far modified lr? */
- bool lr_modified = false;
-
- /* Have we seen a move from sp to fp? */
- bool sp_moved_to_r52 = false;
-
- /* Have we seen a terminating bundle? */
- bool seen_terminating_bundle = false;
-
- /* Cut down on round-trip reading overhead by reading several
- * bundles at a time.
- */
- tile_bundle_bits prefetched_bundles[32];
- int num_bundles_prefetched = 0;
- int next_bundle = 0;
- unsigned long pc;
-
-#ifdef __tilegx__
- /* Naively try to track moveli values to support addx for -m32. */
- int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
-#endif
-
- /* Default to assuming that the caller's sp is the current sp.
- * This is necessary to handle the case where we start backtracing
- * right at the end of the epilog.
- */
- location->sp_location = SP_LOC_OFFSET;
- location->sp_offset = 0;
-
- /* Default to having no idea where the caller PC is. */
- location->pc_location = PC_LOC_UNKNOWN;
-
- /* Don't even try if the PC is not aligned. */
- if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
- return;
-
- for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {
-
- struct BacktraceBundle bundle;
- int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
- int one_ago, jrp_reg;
- bool has_jrp;
-
- if (next_bundle >= num_bundles_prefetched) {
- /* Prefetch some bytes, but don't cross a page
- * boundary since that might cause a read failure we
- * don't care about if we only need the first few
- * bytes. Note: we don't care what the actual page
- * size is; using the minimum possible page size will
- * prevent any problems.
- */
- unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
- if (bytes_to_prefetch > sizeof prefetched_bundles)
- bytes_to_prefetch = sizeof prefetched_bundles;
-
- if (!read_memory_func(prefetched_bundles, pc,
- bytes_to_prefetch,
- read_memory_func_extra)) {
- if (pc == start_pc) {
- /* The program probably called a bad
- * address, such as a NULL pointer.
- * So treat this as if we are at the
- * start of the function prolog so the
- * backtrace will show how we got here.
- */
- location->pc_location = PC_LOC_IN_LR;
- return;
- }
-
- /* Unreadable address. Give up. */
- break;
- }
-
- next_bundle = 0;
- num_bundles_prefetched =
- bytes_to_prefetch / sizeof(tile_bundle_bits);
- }
-
- /*
- * Decode the next bundle.
- * TILE always stores instruction bundles in little-endian
- * mode, even when the chip is running in big-endian mode.
- */
- bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
- bundle.num_insns =
- parse_insn_tile(bundle.bits, pc, bundle.insns);
- num_info_ops = bt_get_info_ops(&bundle, info_operands);
-
- /* First look at any one_ago info ops if they are interesting,
- * since they should shadow any non-one-ago info ops.
- */
- for (one_ago = (pc != start_pc) ? 1 : 0;
- one_ago >= 0; one_ago--) {
- int i;
- for (i = 0; i < num_info_ops; i++) {
- int info_operand = info_operands[i];
- if (info_operand < CALLER_UNKNOWN_BASE) {
- /* Weird; reserved value, ignore it. */
- continue;
- }
-
- /* Skip info ops which are not in the
- * "one_ago" mode we want right now.
- */
- if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
- != (one_ago != 0))
- continue;
-
- /* Clear the flag to make later checking
- * easier. */
- info_operand &= ~ONE_BUNDLE_AGO_FLAG;
-
- /* Default to looking at PC_IN_LR_FLAG. */
- if (info_operand & PC_IN_LR_FLAG)
- location->pc_location =
- PC_LOC_IN_LR;
- else
- location->pc_location =
- PC_LOC_ON_STACK;
-
- switch (info_operand) {
- case CALLER_UNKNOWN_BASE:
- location->pc_location = PC_LOC_UNKNOWN;
- location->sp_location = SP_LOC_UNKNOWN;
- return;
-
- case CALLER_SP_IN_R52_BASE:
- case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
- location->sp_location = SP_LOC_IN_R52;
- return;
-
- default:
- {
- const unsigned int val = info_operand
- - CALLER_SP_OFFSET_BASE;
- const unsigned int sp_offset =
- (val >> NUM_INFO_OP_FLAGS) * 8;
- if (sp_offset < 32768) {
- /* This is a properly encoded
- * SP offset. */
- location->sp_location =
- SP_LOC_OFFSET;
- location->sp_offset =
- sp_offset;
- return;
- } else {
- /* This looked like an SP
- * offset, but it's outside
- * the legal range, so this
- * must be an unrecognized
- * info operand. Ignore it.
- */
- }
- }
- break;
- }
- }
- }
-
- if (seen_terminating_bundle) {
- /* We saw a terminating bundle during the previous
- * iteration, so we were only looking for an info op.
- */
- break;
- }
-
- if (bundle.bits == 0) {
- /* Wacky terminating bundle. Stop looping, and hope
- * we've already seen enough to find the caller.
- */
- break;
- }
-
- /*
- * Try to determine caller's SP.
- */
-
- if (!sp_determined) {
- int adjust;
- if (bt_has_addi_sp(&bundle, &adjust)
-#ifdef __tilegx__
- || bt_has_add_sp(&bundle, &adjust, moveli_args)
-#endif
- ) {
- location->sp_location = SP_LOC_OFFSET;
-
- if (adjust <= 0) {
- /* We are in prolog about to adjust
- * SP. */
- location->sp_offset = 0;
- } else {
- /* We are in epilog restoring SP. */
- location->sp_offset = adjust;
- }
-
- sp_determined = true;
- } else {
- if (bt_has_move_r52_sp(&bundle)) {
- /* Maybe in prolog, creating an
- * alloca-style frame. But maybe in
- * the middle of a fixed-size frame
- * clobbering r52 with SP.
- */
- sp_moved_to_r52 = true;
- }
-
- if (bt_modifies_sp(&bundle)) {
- if (sp_moved_to_r52) {
- /* We saw SP get saved into
- * r52 earlier (or now), which
- * must have been in the
- * prolog, so we now know that
- * SP is still holding the
- * caller's sp value.
- */
- location->sp_location =
- SP_LOC_OFFSET;
- location->sp_offset = 0;
- } else {
- /* Someone must have saved
- * aside the caller's SP value
- * into r52, so r52 holds the
- * current value.
- */
- location->sp_location =
- SP_LOC_IN_R52;
- }
- sp_determined = true;
- }
- }
-
-#ifdef __tilegx__
- /* Track moveli arguments for -m32 mode. */
- bt_update_moveli(&bundle, moveli_args);
-#endif
- }
-
- if (bt_has_iret(&bundle)) {
- /* This is a terminating bundle. */
- seen_terminating_bundle = true;
- continue;
- }
-
- /*
- * Try to determine caller's PC.
- */
-
- jrp_reg = -1;
- has_jrp = bt_has_jrp(&bundle, &jrp_reg);
- if (has_jrp)
- seen_terminating_bundle = true;
-
- if (location->pc_location == PC_LOC_UNKNOWN) {
- if (has_jrp) {
- if (jrp_reg == TREG_LR && !lr_modified) {
- /* Looks like a leaf function, or else
- * lr is already restored. */
- location->pc_location =
- PC_LOC_IN_LR;
- } else {
- location->pc_location =
- PC_LOC_ON_STACK;
- }
- } else if (bt_has_sw_sp_lr(&bundle)) {
- /* In prolog, spilling initial lr to stack. */
- location->pc_location = PC_LOC_IN_LR;
- } else if (bt_modifies_lr(&bundle)) {
- lr_modified = true;
- }
- }
- }
-}
-
-/* Initializes a backtracer to start from the given location.
- *
- * If the frame pointer cannot be determined it is set to -1.
- *
- * state: The state to be filled in.
- * read_memory_func: A callback that reads memory.
- * read_memory_func_extra: An arbitrary argument to read_memory_func.
- * pc: The current PC.
- * lr: The current value of the 'lr' register.
- * sp: The current value of the 'sp' register.
- * r52: The current value of the 'r52' register.
- */
-void backtrace_init(BacktraceIterator *state,
- BacktraceMemoryReader read_memory_func,
- void *read_memory_func_extra,
- unsigned long pc, unsigned long lr,
- unsigned long sp, unsigned long r52)
-{
- CallerLocation location;
- unsigned long fp, initial_frame_caller_pc;
-
- /* Find out where we are in the initial frame. */
- find_caller_pc_and_caller_sp(&location, pc,
- read_memory_func, read_memory_func_extra);
-
- switch (location.sp_location) {
- case SP_LOC_UNKNOWN:
- /* Give up. */
- fp = -1;
- break;
-
- case SP_LOC_IN_R52:
- fp = r52;
- break;
-
- case SP_LOC_OFFSET:
- fp = sp + location.sp_offset;
- break;
-
- default:
- /* Give up. */
- fp = -1;
- break;
- }
-
- /* If the frame pointer is not aligned to the basic word size
- * something terrible happened and we should mark it as invalid.
- */
- if (fp % sizeof(bt_int_reg_t) != 0)
- fp = -1;
-
- /* -1 means "don't know initial_frame_caller_pc". */
- initial_frame_caller_pc = -1;
-
- switch (location.pc_location) {
- case PC_LOC_UNKNOWN:
- /* Give up. */
- fp = -1;
- break;
-
- case PC_LOC_IN_LR:
- if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
- /* Give up. */
- fp = -1;
- } else {
- initial_frame_caller_pc = lr;
- }
- break;
-
- case PC_LOC_ON_STACK:
- /* Leave initial_frame_caller_pc as -1,
- * meaning check the stack.
- */
- break;
-
- default:
- /* Give up. */
- fp = -1;
- break;
- }
-
- state->pc = pc;
- state->sp = sp;
- state->fp = fp;
- state->initial_frame_caller_pc = initial_frame_caller_pc;
- state->read_memory_func = read_memory_func;
- state->read_memory_func_extra = read_memory_func_extra;
-}
-
-/* Handle the case where the register holds more bits than the VA. */
-static bool valid_addr_reg(bt_int_reg_t reg)
-{
- return ((unsigned long)reg == reg);
-}
-
-/* Advances the backtracing state to the calling frame, returning
- * true iff successful.
- */
-bool backtrace_next(BacktraceIterator *state)
-{
- unsigned long next_fp, next_pc;
- bt_int_reg_t next_frame[2];
-
- if (state->fp == -1) {
- /* No parent frame. */
- return false;
- }
-
- /* Try to read the frame linkage data chaining to the next function. */
- if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
- state->read_memory_func_extra)) {
- return false;
- }
-
- next_fp = next_frame[1];
- if (!valid_addr_reg(next_frame[1]) ||
- next_fp % sizeof(bt_int_reg_t) != 0) {
- /* Caller's frame pointer is suspect, so give up. */
- return false;
- }
-
- if (state->initial_frame_caller_pc != -1) {
- /* We must be in the initial stack frame and already know the
- * caller PC.
- */
- next_pc = state->initial_frame_caller_pc;
-
-		/* Force reading the stack next time, in case we were in the
-		 * initial frame.  We defer this until here purely to avoid
-		 * modifying the struct at all on the paths that return false.
-		 */
- state->initial_frame_caller_pc = -1;
- } else {
- /* Get the caller PC from the frame linkage area. */
- next_pc = next_frame[0];
- if (!valid_addr_reg(next_frame[0]) || next_pc == 0 ||
- next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
- /* The PC is suspect, so give up. */
- return false;
- }
- }
-
- /* Update state to become the caller's stack frame. */
- state->pc = next_pc;
- state->sp = state->fp;
- state->fp = next_fp;
-
- return true;
-}
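
Taken together, backtrace_init() and backtrace_next() form a pull-style iterator over stack frames. A hedged sketch of a kernel-side consumer, assuming <asm/backtrace.h> for the types; the reader callback signature is inferred from the calls above, and the naive memcpy (no range validation or fault handling) is purely illustrative:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/backtrace.h>

/* Hypothetical reader: copy straight from the current address space.
 * A real caller would validate the range and handle faults. */
static bool my_read_memory(void *dst, unsigned long addr,
			   unsigned int len, void *extra)
{
	memcpy(dst, (const void *)addr, len);
	return true;
}

static void dump_frames(unsigned long pc, unsigned long lr,
			unsigned long sp, unsigned long r52)
{
	BacktraceIterator it;

	backtrace_init(&it, my_read_memory, NULL, pc, lr, sp, r52);
	do
		pr_info("  pc %#lx sp %#lx fp %#lx\n", it.pc, it.sp, it.fp);
	while (backtrace_next(&it));
}
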
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
deleted file mode 100644
index bdaf71d31a4a..000000000000
--- a/arch/tile/kernel/compat.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Adjust unistd.h to provide 32-bit numbers and functions. */
-#define __SYSCALL_COMPAT
-
-#include <linux/compat.h>
-#include <linux/syscalls.h>
-#include <linux/kdev_t.h>
-#include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/uaccess.h>
-#include <linux/signal.h>
-#include <asm/syscalls.h>
-#include <asm/byteorder.h>
-
-/*
- * Syscalls that take 64-bit numbers traditionally take them in 32-bit
- * "high" and "low" value parts on 32-bit architectures.
- * In principle, one could imagine passing some register arguments as
- * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
- * adopt the usual convention.
- */
-
-#ifdef __BIG_ENDIAN
-#define SYSCALL_PAIR(name) u32, name ## _hi, u32, name ## _lo
-#else
-#define SYSCALL_PAIR(name) u32, name ## _lo, u32, name ## _hi
-#endif
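
Concretely, the pair convention means a 64-bit argument travels as two 32-bit registers and is reassembled as ((loff_t)hi << 32) | lo, exactly as in the wrappers below. A tiny standalone round-trip check, not from the original file:

#include <assert.h>

int main(void)
{
	long long length = 0x123456789abcLL;
	unsigned int lo = (unsigned int)length;		/* 0x56789abc */
	unsigned int hi = (unsigned int)(length >> 32);	/* 0x00001234 */

	assert((((long long)hi << 32) | lo) == length);
	return 0;
}
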
-
-COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
- SYSCALL_PAIR(length))
-{
- return sys_truncate(filename, ((loff_t)length_hi << 32) | length_lo);
-}
-
-COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
- SYSCALL_PAIR(length))
-{
- return sys_ftruncate(fd, ((loff_t)length_hi << 32) | length_lo);
-}
-
-COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
- size_t, count, u32, dummy, SYSCALL_PAIR(offset))
-{
- return sys_pread64(fd, ubuf, count,
- ((loff_t)offset_hi << 32) | offset_lo);
-}
-
-COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
- size_t, count, u32, dummy, SYSCALL_PAIR(offset))
-{
- return sys_pwrite64(fd, ubuf, count,
- ((loff_t)offset_hi << 32) | offset_lo);
-}
-
-COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
- SYSCALL_PAIR(offset), SYSCALL_PAIR(nbytes))
-{
- return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
- ((loff_t)nbytes_hi << 32) | nbytes_lo,
- flags);
-}
-
-COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
- SYSCALL_PAIR(offset), SYSCALL_PAIR(len))
-{
- return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
- ((loff_t)len_hi << 32) | len_lo);
-}
-
-/*
- * Avoid bug in generic sys_llseek() that specifies offset_high and
- * offset_low as "unsigned long", thus making it possible to pass
- * a sign-extended high 32 bits in offset_low.
- * Note that we do not use SYSCALL_PAIR here since glibc passes the
- * high and low parts explicitly in that order.
- */
-COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
- unsigned int, offset_low, loff_t __user *, result,
- unsigned int, origin)
-{
- return sys_llseek(fd, offset_high, offset_low, result, origin);
-}
-
-/* Provide the compat syscall number to call mapping. */
-#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
-
-/* See comments in sys.c */
-#define compat_sys_fadvise64_64 sys32_fadvise64_64
-#define compat_sys_readahead sys32_readahead
-#define sys_llseek compat_sys_llseek
-
-/* Call the assembly trampolines where necessary. */
-#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
-#define sys_clone _sys_clone
-
-/*
- * Note that we can't include <linux/unistd.h> here since the header
- * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
- */
-void *compat_sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
-};
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
deleted file mode 100644
index a703bd0e0488..000000000000
--- a/arch/tile/kernel/compat_signal.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <linux/compat.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/sigframe.h>
-#include <asm/syscalls.h>
-#include <asm/vdso.h>
-#include <arch/interrupts.h>
-
-struct compat_ucontext {
- compat_ulong_t uc_flags;
- compat_uptr_t uc_link;
- struct compat_sigaltstack uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-struct compat_rt_sigframe {
- unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
- struct compat_siginfo info;
- struct compat_ucontext uc;
-};
-
-/* The assembly shim for this function arranges to ignore the return value. */
-long compat_sys_rt_sigreturn(void)
-{
- struct pt_regs *regs = current_pt_regs();
- struct compat_rt_sigframe __user *frame =
- (struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
- sigset_t set;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
- goto badframe;
-
- if (compat_restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return 0;
-
-badframe:
- signal_fault("bad sigreturn frame", regs, frame, 0);
- return 0;
-}
-
-/*
- * Determine which stack to use.
- */
-static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
- struct pt_regs *regs,
- size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = (unsigned long)compat_ptr(regs->sp);
-
- /*
- * If we are on the alternate signal stack and would overflow
- * it, don't. Return an always-bogus address instead so we
- * will die with SIGSEGV.
- */
- if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
- return (void __user __force *)-1UL;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(sp) == 0)
- sp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- sp -= frame_size;
- /*
- * Align the stack pointer according to the TILE ABI,
- * i.e. so that on function entry (sp & 15) == 0.
- */
- sp &= -16UL;
- return (void __user *) sp;
-}
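
The closing `sp &= -16UL` works because -16UL is all-ones except the low four bits, so the AND rounds the pointer down to the 16-byte boundary the comment describes. A quick standalone check with an arbitrary value:

#include <assert.h>

int main(void)
{
	unsigned long sp = 0x1234566fUL;	/* misaligned */

	sp &= -16UL;				/* same as sp & ~0xfUL */
	assert(sp == 0x12345660UL && sp % 16 == 0);
	return 0;
}
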
-
-int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- unsigned long restorer;
- struct compat_rt_sigframe __user *frame;
- int err = 0, sig = ksig->sig;
-
- frame = compat_get_sigframe(&ksig->ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto err;
-
- /* Always write at least the signal number for the stack backtracer. */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- /* At sigreturn time, restore the callee-save registers too. */
- err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
- regs->flags |= PT_FLAGS_RESTORE_REGS;
- } else {
- err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
- }
-
- /* Create the ucontext. */
- err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto err;
-
- restorer = VDSO_SYM(&__vdso_rt_sigreturn);
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- restorer = ptr_to_compat_reg(ksig->ka.sa.sa_restorer);
-
- /*
- * Set up registers for signal handler.
- * Registers that we don't modify keep the value they had from
- * user-space at the time we took the signal.
- * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
- * since some things rely on this (e.g. glibc's debug/segfault.c).
- */
- regs->pc = ptr_to_compat_reg(ksig->ka.sa.sa_handler);
- regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
- regs->sp = ptr_to_compat_reg(frame);
- regs->lr = restorer;
- regs->regs[0] = (unsigned long) sig;
- regs->regs[1] = ptr_to_compat_reg(&frame->info);
- regs->regs[2] = ptr_to_compat_reg(&frame->uc);
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- return 0;
-
-err:
- trace_unhandled_signal("bad sigreturn frame", regs,
- (unsigned long)frame, SIGSEGV);
- return -EFAULT;
-}
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
deleted file mode 100644
index aefb2c086726..000000000000
--- a/arch/tile/kernel/early_printk.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/irqflags.h>
-#include <linux/printk.h>
-#include <asm/setup.h>
-#include <hv/hypervisor.h>
-
-static void early_hv_write(struct console *con, const char *s, unsigned n)
-{
- tile_console_write(s, n);
-
- /*
- * Convert NL to NLCR (close enough to CRNL) during early boot.
- * We assume newlines are at the ends of strings, which turns out
- * to be good enough for early boot console output.
- */
- if (n && s[n-1] == '\n')
- tile_console_write("\r", 1);
-}
-
-static struct console early_hv_console = {
- .name = "earlyhv",
- .write = early_hv_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1,
-};
-
-void early_panic(const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- arch_local_irq_disable_all();
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- early_printk("Kernel panic - not syncing: %pV", &vaf);
-
- va_end(args);
-
- dump_stack();
- hv_halt();
-}
-
-static int __init setup_early_printk(char *str)
-{
- if (early_console)
- return 1;
-
- early_console = &early_hv_console;
- register_console(early_console);
-
- return 0;
-}
-
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
deleted file mode 100644
index 101de132e363..000000000000
--- a/arch/tile/kernel/entry.S
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/unistd.h>
-#include <asm/irqflags.h>
-#include <asm/processor.h>
-#include <arch/abi.h>
-#include <arch/spr_def.h>
-
-#ifdef __tilegx__
-#define bnzt bnezt
-#endif
-
-STD_ENTRY(current_text_addr)
- { move r0, lr; jrp lr }
- STD_ENDPROC(current_text_addr)
-
-STD_ENTRY(KBacktraceIterator_init_current)
- { move r2, lr; lnk r1 }
- { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
- { move r3, sp; j _KBacktraceIterator_init_current }
- jrp lr /* keep backtracer happy */
- STD_ENDPROC(KBacktraceIterator_init_current)
-
-/* Loop forever on a nap during SMP boot. */
-STD_ENTRY(smp_nap)
- nap
- nop /* avoid provoking the icache prefetch with a jump */
- j smp_nap /* we are not architecturally guaranteed not to exit nap */
- jrp lr /* clue in the backtracer */
- STD_ENDPROC(smp_nap)
-
-/*
- * Enable interrupts racelessly and then nap until interrupted.
- * Architecturally, we are guaranteed that enabling interrupts via
- * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
- * This function's _cpu_idle_nap address is special; see intvec.S.
- * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
- * as a result return to the function that called _cpu_idle().
- */
-STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
- movei r1, 1
- IRQ_ENABLE_LOAD(r2, r3)
- mtspr INTERRUPT_CRITICAL_SECTION, r1
- IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
- mtspr INTERRUPT_CRITICAL_SECTION, zero
- .global _cpu_idle_nap
-_cpu_idle_nap:
- nap
- nop /* avoid provoking the icache prefetch with a jump */
- jrp lr
- STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
deleted file mode 100644
index b827a418b155..000000000000
--- a/arch/tile/kernel/ftrace.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx specific ftrace support
- */
-
-#include <linux/ftrace.h>
-#include <linux/uaccess.h>
-
-#include <asm/cacheflush.h>
-#include <asm/ftrace.h>
-#include <asm/sections.h>
-#include <asm/insn.h>
-
-#include <arch/opcode.h>
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-static int machine_stopped __read_mostly;
-
-int ftrace_arch_code_modify_prepare(void)
-{
- machine_stopped = 1;
- return 0;
-}
-
-int ftrace_arch_code_modify_post_process(void)
-{
- flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
- machine_stopped = 0;
- return 0;
-}
-
-/*
- * Put { move r10, lr; jal ftrace_caller } in a bundle; this lets the dynamic
- * tracer add just one cycle of overhead to every kernel function when disabled.
- */
-static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
- bool link)
-{
- tilegx_bundle_bits opcode_x0, opcode_x1;
- long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
-
- if (link) {
- /* opcode: jal addr */
- opcode_x1 =
- create_Opcode_X1(JUMP_OPCODE_X1) |
- create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
- create_JumpOff_X1(pcrel_by_instr);
- } else {
- /* opcode: j addr */
- opcode_x1 =
- create_Opcode_X1(JUMP_OPCODE_X1) |
- create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
- create_JumpOff_X1(pcrel_by_instr);
- }
-
-	/*
-	 * Also put { move r10, lr; jal ftrace_stub } in a bundle, which
-	 * is used to replace the instruction at address ftrace_call.
-	 */
- if (addr == FTRACE_ADDR || addr == (unsigned long)ftrace_stub) {
- /* opcode: or r10, lr, zero */
- opcode_x0 =
- create_Dest_X0(10) |
- create_SrcA_X0(TREG_LR) |
- create_SrcB_X0(TREG_ZERO) |
- create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0);
- } else {
- /* opcode: fnop */
- opcode_x0 =
- create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
- create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0);
- }
-
- return opcode_x1 | opcode_x0;
-}
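
The shift by TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES above encodes the branch target in bundles rather than bytes; TILE-Gx bundles are 8 bytes, so the log2 is 3. A standalone sanity check of that offset math, with the constant inlined rather than taken from <arch/opcode.h>:

#include <assert.h>

int main(void)
{
	unsigned long pc = 0x1000, addr = 0x1040;
	long pcrel_by_instr = (long)(addr - pc) >> 3;	/* bundles, not bytes */

	assert(pcrel_by_instr == 8);	/* 0x40 bytes == 8 bundles forward */
	return 0;
}
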
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
- return NOP();
-}
-
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
-{
- return ftrace_gen_branch(pc, addr, true);
-}
-
-static int ftrace_modify_code(unsigned long pc, unsigned long old,
- unsigned long new)
-{
- unsigned long pc_wr;
-
-	/* Check if the address is in kernel text space or module space. */
- if (!kernel_text_address(pc))
- return -EINVAL;
-
- /* Operate on writable kernel text mapping. */
- pc_wr = ktext_writable_addr(pc);
-
- if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
- return -EPERM;
-
- smp_wmb();
-
- if (!machine_stopped && num_online_cpus() > 1)
- flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
-
- return 0;
-}
-
-int ftrace_update_ftrace_func(ftrace_func_t func)
-{
- unsigned long pc, old;
- unsigned long new;
- int ret;
-
- pc = (unsigned long)&ftrace_call;
- memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(pc, (unsigned long)func);
-
- ret = ftrace_modify_code(pc, old, new);
-
- return ret;
-}
-
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long new, old;
- unsigned long ip = rec->ip;
-
- old = ftrace_nop_replace(rec);
- new = ftrace_call_replace(ip, addr);
-
- return ftrace_modify_code(rec->ip, old, new);
-}
-
-int ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long ip = rec->ip;
- unsigned long old;
- unsigned long new;
- int ret;
-
- old = ftrace_call_replace(ip, addr);
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new);
-
- return ret;
-}
-
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
- unsigned long frame_pointer)
-{
- unsigned long return_hooker = (unsigned long) &return_to_handler;
- struct ftrace_graph_ent trace;
- unsigned long old;
- int err;
-
- if (unlikely(atomic_read(&current->tracing_graph_pause)))
- return;
-
- old = *parent;
- *parent = return_hooker;
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, NULL);
- if (err == -EBUSY) {
- *parent = old;
- return;
- }
-
- trace.func = self_addr;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
- *parent = old;
- }
-}
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-extern unsigned long ftrace_graph_call;
-
-static int __ftrace_modify_caller(unsigned long *callsite,
- void (*func) (void), bool enable)
-{
- unsigned long caller_fn = (unsigned long) func;
- unsigned long pc = (unsigned long) callsite;
- unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
- unsigned long nop = NOP();
- unsigned long old = enable ? nop : branch;
- unsigned long new = enable ? branch : nop;
-
- return ftrace_modify_code(pc, old, new);
-}
-
-static int ftrace_modify_graph_caller(bool enable)
-{
- int ret;
-
- ret = __ftrace_modify_caller(&ftrace_graph_call,
- ftrace_graph_caller,
- enable);
-
- return ret;
-}
-
-int ftrace_enable_ftrace_graph_caller(void)
-{
- return ftrace_modify_graph_caller(true);
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- return ftrace_modify_graph_caller(false);
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
deleted file mode 100644
index 2fd1694ac1d0..000000000000
--- a/arch/tile/kernel/hardwall.c
+++ /dev/null
@@ -1,1096 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/rwsem.h>
-#include <linux/kprobes.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/uaccess.h>
-#include <linux/smp.h>
-#include <linux/cdev.h>
-#include <linux/compat.h>
-#include <asm/hardwall.h>
-#include <asm/traps.h>
-#include <asm/siginfo.h>
-#include <asm/irq_regs.h>
-
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-
-/*
- * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
- * We use "hardwall" nomenclature throughout for historical reasons.
- * The lock here controls access to the list data structure as well as
- * to the items on the list.
- */
-struct hardwall_type {
- int index;
- int is_xdn;
- int is_idn;
- int disabled;
- const char *name;
- struct list_head list;
- spinlock_t lock;
- struct proc_dir_entry *proc_dir;
-};
-
-enum hardwall_index {
- HARDWALL_UDN = 0,
-#ifndef __tilepro__
- HARDWALL_IDN = 1,
- HARDWALL_IPI = 2,
-#endif
- _HARDWALL_TYPES
-};
-
-static struct hardwall_type hardwall_types[] = {
- { /* user-space access to UDN */
- 0,
- 1,
- 0,
- 0,
- "udn",
- LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
- __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
- NULL
- },
-#ifndef __tilepro__
- { /* user-space access to IDN */
- 1,
- 1,
- 1,
- 1, /* disabled pending hypervisor support */
- "idn",
- LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
- __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
- NULL
- },
- { /* access to user-space IPI */
- 2,
- 0,
- 0,
- 0,
- "ipi",
- LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
- __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
- NULL
- },
-#endif
-};
-
-/*
- * This data structure tracks the cpu data, etc., associated
- * one-to-one with a "struct file *" from opening a hardwall device file.
- * Note that the file's private data points back to this structure.
- */
-struct hardwall_info {
- struct list_head list; /* for hardwall_types.list */
- struct list_head task_head; /* head of tasks in this hardwall */
- struct hardwall_type *type; /* type of this resource */
- struct cpumask cpumask; /* cpus reserved */
- int id; /* integer id for this hardwall */
- int teardown_in_progress; /* are we tearing this one down? */
-
- /* Remaining fields only valid for user-network resources. */
- int ulhc_x; /* upper left hand corner x coord */
- int ulhc_y; /* upper left hand corner y coord */
- int width; /* rectangle width */
- int height; /* rectangle height */
-#if CHIP_HAS_REV1_XDN()
- atomic_t xdn_pending_count; /* cores in phase 1 of drain */
-#endif
-};
-
-
-/* /proc/tile/hardwall */
-static struct proc_dir_entry *hardwall_proc_dir;
-
-/* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *);
-static void hardwall_remove_proc(struct hardwall_info *);
-
-/* Allow disabling UDN access. */
-static int __init noudn(char *str)
-{
- pr_info("User-space UDN access is disabled\n");
- hardwall_types[HARDWALL_UDN].disabled = 1;
- return 0;
-}
-early_param("noudn", noudn);
-
-#ifndef __tilepro__
-/* Allow disabling IDN access. */
-static int __init noidn(char *str)
-{
- pr_info("User-space IDN access is disabled\n");
- hardwall_types[HARDWALL_IDN].disabled = 1;
- return 0;
-}
-early_param("noidn", noidn);
-
-/* Allow disabling IPI access. */
-static int __init noipi(char *str)
-{
- pr_info("User-space IPI access is disabled\n");
- hardwall_types[HARDWALL_IPI].disabled = 1;
- return 0;
-}
-early_param("noipi", noipi);
-#endif
-
-
-/*
- * Low-level primitives for UDN/IDN
- */
-
-#ifdef __tilepro__
-#define mtspr_XDN(hwt, name, val) \
- do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
-#define mtspr_MPL_XDN(hwt, name, val) \
- do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
-#define mfspr_XDN(hwt, name) \
- ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
-#else
-#define mtspr_XDN(hwt, name, val) \
- do { \
- if ((hwt)->is_idn) \
- __insn_mtspr(SPR_IDN_##name, (val)); \
- else \
- __insn_mtspr(SPR_UDN_##name, (val)); \
- } while (0)
-#define mtspr_MPL_XDN(hwt, name, val) \
- do { \
- if ((hwt)->is_idn) \
- __insn_mtspr(SPR_MPL_IDN_##name, (val)); \
- else \
- __insn_mtspr(SPR_MPL_UDN_##name, (val)); \
- } while (0)
-#define mfspr_XDN(hwt, name) \
- ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
-#endif
-
-/* Set a CPU bit if the CPU is online. */
-#define cpu_online_set(cpu, dst) do { \
- if (cpu_online(cpu)) \
- cpumask_set_cpu(cpu, dst); \
-} while (0)
-
-
-/* Does the given rectangle contain the given x,y coordinate? */
-static int contains(struct hardwall_info *r, int x, int y)
-{
- return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
- (y >= r->ulhc_y && y < r->ulhc_y + r->height);
-}
-
-/* Compute the rectangle parameters and validate the cpumask. */
-static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
-{
- int x, y, cpu, ulhc, lrhc;
-
- /* The first cpu is the ULHC, the last the LRHC. */
- ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
- lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);
-
- /* Compute the rectangle attributes from the cpus. */
- r->ulhc_x = cpu_x(ulhc);
- r->ulhc_y = cpu_y(ulhc);
- r->width = cpu_x(lrhc) - r->ulhc_x + 1;
- r->height = cpu_y(lrhc) - r->ulhc_y + 1;
-
- /* Width and height must be positive */
- if (r->width <= 0 || r->height <= 0)
- return -EINVAL;
-
- /* Confirm that the cpumask is exactly the rectangle. */
- for (y = 0, cpu = 0; y < smp_height; ++y)
- for (x = 0; x < smp_width; ++x, ++cpu)
- if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
- return -EINVAL;
-
- /*
- * Note that offline cpus can't be drained when this user network
- * rectangle eventually closes. We used to detect this
- * situation and print a warning, but it annoyed users and
- * they ignored it anyway, so now we just return without a
- * warning.
- */
- return 0;
-}
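
As a worked example of the math above, take a hypothetical 4-wide mesh (the cpu = y * smp_width + x mapping is visible in hardwall_protect_rectangle() below): a cpumask of {5, 6, 9, 10} has ULHC cpu 5 and LRHC cpu 10, giving a 2x2 rectangle anchored at (1, 1). A standalone sketch, not from the source:

#include <assert.h>

enum { SMP_WIDTH = 4 };			/* hypothetical mesh width */

static int cpu_x(int cpu) { return cpu % SMP_WIDTH; }
static int cpu_y(int cpu) { return cpu / SMP_WIDTH; }

int main(void)
{
	int ulhc = 5, lrhc = 10;	/* first/last bits of {5,6,9,10} */
	int ulhc_x = cpu_x(ulhc), ulhc_y = cpu_y(ulhc);
	int width  = cpu_x(lrhc) - ulhc_x + 1;
	int height = cpu_y(lrhc) - ulhc_y + 1;

	assert(ulhc_x == 1 && ulhc_y == 1 && width == 2 && height == 2);
	return 0;
}
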
-
-/*
- * Hardware management of hardwall setup, teardown, trapping,
- * and enabling/disabling PL0 access to the networks.
- */
-
-/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
-enum direction_protect {
- N_PROTECT = (1 << 0),
- E_PROTECT = (1 << 1),
- S_PROTECT = (1 << 2),
- W_PROTECT = (1 << 3),
- C_PROTECT = (1 << 4),
-};
-
-static inline int xdn_which_interrupt(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (hwt->is_idn)
- return INT_IDN_FIREWALL;
-#endif
- return INT_UDN_FIREWALL;
-}
-
-static void enable_firewall_interrupts(struct hardwall_type *hwt)
-{
- arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
-}
-
-static void disable_firewall_interrupts(struct hardwall_type *hwt)
-{
- arch_local_irq_mask_now(xdn_which_interrupt(hwt));
-}
-
-/* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_func(void *info)
-{
- struct hardwall_info *r = info;
- struct hardwall_type *hwt = r->type;
-
- int cpu = smp_processor_id(); /* on_each_cpu disables preemption */
- int x = cpu_x(cpu);
- int y = cpu_y(cpu);
- int bits = 0;
- if (x == r->ulhc_x)
- bits |= W_PROTECT;
- if (x == r->ulhc_x + r->width - 1)
- bits |= E_PROTECT;
- if (y == r->ulhc_y)
- bits |= N_PROTECT;
- if (y == r->ulhc_y + r->height - 1)
- bits |= S_PROTECT;
- BUG_ON(bits == 0);
- mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
- enable_firewall_interrupts(hwt);
-}
-
-/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_protect_rectangle(struct hardwall_info *r)
-{
- int x, y, cpu, delta;
- struct cpumask rect_cpus;
-
- cpumask_clear(&rect_cpus);
-
- /* First include the top and bottom edges */
- cpu = r->ulhc_y * smp_width + r->ulhc_x;
- delta = (r->height - 1) * smp_width;
- for (x = 0; x < r->width; ++x, ++cpu) {
- cpu_online_set(cpu, &rect_cpus);
- cpu_online_set(cpu + delta, &rect_cpus);
- }
-
- /* Then the left and right edges */
- cpu -= r->width;
- delta = r->width - 1;
- for (y = 0; y < r->height; ++y, cpu += smp_width) {
- cpu_online_set(cpu, &rect_cpus);
- cpu_online_set(cpu + delta, &rect_cpus);
- }
-
- /* Then tell all the cpus to set up their protection SPR */
- on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
-}
-
-/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
-void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
-{
- struct hardwall_info *rect;
- struct hardwall_type *hwt;
- struct task_struct *p;
- struct siginfo info;
- int cpu = smp_processor_id();
- int found_processes;
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
-
- /* Figure out which network trapped. */
- switch (fault_num) {
-#ifndef __tilepro__
- case INT_IDN_FIREWALL:
- hwt = &hardwall_types[HARDWALL_IDN];
- break;
-#endif
- case INT_UDN_FIREWALL:
- hwt = &hardwall_types[HARDWALL_UDN];
- break;
- default:
- BUG();
- }
- BUG_ON(hwt->disabled);
-
- /* This tile trapped a network access; find the rectangle. */
- spin_lock(&hwt->lock);
- list_for_each_entry(rect, &hwt->list, list) {
- if (cpumask_test_cpu(cpu, &rect->cpumask))
- break;
- }
-
- /*
- * It shouldn't be possible not to find this cpu on the
- * rectangle list, since only cpus in rectangles get hardwalled.
- * The hardwall is only removed after the user network is drained.
- */
- BUG_ON(&rect->list == &hwt->list);
-
- /*
- * If we already started teardown on this hardwall, don't worry;
- * the abort signal has been sent and we are just waiting for things
- * to quiesce.
- */
- if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
- cpu, hwt->name,
- (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
- goto done;
- }
-
- /*
- * Kill off any process that is activated in this rectangle.
- * We bypass security to deliver the signal, since it must be
- * one of the activated processes that generated the user network
- * message that caused this trap, and all the activated
- * processes shared a single open file so are pretty tightly
- * bound together from a security point of view to begin with.
- */
- rect->teardown_in_progress = 1;
- wmb(); /* Ensure visibility of rectangle before notifying processes. */
- pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
- cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_HARDWALL;
- found_processes = 0;
- list_for_each_entry(p, &rect->task_head,
- thread.hardwall[hwt->index].list) {
- BUG_ON(p->thread.hardwall[hwt->index].info != rect);
- if (!(p->flags & PF_EXITING)) {
- found_processes = 1;
- pr_notice("hardwall: killing %d\n", p->pid);
- do_send_sig_info(info.si_signo, &info, p, false);
- }
- }
- if (!found_processes)
- pr_notice("hardwall: no associated processes!\n");
-
- done:
- spin_unlock(&hwt->lock);
-
- /*
- * We have to disable firewall interrupts now, or else when we
- * return from this handler, we will simply re-interrupt back to
- * it. However, we can't clear the protection bits, since we
- * haven't yet drained the network, and that would allow packets
- * to cross out of the hardwall region.
- */
- disable_firewall_interrupts(hwt);
-
- irq_exit();
- set_irq_regs(old_regs);
-}
-
-/* Allow access from user space to the user network. */
-void grant_hardwall_mpls(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (!hwt->is_xdn) {
- __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
- return;
- }
-#endif
- mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
- mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
- mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
- mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
-#if !CHIP_HAS_REV1_XDN()
- mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
- mtspr_MPL_XDN(hwt, CA_SET_0, 1);
-#endif
-}
-
-/* Deny access from user space to the user network. */
-void restrict_hardwall_mpls(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (!hwt->is_xdn) {
- __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
- return;
- }
-#endif
- mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
- mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
- mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
- mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
-#if !CHIP_HAS_REV1_XDN()
- mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
- mtspr_MPL_XDN(hwt, CA_SET_1, 1);
-#endif
-}
-
-/* Grant or restrict the MPLs as necessary for the task we're switching to. */
-void hardwall_switch_tasks(struct task_struct *prev,
- struct task_struct *next)
-{
- int i;
- for (i = 0; i < HARDWALL_TYPES; ++i) {
- if (prev->thread.hardwall[i].info != NULL) {
- if (next->thread.hardwall[i].info == NULL)
- restrict_hardwall_mpls(&hardwall_types[i]);
- } else if (next->thread.hardwall[i].info != NULL) {
- grant_hardwall_mpls(&hardwall_types[i]);
- }
- }
-}
-
-/* Does this task have the right to IPI the given cpu? */
-int hardwall_ipi_valid(int cpu)
-{
-#ifdef __tilegx__
- struct hardwall_info *info =
- current->thread.hardwall[HARDWALL_IPI].info;
- return info && cpumask_test_cpu(cpu, &info->cpumask);
-#else
- return 0;
-#endif
-}
-
-/*
- * Code to create, activate, deactivate, and destroy hardwall resources.
- */
-
-/* Create a hardwall for the given resource */
-static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
- size_t size,
- const unsigned char __user *bits)
-{
- struct hardwall_info *iter, *info;
- struct cpumask mask;
- unsigned long flags;
- int rc;
-
- /* Reject crazy sizes out of hand, a la sys_mbind(). */
- if (size > PAGE_SIZE)
- return ERR_PTR(-EINVAL);
-
- /* Copy whatever fits into a cpumask. */
- if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
- return ERR_PTR(-EFAULT);
-
- /*
- * If the size was short, clear the rest of the mask;
- * otherwise validate that the rest of the user mask was zero
- * (we don't try hard to be efficient when validating huge masks).
- */
- if (size < sizeof(struct cpumask)) {
- memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
- } else if (size > sizeof(struct cpumask)) {
- size_t i;
- for (i = sizeof(struct cpumask); i < size; ++i) {
- char c;
- if (get_user(c, &bits[i]))
- return ERR_PTR(-EFAULT);
- if (c)
- return ERR_PTR(-EINVAL);
- }
- }
-
- /* Allocate a new hardwall_info optimistically. */
-	info = kzalloc(sizeof(struct hardwall_info), GFP_KERNEL);
- if (info == NULL)
- return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&info->task_head);
- info->type = hwt;
-
- /* Compute the rectangle size and validate that it's plausible. */
- cpumask_copy(&info->cpumask, &mask);
- info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
- if (hwt->is_xdn) {
- rc = check_rectangle(info, &mask);
- if (rc != 0) {
- kfree(info);
- return ERR_PTR(rc);
- }
- }
-
- /*
- * Eliminate cpus that are not part of this Linux client.
- * Note that this allows for configurations that we might not want to
- * support, such as one client on every even cpu, another client on
- * every odd cpu.
- */
- cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
-
- /* Confirm it doesn't overlap and add it to the list. */
- spin_lock_irqsave(&hwt->lock, flags);
- list_for_each_entry(iter, &hwt->list, list) {
- if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
- spin_unlock_irqrestore(&hwt->lock, flags);
- kfree(info);
- return ERR_PTR(-EBUSY);
- }
- }
- list_add_tail(&info->list, &hwt->list);
- spin_unlock_irqrestore(&hwt->lock, flags);
-
- /* Set up appropriate hardwalling on all affected cpus. */
- if (hwt->is_xdn)
- hardwall_protect_rectangle(info);
-
- /* Create a /proc/tile/hardwall entry. */
- hardwall_add_proc(info);
-
- return info;
-}
-
-/* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *info)
-{
- int cpu;
- unsigned long flags;
- struct task_struct *p = current;
- struct thread_struct *ts = &p->thread;
- struct hardwall_type *hwt;
-
- /* Require a hardwall. */
- if (info == NULL)
- return -ENODATA;
-
- /* Not allowed to activate a hardwall that is being torn down. */
- if (info->teardown_in_progress)
- return -EINVAL;
-
- /*
- * Get our affinity; if we're not bound to this tile uniquely,
- * we can't access the network registers.
- */
- if (cpumask_weight(&p->cpus_allowed) != 1)
- return -EPERM;
-
- /* Make sure we are bound to a cpu assigned to this resource. */
- cpu = smp_processor_id();
- BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
- if (!cpumask_test_cpu(cpu, &info->cpumask))
- return -EINVAL;
-
- /* If we are already bound to this hardwall, it's a no-op. */
- hwt = info->type;
- if (ts->hardwall[hwt->index].info) {
- BUG_ON(ts->hardwall[hwt->index].info != info);
- return 0;
- }
-
- /* Success! This process gets to use the resource on this cpu. */
- ts->hardwall[hwt->index].info = info;
- spin_lock_irqsave(&hwt->lock, flags);
- list_add(&ts->hardwall[hwt->index].list, &info->task_head);
- spin_unlock_irqrestore(&hwt->lock, flags);
- grant_hardwall_mpls(hwt);
- printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
- p->pid, p->comm, hwt->name, cpu);
- return 0;
-}
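
hardwall_activate() refuses callers whose affinity mask covers more than one cpu, so a user process pins itself to a single tile inside the wall before activating. A hedged userspace sketch using plain glibc calls (the cpu number is illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;	/* now eligible for the activate ioctl */
}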
-
-/*
- * Deactivate a task's hardwall. Must hold lock for hardwall_type.
- * This method may be called from exit_thread(), so we don't want to
- * rely on too many fields of struct task_struct still being valid.
- * We assume the cpus_allowed, pid, and comm fields are still valid.
- */
-static void _hardwall_deactivate(struct hardwall_type *hwt,
- struct task_struct *task)
-{
- struct thread_struct *ts = &task->thread;
-
- if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
- task->pid, task->comm, hwt->name,
- cpumask_weight(&task->cpus_allowed));
- BUG();
- }
-
- BUG_ON(ts->hardwall[hwt->index].info == NULL);
- ts->hardwall[hwt->index].info = NULL;
- list_del(&ts->hardwall[hwt->index].list);
- if (task == current)
- restrict_hardwall_mpls(hwt);
-}
-
-/* Deactivate a task's hardwall. */
-static int hardwall_deactivate(struct hardwall_type *hwt,
- struct task_struct *task)
-{
- unsigned long flags;
- int activated;
-
- spin_lock_irqsave(&hwt->lock, flags);
- activated = (task->thread.hardwall[hwt->index].info != NULL);
- if (activated)
- _hardwall_deactivate(hwt, task);
- spin_unlock_irqrestore(&hwt->lock, flags);
-
- if (!activated)
- return -EINVAL;
-
- printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
- task->pid, task->comm, hwt->name, raw_smp_processor_id());
- return 0;
-}
-
-void hardwall_deactivate_all(struct task_struct *task)
-{
- int i;
- for (i = 0; i < HARDWALL_TYPES; ++i)
- if (task->thread.hardwall[i].info)
- hardwall_deactivate(&hardwall_types[i], task);
-}
-
-/* Stop the switch before draining the network. */
-static void stop_xdn_switch(void *arg)
-{
-#if !CHIP_HAS_REV1_XDN()
- /* Freeze the switch and the demux. */
- __insn_mtspr(SPR_UDN_SP_FREEZE,
- SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
- SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
- SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
-#else
- /*
- * Drop all packets bound for the core or off the edge.
- * We rely on the normal hardwall protection setup code
- * to have set the low four bits to trigger firewall interrupts,
- * and shift those bits up to trigger "drop on send" semantics,
- * plus adding "drop on send to core" for all switches.
- * In practice it seems the switches latch the DIRECTION_PROTECT
- * SPR so they won't start dropping if they're already
- * delivering the last message to the core, but it doesn't
- * hurt to enable it here.
- */
- struct hardwall_type *hwt = arg;
- unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
- mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
-#endif
-}
-
-static void empty_xdn_demuxes(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (hwt->is_idn) {
- while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_idn0_receive();
- while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_idn1_receive();
- return;
- }
-#endif
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_udn0_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_udn1_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
- (void) __tile_udn2_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
- (void) __tile_udn3_receive();
-}
-
-/* Drain all the state from a stopped switch. */
-static void drain_xdn_switch(void *arg)
-{
- struct hardwall_info *info = arg;
- struct hardwall_type *hwt = info->type;
-
-#if CHIP_HAS_REV1_XDN()
- /*
- * The switches have been configured to drop any messages
- * destined for cores (or off the edge of the rectangle).
- * But the current message may continue to be delivered,
- * so we wait until all the cores have finished any pending
- * messages before we stop draining.
- */
- int pending = mfspr_XDN(hwt, PENDING);
- while (pending--) {
- empty_xdn_demuxes(hwt);
- if (hwt->is_idn)
- __tile_idn_send(0);
- else
- __tile_udn_send(0);
- }
- atomic_dec(&info->xdn_pending_count);
- while (atomic_read(&info->xdn_pending_count))
- empty_xdn_demuxes(hwt);
-#else
- int i;
- int from_tile_words, ca_count;
-
- /* Empty out the 5 switch point fifos. */
- for (i = 0; i < 5; i++) {
- int words, j;
- __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
- words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
- for (j = 0; j < words; j++)
- (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
- BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
- }
-
- /* Dump out the 3 word fifo at top. */
- from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
- for (i = 0; i < from_tile_words; i++)
- (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
-
- /* Empty out demuxes. */
- empty_xdn_demuxes(hwt);
-
- /* Empty out catch all. */
- ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
- for (i = 0; i < ca_count; i++)
- (void) __insn_mfspr(SPR_UDN_CA_DATA);
- BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);
-
- /* Clear demux logic. */
- __insn_mtspr(SPR_UDN_DEMUX_CTL, 1);
-
- /*
- * Write switch state; experimentation indicates that 0xc3000
- * is an idle switch point.
- */
- for (i = 0; i < 5; i++) {
- __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
- __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
- }
-#endif
-}
-
-/* Reset random XDN state registers at boot up and during hardwall teardown. */
-static void reset_xdn_network_state(struct hardwall_type *hwt)
-{
- if (hwt->disabled)
- return;
-
- /* Clear out other random registers so we have a clean slate. */
- mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
- mtspr_XDN(hwt, AVAIL_EN, 0);
- mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
-
-#if !CHIP_HAS_REV1_XDN()
- /* Reset UDN coordinates to their standard value */
- {
- unsigned int cpu = smp_processor_id();
- unsigned int x = cpu_x(cpu);
- unsigned int y = cpu_y(cpu);
- __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
- }
-
- /* Set demux tags to predefined values and enable them. */
- __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
- __insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
- __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
- __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
- __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-
- /* Set other rev0 random registers to a clean state. */
- __insn_mtspr(SPR_UDN_REFILL_EN, 0);
- __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
- __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-
- /* Start the switch and demux. */
- __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
-#endif
-}
-
-void reset_network_state(void)
-{
- reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
-#ifndef __tilepro__
- reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
-#endif
-}
-
-/* Restart an XDN switch after draining. */
-static void restart_xdn_switch(void *arg)
-{
- struct hardwall_type *hwt = arg;
-
-#if CHIP_HAS_REV1_XDN()
- /* One last drain step to avoid races with injection and draining. */
- empty_xdn_demuxes(hwt);
-#endif
-
- reset_xdn_network_state(hwt);
-
- /* Disable firewall interrupts. */
- disable_firewall_interrupts(hwt);
-}
-
-/* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *info)
-{
- struct task_struct *task;
- struct hardwall_type *hwt;
- unsigned long flags;
-
- /* Make sure this file actually represents a hardwall. */
- if (info == NULL)
- return;
-
- /*
- * Deactivate any remaining tasks. It's possible to race with
- * some other thread that is exiting and hasn't yet called
- * deactivate (when freeing its thread_info), so we carefully
- * deactivate any remaining tasks before freeing the
- * hardwall_info object itself.
- */
- hwt = info->type;
- info->teardown_in_progress = 1;
- spin_lock_irqsave(&hwt->lock, flags);
- list_for_each_entry(task, &info->task_head,
- thread.hardwall[hwt->index].list)
- _hardwall_deactivate(hwt, task);
- spin_unlock_irqrestore(&hwt->lock, flags);
-
- if (hwt->is_xdn) {
- /* Configure the switches for draining the user network. */
- printk(KERN_DEBUG
- "Clearing %s hardwall rectangle %dx%d %d,%d\n",
- hwt->name, info->width, info->height,
- info->ulhc_x, info->ulhc_y);
- on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
-
- /* Drain the network. */
-#if CHIP_HAS_REV1_XDN()
- atomic_set(&info->xdn_pending_count,
- cpumask_weight(&info->cpumask));
- on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
-#else
- on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
-#endif
-
- /* Restart switch and disable firewall. */
- on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
- }
-
- /* Remove the /proc/tile/hardwall entry. */
- hardwall_remove_proc(info);
-
- /* Now free the hardwall from the list. */
- spin_lock_irqsave(&hwt->lock, flags);
- BUG_ON(!list_empty(&info->task_head));
- list_del(&info->list);
- spin_unlock_irqrestore(&hwt->lock, flags);
- kfree(info);
-}
-
-
-static int hardwall_proc_show(struct seq_file *sf, void *v)
-{
- struct hardwall_info *info = sf->private;
-
- seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask));
- return 0;
-}
-
-static int hardwall_proc_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, hardwall_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations hardwall_proc_fops = {
- .open = hardwall_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void hardwall_add_proc(struct hardwall_info *info)
-{
- char buf[64];
- snprintf(buf, sizeof(buf), "%d", info->id);
- proc_create_data(buf, 0444, info->type->proc_dir,
- &hardwall_proc_fops, info);
-}
-
-static void hardwall_remove_proc(struct hardwall_info *info)
-{
- char buf[64];
- snprintf(buf, sizeof(buf), "%d", info->id);
- remove_proc_entry(buf, info->type->proc_dir);
-}
-
-int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task)
-{
-	int i;
-	for (i = 0; i < HARDWALL_TYPES; ++i) {
-		struct hardwall_info *info = task->thread.hardwall[i].info;
-		if (info)
-			seq_printf(m, "%s: %d\n", info->type->name, info->id);
-	}
-	return 0;
-}
-
-void proc_tile_hardwall_init(struct proc_dir_entry *root)
-{
- int i;
- for (i = 0; i < HARDWALL_TYPES; ++i) {
- struct hardwall_type *hwt = &hardwall_types[i];
- if (hwt->disabled)
- continue;
- if (hardwall_proc_dir == NULL)
- hardwall_proc_dir = proc_mkdir("hardwall", root);
- hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
- }
-}
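
Each hardwall thus appears as /proc/tile/hardwall/<type>/<id> (the root follows from the "/proc/tile/hardwall" comment in hardwall_create() above), and reading it yields the cpu list printed by hardwall_proc_show(). A sketch with an assumed type name and id:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/tile/hardwall/udn/0", "r");	/* assumed path */

	if (f && fgets(buf, sizeof(buf), f))
		printf("udn hardwall 0 cpus: %s", buf);		/* e.g. "9-10" */
	if (f)
		fclose(f);
	return f ? 0 : 1;
}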
-
-
-/*
- * Character device support via ioctl/close.
- */
-
-static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
-{
- struct hardwall_info *info = file->private_data;
- int minor = iminor(file->f_mapping->host);
-	struct hardwall_type *hwt;
-
- if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
- return -EINVAL;
-
- BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
-	BUILD_BUG_ON(HARDWALL_TYPES != ARRAY_SIZE(hardwall_types));
-
- if (minor < 0 || minor >= HARDWALL_TYPES)
- return -EINVAL;
- hwt = &hardwall_types[minor];
- WARN_ON(info && hwt != info->type);
-
- switch (_IOC_NR(a)) {
- case _HARDWALL_CREATE:
- if (hwt->disabled)
- return -ENOSYS;
- if (info != NULL)
- return -EALREADY;
- info = hardwall_create(hwt, _IOC_SIZE(a),
- (const unsigned char __user *)b);
- if (IS_ERR(info))
- return PTR_ERR(info);
- file->private_data = info;
- return 0;
-
- case _HARDWALL_ACTIVATE:
- return hardwall_activate(info);
-
- case _HARDWALL_DEACTIVATE:
- if (current->thread.hardwall[hwt->index].info != info)
- return -EINVAL;
- return hardwall_deactivate(hwt, current);
-
- case _HARDWALL_GET_ID:
- return info ? info->id : -EINVAL;
-
- default:
- return -EINVAL;
- }
-}
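
From user space the lifecycle is create, activate, query, close. The sketch below assumes the HARDWALL_CREATE()/HARDWALL_ACTIVATE/HARDWALL_GET_ID ioctl macros from the uapi <asm/hardwall.h> header this patch also removes, plus an assumed /dev/hardwall/udn device node; cpu numbers are illustrative:

#include <asm/hardwall.h>	/* HARDWALL_* ioctl macros (removed uapi header) */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char bits[8];	/* one bit per cpu */
	int fd, id;

	memset(bits, 0, sizeof(bits));
	bits[1] = 0x06;		/* cpus 9 and 10 */

	fd = open("/dev/hardwall/udn", O_RDONLY);	/* assumed node */
	if (fd < 0 || ioctl(fd, HARDWALL_CREATE(sizeof(bits)), bits) < 0)
		return 1;	/* e.g. -EBUSY if the mask overlaps */

	/* Pin to one cpu inside the wall first; see hardwall_activate(). */
	if (ioctl(fd, HARDWALL_ACTIVATE) < 0)
		return 1;

	id = ioctl(fd, HARDWALL_GET_ID);
	printf("activated hardwall id %d\n", id);
	close(fd);		/* flush/release then tears the wall down */
	return 0;
}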
-
-#ifdef CONFIG_COMPAT
-static long hardwall_compat_ioctl(struct file *file,
- unsigned int a, unsigned long b)
-{
- /* Sign-extend the argument so it can be used as a pointer. */
- return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
-}
-#endif
-
-/* The user process closed the file; revoke access to user networks. */
-static int hardwall_flush(struct file *file, fl_owner_t owner)
-{
- struct hardwall_info *info = file->private_data;
- struct task_struct *task, *tmp;
- unsigned long flags;
-
- if (info) {
- /*
- * NOTE: if multiple threads are activated on this hardwall
- * file, the other threads will continue having access to the
- * user network until they are context-switched out and back
- * in again.
- *
- * NOTE: A NULL files pointer means the task is being torn
- * down, so in that case we also deactivate it.
- */
- struct hardwall_type *hwt = info->type;
- spin_lock_irqsave(&hwt->lock, flags);
- list_for_each_entry_safe(task, tmp, &info->task_head,
- thread.hardwall[hwt->index].list) {
- if (task->files == owner || task->files == NULL)
- _hardwall_deactivate(hwt, task);
- }
- spin_unlock_irqrestore(&hwt->lock, flags);
- }
-
- return 0;
-}
-
-/* This hardwall is gone, so destroy it. */
-static int hardwall_release(struct inode *inode, struct file *file)
-{
- hardwall_destroy(file->private_data);
- return 0;
-}
-
-static const struct file_operations dev_hardwall_fops = {
- .open = nonseekable_open,
- .unlocked_ioctl = hardwall_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hardwall_compat_ioctl,
-#endif
- .flush = hardwall_flush,
- .release = hardwall_release,
-};
-
-static struct cdev hardwall_dev;
-
-static int __init dev_hardwall_init(void)
-{
- int rc;
- dev_t dev;
-
- rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
- if (rc < 0)
- return rc;
- cdev_init(&hardwall_dev, &dev_hardwall_fops);
-	rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
-	if (rc < 0) {
-		unregister_chrdev_region(dev, HARDWALL_TYPES);
-		return rc;
-	}
-
- return 0;
-}
-late_initcall(dev_hardwall_init);
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
deleted file mode 100644
index 8d5b40ff2922..000000000000
--- a/arch/tile/kernel/head_32.S
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE startup code.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/thread_info.h>
-#include <asm/processor.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-#include <arch/spr_def.h>
-
-/*
- * This module contains the entry code for kernel images. It performs the
- * minimal setup needed to call the generic C routines.
- */
-
- __HEAD
-ENTRY(_start)
- /* Notify the hypervisor of what version of the API we want */
- {
- movei r1, TILE_CHIP
- movei r2, TILE_CHIP_REV
- }
- {
- moveli r0, _HV_VERSION_OLD_HV_INIT
- jal _hv_init
- }
- /* Get a reasonable default ASID in r0 */
- {
- move r0, zero
- jal _hv_inquire_asid
- }
- /* Install the default page table */
- {
- moveli r6, lo16(swapper_pgprot - PAGE_OFFSET)
- move r4, r0 /* use starting ASID of range for this page table */
- }
- {
- moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET)
- auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET)
- }
- {
- lw r2, r6
- addi r6, r6, 4
- }
- {
- lw r3, r6
- auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
- }
- {
- finv r6
-	 move r1, zero	/* high 32 bits of CPA are zero */
- }
- {
- moveli lr, lo16(1f)
- moveli r5, CTX_PAGE_FLAG
- }
- {
- auli lr, lr, ha16(1f)
- j _hv_install_context
- }
-1:
-
- /* Get our processor number and save it away in SAVE_K_0. */
- jal _hv_inquire_topology
- mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
- add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */
-
-#ifdef CONFIG_SMP
- /*
- * Load up our per-cpu offset. When the first (master) tile
- * boots, this value is still zero, so we will load boot_pc
- * with start_kernel, and boot_sp at the top of init_stack.
- * The master tile initializes the per-cpu offset array, so that
- * when subsequent (secondary) tiles boot, they will instead load
- * from their per-cpu versions of boot_sp and boot_pc.
- */
- moveli r5, lo16(__per_cpu_offset)
- auli r5, r5, ha16(__per_cpu_offset)
- s2a r5, r4, r5
- lw r5, r5
- bnz r5, 1f
-
- /*
- * Save the width and height to the smp_topology variable
- * for later use.
- */
- moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- {
- sw r0, r2
- addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET)
- }
- sw r0, r3
-1:
-#else
- move r5, zero
-#endif
-
- /* Load and go with the correct pc and sp. */
- {
- addli r1, r5, lo16(boot_sp)
- addli r0, r5, lo16(boot_pc)
- }
- {
- auli r1, r1, ha16(boot_sp)
- auli r0, r0, ha16(boot_pc)
- }
- lw r0, r0
- lw sp, r1
- or r4, sp, r4
- mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
- {
- move lr, zero /* stop backtraces in the called function */
- jr r0
- }
- ENDPROC(_start)
-
-__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
-ENTRY(empty_zero_page)
- .fill PAGE_SIZE,1,0
- END(empty_zero_page)
-
- .macro PTE va, cpa, bits1, no_org=0
- .ifeq \no_org
- .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
- .endif
- .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
- (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
- .endm
-
-__PAGE_ALIGNED_DATA
- .align PAGE_SIZE
-ENTRY(swapper_pg_dir)
- /*
- * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as
- * VA = PA + PAGE_OFFSET. We remap things with more precise access
- * permissions and more respect for size of RAM later.
- */
- .set addr, 0
- .rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
- PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
- (1 << (HV_PTE_INDEX_WRITABLE - 32))
- .set addr, addr + PGDIR_SIZE
- .endr
-
- /* The true text VAs are mapped as VA = PA + MEM_SV_START */
- PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
- (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
- .org swapper_pg_dir + PGDIR_SIZE
- END(swapper_pg_dir)
-
- /*
- * Isolate swapper_pgprot to its own cache line, since each cpu
- * starting up will read it using VA-is-PA and local homing.
- * This would otherwise likely conflict with other data on the cache
- * line, once we have set its permanent home in the page tables.
- */
- __INITDATA
- .align CHIP_L2_LINE_SIZE()
-ENTRY(swapper_pgprot)
- PTE 0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
- (1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
- .align CHIP_L2_LINE_SIZE()
- END(swapper_pgprot)
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
deleted file mode 100644
index bd0e12f283f3..000000000000
--- a/arch/tile/kernel/head_64.S
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE startup code.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/thread_info.h>
-#include <asm/processor.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-#include <arch/spr_def.h>
-
-/* Extract two 32-bit bit values that were read into one register. */
-#ifdef __BIG_ENDIAN__
-#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
-#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
-#else
-#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
-#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
-#endif
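
The two variants just select halves of a single 64-bit load; in C terms:

#include <stdint.h>

/* C model of GET_FIRST_INT/GET_SECOND_INT above. */
static inline int32_t get_first_int(uint64_t rs)
{
#ifdef __BIG_ENDIAN__
	return (int32_t)(rs >> 32);	/* shrsi rd, rs, 32 */
#else
	return (int32_t)rs;		/* addxi rd, rs, 0 */
#endif
}

static inline int32_t get_second_int(uint64_t rs)
{
#ifdef __BIG_ENDIAN__
	return (int32_t)rs;
#else
	return (int32_t)(rs >> 32);
#endif
}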
-
-/*
- * This module contains the entry code for kernel images. It performs the
- * minimal setup needed to call the generic C routines.
- */
-
- __HEAD
-ENTRY(_start)
- /* Notify the hypervisor of what version of the API we want */
- {
-#if KERNEL_PL == 1 && _HV_VERSION == 13
- /* Support older hypervisors by asking for API version 12. */
- movei r0, _HV_VERSION_OLD_HV_INIT
-#else
- movei r0, _HV_VERSION
-#endif
- movei r1, TILE_CHIP
- }
- {
- movei r2, TILE_CHIP_REV
- movei r3, KERNEL_PL
- }
- jal _hv_init
- /* Get a reasonable default ASID in r0 */
- {
- move r0, zero
- jal _hv_inquire_asid
- }
-
- /*
- * Install the default page table. The relocation required to
- * statically define the table is a bit too complex, so we have
- * to plug in the pointer from the L0 to the L1 table by hand.
- * We only do this on the first cpu to boot, though, since the
- * other CPUs should see a properly-constructed page table.
- */
- {
- GET_FIRST_INT(r2, r0) /* ASID for hv_install_context */
- moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
- }
- {
- shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
- }
- {
- ld r1, r4 /* access_pte for hv_install_context */
- }
- {
- moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
- moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
- }
- {
- /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
- bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
- finv r4
- }
- bnez r7, .Lno_write
- {
- shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
- shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
- }
- {
- /* Cut off the low bits of the PT address. */
- shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
- /* Start with our access pte. */
- move r5, r1
- }
- {
- /* Stuff the address into the page table pointer slot of the PTE. */
- bfins r5, r6, HV_PTE_INDEX_PTFN, \
- HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
- }
- {
- /* Store the L0 data PTE. */
- st r0, r5
- addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
- HV_LOG2_PAGE_TABLE_ALIGN
- }
- {
- addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
- bfins r5, r6, HV_PTE_INDEX_PTFN, \
- HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
- }
- /* Store the L0 code PTE. */
- st r0, r5
-
-.Lno_write:
- moveli lr, hw2_last(1f)
- {
- shl16insli lr, lr, hw1(1f)
- moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
- }
- {
- shl16insli lr, lr, hw0(1f)
- shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
- }
- {
- moveli r3, CTX_PAGE_FLAG
- j _hv_install_context
- }
-1:
-
- /* Install the interrupt base. */
- moveli r0, hw2_last(intrpt_start)
- shl16insli r0, r0, hw1(intrpt_start)
- shl16insli r0, r0, hw0(intrpt_start)
- mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
-
- /* Get our processor number and save it away in SAVE_K_0. */
- jal _hv_inquire_topology
- {
- GET_FIRST_INT(r5, r1) /* r5 = width */
- GET_SECOND_INT(r4, r0) /* r4 = y */
- }
- {
- GET_FIRST_INT(r6, r0) /* r6 = x */
- mul_lu_lu r4, r4, r5
- }
- {
- add r4, r4, r6 /* r4 == cpu == y*width + x */
- }
-
-#ifdef CONFIG_SMP
- /*
- * Load up our per-cpu offset. When the first (master) tile
- * boots, this value is still zero, so we will load boot_pc
- * with start_kernel, and boot_sp at the top of init_stack.
- * The master tile initializes the per-cpu offset array, so that
- * when subsequent (secondary) tiles boot, they will instead load
- * from their per-cpu versions of boot_sp and boot_pc.
- */
- moveli r5, hw2_last(__per_cpu_offset)
- shl16insli r5, r5, hw1(__per_cpu_offset)
- shl16insli r5, r5, hw0(__per_cpu_offset)
- shl3add r5, r4, r5
- ld r5, r5
- bnez r5, 1f
-
- /*
- * Save the width and height to the smp_topology variable
- * for later use.
- */
- moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- st r0, r1
-1:
-#else
- move r5, zero
-#endif
-
- /* Load and go with the correct pc and sp. */
- {
- moveli r1, hw2_last(boot_sp)
- moveli r0, hw2_last(boot_pc)
- }
- {
- shl16insli r1, r1, hw1(boot_sp)
- shl16insli r0, r0, hw1(boot_pc)
- }
- {
- shl16insli r1, r1, hw0(boot_sp)
- shl16insli r0, r0, hw0(boot_pc)
- }
- {
- add r1, r1, r5
- add r0, r0, r5
- }
- ld r0, r0
- ld sp, r1
- shli r4, r4, CPU_SHIFT
- bfins r4, sp, 0, CPU_SHIFT-1
- mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
- {
- move lr, zero /* stop backtraces in the called function */
- jr r0
- }
- ENDPROC(_start)
-
-__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
-ENTRY(empty_zero_page)
- .fill PAGE_SIZE,1,0
- END(empty_zero_page)
-
- .macro PTE cpa, bits1
- .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
- HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
- (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
- .endm
-
-__PAGE_ALIGNED_DATA
- .align PAGE_SIZE
-ENTRY(swapper_pg_dir)
- .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
-.Lsv_data_pmd:
- .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
-.Lsv_code_pmd:
- .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + SIZEOF_PGD
- END(swapper_pg_dir)
-
- .align HV_PAGE_TABLE_ALIGN
-ENTRY(temp_data_pmd)
- /*
- * We fill the PAGE_OFFSET pmd with huge pages with
- * VA = PA + PAGE_OFFSET. We remap things with more precise access
- * permissions later.
- */
- .set addr, 0
- .rept PTRS_PER_PMD
- PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
- .set addr, addr + HPAGE_SIZE
- .endr
- .org temp_data_pmd + SIZEOF_PMD
- END(temp_data_pmd)
-
- .align HV_PAGE_TABLE_ALIGN
-ENTRY(temp_code_pmd)
- /*
-	 * VA = PA + MEM_SV_START.  We remap things with more precise access
- * VA = PA + PAGE_OFFSET. We remap things with more precise access
- * permissions later.
- */
- .set addr, 0
- .rept PTRS_PER_PMD
- PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
- .set addr, addr + HPAGE_SIZE
- .endr
- .org temp_code_pmd + SIZEOF_PMD
- END(temp_code_pmd)
-
- /*
- * Isolate swapper_pgprot to its own cache line, since each cpu
- * starting up will read it using VA-is-PA and local homing.
- * This would otherwise likely conflict with other data on the cache
- * line, once we have set its permanent home in the page tables.
- */
- __INITDATA
- .align CHIP_L2_LINE_SIZE()
-ENTRY(swapper_pgprot)
- .quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .align CHIP_L2_LINE_SIZE()
- END(swapper_pgprot)
diff --git a/arch/tile/kernel/hvglue.S b/arch/tile/kernel/hvglue.S
deleted file mode 100644
index 70c661448638..000000000000
--- a/arch/tile/kernel/hvglue.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
-.macro gensym sym, val, size
-.org \val
-.global _\sym
-.type _\sym,function
-_\sym:
-.size _\sym,\size
-#ifndef CONFIG_TILE_HVGLUE_TRACE
-.globl \sym
-.set \sym,_\sym
-#endif
-.endm
-
-.section .hvglue,"x",@nobits
-.align 8
-gensym hv_init, 0x20, 32
-gensym hv_install_context, 0x40, 32
-gensym hv_sysconf, 0x60, 32
-gensym hv_get_rtc, 0x80, 32
-gensym hv_set_rtc, 0xa0, 32
-gensym hv_flush_asid, 0xc0, 32
-gensym hv_flush_page, 0xe0, 32
-gensym hv_flush_pages, 0x100, 32
-gensym hv_restart, 0x120, 32
-gensym hv_halt, 0x140, 32
-gensym hv_power_off, 0x160, 32
-gensym hv_inquire_physical, 0x180, 32
-gensym hv_inquire_memory_controller, 0x1a0, 32
-gensym hv_inquire_virtual, 0x1c0, 32
-gensym hv_inquire_asid, 0x1e0, 32
-gensym hv_nanosleep, 0x200, 32
-gensym hv_console_read_if_ready, 0x220, 32
-gensym hv_console_write, 0x240, 32
-gensym hv_downcall_dispatch, 0x260, 32
-gensym hv_inquire_topology, 0x280, 32
-gensym hv_fs_findfile, 0x2a0, 32
-gensym hv_fs_fstat, 0x2c0, 32
-gensym hv_fs_pread, 0x2e0, 32
-gensym hv_physaddr_read64, 0x300, 32
-gensym hv_physaddr_write64, 0x320, 32
-gensym hv_get_command_line, 0x340, 32
-gensym hv_set_caching, 0x360, 32
-gensym hv_bzero_page, 0x380, 32
-gensym hv_register_message_state, 0x3a0, 32
-gensym hv_send_message, 0x3c0, 32
-gensym hv_receive_message, 0x3e0, 32
-gensym hv_inquire_context, 0x400, 32
-gensym hv_start_all_tiles, 0x420, 32
-gensym hv_dev_open, 0x440, 32
-gensym hv_dev_close, 0x460, 32
-gensym hv_dev_pread, 0x480, 32
-gensym hv_dev_pwrite, 0x4a0, 32
-gensym hv_dev_poll, 0x4c0, 32
-gensym hv_dev_poll_cancel, 0x4e0, 32
-gensym hv_dev_preada, 0x500, 32
-gensym hv_dev_pwritea, 0x520, 32
-gensym hv_flush_remote, 0x540, 32
-gensym hv_console_putc, 0x560, 32
-gensym hv_inquire_tiles, 0x580, 32
-gensym hv_confstr, 0x5a0, 32
-gensym hv_reexec, 0x5c0, 32
-gensym hv_set_command_line, 0x5e0, 32
-gensym hv_clear_intr, 0x600, 32
-gensym hv_enable_intr, 0x620, 32
-gensym hv_disable_intr, 0x640, 32
-gensym hv_raise_intr, 0x660, 32
-gensym hv_trigger_ipi, 0x680, 32
-gensym hv_store_mapping, 0x6a0, 32
-gensym hv_inquire_realpa, 0x6c0, 32
-gensym hv_flush_all, 0x6e0, 32
-gensym hv_get_ipi_pte, 0x700, 32
-gensym hv_set_pte_super_shift, 0x720, 32
-gensym hv_console_set_ipi, 0x7e0, 32
-gensym hv_send_nmi, 0x820, 32
-gensym hv_glue_internals, 0x820, 30688
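
Each gensym invocation emits a global symbol at a fixed .org inside the .hvglue section, so every hypervisor service is a 32-byte slot at a stable offset whose code the hypervisor supplies. Calling through a slot is then an ordinary indirect call; a hedged C illustration (glue_base is hypothetical):

typedef long (*hv_stub_t)(void);

static inline long call_hv_slot(unsigned long glue_base, unsigned long off)
{
	hv_stub_t fn = (hv_stub_t)(glue_base + off);	/* e.g. 0x20 for hv_init */
	return fn();
}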
diff --git a/arch/tile/kernel/hvglue_trace.c b/arch/tile/kernel/hvglue_trace.c
deleted file mode 100644
index add0d71395c6..000000000000
--- a/arch/tile/kernel/hvglue_trace.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Pull in the hypervisor header so we declare all the ABI functions
- * with the underscore versions, then undef the names so that we can
- * provide our own wrapper versions.
- */
-#define hv_init _hv_init
-#define hv_install_context _hv_install_context
-#define hv_sysconf _hv_sysconf
-#define hv_get_rtc _hv_get_rtc
-#define hv_set_rtc _hv_set_rtc
-#define hv_flush_asid _hv_flush_asid
-#define hv_flush_page _hv_flush_page
-#define hv_flush_pages _hv_flush_pages
-#define hv_restart _hv_restart
-#define hv_halt _hv_halt
-#define hv_power_off _hv_power_off
-#define hv_inquire_physical _hv_inquire_physical
-#define hv_inquire_memory_controller _hv_inquire_memory_controller
-#define hv_inquire_virtual _hv_inquire_virtual
-#define hv_inquire_asid _hv_inquire_asid
-#define hv_nanosleep _hv_nanosleep
-#define hv_console_read_if_ready _hv_console_read_if_ready
-#define hv_console_write _hv_console_write
-#define hv_downcall_dispatch _hv_downcall_dispatch
-#define hv_inquire_topology _hv_inquire_topology
-#define hv_fs_findfile _hv_fs_findfile
-#define hv_fs_fstat _hv_fs_fstat
-#define hv_fs_pread _hv_fs_pread
-#define hv_physaddr_read64 _hv_physaddr_read64
-#define hv_physaddr_write64 _hv_physaddr_write64
-#define hv_get_command_line _hv_get_command_line
-#define hv_set_caching _hv_set_caching
-#define hv_bzero_page _hv_bzero_page
-#define hv_register_message_state _hv_register_message_state
-#define hv_send_message _hv_send_message
-#define hv_receive_message _hv_receive_message
-#define hv_inquire_context _hv_inquire_context
-#define hv_start_all_tiles _hv_start_all_tiles
-#define hv_dev_open _hv_dev_open
-#define hv_dev_close _hv_dev_close
-#define hv_dev_pread _hv_dev_pread
-#define hv_dev_pwrite _hv_dev_pwrite
-#define hv_dev_poll _hv_dev_poll
-#define hv_dev_poll_cancel _hv_dev_poll_cancel
-#define hv_dev_preada _hv_dev_preada
-#define hv_dev_pwritea _hv_dev_pwritea
-#define hv_flush_remote _hv_flush_remote
-#define hv_console_putc _hv_console_putc
-#define hv_inquire_tiles _hv_inquire_tiles
-#define hv_confstr _hv_confstr
-#define hv_reexec _hv_reexec
-#define hv_set_command_line _hv_set_command_line
-#define hv_clear_intr _hv_clear_intr
-#define hv_enable_intr _hv_enable_intr
-#define hv_disable_intr _hv_disable_intr
-#define hv_raise_intr _hv_raise_intr
-#define hv_trigger_ipi _hv_trigger_ipi
-#define hv_store_mapping _hv_store_mapping
-#define hv_inquire_realpa _hv_inquire_realpa
-#define hv_flush_all _hv_flush_all
-#define hv_get_ipi_pte _hv_get_ipi_pte
-#define hv_set_pte_super_shift _hv_set_pte_super_shift
-#define hv_console_set_ipi _hv_console_set_ipi
-#define hv_send_nmi _hv_send_nmi
-#include <hv/hypervisor.h>
-#undef hv_init
-#undef hv_install_context
-#undef hv_sysconf
-#undef hv_get_rtc
-#undef hv_set_rtc
-#undef hv_flush_asid
-#undef hv_flush_page
-#undef hv_flush_pages
-#undef hv_restart
-#undef hv_halt
-#undef hv_power_off
-#undef hv_inquire_physical
-#undef hv_inquire_memory_controller
-#undef hv_inquire_virtual
-#undef hv_inquire_asid
-#undef hv_nanosleep
-#undef hv_console_read_if_ready
-#undef hv_console_write
-#undef hv_downcall_dispatch
-#undef hv_inquire_topology
-#undef hv_fs_findfile
-#undef hv_fs_fstat
-#undef hv_fs_pread
-#undef hv_physaddr_read64
-#undef hv_physaddr_write64
-#undef hv_get_command_line
-#undef hv_set_caching
-#undef hv_bzero_page
-#undef hv_register_message_state
-#undef hv_send_message
-#undef hv_receive_message
-#undef hv_inquire_context
-#undef hv_start_all_tiles
-#undef hv_dev_open
-#undef hv_dev_close
-#undef hv_dev_pread
-#undef hv_dev_pwrite
-#undef hv_dev_poll
-#undef hv_dev_poll_cancel
-#undef hv_dev_preada
-#undef hv_dev_pwritea
-#undef hv_flush_remote
-#undef hv_console_putc
-#undef hv_inquire_tiles
-#undef hv_confstr
-#undef hv_reexec
-#undef hv_set_command_line
-#undef hv_clear_intr
-#undef hv_enable_intr
-#undef hv_disable_intr
-#undef hv_raise_intr
-#undef hv_trigger_ipi
-#undef hv_store_mapping
-#undef hv_inquire_realpa
-#undef hv_flush_all
-#undef hv_get_ipi_pte
-#undef hv_set_pte_super_shift
-#undef hv_console_set_ipi
-#undef hv_send_nmi
-
-/*
- * Provide macros based on <linux/syscalls.h> to provide a wrapper
- * function that invokes the same function with an underscore prefix.
- * We can't use the existing __SC_xxx macros because we need to
- * support up to nine arguments rather than up to six, and also this
- * way the file stands alone from possible changes in the
- * implementation of <linux/syscalls.h>.
- */
-#define HV_WRAP0(type, name) \
- type name(void); \
- type name(void) \
- { \
- return _##name(); \
- }
-#define __HV_DECL1(t1, a1) t1 a1
-#define __HV_DECL2(t2, a2, ...) t2 a2, __HV_DECL1(__VA_ARGS__)
-#define __HV_DECL3(t3, a3, ...) t3 a3, __HV_DECL2(__VA_ARGS__)
-#define __HV_DECL4(t4, a4, ...) t4 a4, __HV_DECL3(__VA_ARGS__)
-#define __HV_DECL5(t5, a5, ...) t5 a5, __HV_DECL4(__VA_ARGS__)
-#define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__)
-#define __HV_DECL7(t7, a7, ...) t7 a7, __HV_DECL6(__VA_ARGS__)
-#define __HV_DECL8(t8, a8, ...) t8 a8, __HV_DECL7(__VA_ARGS__)
-#define __HV_DECL9(t9, a9, ...) t9 a9, __HV_DECL8(__VA_ARGS__)
-#define __HV_PASS1(t1, a1) a1
-#define __HV_PASS2(t2, a2, ...) a2, __HV_PASS1(__VA_ARGS__)
-#define __HV_PASS3(t3, a3, ...) a3, __HV_PASS2(__VA_ARGS__)
-#define __HV_PASS4(t4, a4, ...) a4, __HV_PASS3(__VA_ARGS__)
-#define __HV_PASS5(t5, a5, ...) a5, __HV_PASS4(__VA_ARGS__)
-#define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__)
-#define __HV_PASS7(t7, a7, ...) a7, __HV_PASS6(__VA_ARGS__)
-#define __HV_PASS8(t8, a8, ...) a8, __HV_PASS7(__VA_ARGS__)
-#define __HV_PASS9(t9, a9, ...) a9, __HV_PASS8(__VA_ARGS__)
-#define HV_WRAPx(x, type, name, ...) \
- type name(__HV_DECL##x(__VA_ARGS__)); \
- type name(__HV_DECL##x(__VA_ARGS__)) \
- { \
- return _##name(__HV_PASS##x(__VA_ARGS__)); \
- }
-#define HV_WRAP1(type, name, ...) HV_WRAPx(1, type, name, __VA_ARGS__)
-#define HV_WRAP2(type, name, ...) HV_WRAPx(2, type, name, __VA_ARGS__)
-#define HV_WRAP3(type, name, ...) HV_WRAPx(3, type, name, __VA_ARGS__)
-#define HV_WRAP4(type, name, ...) HV_WRAPx(4, type, name, __VA_ARGS__)
-#define HV_WRAP5(type, name, ...) HV_WRAPx(5, type, name, __VA_ARGS__)
-#define HV_WRAP6(type, name, ...) HV_WRAPx(6, type, name, __VA_ARGS__)
-#define HV_WRAP7(type, name, ...) HV_WRAPx(7, type, name, __VA_ARGS__)
-#define HV_WRAP8(type, name, ...) HV_WRAPx(8, type, name, __VA_ARGS__)
-#define HV_WRAP9(type, name, ...) HV_WRAPx(9, type, name, __VA_ARGS__)
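
For example, HV_WRAP1(long, hv_sysconf, HV_SysconfQuery, query) from the list below expands, via __HV_DECL1/__HV_PASS1, to:

long hv_sysconf(HV_SysconfQuery query);
long hv_sysconf(HV_SysconfQuery query)
{
	return _hv_sysconf(query);
}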
-
-/* List all the hypervisor API functions. */
-HV_WRAP4(void, hv_init, HV_VersionNumber, interface_version_number,
- int, chip_num, int, chip_rev_num, int, client_pl)
-HV_WRAP1(long, hv_sysconf, HV_SysconfQuery, query)
-HV_WRAP3(int, hv_confstr, HV_ConfstrQuery, query, HV_VirtAddr, buf, int, len)
-#if CHIP_HAS_IPI()
-HV_WRAP3(int, hv_get_ipi_pte, HV_Coord, tile, int, pl, HV_PTE*, pte)
-HV_WRAP3(int, hv_console_set_ipi, int, ipi, int, event, HV_Coord, coord)
-#else
-HV_WRAP1(void, hv_enable_intr, HV_IntrMask, enab_mask)
-HV_WRAP1(void, hv_disable_intr, HV_IntrMask, disab_mask)
-HV_WRAP1(void, hv_clear_intr, HV_IntrMask, clear_mask)
-HV_WRAP1(void, hv_raise_intr, HV_IntrMask, raise_mask)
-HV_WRAP2(HV_Errno, hv_trigger_ipi, HV_Coord, tile, int, interrupt)
-#endif /* !CHIP_HAS_IPI() */
-HV_WRAP3(int, hv_store_mapping, HV_VirtAddr, va, unsigned int, len,
- HV_PhysAddr, pa)
-HV_WRAP2(HV_PhysAddr, hv_inquire_realpa, HV_PhysAddr, cpa, unsigned int, len)
-HV_WRAP0(HV_RTCTime, hv_get_rtc)
-HV_WRAP1(void, hv_set_rtc, HV_RTCTime, time)
-HV_WRAP4(int, hv_install_context, HV_PhysAddr, page_table, HV_PTE, access,
- HV_ASID, asid, __hv32, flags)
-HV_WRAP2(int, hv_set_pte_super_shift, int, level, int, log2_count)
-HV_WRAP0(HV_Context, hv_inquire_context)
-HV_WRAP1(int, hv_flush_asid, HV_ASID, asid)
-HV_WRAP2(int, hv_flush_page, HV_VirtAddr, address, HV_PageSize, page_size)
-HV_WRAP3(int, hv_flush_pages, HV_VirtAddr, start, HV_PageSize, page_size,
- unsigned long, size)
-HV_WRAP1(int, hv_flush_all, int, preserve_global)
-HV_WRAP2(void, hv_restart, HV_VirtAddr, cmd, HV_VirtAddr, args)
-HV_WRAP0(void, hv_halt)
-HV_WRAP0(void, hv_power_off)
-HV_WRAP1(int, hv_reexec, HV_PhysAddr, entry)
-HV_WRAP0(HV_Topology, hv_inquire_topology)
-HV_WRAP3(HV_Errno, hv_inquire_tiles, HV_InqTileSet, set, HV_VirtAddr, cpumask,
- int, length)
-HV_WRAP1(HV_PhysAddrRange, hv_inquire_physical, int, idx)
-HV_WRAP2(HV_MemoryControllerInfo, hv_inquire_memory_controller, HV_Coord, coord,
- int, controller)
-HV_WRAP1(HV_VirtAddrRange, hv_inquire_virtual, int, idx)
-HV_WRAP1(HV_ASIDRange, hv_inquire_asid, int, idx)
-HV_WRAP1(void, hv_nanosleep, int, nanosecs)
-HV_WRAP0(int, hv_console_read_if_ready)
-HV_WRAP1(void, hv_console_putc, int, byte)
-HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
-HV_WRAP0(void, hv_downcall_dispatch)
-HV_WRAP1(int, hv_fs_findfile, HV_VirtAddr, filename)
-HV_WRAP1(HV_FS_StatInfo, hv_fs_fstat, int, inode)
-HV_WRAP4(int, hv_fs_pread, int, inode, HV_VirtAddr, buf,
- int, length, int, offset)
-HV_WRAP2(unsigned long long, hv_physaddr_read64, HV_PhysAddr, addr,
- HV_PTE, access)
-HV_WRAP3(void, hv_physaddr_write64, HV_PhysAddr, addr, HV_PTE, access,
- unsigned long long, val)
-HV_WRAP2(int, hv_get_command_line, HV_VirtAddr, buf, int, length)
-HV_WRAP2(HV_Errno, hv_set_command_line, HV_VirtAddr, buf, int, length)
-HV_WRAP1(void, hv_set_caching, unsigned long, bitmask)
-HV_WRAP2(void, hv_bzero_page, HV_VirtAddr, va, unsigned int, size)
-HV_WRAP1(HV_Errno, hv_register_message_state, HV_MsgState*, msgstate)
-HV_WRAP4(int, hv_send_message, HV_Recipient *, recips, int, nrecip,
- HV_VirtAddr, buf, int, buflen)
-HV_WRAP3(HV_RcvMsgInfo, hv_receive_message, HV_MsgState, msgstate,
- HV_VirtAddr, buf, int, buflen)
-HV_WRAP0(void, hv_start_all_tiles)
-HV_WRAP2(int, hv_dev_open, HV_VirtAddr, name, __hv32, flags)
-HV_WRAP1(int, hv_dev_close, int, devhdl)
-HV_WRAP5(int, hv_dev_pread, int, devhdl, __hv32, flags, HV_VirtAddr, va,
- __hv32, len, __hv64, offset)
-HV_WRAP5(int, hv_dev_pwrite, int, devhdl, __hv32, flags, HV_VirtAddr, va,
- __hv32, len, __hv64, offset)
-HV_WRAP3(int, hv_dev_poll, int, devhdl, __hv32, events, HV_IntArg, intarg)
-HV_WRAP1(int, hv_dev_poll_cancel, int, devhdl)
-HV_WRAP6(int, hv_dev_preada, int, devhdl, __hv32, flags, __hv32, sgl_len,
- HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
-HV_WRAP6(int, hv_dev_pwritea, int, devhdl, __hv32, flags, __hv32, sgl_len,
- HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
-HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
- unsigned long, cache_control, unsigned long*, cache_cpumask,
- HV_VirtAddr, tlb_va, unsigned long, tlb_length,
- unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
- HV_Remote_ASID*, asids, int, asidcount)
-HV_WRAP3(HV_NMI_Info, hv_send_nmi, HV_Coord, tile, unsigned long, info,
- __hv64, flags)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
deleted file mode 100644
index 9ff75e3a318a..000000000000
--- a/arch/tile/kernel/intvec_32.S
+++ /dev/null
@@ -1,1906 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Linux interrupt vectors.
- */
-
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/unistd.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/irqflags.h>
-#include <asm/atomic_32.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-#include <arch/abi.h>
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
-
-#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
-
- .macro push_reg reg, ptr=sp, delta=-4
- {
- sw \ptr, \reg
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg reg, ptr=sp, delta=4
- {
- lw \reg, \ptr
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg_zero reg, zreg, ptr=sp, delta=4
- {
- move \zreg, zero
- lw \reg, \ptr
- addi \ptr, \ptr, \delta
- }
- .endm
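
In C terms, push_reg stores through the pointer and then steps it by delta (default -4), and pop_reg does the reverse:

#include <stdint.h>

/* C model of the push_reg/pop_reg macros above (default deltas). */
static inline void push_reg(uint32_t **ptr, uint32_t reg)
{
	**ptr = reg;		/* sw \ptr, \reg */
	*ptr -= 1;		/* addli \ptr, \ptr, -4 */
}

static inline uint32_t pop_reg(uint32_t **ptr)
{
	uint32_t reg = **ptr;	/* lw \reg, \ptr */
	*ptr += 1;		/* addli \ptr, \ptr, 4 */
	return reg;
}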
-
- .macro push_extra_callee_saves reg
- PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
- push_reg r51, \reg
- push_reg r50, \reg
- push_reg r49, \reg
- push_reg r48, \reg
- push_reg r47, \reg
- push_reg r46, \reg
- push_reg r45, \reg
- push_reg r44, \reg
- push_reg r43, \reg
- push_reg r42, \reg
- push_reg r41, \reg
- push_reg r40, \reg
- push_reg r39, \reg
- push_reg r38, \reg
- push_reg r37, \reg
- push_reg r36, \reg
- push_reg r35, \reg
- push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
- .endm
-
- .macro panic str
- .pushsection .rodata, "a"
-1:
- .asciz "\str"
- .popsection
- {
- moveli r0, lo16(1b)
- }
- {
- auli r0, r0, ha16(1b)
- jal panic
- }
- .endm
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
-intvec_feedback:
- .popsection
-#endif
-
- /*
- * Default interrupt handler.
- *
- * vecnum is where we'll put this code.
- * c_routine is the C routine we'll call.
- *
- * The C routine is passed two arguments:
- * - A pointer to the pt_regs state.
- * - The interrupt vector number.
- *
- * The "processing" argument specifies the code for processing
- * the interrupt. Defaults to "handle_interrupt".
- */
- .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
- .org (\vecnum << 8)
-intvec_\vecname:
- .ifc \vecnum, INT_SWINT_1
- blz TREG_SYSCALL_NR_NAME, sys_cmpxchg
- .endif
-
- /* Temporarily save a register so we have somewhere to work. */
-
- mtspr SPR_SYSTEM_SAVE_K_1, r0
- mfspr r0, SPR_EX_CONTEXT_K_1
-
- /* The cmpxchg code clears sp to force us to reset it here on fault. */
- {
- bz sp, 2f
- andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- }
-
- .ifc \vecnum, INT_DOUBLE_FAULT
- /*
- * For double-faults from user-space, fall through to the normal
- * register save and stack setup path. Otherwise, it's the
- * hypervisor giving us one last chance to dump diagnostics, and we
- * branch to the kernel_double_fault routine to do so.
- */
- bz r0, 1f
- j _kernel_double_fault
-1:
- .else
- /*
- * If we're coming from user-space, then set sp to the top of
- * the kernel stack. Otherwise, assume sp is already valid.
- */
- {
- bnz r0, 0f
- move r0, sp
- }
- .endif
-
- .ifc \c_routine, do_page_fault
- /*
- * The page_fault handler may be downcalled directly by the
- * hypervisor even when Linux is running and has ICS set.
- *
- * In this case the contents of EX_CONTEXT_K_1 reflect the
- * previous fault and can't be relied on to choose whether or
- * not to reinitialize the stack pointer. So we add a test
- * to see whether SYSTEM_SAVE_K_2 has the high bit set,
- * and if so we don't reinitialize sp, since we must be coming
- * from Linux. (In fact the precise case is !(val & ~1),
- * but any Linux PC has to have the high bit set.)
- *
- * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
- * any path that turns into a downcall to one of our TLB handlers.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_2
- {
- blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
- move r0, sp
- }
- .endif
-
-2:
- /*
- * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
- * the current stack top in the higher bits. So we recover
- * our stack top by just masking off the low bits, then
- * point sp at the top aligned address on the actual stack page.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_0
- mm r0, r0, zero, LOG2_NR_CPU_IDS, 31
-
-0:
- /*
- * Align the stack mod 64 so we can properly predict what
- * cache lines we need to write-hint to reduce memory fetch
- * latency as we enter the kernel. The layout of memory is
- * as follows, with cache line 0 at the lowest VA, and cache
- * line 4 just below the r0 value this "andi" computes.
- * Note that we never write to cache line 4, and we skip
- * cache line 1 for syscalls.
- *
- * cache line 4: ptregs padding (two words)
- * cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
- * cache line 2: r30...r45
- * cache line 1: r14...r29
- * cache line 0: 2 x frame, r0..r13
- */
-#if STACK_TOP_DELTA != 64
-#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
-#endif
- andi r0, r0, -64
-
- /*
- * Push the first four registers on the stack, so that we can set
- * them to vector-unique values before we jump to the common code.
- *
- * Registers are pushed on the stack as a struct pt_regs,
- * with the sp initially just above the struct, and when we're
- * done, sp points to the base of the struct, minus
- * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
- *
- * This routine saves just the first four registers, plus the
- * stack context so we can do proper backtracing right away,
- * and defers to handle_interrupt to save the rest.
- * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
- */
- addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
- wh64 r0 /* cache line 3 */
- {
- sw r0, lr
- addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- }
- {
- sw r0, sp
- addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
- }
- {
- sw sp, r52
- addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
- }
- wh64 sp /* cache line 0 */
- {
- sw sp, r1
- addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
- }
- {
- sw sp, r2
- addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
- }
- {
- sw sp, r3
- addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
- }
- mfspr r0, SPR_EX_CONTEXT_K_0
- .ifc \processing,handle_syscall
- /*
- * Bump the saved PC by one bundle so that when we return, we won't
- * execute the same swint instruction again. We need to do this while
- * we're in the critical section.
- */
- addi r0, r0, 8
- .endif
- {
- sw sp, r0
- addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r0, SPR_EX_CONTEXT_K_1
- {
- sw sp, r0
- addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- /*
- * Use r0 for syscalls so it's a temporary; use r1 for interrupts
- * so that it gets passed through unchanged to the handler routine.
- * Note that the .if conditional confusingly spans bundles.
- */
- .ifc \processing,handle_syscall
- movei r0, \vecnum
- }
- {
- sw sp, r0
- .else
- movei r1, \vecnum
- }
- {
- sw sp, r1
- .endif
- addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
- }
- mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
- {
- sw sp, r0
- addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
- }
- {
- sw sp, zero /* write zero into "Next SP" frame pointer */
- addi sp, sp, -4 /* leave SP pointing at bottom of frame */
- }
- .ifc \processing,handle_syscall
- j handle_syscall
- .else
- /*
- * Capture per-interrupt SPR context to registers.
- * We overload the meaning of r3 on this path such that if its bit 31
- * is set, we have to mask all interrupts including NMIs before
- * clearing the interrupt critical section bit.
- * See discussion below at "finish_interrupt_save".
- */
- .ifc \c_routine, do_page_fault
- mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
- mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
- .else
- .ifc \vecnum, INT_DOUBLE_FAULT
- {
- mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
- movei r3, 0
- }
- .else
- .ifc \c_routine, do_trap
- {
- mfspr r2, GPV_REASON
- movei r3, 0
- }
- .else
-	.ifc \vecnum, INT_PERF_COUNT
- {
- mfspr r2, PERF_COUNT_STS
- movei r3, -1 /* not used, but set for consistency */
- }
- .else
-	.ifc \vecnum, INT_AUX_PERF_COUNT
- {
- mfspr r2, AUX_PERF_COUNT_STS
- movei r3, -1 /* not used, but set for consistency */
- }
- .else
- movei r3, 0
- .endif
- .endif
- .endif
- .endif
- .endif
- /* Put function pointer in r0 */
- moveli r0, lo16(\c_routine)
- {
- auli r0, r0, ha16(\c_routine)
- j \processing
- }
- .endif
- ENDPROC(intvec_\vecname)
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
- .org (\vecnum << 5)
- FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
- jrp lr
- .popsection
-#endif
-
- .endm
-
-
- /*
- * Save the rest of the registers that we didn't save in the actual
- * vector itself. We can't use r0-r10 inclusive here.
- */
- .macro finish_interrupt_save, function
-
- /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
- PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
- {
- .ifc \function,handle_syscall
- sw r52, r0
- .else
- sw r52, zero
- .endif
- PTREGS_PTR(r52, PTREGS_OFFSET_TP)
- }
-
- /*
- * For ordinary syscalls, we save neither caller- nor callee-
- * save registers, since the syscall invoker doesn't expect the
- * caller-saves to be saved, and the called kernel functions will
- * take care of saving the callee-saves for us.
- *
- * For interrupts we save just the caller-save registers. Saving
- * them is required (since the "caller" can't save them). Again,
- * the called kernel functions will restore the callee-save
- * registers for us appropriately.
- *
- * On return, we normally restore nothing special for syscalls,
- * and just the caller-save registers for interrupts.
- *
- * However, there are some important caveats to all this:
- *
- * - We always save a few callee-save registers to give us
- * some scratchpad registers to carry across function calls.
- *
- * - fork/vfork/etc require us to save all the callee-save
- * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
- *
- * - We always save r0..r5 and r10 for syscalls, since we need
- * to reload them a bit later for the actual kernel call, and
- * since we might need them for -ERESTARTNOINTR, etc.
- *
- * - Before invoking a signal handler, we save the unsaved
- * callee-save registers so they are visible to the
- * signal handler or any ptracer.
- *
- * - If the unsaved callee-save registers are modified, we set
- * a bit in pt_regs so we know to reload them from pt_regs
- * and not just rely on the kernel function unwinding.
- * (Done for ptrace register writes and SA_SIGINFO handler.)
- */
- {
- sw r52, tp
- PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
- }
- wh64 r52 /* cache line 2 */
- push_reg r33, r52
- push_reg r32, r52
- push_reg r31, r52
- .ifc \function,handle_syscall
- push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
- push_reg TREG_SYSCALL_NR_NAME, r52, \
- PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
- .else
-
- push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
- wh64 r52 /* cache line 1 */
- push_reg r29, r52
- push_reg r28, r52
- push_reg r27, r52
- push_reg r26, r52
- push_reg r25, r52
- push_reg r24, r52
- push_reg r23, r52
- push_reg r22, r52
- push_reg r21, r52
- push_reg r20, r52
- push_reg r19, r52
- push_reg r18, r52
- push_reg r17, r52
- push_reg r16, r52
- push_reg r15, r52
- push_reg r14, r52
- push_reg r13, r52
- push_reg r12, r52
- push_reg r11, r52
- push_reg r10, r52
- push_reg r9, r52
- push_reg r8, r52
- push_reg r7, r52
- push_reg r6, r52
-
- .endif
-
- push_reg r5, r52
- sw r52, r4
-
- /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
- {
- mfspr r20, SPR_SYSTEM_SAVE_K_0
- moveli r21, lo16(__per_cpu_offset)
- }
- {
- auli r21, r21, ha16(__per_cpu_offset)
- mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
- }
- s2a r20, r20, r21
- lw tp, r20
-#else
- move tp, zero
-#endif
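What the SMP branch above computes is the classic per-cpu base load: take the CPU id from the low bits of SPR_SYSTEM_SAVE_K_0, index __per_cpu_offset, and put the result in tp. A minimal C sketch, with the SPR read and the 's2a'/'lw' pair modeled as plain array indexing:

    #include <stdint.h>

    extern unsigned long __per_cpu_offset[];  /* one entry per possible CPU */

    /* Sketch of the CONFIG_SMP path; cpu_bits stands in for LOG2_NR_CPU_IDS. */
    static unsigned long load_percpu_tp(unsigned long system_save_k_0,
                                        unsigned cpu_bits)
    {
        unsigned cpu = system_save_k_0 & ((1u << cpu_bits) - 1); /* 'mm' mask */
        return __per_cpu_offset[cpu];  /* 's2a' scales by 4, 'lw' loads */
    }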
-
- /*
- * If we will be returning to the kernel, we will need to
- * reset the interrupt masks to the state they had before.
- * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
- * We load flags in r32 here so we can jump to .Lrestore_regs
- * directly after do_page_fault_ics() if necessary.
- */
- mfspr r32, SPR_EX_CONTEXT_K_1
- {
- andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
- }
- bzt r32, 1f /* zero if from user space */
- IRQS_DISABLED(r32) /* zero if irqs enabled */
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
-#endif
-1:
- .ifnc \function,handle_syscall
- /* Record the fact that we saved the caller-save registers above. */
- ori r32, r32, PT_FLAGS_CALLER_SAVES
- .endif
- sw r21, r32
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- /*
- * Notify the feedback routines that we were in the
- * appropriate fixed interrupt vector area. Note that we
- * still have ICS set at this point, so we can't invoke any
- * atomic operations or we will panic. The feedback
- * routines internally preserve r0..r10 and r30 up.
- */
- .ifnc \function,handle_syscall
- shli r20, r1, 5
- .else
- moveli r20, INT_SWINT_1 << 5
- .endif
- addli r20, r20, lo16(intvec_feedback)
- auli r20, r20, ha16(intvec_feedback)
- jalr r20
-
- /* And now notify the feedback routines that we are here. */
- FEEDBACK_ENTER(\function)
-#endif
-
- /*
- * we've captured enough state to the stack (including in
- * particular our EX_CONTEXT state) that we can now release
- * the interrupt critical section and replace it with our
- * standard "interrupts disabled" mask value. This allows
- * synchronous interrupts (and profile interrupts) to punch
- * through from this point onwards.
- *
- * If bit 31 of r3 is set during a non-NMI interrupt, we know we
- * are on the path where the hypervisor has punched through our
- * ICS with a page fault, so we call out to do_page_fault_ics()
- * to figure out what to do with it. If the fault was in
- * an atomic op, we unlock the atomic lock, adjust the
- * saved register state a little, and return "zero" in r4,
- * falling through into the normal page-fault interrupt code.
- * If the fault was in a kernel-space atomic operation, then
- * do_page_fault_ics() resolves it itself, returns "one" in r4,
- * and as a result goes directly to restoring registers and iret,
- * without trying to adjust the interrupt masks at all.
- * The do_page_fault_ics() API involves passing and returning
- * a five-word struct (in registers) to avoid writing the
- * save and restore code here.
- */
- .ifc \function,handle_nmi
- IRQ_DISABLE_ALL(r20)
- .else
- .ifnc \function,handle_syscall
- bgezt r3, 1f
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_page_fault_ics
- }
- FEEDBACK_REENTER(\function)
- bzt r4, 1f
- j .Lrestore_regs
-1:
- .endif
- IRQ_DISABLE(r20, r21)
- .endif
- mtspr INTERRUPT_CRITICAL_SECTION, zero
-
- /*
- * Prepare the first 256 stack bytes to be rapidly accessible
- * without having to fetch the background data. We don't really
- * know how far to write-hint, but kernel stacks generally
- * aren't that big, and write-hinting here does take some time.
- */
- addi r52, sp, -64
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- wh64 r52
-
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
- .ifnc \function,handle_nmi
- /*
- * We finally have enough state set up to notify the irq
- * tracing code that irqs were disabled on entry to the handler.
- * The TRACE_IRQS_OFF call clobbers registers r0-r29.
- * For syscalls, we already have the register state saved away
- * on the stack, so we don't bother to do any register saves here,
- * and later we pop the registers back off the kernel stack.
- * For interrupt handlers, save r0-r3 in callee-saved registers.
- */
- .ifnc \function,handle_syscall
- { move r30, r0; move r31, r1 }
- { move r32, r2; move r33, r3 }
- .endif
- TRACE_IRQS_OFF
-#ifdef CONFIG_CONTEXT_TRACKING
- jal context_tracking_user_exit
-#endif
- .ifnc \function,handle_syscall
- { move r0, r30; move r1, r31 }
- { move r2, r32; move r3, r33 }
- .endif
- .endif
-#endif
-
- .endm
-
- .macro check_single_stepping, kind, not_single_stepping
- /*
- * Check for single-stepping at user-level privilege.
- * \kind can be "normal", "ill", or "syscall".
- * On fall-through, at the end:
- * r29: thread_info->step_state
- * r28: &pt_regs->pc
- * r27: pt_regs->pc
- * r26: thread_info->step_state->buffer
- */
-
- /* Check for single stepping */
- GET_THREAD_INFO(r29)
- {
- /* Get pointer to field holding step state */
- addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET
-
- /* Get pointer to EX1 in register state */
- PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
- }
- {
- /* Get pointer to field holding PC */
- PTREGS_PTR(r28, PTREGS_OFFSET_PC)
-
- /* Load the pointer to the step state */
- lw r29, r29
- }
- /* Load EX1 */
- lw r27, r27
- {
- /* Points to flags */
- addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET
-
- /* No single stepping if there is no step state structure */
- bzt r29, \not_single_stepping
- }
- {
- /* mask off ICS and any other high bits */
- andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK
-
- /* Load pointer to single step instruction buffer */
- lw r26, r29
- }
- /* Check priv state */
- bnz r27, \not_single_stepping
-
- /* Get flags */
- lw r22, r23
- {
- /* Branch if single-step mode not enabled */
- bbnst r22, \not_single_stepping
-
- /* Clear enabled flag */
- andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
- }
- .ifc \kind,normal
- {
- /* Load PC */
- lw r27, r28
-
- /* Point to the entry containing the original PC */
- addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
- }
- {
- /* Disable single stepping flag */
- sw r23, r22
- }
- {
- /* Get the original pc */
- lw r24, r24
-
- /* See if the PC is at the start of the single step buffer */
- seq r25, r26, r27
- }
- /*
- * NOTE: it is really expected that the PC be in the single step buffer
- * at this point
- */
- bzt r25, \not_single_stepping
-
- /* Restore the original PC */
- sw r28, r24
- .else
- .ifc \kind,syscall
- {
- /* Load PC */
- lw r27, r28
-
- /* Point to the entry containing the next PC */
- addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
- }
- {
- /* Increment the stopped PC by the bundle size */
- addi r26, r26, 8
-
- /* Disable single stepping flag */
- sw r23, r22
- }
- {
- /* Get the next pc */
- lw r24, r24
-
- /*
- * See if the PC is one bundle past the start of the
- * single step buffer
- */
- seq r25, r26, r27
- }
- {
- /*
- * NOTE: it is really expected that the PC be in the
- * single step buffer at this point
- */
- bzt r25, \not_single_stepping
- }
- /* Set to the next PC */
- sw r28, r24
- .else
- {
- /* Point to 3rd bundle in buffer */
- addi r25, r26, 16
-
- /* Load PC */
- lw r27, r28
- }
- {
- /* Disable single stepping flag */
- sw r23, r22
-
- /* See if the PC is in the single step buffer */
- slte_u r24, r26, r27
- }
- {
- slte_u r25, r27, r25
-
- /*
- * NOTE: it is really expected that the PC be in the
- * single step buffer at this point
- */
- bzt r24, \not_single_stepping
- }
- bzt r25, \not_single_stepping
- .endif
- .endif
- .endm
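Stripped of bundling, the "normal" variant of the macro above is a straightforward guarded PC fixup. A C sketch with hypothetical field names mirroring the SINGLESTEP_STATE_* offsets (the enabled-flag mask is assumed to be bit 0):

    #include <stdbool.h>
    #include <stdint.h>

    struct single_step_state {          /* names illustrative */
        uint32_t *buffer;               /* single-step instruction buffer */
        uint32_t flags;                 /* IS_ENABLED assumed in bit 0 */
        uint32_t orig_pc;               /* PC to restore ("normal" case) */
    };

    /* Returns true if it restored the PC; false means "not single-stepping". */
    static bool check_single_stepping_normal(struct single_step_state *s,
                                             uint32_t *pc, uint32_t ex1_pl)
    {
        if (!s || ex1_pl != 0)          /* no step state, or not user PL */
            return false;
        if (!(s->flags & 1))            /* single-step not enabled */
            return false;
        s->flags &= ~1u;                /* clear the enabled flag */
        if (*pc != (uint32_t)(uintptr_t)s->buffer)
            return false;               /* PC expected at buffer start */
        *pc = s->orig_pc;               /* restore the original PC */
        return true;
    }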
-
- /*
- * Redispatch a downcall.
- */
- .macro dc_dispatch vecnum, vecname
- .org (\vecnum << 8)
-intvec_\vecname:
- j _hv_downcall_dispatch
- ENDPROC(intvec_\vecname)
- .endm
-
- /*
- * Common code for most interrupts. The C function we're eventually
- * going to is in r0, and the faultnum is in r1; the original
- * values for those registers are on the stack.
- */
- .pushsection .text.handle_interrupt,"ax"
-handle_interrupt:
- finish_interrupt_save handle_interrupt
-
- /*
- * Check if we are single-stepping at user level. If so,
- * we need to restore the PC.
- */
-
- check_single_stepping normal, .Ldispatch_interrupt
-.Ldispatch_interrupt:
-
- /* Jump to the C routine; it should enable irqs as soon as possible. */
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_interrupt)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_interrupt)
-
-/*
- * This routine takes a boolean in r30 indicating if this is an NMI.
- * If so, we also expect a boolean in r31 indicating whether to
- * re-enable the oprofile interrupts.
- *
- * Note that .Lresume_userspace is jumped to directly in several
- * places, and we need to make sure r30 is set correctly in those
- * callers as well.
- */
-STD_ENTRY(interrupt_return)
- /* If we're resuming to kernel space, don't check thread flags. */
- {
- bnz r30, .Lrestore_all /* NMIs don't special-case user-space */
- PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
- }
- lw r29, r29
- andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- bzt r29, .Lresume_userspace
-
-#ifdef CONFIG_PREEMPT
- /* Returning to kernel space. Check if we need preemption. */
- GET_THREAD_INFO(r29)
- addli r28, r29, THREAD_INFO_FLAGS_OFFSET
- {
- lw r28, r28
- addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
- }
- {
- andi r28, r28, _TIF_NEED_RESCHED
- lw r29, r29
- }
- bzt r28, 1f
- bnz r29, 1f
- /* Disable interrupts explicitly for preemption. */
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- jal preempt_schedule_irq
- FEEDBACK_REENTER(interrupt_return)
-1:
-#endif
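The CONFIG_PREEMPT block is the usual return-to-kernel preemption check, just spread across bundles. Approximately, in C (the layout follows the offsets used above; the need-resched bit position is illustrative):

    extern void preempt_schedule_irq(void);

    struct thread_info_sketch {         /* illustrative layout */
        unsigned long flags;
        int preempt_count;
    };
    #define TIF_NEED_RESCHED_BIT 3      /* assumed bit position */

    static void maybe_preempt(struct thread_info_sketch *ti)
    {
        if ((ti->flags & (1ul << TIF_NEED_RESCHED_BIT)) &&
            ti->preempt_count == 0)
            preempt_schedule_irq();     /* called with irqs disabled */
    }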
-
- /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
- moveli r27, lo16(_cpu_idle_nap)
- }
- {
- lw r28, r29
- auli r27, r27, ha16(_cpu_idle_nap)
- }
- {
- seq r27, r27, r28
- }
- {
- bbns r27, .Lrestore_all
- addi r28, r28, 8
- }
- sw r29, r28
- j .Lrestore_all
-
-.Lresume_userspace:
- FEEDBACK_REENTER(interrupt_return)
-
- /*
- * Disable interrupts so as to make sure we don't
- * miss an interrupt that sets any of the thread flags (like
- * need_resched or sigpending) between sampling and the iret.
- * Routines like schedule() or do_signal() may re-enable
- * interrupts before returning.
- */
- IRQ_DISABLE(r20, r21)
- TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
-
- /*
- * See if there are any work items (including single-shot items)
- * to do. If so, save the callee-save registers to pt_regs
- * and then dispatch to C code.
- */
- GET_THREAD_INFO(r21)
- {
- addi r22, r21, THREAD_INFO_FLAGS_OFFSET
- moveli r20, lo16(_TIF_ALLWORK_MASK)
- }
- {
- lw r22, r22
- auli r20, r20, ha16(_TIF_ALLWORK_MASK)
- }
- and r1, r22, r20
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- bzt r1, .Lrestore_all
- }
- push_extra_callee_saves r0
- jal prepare_exit_to_usermode
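The test just above is the standard "pending work" gate on the way out to userspace: if any bit of _TIF_ALLWORK_MASK is set, save the extra callee-saves and call out to C. A sketch:

    struct pt_regs;                     /* kernel type, opaque here */
    extern void prepare_exit_to_usermode(struct pt_regs *regs);

    static void resume_userspace_work(struct pt_regs *regs,
                                      unsigned long ti_flags,
                                      unsigned long allwork_mask)
    {
        if (ti_flags & allwork_mask)    /* 'and r1, r22, r20'; 'bzt' skips */
            prepare_exit_to_usermode(regs);  /* callee-saves pushed first */
    }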
-
- /*
- * In the NMI case we
- * omit the call to single_process_check_nohz, which normally checks
- * to see if we should start or stop the scheduler tick, because
- * we can't call arbitrary Linux code from an NMI context.
- * We always call the homecache TLB deferral code to re-trigger
- * the deferral mechanism.
- *
- * The other chunk of responsibility this code has is to reset the
- * interrupt masks appropriately to re-enable irqs and NMIs. We have
- * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
- * lockdep-type stuff, but we can't set ICS until afterwards, since
- * ICS can only be used in very tight chunks of code to avoid
- * tripping over various assertions that it is off.
- *
- * (There is what looks like a window of vulnerability here since
- * we might take a profile interrupt between the two SPR writes
- * that set the mask, but since we write the low SPR word first,
- * and our interrupt entry code checks the low SPR word, any
- * profile interrupt will actually disable interrupts in both SPRs
- * before returning, which is OK.)
- */
-.Lrestore_all:
- PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
- {
- lw r0, r0
- PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
- }
- {
- andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
- lw r32, r32
- }
- bnz r0, 1f
- j 2f
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
-#endif
-1: bbnst r32, 2f
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- bzt r30, .Lrestore_regs
- j 3f
-2: TRACE_IRQS_ON
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- IRQ_ENABLE(r20, r21)
- bzt r30, .Lrestore_regs
-3:
-
- /* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */
- {
- moveli r0, lo16(1 << (INT_PERF_COUNT - 32))
- bz r31, .Lrestore_regs
- }
- auli r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32))
- mtspr SPR_INTERRUPT_MASK_RESET_K_1, r0
-
- /*
- * We now commit to returning from this interrupt, since we will be
- * doing things like setting EX_CONTEXT SPRs and unwinding the stack
- * frame. No calls should be made to any other code after this point.
- * This code should only be entered with ICS set.
- * r32 must still be set to ptregs.flags.
- * We launch loads to each cache line separately first, so we can
- * get some parallelism out of the memory subsystem.
- * We start zeroing caller-saved registers throughout, since
- * that will save some cycles if this turns out to be a syscall.
- */
-.Lrestore_regs:
- FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
-
- /*
- * Rotate so we have one high bit and one low bit to test.
- * - low bit says whether to restore all the callee-saved registers,
- * or just r30-r33, and r52 up.
- * - high bit (i.e. sign bit) says whether to restore all the
- * caller-saved registers, or just r0.
- */
-#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
-# error Rotate trick does not work :-)
-#endif
- {
- rli r20, r32, 30
- PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
- }
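The rotate is doing double duty: with PT_FLAGS_CALLER_SAVES == 2 (bit 1) and PT_FLAGS_RESTORE_REGS == 4 (bit 2), 'rli r20, r32, 30' lands bit 1 in the sign bit and bit 2 in bit 0, so each later test is a single sign-bit or low-bit branch. Numerically:

    #include <stdint.h>

    /* 'rli rd, rs, 30' on a 32-bit word (only n == 30 is used here) */
    static uint32_t rotl32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* rotl32(2, 30) == 0x80000000: CALLER_SAVES -> sign bit (blzt test)
     * rotl32(4, 30) == 0x00000001: RESTORE_REGS -> bit 0    (bbs test) */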
-
- /*
- * Load cache lines 0, 2, and 3 in that order, then use
- * the last loaded value, which makes it likely that the other
- * cache lines have also loaded, at which point we should be
- * able to safely read all the remaining words on those cache
- * lines without waiting for the memory subsystem.
- */
- pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
- pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
- pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
- {
- mtspr SPR_EX_CONTEXT_K_0, r21
- move r5, zero
- }
- {
- mtspr SPR_EX_CONTEXT_K_1, lr
- andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- }
-
- /* Restore callee-saveds that we actually use. */
- pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
- pop_reg_zero r31, r7
- pop_reg_zero r32, r8
- pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
-
- /*
- * If we modified other callee-saveds, restore them now.
- * This is rare, but could be via ptrace or signal handler.
- */
- {
- move r10, zero
- bbs r20, .Lrestore_callees
- }
-.Lcontinue_restore_regs:
-
- /* Check if we're returning from a syscall. */
- {
- move r11, zero
- blzt r20, 1f /* no, so go restore callee-save registers */
- }
-
- /*
- * Check if we're returning to userspace.
- * Note that if we're not, we don't worry about zeroing everything.
- */
- {
- addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
- bnz lr, .Lkernel_return
- }
-
- /*
- * On return from syscall, we've restored r0 from pt_regs, but we
- * clear the remainder of the caller-saved registers. We could
- * restore the syscall arguments, but there's not much point:
- * clearing them ensures user programs aren't relying on the
- * caller-saves surviving the syscall, and it avoids leaking
- * kernel pointers into userspace.
- */
- pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- {
- lw sp, sp
- move r14, zero
- move r15, zero
- }
- { move r16, zero; move r17, zero }
- { move r18, zero; move r19, zero }
- { move r20, zero; move r21, zero }
- { move r22, zero; move r23, zero }
- { move r24, zero; move r25, zero }
- { move r26, zero; move r27, zero }
-
- /* Set r1 to errno if we are returning an error, otherwise zero. */
- {
- moveli r29, 4096
- sub r1, zero, r0
- }
- slt_u r29, r1, r29
- {
- mnz r1, r29, r1
- move r29, zero
- }
- iret
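The three bundles above implement the usual negative-errno convention: a raw return value in [-4095, -1] comes back to userspace with the positive errno in r1, and anything else yields r1 == 0. Equivalent C:

    /* Sketch of the r1 computation: the sub/slt_u/mnz sequence above. */
    static long errno_from_ret(long r0)
    {
        unsigned long neg = 0ul - (unsigned long)r0;  /* sub r1, zero, r0 */
        return neg < 4096 ? (long)neg : 0;            /* slt_u + mnz */
    }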
-
- /*
- * Not a syscall, so restore caller-saved registers.
- * First kick off a load for cache line 1, which we're touching
- * for the first time here.
- */
- .align 64
-1: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
- pop_reg r1
- pop_reg r2
- pop_reg r3
- pop_reg r4
- pop_reg r5
- pop_reg r6
- pop_reg r7
- pop_reg r8
- pop_reg r9
- pop_reg r10
- pop_reg r11
- pop_reg r12
- pop_reg r13
- pop_reg r14
- pop_reg r15
- pop_reg r16
- pop_reg r17
- pop_reg r18
- pop_reg r19
- pop_reg r20
- pop_reg r21
- pop_reg r22
- pop_reg r23
- pop_reg r24
- pop_reg r25
- pop_reg r26
- pop_reg r27
- pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
- /* r29 already restored above */
- bnz lr, .Lkernel_return
- pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- lw sp, sp
- iret
-
- /*
- * We can't restore tp when in kernel mode, since a thread might
- * have migrated from another cpu and brought a stale tp value.
- */
-.Lkernel_return:
- pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- lw sp, sp
- iret
-
- /* Restore callee-saved registers from r34 to r51. */
-.Lrestore_callees:
- addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
- pop_reg r34
- pop_reg r35
- pop_reg r36
- pop_reg r37
- pop_reg r38
- pop_reg r39
- pop_reg r40
- pop_reg r41
- pop_reg r42
- pop_reg r43
- pop_reg r44
- pop_reg r45
- pop_reg r46
- pop_reg r47
- pop_reg r48
- pop_reg r49
- pop_reg r50
- pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
- j .Lcontinue_restore_regs
- STD_ENDPROC(interrupt_return)
-
- /*
- * Some interrupts don't check for single stepping
- */
- .pushsection .text.handle_interrupt_no_single_step,"ax"
-handle_interrupt_no_single_step:
- finish_interrupt_save handle_interrupt_no_single_step
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_interrupt_no_single_step)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_interrupt_no_single_step)
-
- /*
- * "NMI" interrupts mask ALL interrupts before calling the
- * handler, and don't check thread flags, etc., on the way
- * back out. In general, the only things we do here for NMIs
- * are the register save/restore, fixing the PC if we were
- * doing single step, and the dataplane kernel-TLB management.
- * We don't (for example) deal with start/stop of the sched tick.
- */
- .pushsection .text.handle_nmi,"ax"
-handle_nmi:
- finish_interrupt_save handle_nmi
- check_single_stepping normal, .Ldispatch_nmi
-.Ldispatch_nmi:
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_nmi)
- {
- movei r30, 1
- seq r31, r0, zero
- }
- j interrupt_return
- STD_ENDPROC(handle_nmi)
-
- /*
- * Parallel code for syscalls to handle_interrupt.
- */
- .pushsection .text.handle_syscall,"ax"
-handle_syscall:
- finish_interrupt_save handle_syscall
-
- /*
- * Check if we are single-stepping at user level. If so,
- * we need to restore the PC.
- */
- check_single_stepping syscall, .Ldispatch_syscall
-.Ldispatch_syscall:
-
- /* Enable irqs. */
- TRACE_IRQS_ON
- IRQ_ENABLE(r20, r21)
-
- /* Bump the counter for syscalls made on this tile. */
- moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- add r20, r20, tp
- lw r21, r20
- addi r21, r21, 1
- {
- sw r20, r21
- GET_THREAD_INFO(r31)
- }
-
- /* Trace syscalls, if requested. */
- addi r31, r31, THREAD_INFO_FLAGS_OFFSET
- lw r30, r31
- andi r30, r30, _TIF_SYSCALL_TRACE
- bzt r30, .Lrestore_syscall_regs
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_enter
- }
- FEEDBACK_REENTER(handle_syscall)
- blz r0, .Lsyscall_sigreturn_skip
-
- /*
- * We always reload our registers from the stack at this
- * point. They might be valid, if we didn't build with
- * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
- * doing syscall tracing, but there are enough cases now that it
- * seems simplest just to do the reload unconditionally.
- */
-.Lrestore_syscall_regs:
- PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
- pop_reg r0, r11
- pop_reg r1, r11
- pop_reg r2, r11
- pop_reg r3, r11
- pop_reg r4, r11
- pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
- pop_reg TREG_SYSCALL_NR_NAME, r11
-
- /* Ensure that the syscall number is within the legal range. */
- moveli r21, __NR_syscalls
- {
- slt_u r21, TREG_SYSCALL_NR_NAME, r21
- moveli r20, lo16(sys_call_table)
- }
- {
- bbns r21, .Linvalid_syscall
- auli r20, r20, ha16(sys_call_table)
- }
- s2a r20, TREG_SYSCALL_NR_NAME, r20
- lw r20, r20
-
- /* Jump to syscall handler. */
- jalr r20
-.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
-
- /*
- * Write our r0 onto the stack so it gets restored instead
- * of whatever the user had there before.
- */
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- sw r29, r0
-
-.Lsyscall_sigreturn_skip:
- FEEDBACK_REENTER(handle_syscall)
-
- /* Do syscall trace again, if requested. */
- lw r30, r31
- andi r30, r30, _TIF_SYSCALL_TRACE
- bzt r30, 1f
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_exit
- }
- FEEDBACK_REENTER(handle_syscall)
-1: {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
-
-.Linvalid_syscall:
- /* Report an invalid syscall back to the user program */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- movei r28, -ENOSYS
- }
- sw r29, r28
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(handle_syscall)
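Condensed, the dispatch sequence in handle_syscall is: range-check the syscall number, index sys_call_table, make the indirect call, then write the result back into the saved r0 slot. A C rendering of the range check and call (the write-back to pt_regs is elided):

    #include <errno.h>

    typedef long (*syscall_fn)(long, long, long, long, long, long);
    extern syscall_fn sys_call_table[];

    static long dispatch(unsigned long nr, unsigned long nr_syscalls,
                         long a0, long a1, long a2,
                         long a3, long a4, long a5)
    {
        if (nr >= nr_syscalls)          /* slt_u + bbns .Linvalid_syscall */
            return -ENOSYS;
        return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
    }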
-
- /* Return the address for oprofile to suppress in backtraces. */
-STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
- lnk r0
- {
- addli r0, r0, .Lhandle_syscall_link - .
- jrp lr
- }
- STD_ENDPROC(handle_syscall_link_address)
-
-STD_ENTRY(ret_from_fork)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_fork)
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(ret_from_fork)
-
-STD_ENTRY(ret_from_kernel_thread)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_kernel_thread)
- {
- move r0, r31
- jalr r30
- }
- FEEDBACK_REENTER(ret_from_kernel_thread)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(ret_from_kernel_thread)
-
- /*
- * Code for ill interrupt.
- */
- .pushsection .text.handle_ill,"ax"
-handle_ill:
- finish_interrupt_save handle_ill
-
- /*
- * Check if we are single-stepping at user level. If so,
- * we need to restore the PC.
- */
- check_single_stepping ill, .Ldispatch_normal_ill
-
- {
- /* See if the PC is the 1st bundle in the buffer */
- seq r25, r27, r26
-
- /* Point to the 2nd bundle in the buffer */
- addi r26, r26, 8
- }
- {
- /* Point to the original pc */
- addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
-
- /* Branch if the PC is the 1st bundle in the buffer */
- bnz r25, 3f
- }
- {
- /* See if the PC is the 2nd bundle of the buffer */
- seq r25, r27, r26
-
- /* Point to the entry containing the next PC */
- addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
- }
- {
- /* Point to flags */
- addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET
-
- /* Branch away if the PC is not in the second bundle */
- bz r25, 2f
- }
- /* Load flags */
- lw r25, r25
- {
- /*
- * Get the offset for the register to restore
- * Note: the lower bound is 2, so we have implicit scaling by 4.
- * No multiplication of the register number by the size of a register
- * is needed.
- */
- mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
- SINGLESTEP_STATE_TARGET_UB
-
- /* Mask Rewrite_LR */
- andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE
- }
- {
- addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET
-
- /* Don't rewrite temp register */
- bz r25, 3f
- }
- {
- /* Get the temp value */
- lw r29, r29
-
- /* Point to where the register is stored */
- add r27, r27, sp
- }
-
- /* Add in the C ABI save area size to the register offset */
- addi r27, r27, C_ABI_SAVE_AREA_SIZE
-
- /* Restore the user's register with the temp value */
- sw r27, r29
- j 3f
-
-2:
- /* Must be in the third bundle */
- addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET
-
-3:
- /* set PC and continue */
- lw r26, r24
- {
- sw r28, r26
- GET_THREAD_INFO(r0)
- }
-
- /*
- * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
- * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
- * need to clear it here and can't really impose on all other arches.
- * So what's another write between friends?
- */
-
- addi r1, r0, THREAD_INFO_FLAGS_OFFSET
- {
- lw r2, r1
- addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */
- }
- andi r2, r2, ~_TIF_SINGLESTEP
- sw r1, r2
-
- /* Issue a sigtrap */
- {
- lw r0, r0 /* indirect thru thread_info to get task_struct */
- addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
- }
-
- jal send_sigtrap /* issue a SIGTRAP */
- FEEDBACK_REENTER(handle_ill)
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
-
-.Ldispatch_normal_ill:
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_ill)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_ill)
-
-/* Various stub interrupt handlers and syscall handlers */
-
-STD_ENTRY_LOCAL(_kernel_double_fault)
- mfspr r1, SPR_EX_CONTEXT_K_0
- move r2, lr
- move r3, sp
- move r4, r52
- addi sp, sp, -C_ABI_SAVE_AREA_SIZE
- j kernel_double_fault
- STD_ENDPROC(_kernel_double_fault)
-
-STD_ENTRY_LOCAL(bad_intr)
- mfspr r2, SPR_EX_CONTEXT_K_0
- panic "Unhandled interrupt %#x: PC %#lx"
- STD_ENDPROC(bad_intr)
-
-/*
- * Special-case sigreturn to not write r0 to the stack on return.
- * This is technically more efficient, but it also avoids difficulties
- * in the 64-bit OS when handling 32-bit compat code, since we must not
- * sign-extend r0 for the sigreturn return-value case.
- */
-#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
- STD_ENTRY(_##x); \
- addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
- { \
- PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
- j x \
- }; \
- STD_ENDPROC(_##x)
-
-PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
-
-/* Save additional callee-saves to pt_regs and jump to standard function. */
-STD_ENTRY(_sys_clone)
- push_extra_callee_saves r4
- j sys_clone
- STD_ENDPROC(_sys_clone)
-
-/*
- * This entrypoint is taken for the cmpxchg and atomic_update fast
- * swints. We may wish to generalize it to other fast swints at some
- * point, but for now there are just two very similar ones, which
- * makes it faster.
- *
- * The fast swint code is designed to have a small footprint. It does
- * not save or restore any GPRs, counting on the caller-save registers
- * to be available to it on entry. It does not modify any callee-save
- * registers (including "lr"). It does not check what PL it is being
- * called at, so you'd better not call it other than at PL0.
- * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
- * it ever is necessary to use more registers, be aware.
- *
- * It does not use the stack, but since it might be re-interrupted by
- * a page fault which would assume the stack was valid, it does
- * save/restore the stack pointer and zero it out to make sure it gets reset.
- * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
- * (other than to advance the PC on return).
- *
- * We have to manually validate the user vs kernel address range
- * (since at PL1 we can read/write both), and for performance reasons
- * we don't allow cmpxchg on the fc000000 memory region, since we only
- * validate that the user address is below PAGE_OFFSET.
- *
- * We place it in the __HEAD section to ensure it is relatively
- * near to the intvec_SWINT_1 code (reachable by a conditional branch).
- *
- * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
- *
- * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
- * would store is the same as the value we just loaded.
- */
- __HEAD
- .align 64
- /* Align the much-later jump to the start of a cache line. */
- nop
-#if PAGE_SIZE >= 0x10000
- nop
-#endif
-ENTRY(sys_cmpxchg)
-
- /*
- * Save "sp" and set it zero for any possible page fault.
- *
- * HACK: We want to both zero sp and check r0's alignment,
- * so we do both at once. If "sp" becomes nonzero we
- * know r0 is unaligned and branch to the error handler that
- * restores sp, so this is OK.
- *
- * ICS is disabled right now so having a garbage but nonzero
- * sp is OK, since we won't execute any faulting instructions
- * when it is nonzero.
- */
- {
- move r27, sp
- andi sp, r0, 3
- }
-
- /*
- * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
- * address is less than PAGE_OFFSET, since that won't trap at PL1.
- * We only use bits less than PAGE_SHIFT to avoid having to worry
- * about aliasing among multiple mappings of the same physical page,
- * and we ignore the low 3 bits so we have one lock that covers
- * both a cmpxchg64() and a cmpxchg() on either its low or high word.
- * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
- */
-
-#if (PAGE_OFFSET & 0xffff) != 0
-# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
-#endif
-
- {
- /* Check for unaligned input. */
- bnz sp, .Lcmpxchg_badaddr
- auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
- }
- {
- /*
- * Slide bits into position for 'mm'. We want to ignore
- * the low 3 bits of r0, and consider only the next
- * ATOMIC_HASH_SHIFT bits.
- * Because of C pointer arithmetic, we want to compute this:
- *
- * ((char*)atomic_locks +
- * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
- *
- * Instead of two shifts we just ">> 1", and use 'mm'
- * to ignore the low and high bits we don't want.
- */
- shri r25, r0, 1
-
- slt_u r23, r0, r23
-
- /*
- * Ensure that the TLB is loaded before we take out the lock.
- * This will start fetching the value all the way into our L1
- * as well (and if it gets modified before we grab the lock,
- * it will be invalidated from our cache before we reload it).
- */
- lw r26, r0
- }
- {
- auli r21, zero, ha16(atomic_locks)
-
- bbns r23, .Lcmpxchg_badaddr
- }
-#if PAGE_SIZE < 0x10000
- /* atomic_locks is page-aligned so for big pages we don't need this. */
- addli r21, r21, lo16(atomic_locks)
-#endif
- {
- /*
- * Insert the hash bits into the page-aligned pointer.
- * ATOMIC_HASH_SHIFT is so big that we don't actually hash
- * the unmasked address bits, as that may cause unnecessary
- * collisions.
- */
- mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
-
- seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
- }
- {
- /* Branch away at this point if we're doing a 64-bit cmpxchg. */
- bbs r23, .Lcmpxchg64
- andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
- }
- {
- /*
- * We very carefully align the code that actually runs with
- * the lock held (twelve bundles) so that we know it is all in
- * the icache when we start. This instruction (the jump) is
- * at the start of the first cache line, address zero mod 64;
- * we jump to the very end of the second cache line to get that
- * line loaded in the icache, then fall through to issue the tns
- * in the third cache line, at which point it's all cached.
- * Note that is for performance, not correctness.
- */
- j .Lcmpxchg32_tns
- }
-
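The 'mm' sequence above computes the lock hash spelled out in the comment: drop the low 3 bits of the user address, keep ATOMIC_HASH_SHIFT bits, and use them as a word index into the page-aligned atomic_locks[] table. As a C sketch (the comment notes this must stay in sync with __atomic_hashed_lock() in lib/atomic_32.c; the shift value here is illustrative):

    #include <stdint.h>

    #define ATOMIC_HASH_SHIFT 10        /* illustrative value */
    extern int atomic_locks[];          /* page-aligned lock table */

    /* One lock covers an aligned 8-byte span, so a cmpxchg() on either
     * half of a cmpxchg64() word hashes to the same lock. */
    static int *hashed_lock(uintptr_t uaddr)
    {
        return &atomic_locks[(uaddr >> 3) & ((1u << ATOMIC_HASH_SHIFT) - 1)];
    }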
-/* Symbol for do_page_fault_ics() to use to compare against the PC. */
-.global __sys_cmpxchg_grab_lock
-__sys_cmpxchg_grab_lock:
-
- /*
- * Perform the actual cmpxchg or atomic_update.
- */
-.Ldo_cmpxchg32:
- {
- lw r21, r0
- seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
- move r24, r2
- }
- {
- seq r22, r21, r1 /* See if cmpxchg matches. */
- and r25, r21, r1 /* If atomic_update, compute (*mem & mask) */
- }
- {
- or r22, r22, r23 /* Skip compare branch for atomic_update. */
- add r25, r25, r2 /* Compute (*mem & mask) + addend. */
- }
- {
- mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */
- bbns r22, .Lcmpxchg32_nostore
- }
- seq r22, r24, r21 /* Are we storing the value we loaded? */
- bbs r22, .Lcmpxchg32_nostore
- sw r0, r24
-
- /* The following instruction is the start of the second cache line. */
- /* Do slow mtspr here so the following "mf" waits less. */
- {
- move sp, r27
- mtspr SPR_EX_CONTEXT_K_0, r28
- }
- mf
-
- {
- move r0, r21
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- iret
-
- /* Duplicated code here in the case where we don't overlap "mf" */
-.Lcmpxchg32_nostore:
- {
- move r0, r21
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- {
- move sp, r27
- mtspr SPR_EX_CONTEXT_K_0, r28
- }
- iret
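.Ldo_cmpxchg32 folds both fast swints into one read-modify-write and skips the store whenever the stored value would equal the loaded one. The merged semantics, as a C sketch with the locking, fencing, and iret plumbing elided:

    #include <stdbool.h>
    #include <stdint.h>

    /* r1 is the compare value (cmpxchg) or the mask (atomic_update);
     * r2 is the new value (cmpxchg) or the addend (atomic_update). */
    static uint32_t fast_rmw32(uint32_t *mem, uint32_t r1, uint32_t r2,
                               bool is_atomic_update)
    {
        uint32_t old = *mem;
        uint32_t val = is_atomic_update ? (old & r1) + r2 : r2;
        bool store = is_atomic_update || old == r1;
        if (store && val != old)        /* bypass the no-op store */
            *mem = val;
        return old;                     /* old value is returned in r0 */
    }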
-
- /*
- * The locking code is the same for 32-bit cmpxchg/atomic_update,
- * and for 64-bit cmpxchg. We provide it as a macro and put
- * it into both versions. We can't share the code literally
- * since it depends on having the right branch-back address.
- */
- .macro cmpxchg_lock, bitwidth
-
- /* Lock; if we succeed, jump back up to the read-modify-write. */
-#ifdef CONFIG_SMP
- tns r21, ATOMIC_LOCK_REG_NAME
-#else
- /*
- * Non-SMP preserves all the lock infrastructure, to keep the
- * code simpler for the interesting (SMP) case. However, we do
- * one small optimization here and in atomic_asm.S, which is
- * to fake out acquiring the actual lock in the atomic_lock table.
- */
- movei r21, 0
-#endif
-
- /* Issue the slow SPR here while the tns result is in flight. */
- mfspr r28, SPR_EX_CONTEXT_K_0
-
- {
- addi r28, r28, 8 /* return to the instruction after the swint1 */
- bzt r21, .Ldo_cmpxchg\bitwidth
- }
- /*
- * The preceding instruction is the last thing that must be
- * hot in the icache before we do the "tns" above.
- */
-
-#ifdef CONFIG_SMP
- /*
- * We failed to acquire the tns lock on our first try. Now use
- * bounded exponential backoff to retry, like __atomic_spinlock().
- */
- {
- moveli r23, 2048 /* maximum backoff time in cycles */
- moveli r25, 32 /* starting backoff time in cycles */
- }
-1: mfspr r26, CYCLE_LOW /* get start point for this backoff */
-2: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */
- sub r22, r22, r26
- slt r22, r22, r25
- bbst r22, 2b
- {
- shli r25, r25, 1 /* double the backoff; retry the tns */
- tns r21, ATOMIC_LOCK_REG_NAME
- }
- slt r26, r23, r25 /* is the proposed backoff too big? */
- {
- mvnz r25, r26, r23
- bzt r21, .Ldo_cmpxchg\bitwidth
- }
- j 1b
-#endif /* CONFIG_SMP */
- .endm
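The SMP retry path above is bounded exponential backoff on the tns lock word, doubling a 32-cycle wait up to a 2048-cycle cap, as in __atomic_spinlock(). The same loop in C, with hypothetical helpers standing in for 'mfspr CYCLE_LOW' and 'tns':

    extern unsigned long read_cycle_low(void); /* mfspr r, CYCLE_LOW */
    extern int tns_try_acquire(int *lock);     /* tns; 0 means acquired */

    static void lock_with_backoff(int *lock)
    {
        unsigned long backoff = 32;            /* starting backoff, cycles */
        while (tns_try_acquire(lock) != 0) {
            unsigned long start = read_cycle_low();
            while (read_cycle_low() - start < backoff)
                ;                              /* spin out the window */
            if ((backoff *= 2) > 2048)         /* double, clamp at max */
                backoff = 2048;
        }
    }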
-
-.Lcmpxchg32_tns:
- /*
- * This is the last instruction on the second cache line.
- * The nop here loads the second line, then we fall through
- * to the tns to load the third line before we take the lock.
- */
- nop
- cmpxchg_lock 32
-
- /*
- * This code is invoked from sys_cmpxchg after most of the
- * preconditions have been checked. We still need to check
- * that r0 is 8-byte aligned, since if it's not we won't
- * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
- * lock pointer and r27/r28 have the saved SP/PC.
- * r23 is holding "r0 & 7" so we can test for alignment.
- * The compare value is in r2/r3; the new value is in r4/r5.
- * On return, we must put the old value in r0/r1.
- */
- .align 64
-.Lcmpxchg64:
- {
- bzt r23, .Lcmpxchg64_tns
- }
- j .Lcmpxchg_badaddr
-
-.Ldo_cmpxchg64:
- {
- lw r21, r0
- addi r25, r0, 4
- }
- {
- lw r1, r25
- }
- seq r26, r21, r2
- {
- bz r26, .Lcmpxchg64_mismatch
- seq r26, r1, r3
- }
- {
- bz r26, .Lcmpxchg64_mismatch
- }
- sw r0, r4
- sw r25, r5
-
- /*
- * The 32-bit path provides optimized "match" and "mismatch"
- * iret paths, but we don't have enough bundles in this cache line
- * to do that, so we just make even the "mismatch" path do an "mf".
- */
-.Lcmpxchg64_mismatch:
- {
- move sp, r27
- mtspr SPR_EX_CONTEXT_K_0, r28
- }
- mf
- {
- move r0, r21
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- iret
-
-.Lcmpxchg64_tns:
- cmpxchg_lock 64
-
-
- /*
- * Reset sp and revector to sys_cmpxchg_badaddr(), which will
- * just raise the appropriate signal and exit. Doing it this
- * way means we don't have to duplicate the code in intvec.S's
- * int_hand macro that locates the top of the stack.
- */
-.Lcmpxchg_badaddr:
- {
- moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
- move sp, r27
- }
- j intvec_SWINT_1
- ENDPROC(sys_cmpxchg)
- ENTRY(__sys_cmpxchg_end)
-
-
-/* The single-step support may need to read all the registers. */
-int_unalign:
- push_extra_callee_saves r0
- j do_trap
-
-/* Include .intrpt array of interrupt vectors */
- .section ".intrpt", "ax"
-
-#ifndef CONFIG_USE_PMC
-#define handle_perf_interrupt bad_intr
-#endif
-
-#ifndef CONFIG_HARDWALL
-#define do_hardwall_trap bad_intr
-#endif
-
- int_hand INT_ITLB_MISS, ITLB_MISS, \
- do_page_fault, handle_interrupt_no_single_step
- int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
- int_hand INT_ILL, ILL, do_trap, handle_ill
- int_hand INT_GPV, GPV, do_trap
- int_hand INT_SN_ACCESS, SN_ACCESS, do_trap
- int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
- int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
- int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr
- int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr
- int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
- int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
- int_hand INT_SWINT_3, SWINT_3, do_trap
- int_hand INT_SWINT_2, SWINT_2, do_trap
- int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
- int_hand INT_SWINT_0, SWINT_0, do_trap
- int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
- int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
- int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
- int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
- int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
- int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
- int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr
- int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
- int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
- int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
- int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
- int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
- int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
- int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
- int_hand INT_IDN_CA, IDN_CA, bad_intr
- int_hand INT_UDN_CA, UDN_CA, bad_intr
- int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
- int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
- int_hand INT_PERF_COUNT, PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
- int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- dc_dispatch INT_INTCTRL_2, INTCTRL_2
- int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
-#else
- int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
- dc_dispatch INT_INTCTRL_1, INTCTRL_1
-#endif
- int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
- int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
- hv_message_intr
- int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
- tile_dev_intr
- int_hand INT_I_ASID, I_ASID, bad_intr
- int_hand INT_D_ASID, D_ASID, bad_intr
- int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
- do_page_fault
- int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
- do_page_fault
- int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
- do_page_fault
- int_hand INT_SN_CPL, SN_CPL, bad_intr
- int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
- int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
-
- /* Synthetic interrupt delivered only by the simulator */
- int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
deleted file mode 100644
index 3b51bdf37d11..000000000000
--- a/arch/tile/kernel/intvec_64.S
+++ /dev/null
@@ -1,1564 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Linux interrupt vectors.
- */
-
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/init.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/irqflags.h>
-#include <asm/asm-offsets.h>
-#include <asm/types.h>
-#include <asm/traps.h>
-#include <asm/signal.h>
-#include <hv/hypervisor.h>
-#include <arch/abi.h>
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
-
-#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
-
-#if CONFIG_KERNEL_PL == 1 || CONFIG_KERNEL_PL == 2
-/*
- * Set "result" non-zero if ex1 holds the PL of the kernel
- * (with or without ICS being set). Note this works only
- * because we never find the PL at level 3.
- */
-# define IS_KERNEL_EX1(result, ex1) andi result, ex1, CONFIG_KERNEL_PL
-#else
-# error Recode IS_KERNEL_EX1 for CONFIG_KERNEL_PL
-#endif
-
- .macro push_reg reg, ptr=sp, delta=-8
- {
- st \ptr, \reg
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg reg, ptr=sp, delta=8
- {
- ld \reg, \ptr
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg_zero reg, zreg, ptr=sp, delta=8
- {
- move \zreg, zero
- ld \reg, \ptr
- addi \ptr, \ptr, \delta
- }
- .endm
-
- .macro push_extra_callee_saves reg
- PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
- push_reg r51, \reg
- push_reg r50, \reg
- push_reg r49, \reg
- push_reg r48, \reg
- push_reg r47, \reg
- push_reg r46, \reg
- push_reg r45, \reg
- push_reg r44, \reg
- push_reg r43, \reg
- push_reg r42, \reg
- push_reg r41, \reg
- push_reg r40, \reg
- push_reg r39, \reg
- push_reg r38, \reg
- push_reg r37, \reg
- push_reg r36, \reg
- push_reg r35, \reg
- push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
- .endm
-
- .macro panic str
- .pushsection .rodata, "a"
-1:
- .asciz "\str"
- .popsection
- {
- moveli r0, hw2_last(1b)
- }
- {
- shl16insli r0, r0, hw1(1b)
- }
- {
- shl16insli r0, r0, hw0(1b)
- jal panic
- }
- .endm
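The moveli/shl16insli pair in the panic macro is the standard tilegx idiom for materializing a 64-bit address 16 bits at a time: hw2_last seeds the top halfword, then each shl16insli shifts left by 16 and inserts the next one. Numerically (sign-extension of hw2_last elided):

    #include <stdint.h>

    /* shl16insli rd, rs, imm16 */
    static uint64_t shl16insli(uint64_t r, uint16_t hw)
    {
        return (r << 16) | hw;
    }

    /* For addr == (hw2 << 32) | (hw1 << 16) | hw0:
     *   addr == shl16insli(shl16insli(hw2, hw1), hw0) */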
-
- /*
- * Unalign data exception fast handling: In order to handle
- * unaligned data access, a fast JIT version is generated and stored
- * in a specific area in user space. We first do a quick check
- * to see if the JIT is available. We use certain bits of the fault
- * PC (bits 3 to 9 for a 16KB page size) as an index to address the JIT
- * code area. The first 64-bit word is the fault PC, and the 2nd one is
- * the fault bundle itself. If these 2 words both match, then we
- * directly "iret" to the JIT code. If not, a slow path is invoked to
- * generate new JIT code. Note: any current JIT code WILL be
- * overwritten if it exists. So, ideally we can handle 128 unalign
- * fixups via the JIT. For lookup efficiency and to effectively support
- * tight loops with multiple unaligned references, a simple
- * direct-mapped cache is used.
- *
- * SPR_EX_CONTEXT_K_0 is modified to return to JIT code.
- * SPR_EX_CONTEXT_K_1 has ICS set.
- * SPR_EX_CONTEXT_0_0 is setup to user program's next PC.
- * SPR_EX_CONTEXT_0_1 = 0.
- */
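Concretely, the lookup described above is a direct-mapped cache probe: bits [3:9] of the fault PC select one of 128 slots (in the 16KB page case), each slot holding the fault PC, the fault bundle, and up to 14 bundles of fixup code; both tag words must match, or the slow path regenerates the entry, overwriting whatever was there. A C sketch with an illustrative slot layout:

    #include <stdint.h>

    struct unalign_jit_slot {           /* one 128-byte slot, illustrative */
        uint64_t pc;                    /* tag 1: fault PC */
        uint64_t bundle;                /* tag 2: fault bundle */
        uint64_t code[14];              /* JIT fixup, at most 14 bundles */
    };

    /* Probe: a null return means a miss -> regenerate via the slow path. */
    static uint64_t *jit_lookup(struct unalign_jit_slot *base,
                                uint64_t pc, uint64_t bundle)
    {
        struct unalign_jit_slot *s = &base[(pc >> 3) & 127];
        return (s->pc == pc && s->bundle == bundle) ? s->code : 0;
    }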
- .macro int_hand_unalign_fast vecnum, vecname
- .org (\vecnum << 8)
-intvec_\vecname:
- /* Put r3 in SPR_SYSTEM_SAVE_K_1. */
- mtspr SPR_SYSTEM_SAVE_K_1, r3
-
- mfspr r3, SPR_EX_CONTEXT_K_1
- /*
- * Examine if exception comes from user without ICS set.
- * If not, just go directly to the slow path.
- */
- bnez r3, hand_unalign_slow_nonuser
-
- mfspr r3, SPR_SYSTEM_SAVE_K_0
-
- /* Get &thread_info->unalign_jit_tmp[0] in r3. */
- bfexts r3, r3, 0, CPU_SHIFT-1
- mm r3, zero, LOG2_THREAD_SIZE, 63
- addli r3, r3, THREAD_INFO_UNALIGN_JIT_TMP_OFFSET
-
- /*
- * Save r0, r1, r2 into thread_info array r3 points to
- * from low to high memory in order.
- */
- st_add r3, r0, 8
- st_add r3, r1, 8
- {
- st_add r3, r2, 8
- andi r2, sp, 7
- }
-
- /* Save stored r3 value so we can revert it on a page fault. */
- mfspr r1, SPR_SYSTEM_SAVE_K_1
- st r3, r1
-
- {
- /* Generate a SIGBUS if sp is not 8-byte aligned. */
- bnez r2, hand_unalign_slow_badsp
- }
-
- /*
- * Get the thread_info in r0; load r1 with pc. Set the low bit of sp
- * as an indicator to the page fault code in case we fault.
- */
- {
- ori sp, sp, 1
- mfspr r1, SPR_EX_CONTEXT_K_0
- }
-
- /* Add the jit_info offset in thread_info; extract r1 [3:9] into r2. */
- {
- addli r0, r3, THREAD_INFO_UNALIGN_JIT_BASE_OFFSET - \
- (THREAD_INFO_UNALIGN_JIT_TMP_OFFSET + (3 * 8))
- bfextu r2, r1, 3, (2 + PAGE_SHIFT - UNALIGN_JIT_SHIFT)
- }
-
- /* Load the jit_info; multiply r2 by 128. */
- {
- ld r0, r0
- shli r2, r2, UNALIGN_JIT_SHIFT
- }
-
- /*
- * If r0 is NULL, the JIT page is not mapped, so go to slow path;
- * add offset r2 to r0 at the same time.
- */
- {
- beqz r0, hand_unalign_slow
- add r2, r0, r2
- }
-
- /*
- * We are loading from userspace (both the JIT info PC and
- * instruction word, and the instruction word we executed)
- * and since either could fault while holding the interrupt
- * critical section, we must tag this region and check it in
- * do_page_fault() to handle it properly.
- */
-ENTRY(__start_unalign_asm_code)
-
- /* Load first word of JIT in r0 and increment r2 by 8. */
- ld_add r0, r2, 8
-
- /*
- * Compare the PC with the 1st word in JIT; load the fault bundle
- * into r1.
- */
- {
- cmpeq r0, r0, r1
- ld r1, r1
- }
-
- /* Go to slow path if PC doesn't match. */
- beqz r0, hand_unalign_slow
-
- /*
- * Load the 2nd word of JIT, which is supposed to be the fault
- * bundle for a cache hit. Increment r2; after this bundle r2 will
- * point to the potential start of the JIT code we want to run.
- */
- ld_add r0, r2, 8
-
- /* No further accesses to userspace are done after this point. */
-ENTRY(__end_unalign_asm_code)
-
- /* Compare the real bundle with what is saved in the JIT area. */
- {
- cmpeq r0, r1, r0
- mtspr SPR_EX_CONTEXT_0_1, zero
- }
-
- /* Go to slow path if the fault bundle does not match. */
- beqz r0, hand_unalign_slow
-
- /*
- * A cache hit is found.
- * r2 points to start of JIT code (3rd word).
- * r0 is the fault pc.
- * r1 is the fault bundle.
- * Reset the low bit of sp.
- */
- {
- mfspr r0, SPR_EX_CONTEXT_K_0
- andi sp, sp, ~1
- }
-
- /* Write r2 into EX_CONTEXT_K_0 and increment PC. */
- {
- mtspr SPR_EX_CONTEXT_K_0, r2
- addi r0, r0, 8
- }
-
- /*
- * Set ICS on kernel EX_CONTEXT_K_1 in order to "iret" to
- * user with ICS set. This way, if the JIT fixup causes another
- * unalign exception (which shouldn't be possible) the user
- * process will be terminated with SIGBUS. Also, our fixup will
- * run without interleaving with external interrupts.
- * Each fixup is at most 14 bundles, so it won't hold ICS for long.
- */
- {
- movei r1, PL_ICS_EX1(USER_PL, 1)
- mtspr SPR_EX_CONTEXT_0_0, r0
- }
-
- {
- mtspr SPR_EX_CONTEXT_K_1, r1
- addi r3, r3, -(3 * 8)
- }
-
- /* Restore r0..r3. */
- ld_add r0, r3, 8
- ld_add r1, r3, 8
- ld_add r2, r3, 8
- ld r3, r3
-
- iret
- ENDPROC(intvec_\vecname)
- .endm
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
-intvec_feedback:
- .popsection
-#endif
-
- /*
- * Default interrupt handler.
- *
- * vecnum is where we'll put this code.
- * c_routine is the C routine we'll call.
- *
- * The C routine is passed two arguments:
- * - A pointer to the pt_regs state.
- * - The interrupt vector number.
- *
- * The "processing" argument specifies the code for processing
- * the interrupt. Defaults to "handle_interrupt".
- */
- .macro __int_hand vecnum, vecname, c_routine, processing=handle_interrupt
-intvec_\vecname:
- /* Temporarily save a register so we have somewhere to work. */
-
- mtspr SPR_SYSTEM_SAVE_K_1, r0
- mfspr r0, SPR_EX_CONTEXT_K_1
-
- /*
- * The unalign data fastpath code sets the low bit in sp to
- * force us to reset it here on fault.
- */
- {
- blbs sp, 2f
- IS_KERNEL_EX1(r0, r0)
- }
-
- .ifc \vecnum, INT_DOUBLE_FAULT
- /*
- * For double-faults from user-space, fall through to the normal
- * register save and stack setup path. Otherwise, it's the
- * hypervisor giving us one last chance to dump diagnostics, and we
- * branch to the kernel_double_fault routine to do so.
- */
- beqz r0, 1f
- j _kernel_double_fault
-1:
- .else
- /*
- * If we're coming from user-space, then set sp to the top of
- * the kernel stack. Otherwise, assume sp is already valid.
- */
- {
- bnez r0, 0f
- move r0, sp
- }
- .endif
-
- .ifc \c_routine, do_page_fault
- /*
- * The page_fault handler may be downcalled directly by the
- * hypervisor even when Linux is running and has ICS set.
- *
- * In this case the contents of EX_CONTEXT_K_1 reflect the
- * previous fault and can't be relied on to choose whether or
- * not to reinitialize the stack pointer. So we add a test
- * to see whether SYSTEM_SAVE_K_2 has the high bit set,
- * and if so we don't reinitialize sp, since we must be coming
- * from Linux. (In fact the precise case is !(val & ~1),
- * but any Linux PC has to have the high bit set.)
- *
- * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
- * any path that turns into a downcall to one of our TLB handlers.
- *
- * FIXME: if we end up never using this path, perhaps we should
- * prevent the hypervisor from generating downcalls in this case.
- * The advantage of getting a downcall is we can panic in Linux.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_2
- {
- bltz r0, 0f /* high bit in SYSTEM_SAVE_K_2 means a Linux PC */
- move r0, sp
- }
- .endif
-
-2:
- /*
- * SYSTEM_SAVE_K_0 holds the cpu number in the high bits, and
- * the current stack top in the lower bits. So we recover
- * our starting stack value by sign-extending the low bits, then
- * point sp at the top aligned address on the actual stack page.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_0
- bfexts r0, r0, 0, CPU_SHIFT-1
-
-0:
- /*
- * Align the stack mod 64 so we can properly predict what
- * cache lines we need to write-hint to reduce memory fetch
- * latency as we enter the kernel. The layout of memory is
- * as follows, with cache line 0 at the lowest VA, and cache
- * line 8 just below the r0 value this "andi" computes.
- * Note that we never write to cache line 8, and we skip
- * cache lines 1-3 for syscalls.
- *
- * cache line 8: ptregs padding (two words)
- * cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
- * cache line 6: r46...r53 (tp)
- * cache line 5: r38...r45
- * cache line 4: r30...r37
- * cache line 3: r22...r29
- * cache line 2: r14...r21
- * cache line 1: r6...r13
- * cache line 0: 2 x frame, r0..r5
- */
-#if STACK_TOP_DELTA != 64
-#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
-#endif
- andi r0, r0, -64
-
- /*
- * Push the first four registers on the stack, so that we can set
- * them to vector-unique values before we jump to the common code.
- *
- * Registers are pushed on the stack as a struct pt_regs,
- * with the sp initially just above the struct, and when we're
- * done, sp points to the base of the struct, minus
- * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
- *
- * This routine saves just the first four registers, plus the
- * stack context so we can do proper backtracing right away,
- * and defers to handle_interrupt to save the rest.
- * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
- * and needs sp set to its final location at the bottom of
- * the stack frame.
- */
- addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
- wh64 r0 /* cache line 7 */
- {
- st r0, lr
- addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- }
- {
- st r0, sp
- addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
- }
- wh64 sp /* cache line 6 */
- {
- st sp, r52
- addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
- }
- wh64 sp /* cache line 0 */
- {
- st sp, r1
- addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
- }
- {
- st sp, r2
- addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
- }
- {
- st sp, r3
- addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
- }
- mfspr r0, SPR_EX_CONTEXT_K_0
- .ifc \processing,handle_syscall
- /*
- * Bump the saved PC by one bundle so that when we return, we won't
- * execute the same swint instruction again. We need to do this while
- * we're in the critical section.
- */
- addi r0, r0, 8
- .endif
- {
- st sp, r0
- addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r0, SPR_EX_CONTEXT_K_1
- {
- st sp, r0
- addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- /*
- * Use r0 for syscalls so it's a temporary; use r1 for interrupts
- * so that it gets passed through unchanged to the handler routine.
- * Note that the .if conditional confusingly spans bundles.
- */
- .ifc \processing,handle_syscall
- movei r0, \vecnum
- }
- {
- st sp, r0
- .else
- movei r1, \vecnum
- }
- {
- st sp, r1
- .endif
- addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
- }
- mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
- {
- st sp, r0
- addi sp, sp, -PTREGS_OFFSET_REG(0) - 8
- }
- {
- st sp, zero /* write zero into "Next SP" frame pointer */
- addi sp, sp, -8 /* leave SP pointing at bottom of frame */
- }
- .ifc \processing,handle_syscall
- j handle_syscall
- .else
- /* Capture per-interrupt SPR context to registers. */
- .ifc \c_routine, do_page_fault
- mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
- mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
- .else
- .ifc \vecnum, INT_ILL_TRANS
- mfspr r2, ILL_VA_PC
- .else
- .ifc \vecnum, INT_DOUBLE_FAULT
- mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
- .else
- .ifc \c_routine, do_trap
- mfspr r2, GPV_REASON
- .else
- /* Test the vector, not the handler, so the aux counter is reachable. */
- .ifc \vecnum, INT_AUX_PERF_COUNT
- mfspr r2, AUX_PERF_COUNT_STS
- .else
- .ifc \c_routine, handle_perf_interrupt
- mfspr r2, PERF_COUNT_STS
- .endif
- .ifc \c_routine, do_nmi
- mfspr r2, SPR_SYSTEM_SAVE_K_2 /* nmi type */
- .else
- .endif
- .endif
- .endif
- .endif
- .endif
- .endif
- /* Put function pointer in r0 */
- moveli r0, hw2_last(\c_routine)
- shl16insli r0, r0, hw1(\c_routine)
- {
- shl16insli r0, r0, hw0(\c_routine)
- j \processing
- }
- .endif
- ENDPROC(intvec_\vecname)
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
- .org (\vecnum << 5)
- FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
- jrp lr
- .popsection
-#endif
-
- .endm
-
-
- /*
- * Save the rest of the registers that we didn't save in the actual
- * vector itself. We can't use r0-r10 inclusive here.
- */
- .macro finish_interrupt_save, function
-
- /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
- PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
- {
- .ifc \function,handle_syscall
- st r52, r0
- .else
- st r52, zero
- .endif
- PTREGS_PTR(r52, PTREGS_OFFSET_TP)
- }
- st r52, tp
- {
- mfspr tp, CMPEXCH_VALUE
- PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
- }
-
- /*
- * For ordinary syscalls, we save neither caller- nor callee-
- * save registers, since the syscall invoker doesn't expect the
- * caller-saves to be saved, and the called kernel functions will
- * take care of saving the callee-saves for us.
- *
- * For interrupts we save just the caller-save registers. Saving
- * them is required (since the "caller" can't save them). Again,
- * the called kernel functions will restore the callee-save
- * registers for us appropriately.
- *
- * On return, we normally restore nothing special for syscalls,
- * and just the caller-save registers for interrupts.
- *
- * However, there are some important caveats to all this:
- *
- * - We always save a few callee-save registers to give us
- * some scratchpad registers to carry across function calls.
- *
- * - fork/vfork/etc require us to save all the callee-save
- * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
- *
- * - We always save r0..r5 and r10 for syscalls, since we need
- * to reload them a bit later for the actual kernel call, and
- * since we might need them for -ERESTARTNOINTR, etc.
- *
- * - Before invoking a signal handler, we save the unsaved
- * callee-save registers so they are visible to the
- * signal handler or any ptracer.
- *
- * - If the unsaved callee-save registers are modified, we set
- * a bit in pt_regs so we know to reload them from pt_regs
- * and not just rely on the kernel function unwinding.
- * (Done for ptrace register writes and SA_SIGINFO handler.)
- */
- {
- st r52, tp
- PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
- }
- wh64 r52 /* cache line 4 */
- push_reg r33, r52
- push_reg r32, r52
- push_reg r31, r52
- .ifc \function,handle_syscall
- push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
- push_reg TREG_SYSCALL_NR_NAME, r52, \
- PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
- .else
-
- push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
- wh64 r52 /* cache line 3 */
- push_reg r29, r52
- push_reg r28, r52
- push_reg r27, r52
- push_reg r26, r52
- push_reg r25, r52
- push_reg r24, r52
- push_reg r23, r52
- push_reg r22, r52
- wh64 r52 /* cache line 2 */
- push_reg r21, r52
- push_reg r20, r52
- push_reg r19, r52
- push_reg r18, r52
- push_reg r17, r52
- push_reg r16, r52
- push_reg r15, r52
- push_reg r14, r52
- wh64 r52 /* cache line 1 */
- push_reg r13, r52
- push_reg r12, r52
- push_reg r11, r52
- push_reg r10, r52
- push_reg r9, r52
- push_reg r8, r52
- push_reg r7, r52
- push_reg r6, r52
-
- .endif
-
- push_reg r5, r52
- st r52, r4
-
- /*
- * If we will be returning to the kernel, we will need to
- * reset the interrupt masks to the state they had before.
- * Set DISABLE_IRQ in flags iff we came from kernel pl with
- * irqs disabled.
- */
- mfspr r32, SPR_EX_CONTEXT_K_1
- {
- IS_KERNEL_EX1(r32, r32)
- PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
- }
- beqzt r32, 1f /* zero if from user space */
- IRQS_DISABLED(r32) /* zero if irqs enabled */
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
-#endif
-1:
- .ifnc \function,handle_syscall
- /* Record the fact that we saved the caller-save registers above. */
- ori r32, r32, PT_FLAGS_CALLER_SAVES
- .endif
- st r21, r32
-
- /*
- * We've captured enough state on the stack (including in
- * particular our EX_CONTEXT state) that we can now release
- * the interrupt critical section and replace it with our
- * standard "interrupts disabled" mask value. This allows
- * synchronous interrupts (and profile interrupts) to punch
- * through from this point onwards.
- *
- * It's important that no code before this point touch memory
- * other than our own stack (to keep the invariant that this
- * is all that gets touched under ICS), and that no code after
- * this point reference any interrupt-specific SPR, in particular
- * the EX_CONTEXT_K_ values.
- */
- .ifc \function,handle_nmi
- IRQ_DISABLE_ALL(r20)
- .else
- IRQ_DISABLE(r20, r21)
- .endif
- mtspr INTERRUPT_CRITICAL_SECTION, zero
-
- /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
- {
- mfspr r20, SPR_SYSTEM_SAVE_K_0
- moveli r21, hw2_last(__per_cpu_offset)
- }
- {
- shl16insli r21, r21, hw1(__per_cpu_offset)
- bfextu r20, r20, CPU_SHIFT, 63
- }
- shl16insli r21, r21, hw0(__per_cpu_offset)
- shl3add r20, r20, r21
- ld tp, r20
-#else
- move tp, zero
-#endif
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- /*
- * Notify the feedback routines that we were in the
- * appropriate fixed interrupt vector area. Note that we
- * still have ICS set at this point, so we can't invoke any
- * atomic operations or we will panic. The feedback
- * routines internally preserve r0..r10 and r30 up.
- */
- .ifnc \function,handle_syscall
- shli r20, r1, 5
- .else
- moveli r20, INT_SWINT_1 << 5
- .endif
- moveli r21, hw2_last(intvec_feedback)
- shl16insli r21, r21, hw1(intvec_feedback)
- shl16insli r21, r21, hw0(intvec_feedback)
- add r20, r20, r21
- jalr r20
-
- /* And now notify the feedback routines that we are here. */
- FEEDBACK_ENTER(\function)
-#endif
-
- /*
- * Prepare the first 256 stack bytes to be rapidly accessible
- * without having to fetch the background data.
- */
- addi r52, sp, -64
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- wh64 r52
-
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
- .ifnc \function,handle_nmi
- /*
- * We finally have enough state set up to notify the irq
- * tracing code that irqs were disabled on entry to the handler.
- * The TRACE_IRQS_OFF call clobbers registers r0-r29.
- * For syscalls, we already have the register state saved away
- * on the stack, so we don't bother to do any register saves here,
- * and later we pop the registers back off the kernel stack.
- * For interrupt handlers, save r0-r3 in callee-saved registers.
- */
- .ifnc \function,handle_syscall
- { move r30, r0; move r31, r1 }
- { move r32, r2; move r33, r3 }
- .endif
- TRACE_IRQS_OFF
-#ifdef CONFIG_CONTEXT_TRACKING
- jal context_tracking_user_exit
-#endif
- .ifnc \function,handle_syscall
- { move r0, r30; move r1, r31 }
- { move r2, r32; move r3, r33 }
- .endif
- .endif
-#endif
-
- .endm
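The CONFIG_SMP block above loads tp with __per_cpu_offset[cpu], where the cpu number is pulled out of SPR_SYSTEM_SAVE_K_0 with bfextu and scaled by shl3add. A C sketch of the same computation (CPU_SHIFT is taken as given; this is a model, not kernel code):

    extern unsigned long __per_cpu_offset[];

    static unsigned long percpu_tp(unsigned long system_save_k_0,
                                   unsigned int cpu_shift)
    {
            /* bfextu r20, r20, CPU_SHIFT, 63: cpu number in the high bits. */
            unsigned long cpu = system_save_k_0 >> cpu_shift;

            /* shl3add r20, r20, r21; ld tp, r20: index by cpu * 8. */
            return __per_cpu_offset[cpu];
    }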
-
- /*
- * Redispatch a downcall.
- */
- .macro dc_dispatch vecnum, vecname
- .org (\vecnum << 8)
-intvec_\vecname:
- j _hv_downcall_dispatch
- ENDPROC(intvec_\vecname)
- .endm
-
- /*
- * Common code for most interrupts. The C function we're eventually
- * going to call is in r0, and the faultnum is in r1; the original
- * values for those registers are on the stack.
- */
- .pushsection .text.handle_interrupt,"ax"
-handle_interrupt:
- finish_interrupt_save handle_interrupt
-
- /* Jump to the C routine; it should enable irqs as soon as possible. */
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_interrupt)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_interrupt)
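The jalr r0 above lands in the C routine named in the vector table, with r0 rewritten to point at the saved pt_regs, the fault number in r1, and any per-interrupt SPR values in r2/r3. The exact prototypes lived in the deleted asm/traps.h, so treat these as an illustrative sketch of their shape:

    struct pt_regs;

    void do_page_fault(struct pt_regs *regs, int fault_num,
                       unsigned long address, unsigned long write);
    void do_trap(struct pt_regs *regs, int fault_num,
                 unsigned long reason);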
-
-/*
- * This routine takes a boolean in r30 indicating if this is an NMI.
- * If so, we also expect a boolean in r31 indicating whether to
- * re-enable the oprofile interrupts.
- *
- * Note that .Lresume_userspace is jumped to directly in several
- * places, and we need to make sure r30 is set correctly in those
- * callers as well.
- */
-STD_ENTRY(interrupt_return)
- /* If we're resuming to kernel space, don't check thread flags. */
- {
- bnez r30, .Lrestore_all /* NMIs don't special-case user-space */
- PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
- }
- ld r29, r29
- IS_KERNEL_EX1(r29, r29)
- {
- beqzt r29, .Lresume_userspace
- move r29, sp
- }
-
-#ifdef CONFIG_PREEMPT
- /* Returning to kernel space. Check if we need preemption. */
- EXTRACT_THREAD_INFO(r29)
- addli r28, r29, THREAD_INFO_FLAGS_OFFSET
- {
- ld r28, r28
- addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
- }
- {
- andi r28, r28, _TIF_NEED_RESCHED
- ld4s r29, r29
- }
- beqzt r28, 1f
- bnez r29, 1f
- /* Disable interrupts explicitly for preemption. */
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- jal preempt_schedule_irq
- FEEDBACK_REENTER(interrupt_return)
-1:
-#endif
-
- /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
- {
- moveli r27, hw2_last(_cpu_idle_nap)
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
- }
- {
- ld r28, r29
- shl16insli r27, r27, hw1(_cpu_idle_nap)
- }
- {
- shl16insli r27, r27, hw0(_cpu_idle_nap)
- }
- {
- cmpeq r27, r27, r28
- }
- {
- blbc r27, .Lrestore_all
- addi r28, r28, 8
- }
- st r29, r28
- j .Lrestore_all
-
-.Lresume_userspace:
- FEEDBACK_REENTER(interrupt_return)
-
- /*
- * Disable interrupts so as to make sure we don't
- * miss an interrupt that sets any of the thread flags (like
- * need_resched or sigpending) between sampling and the iret.
- * Routines like schedule() or do_signal() may re-enable
- * interrupts before returning.
- */
- IRQ_DISABLE(r20, r21)
- TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
-
- /*
- * See if there are any work items (including single-shot items)
- * to do. If so, save the callee-save registers to pt_regs
- * and then dispatch to C code.
- */
- move r21, sp
- EXTRACT_THREAD_INFO(r21)
- {
- addi r22, r21, THREAD_INFO_FLAGS_OFFSET
- moveli r20, hw1_last(_TIF_ALLWORK_MASK)
- }
- {
- ld r22, r22
- shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
- }
- and r1, r22, r20
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- beqzt r1, .Lrestore_all
- }
- push_extra_callee_saves r0
- jal prepare_exit_to_usermode
-
- /*
- * In the NMI case we omit the call to single_process_check_nohz,
- * which normally checks whether we should start or stop the
- * scheduler tick, because we can't call arbitrary Linux code from
- * an NMI context. We always call the homecache TLB deferral code
- * to re-trigger the deferral mechanism.
- *
- * This code is also responsible for resetting the interrupt
- * masks appropriately to re-enable irqs and NMIs.  We have
- * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
- * lockdep-type stuff, but we can't set ICS until afterwards, since
- * ICS can only be used in very tight chunks of code to avoid
- * tripping over various assertions that it is off.
- */
-.Lrestore_all:
- PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
- {
- ld r0, r0
- PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
- }
- {
- IS_KERNEL_EX1(r0, r0)
- ld r32, r32
- }
- bnez r0, 1f
- j 2f
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
-#endif
-1: blbct r32, 2f
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- beqzt r30, .Lrestore_regs
- j 3f
-2: TRACE_IRQS_ON
- IRQ_ENABLE_LOAD(r20, r21)
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- IRQ_ENABLE_APPLY(r20, r21)
- beqzt r30, .Lrestore_regs
-3:
-
-#if INT_PERF_COUNT + 1 != INT_AUX_PERF_COUNT
-# error Bad interrupt assumption
-#endif
- {
- movei r0, 3 /* two adjacent bits for the PERF_COUNT mask */
- beqz r31, .Lrestore_regs
- }
- shli r0, r0, INT_PERF_COUNT
- mtspr SPR_INTERRUPT_MASK_RESET_K, r0
-
- /*
- * We now commit to returning from this interrupt, since we will be
- * doing things like setting EX_CONTEXT SPRs and unwinding the stack
- * frame. No calls should be made to any other code after this point.
- * This code should only be entered with ICS set.
- * r32 must still be set to ptregs.flags.
- * We launch loads to each cache line separately first, so we can
- * get some parallelism out of the memory subsystem.
- * We start zeroing caller-saved registers throughout, since
- * that will save some cycles if this turns out to be a syscall.
- */
-.Lrestore_regs:
-
- /*
- * Rotate so we have one high bit and one low bit to test.
- * - low bit says whether to restore all the callee-saved registers,
- * or just r30-r33, and r52 up.
- * - high bit (i.e. sign bit) says whether to restore all the
- * caller-saved registers, or just r0.
- */
-#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
-# error Rotate trick does not work :-)
-#endif
- {
- rotli r20, r32, 62
- PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
- }
-
- /*
- * Load cache lines 0, 4, 6 and 7, in that order, then use
- * the last loaded value, which makes it likely that the other
- * cache lines have also loaded, at which point we should be
- * able to safely read all the remaining words on those cache
- * lines without waiting for the memory subsystem.
- */
- pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
- pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
- pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
- pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
- pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
- {
- mtspr CMPEXCH_VALUE, r21
- move r4, zero
- }
- pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
- {
- mtspr SPR_EX_CONTEXT_K_1, lr
- IS_KERNEL_EX1(lr, lr)
- }
- {
- mtspr SPR_EX_CONTEXT_K_0, r21
- move r5, zero
- }
-
- /* Restore callee-saveds that we actually use. */
- pop_reg_zero r31, r6
- pop_reg_zero r32, r7
- pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
-
- /*
- * If we modified other callee-saveds, restore them now.
- * This is rare, but could be via ptrace or signal handler.
- */
- {
- move r9, zero
- blbs r20, .Lrestore_callees
- }
-.Lcontinue_restore_regs:
-
- /* Check if we're returning from a syscall. */
- {
- move r10, zero
- bltzt r20, 1f /* no, so go restore callee-save registers */
- }
-
- /*
- * Check if we're returning to userspace.
- * Note that if we're not, we don't worry about zeroing everything.
- */
- {
- addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
- bnez lr, .Lkernel_return
- }
-
- /*
- * On return from syscall, we've restored r0 from pt_regs, but we
- * clear the remainder of the caller-saved registers. We could
- * restore the syscall arguments, but there's not much point;
- * clearing them instead ensures user programs aren't relying on
- * the caller-saves, and it avoids leaking kernel pointers into
- * userspace.
- */
- pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- {
- ld sp, sp
- move r13, zero
- move r14, zero
- }
- { move r15, zero; move r16, zero }
- { move r17, zero; move r18, zero }
- { move r19, zero; move r20, zero }
- { move r21, zero; move r22, zero }
- { move r23, zero; move r24, zero }
- { move r25, zero; move r26, zero }
-
- /* Set r1 to errno if we are returning an error, otherwise zero. */
- {
- moveli r29, 4096
- sub r1, zero, r0
- }
- {
- move r28, zero
- cmpltu r29, r1, r29
- }
- {
- mnz r1, r29, r1
- move r29, zero
- }
- iret
-
- /*
- * Not a syscall, so restore caller-saved registers.
- * First kick off loads for cache lines 1-3, which we're touching
- * for the first time here.
- */
- .align 64
-1: pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
- pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
- pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
- pop_reg r1
- pop_reg r2
- pop_reg r3
- pop_reg r4
- pop_reg r5
- pop_reg r6
- pop_reg r7
- pop_reg r8
- pop_reg r9
- pop_reg r10
- pop_reg r11
- pop_reg r12, sp, 16
- /* r13 already restored above */
- pop_reg r14
- pop_reg r15
- pop_reg r16
- pop_reg r17
- pop_reg r18
- pop_reg r19
- pop_reg r20, sp, 16
- /* r21 already restored above */
- pop_reg r22
- pop_reg r23
- pop_reg r24
- pop_reg r25
- pop_reg r26
- pop_reg r27
- pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
- /* r29 already restored above */
- bnez lr, .Lkernel_return
- pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- ld sp, sp
- iret
-
- /*
- * We can't restore tp when in kernel mode, since a thread might
- * have migrated from another cpu and brought a stale tp value.
- */
-.Lkernel_return:
- pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- ld sp, sp
- iret
-
- /* Restore callee-saved registers from r34 to r51. */
-.Lrestore_callees:
- addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
- pop_reg r34
- pop_reg r35
- pop_reg r36
- pop_reg r37
- pop_reg r38
- pop_reg r39
- pop_reg r40
- pop_reg r41
- pop_reg r42
- pop_reg r43
- pop_reg r44
- pop_reg r45
- pop_reg r46
- pop_reg r47
- pop_reg r48
- pop_reg r49
- pop_reg r50
- pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
- j .Lcontinue_restore_regs
- STD_ENDPROC(interrupt_return)
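The sub/cmpltu/mnz sequence on the syscall return path above encodes the usual Linux convention that r0 values in [-4095, -1] are errnos. A C model of exactly what those three instructions compute (a sketch, not kernel code):

    /* r1 = -r0 when r0 is a small negative errno, else 0. */
    static long syscall_errno(long r0)
    {
            unsigned long neg = 0UL - (unsigned long)r0; /* sub r1, zero, r0 */
            return (neg < 4096) ? (long)neg : 0;         /* cmpltu + mnz */
    }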
-
- /*
- * "NMI" interrupts mask ALL interrupts before calling the
- * handler, and don't check thread flags, etc., on the way
- * back out. In general, the only things we do here for NMIs
- * are register save/restore and dataplane kernel-TLB management.
- * We don't (for example) deal with start/stop of the sched tick.
- */
- .pushsection .text.handle_nmi,"ax"
-handle_nmi:
- finish_interrupt_save handle_nmi
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_nmi)
- {
- movei r30, 1
- cmpeq r31, r0, zero
- }
- j interrupt_return
- STD_ENDPROC(handle_nmi)
-
- /*
- * Parallel code for syscalls to handle_interrupt.
- */
- .pushsection .text.handle_syscall,"ax"
-handle_syscall:
- finish_interrupt_save handle_syscall
-
- /* Enable irqs. */
- TRACE_IRQS_ON
- IRQ_ENABLE(r20, r21)
-
- /* Bump the counter for syscalls made on this tile. */
- moveli r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- add r20, r20, tp
- ld4s r21, r20
- {
- addi r21, r21, 1
- move r31, sp
- }
- {
- st4 r20, r21
- EXTRACT_THREAD_INFO(r31)
- }
-
- /* Trace syscalls, if requested. */
- addi r31, r31, THREAD_INFO_FLAGS_OFFSET
- {
- ld r30, r31
- moveli r32, _TIF_SYSCALL_ENTRY_WORK
- }
- and r30, r30, r32
- {
- addi r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
- beqzt r30, .Lrestore_syscall_regs
- }
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_enter
- }
- FEEDBACK_REENTER(handle_syscall)
- bltz r0, .Lsyscall_sigreturn_skip
-
- /*
- * We always reload our registers from the stack at this
- * point. They might still be valid (if we didn't build with
- * TRACE_IRQFLAGS, this isn't a dataplane tile, and we're not
- * doing syscall tracing), but there are enough cases now that it
- * seems simplest just to do the reload unconditionally.
- */
-.Lrestore_syscall_regs:
- {
- ld r30, r30
- PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
- }
- pop_reg r0, r11
- pop_reg r1, r11
- pop_reg r2, r11
- pop_reg r3, r11
- pop_reg r4, r11
- pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
- {
- ld TREG_SYSCALL_NR_NAME, r11
- moveli r21, __NR_syscalls
- }
-
- /* Ensure that the syscall number is within the legal range. */
- {
- moveli r20, hw2(sys_call_table)
-#ifdef CONFIG_COMPAT
- blbs r30, .Lcompat_syscall
-#endif
- }
- {
- cmpltu r21, TREG_SYSCALL_NR_NAME, r21
- shl16insli r20, r20, hw1(sys_call_table)
- }
- {
- blbc r21, .Linvalid_syscall
- shl16insli r20, r20, hw0(sys_call_table)
- }
-.Lload_syscall_pointer:
- shl3add r20, TREG_SYSCALL_NR_NAME, r20
- ld r20, r20
-
- /* Jump to syscall handler. */
- jalr r20
-.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
-
- /*
- * Write our r0 onto the stack so it gets restored instead
- * of whatever the user had there before.
- * In compat mode, sign-extend r0 before storing it.
- */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- blbct r30, 1f
- }
- addxi r0, r0, 0
-1: st r29, r0
-
-.Lsyscall_sigreturn_skip:
- FEEDBACK_REENTER(handle_syscall)
-
- /* Do syscall trace again, if requested. */
- {
- ld r30, r31
- moveli r32, _TIF_SYSCALL_EXIT_WORK
- }
- and r0, r30, r32
- {
- andi r0, r30, _TIF_SINGLESTEP
- beqzt r0, 1f
- }
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_exit
- }
- FEEDBACK_REENTER(handle_syscall)
- andi r0, r30, _TIF_SINGLESTEP
-
-1: beqzt r0, 2f
-
- /* Single stepping -- notify ptrace. */
- {
- movei r0, SIGTRAP
- jal ptrace_notify
- }
- FEEDBACK_REENTER(handle_syscall)
-
-2: {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
-
-#ifdef CONFIG_COMPAT
-.Lcompat_syscall:
- /*
- * Load the base of the compat syscall table in r20, and
- * range-check the syscall number (duplicated from 64-bit path).
- * Sign-extend all the user's passed arguments to make them consistent.
- * Also save the original "r(n)" values away in "r(11+n)" in
- * case the syscall table entry wants to validate them.
- */
- moveli r20, hw2(compat_sys_call_table)
- {
- cmpltu r21, TREG_SYSCALL_NR_NAME, r21
- shl16insli r20, r20, hw1(compat_sys_call_table)
- }
- {
- blbc r21, .Linvalid_syscall
- shl16insli r20, r20, hw0(compat_sys_call_table)
- }
- { move r11, r0; addxi r0, r0, 0 }
- { move r12, r1; addxi r1, r1, 0 }
- { move r13, r2; addxi r2, r2, 0 }
- { move r14, r3; addxi r3, r3, 0 }
- { move r15, r4; addxi r4, r4, 0 }
- { move r16, r5; addxi r5, r5, 0 }
- j .Lload_syscall_pointer
-#endif
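On TILE-Gx, addxi is the 32-bit add-immediate, so "addxi rN, rN, 0" above is simply a sign-extension of the low 32 bits of each argument. In C terms, a one-line model:

    /* What "addxi rN, rN, 0" computes for each compat argument. */
    static long compat_arg(long x)
    {
            return (long)(int)x;    /* sign-extend the low 32 bits */
    }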
-
-.Linvalid_syscall:
- /* Report an invalid syscall back to the user program */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- movei r28, -ENOSYS
- }
- st r29, r28
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(handle_syscall)
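Stripped of the register scheduling, the dispatch in handle_syscall is a range-checked indirect call through sys_call_table, with the result stored back to the saved r0 slot. A hedged C equivalent (the regs_model struct and nr_syscalls variable are illustrative; the real pt_regs layout and __NR_syscalls were in the deleted headers):

    #include <errno.h>

    typedef long (*syscall_fn)(long, long, long, long, long, long);
    extern syscall_fn sys_call_table[];     /* __NR_syscalls entries */
    extern unsigned long nr_syscalls;       /* stand-in for __NR_syscalls */

    struct regs_model { long r[6]; };       /* illustrative only */

    static void dispatch(struct regs_model *regs, unsigned long nr)
    {
            if (nr >= nr_syscalls) {                /* cmpltu + blbc */
                    regs->r[0] = -ENOSYS;           /* .Linvalid_syscall */
                    return;
            }
            regs->r[0] = sys_call_table[nr](regs->r[0], regs->r[1],
                                            regs->r[2], regs->r[3],
                                            regs->r[4], regs->r[5]);
    }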
-
- /* Return the address for oprofile to suppress in backtraces. */
-STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
- lnk r0
- {
- addli r0, r0, .Lhandle_syscall_link - .
- jrp lr
- }
- STD_ENDPROC(handle_syscall_link_address)
-
-STD_ENTRY(ret_from_fork)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_fork)
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(ret_from_fork)
-
-STD_ENTRY(ret_from_kernel_thread)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_kernel_thread)
- {
- move r0, r31
- jalr r30
- }
- FEEDBACK_REENTER(ret_from_kernel_thread)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(ret_from_kernel_thread)
-
-/* Various stub interrupt handlers and syscall handlers */
-
-STD_ENTRY_LOCAL(_kernel_double_fault)
- mfspr r1, SPR_EX_CONTEXT_K_0
- move r2, lr
- move r3, sp
- move r4, r52
- addi sp, sp, -C_ABI_SAVE_AREA_SIZE
- j kernel_double_fault
- STD_ENDPROC(_kernel_double_fault)
-
-STD_ENTRY_LOCAL(bad_intr)
- mfspr r2, SPR_EX_CONTEXT_K_0
- panic "Unhandled interrupt %#x: PC %#lx"
- STD_ENDPROC(bad_intr)
-
-/*
- * Special-case sigreturn to not write r0 to the stack on return.
- * This is technically more efficient, but it also avoids difficulties
- * in the 64-bit OS when handling 32-bit compat code, since we must not
- * sign-extend r0 for the sigreturn return-value case.
- */
-#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
- STD_ENTRY(_##x); \
- addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
- { \
- PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
- j x \
- }; \
- STD_ENDPROC(_##x)
-
-PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
-#ifdef CONFIG_COMPAT
-PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
-#endif
-
-/* Save additional callee-saves to pt_regs and jump to standard function. */
-STD_ENTRY(_sys_clone)
- push_extra_callee_saves r4
- j sys_clone
- STD_ENDPROC(_sys_clone)
-
- /*
- * Recover r3, r2, r1 and r0, which were saved by the unalign fast
- * vector. The vector area is limited to 32 bundles, so we handle
- * the reload here. r0, r1 and r2 are stored in thread_info, in
- * order from low to high memory. r3 points to the location where
- * the original r3 was saved.
- * We put this code in the __HEAD section so it can be reached
- * via a conditional branch from the fast path.
- */
- __HEAD
-hand_unalign_slow:
- andi sp, sp, ~1
-hand_unalign_slow_badsp:
- addi r3, r3, -(3 * 8)
- ld_add r0, r3, 8
- ld_add r1, r3, 8
- ld r2, r3
-hand_unalign_slow_nonuser:
- mfspr r3, SPR_SYSTEM_SAVE_K_1
- __int_hand INT_UNALIGN_DATA, UNALIGN_DATA_SLOW, int_unalign
-
-/* The unaligned data support needs to read all the registers. */
-int_unalign:
- push_extra_callee_saves r0
- j do_unaligned
-ENDPROC(hand_unalign_slow)
-
-/* Fill the return address stack with nonzero entries. */
-STD_ENTRY(fill_ra_stack)
- {
- move r0, lr
- jal 1f
- }
-1: jal 2f
-2: jal 3f
-3: jal 4f
-4: jrp r0
- STD_ENDPROC(fill_ra_stack)
-
- .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
- .org (\vecnum << 8)
- __int_hand \vecnum, \vecname, \c_routine, \processing
- .endm
-
-/* Include .intrpt array of interrupt vectors */
- .section ".intrpt", "ax"
- .global intrpt_start
-intrpt_start:
-
-#ifndef CONFIG_USE_PMC
-#define handle_perf_interrupt bad_intr
-#endif
-
-#ifndef CONFIG_HARDWALL
-#define do_hardwall_trap bad_intr
-#endif
-
- int_hand INT_MEM_ERROR, MEM_ERROR, do_trap
- int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
- int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
-#else
- int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
- int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
-#endif
- int_hand INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
- int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
- int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
- int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault
- int_hand INT_ILL, ILL, do_trap
- int_hand INT_GPV, GPV, do_trap
- int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
- int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
- int_hand INT_SWINT_3, SWINT_3, do_trap
- int_hand INT_SWINT_2, SWINT_2, do_trap
- int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
- int_hand INT_SWINT_0, SWINT_0, do_trap
- int_hand INT_ILL_TRANS, ILL_TRANS, do_trap
- int_hand_unalign_fast INT_UNALIGN_DATA, UNALIGN_DATA
- int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
- int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
- int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
- int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
- int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
- int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
- int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
- int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
- int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
- int_hand INT_IPI_3, IPI_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- int_hand INT_IPI_2, IPI_2, tile_dev_intr
- int_hand INT_IPI_1, IPI_1, bad_intr
-#else
- int_hand INT_IPI_2, IPI_2, bad_intr
- int_hand INT_IPI_1, IPI_1, tile_dev_intr
-#endif
- int_hand INT_IPI_0, IPI_0, bad_intr
- int_hand INT_PERF_COUNT, PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
- int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
- int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- dc_dispatch INT_INTCTRL_2, INTCTRL_2
- int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
-#else
- int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
- dc_dispatch INT_INTCTRL_1, INTCTRL_1
-#endif
- int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
- int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
- hv_message_intr
- int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
- int_hand INT_I_ASID, I_ASID, bad_intr
- int_hand INT_D_ASID, D_ASID, bad_intr
- int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
-
- /* Synthetic interrupt delivered only by the simulator */
- int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
- /* Synthetic interrupt delivered by hv */
- int_hand INT_NMI_DWNCL, NMI_DWNCL, do_nmi, handle_nmi
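Because each int_hand expansion starts at .org (\vecnum << 8), every hardware vector owns a fixed 256-byte (32-bundle) slot from intrpt_start, which is why the unaligned-data fast path has to branch out to __HEAD for its slow path. The implied geometry, as a trivial sketch:

    /* Address of the stub for a given vector number (illustrative). */
    static inline unsigned long vec_addr(unsigned long intrpt_start, int vecnum)
    {
            return intrpt_start + ((unsigned long)vecnum << 8);
    }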
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
deleted file mode 100644
index 22044fc691ef..000000000000
--- a/arch/tile/kernel/irq.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/uaccess.h>
-#include <hv/drv_pcie_rc_intf.h>
-#include <arch/spr_def.h>
-#include <asm/traps.h>
-#include <linux/perf_event.h>
-
-/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
-#define IS_HW_CLEARED 1
-
-/*
- * The set of interrupts we enable for arch_local_irq_enable().
- * This is initialized with a single sentinel interrupt that the
- * kernel doesn't actually use.  During kernel init, interrupts
- * are added as the kernel gets prepared to support them.
- * NOTE: we could probably initialize them all statically up front.
- */
-DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
- INITIAL_INTERRUPTS_ENABLED;
-EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);
-
-/* Define per-tile device interrupt statistics state. */
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
-EXPORT_PER_CPU_SYMBOL(irq_stat);
-
-/*
- * Define per-tile irq disable mask; the hardware/HV only has a single
- * mask that we use to implement both masking and disabling.
- */
-static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
- ____cacheline_internodealigned_in_smp;
-
-/*
- * Per-tile IRQ nesting depth.  Used to make sure we unmask newly
- * enabled IRQs before exiting the outermost interrupt.
- */
-static DEFINE_PER_CPU(int, irq_depth);
-
-#if CHIP_HAS_IPI()
-/* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
-#else
-/* Use HV to manipulate device interrupts. */
-#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
-#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
-#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
-#endif
-
-/*
- * The interrupt handling path, implemented in terms of HV interrupt
- * emulation on TILEPro, and IPI hardware on TILE-Gx.
- * Entered with interrupts disabled.
- */
-void tile_dev_intr(struct pt_regs *regs, int intnum)
-{
- int depth = __this_cpu_inc_return(irq_depth);
- unsigned long original_irqs;
- unsigned long remaining_irqs;
- struct pt_regs *old_regs;
-
-#if CHIP_HAS_IPI()
- /*
- * Pending interrupts are listed in an SPR. We might be
- * nested, so be sure to only handle irqs that weren't already
- * masked by a previous interrupt. Then, mask out the ones
- * we're going to handle.
- */
- unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
- original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
- __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
-#else
- /*
- * The hypervisor performs the equivalent of the Gx code above and
- * then puts the pending interrupt mask into a system save reg
- * for us to find.
- */
- original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
-#endif
- remaining_irqs = original_irqs;
-
- /* Track time spent here in an interrupt context. */
- old_regs = set_irq_regs(regs);
- irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: less than 1/8th stack free? */
- {
- long sp = stack_pointer - (long) current_thread_info();
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("%s: stack overflow: %ld\n",
- __func__, sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
- while (remaining_irqs) {
- unsigned long irq = __ffs(remaining_irqs);
- remaining_irqs &= ~(1UL << irq);
-
- /* Count device irqs; Linux IPIs are counted elsewhere. */
- if (irq != IRQ_RESCHEDULE)
- __this_cpu_inc(irq_stat.irq_dev_intr_count);
-
- generic_handle_irq(irq);
- }
-
- /*
- * If we weren't nested, turn on all enabled interrupts,
- * including any that were reenabled during interrupt
- * handling.
- */
- if (depth == 1)
- unmask_irqs(~__this_cpu_read(irq_disable_mask));
-
- __this_cpu_dec(irq_depth);
-
- /*
- * Track time spent against the current process again and
- * process any softirqs if they are waiting.
- */
- irq_exit();
- set_irq_regs(old_regs);
-}
-
-
-/*
- * Remove an irq from the disabled mask. If we're in an interrupt
- * context, defer enabling the HW interrupt until we leave.
- */
-static void tile_irq_chip_enable(struct irq_data *d)
-{
- get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
- if (__this_cpu_read(irq_depth) == 0)
- unmask_irqs(1UL << d->irq);
- put_cpu_var(irq_disable_mask);
-}
-
-/*
- * Add an irq to the disabled mask. We disable the HW interrupt
- * immediately so that there's no possibility of it firing. If we're
- * in an interrupt context, the return path is careful to avoid
- * unmasking a newly disabled interrupt.
- */
-static void tile_irq_chip_disable(struct irq_data *d)
-{
- get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
- mask_irqs(1UL << d->irq);
- put_cpu_var(irq_disable_mask);
-}
-
-/* Mask an interrupt. */
-static void tile_irq_chip_mask(struct irq_data *d)
-{
- mask_irqs(1UL << d->irq);
-}
-
-/* Unmask an interrupt. */
-static void tile_irq_chip_unmask(struct irq_data *d)
-{
- unmask_irqs(1UL << d->irq);
-}
-
-/*
- * Clear an interrupt before processing it so that any new assertions
- * will trigger another irq.
- */
-static void tile_irq_chip_ack(struct irq_data *d)
-{
- if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
- clear_irqs(1UL << d->irq);
-}
-
-/*
- * For per-cpu interrupts, we need to avoid unmasking any interrupts
- * that we disabled via disable_percpu_irq().
- */
-static void tile_irq_chip_eoi(struct irq_data *d)
-{
- if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
- unmask_irqs(1UL << d->irq);
-}
-
-static struct irq_chip tile_irq_chip = {
- .name = "tile_irq_chip",
- .irq_enable = tile_irq_chip_enable,
- .irq_disable = tile_irq_chip_disable,
- .irq_ack = tile_irq_chip_ack,
- .irq_eoi = tile_irq_chip_eoi,
- .irq_mask = tile_irq_chip_mask,
- .irq_unmask = tile_irq_chip_unmask,
-};
-
-void __init init_IRQ(void)
-{
- ipi_init();
-}
-
-void setup_irq_regs(void)
-{
- /* Enable interrupt delivery. */
- unmask_irqs(~0UL);
-#if CHIP_HAS_IPI()
- arch_local_irq_unmask(INT_IPI_K);
-#endif
-}
-
-void tile_irq_activate(unsigned int irq, int tile_irq_type)
-{
- /*
- * We use handle_level_irq() by default because the pending
- * interrupt vector (whether modeled by the HV on
- * TILEPro or implemented in hardware on TILE-Gx) has
- * level-style semantics for each bit. An interrupt fires
- * whenever a bit is high, not just at edges.
- */
- irq_flow_handler_t handle = handle_level_irq;
- if (tile_irq_type == TILE_IRQ_PERCPU)
- handle = handle_percpu_irq;
- irq_set_chip_and_handler(irq, &tile_irq_chip, handle);
-
- /*
- * Flag interrupts that are hardware-cleared so that ack()
- * won't clear them.
- */
- if (tile_irq_type == TILE_IRQ_HW_CLEAR)
- irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
-}
-EXPORT_SYMBOL(tile_irq_activate);
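A hedged sketch of the driver-side pairing this export enables: pick the flow handler with tile_irq_activate(), then register a handler as usual (the device name and handler below are hypothetical):

    #include <linux/interrupt.h>

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            /* Device-specific work would go here. */
            return IRQ_HANDLED;
    }

    static int demo_setup(unsigned int irq)
    {
            tile_irq_activate(irq, TILE_IRQ_PERCPU);
            return request_irq(irq, demo_handler, 0, "demo", NULL);
    }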
-
-
-void ack_bad_irq(unsigned int irq)
-{
- pr_err("unexpected IRQ trap at vector %02x\n", irq);
-}
-
-/*
- * /proc/interrupts printing:
- */
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
-#ifdef CONFIG_PERF_EVENTS
- int i;
-
- seq_printf(p, "%*s: ", prec, "PMI");
-
- for_each_online_cpu(i)
- seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
- seq_puts(p, " perf_events\n");
-#endif
- return 0;
-}
-
-#if CHIP_HAS_IPI()
-int arch_setup_hwirq(unsigned int irq, int node)
-{
- return irq >= NR_IRQS ? -EINVAL : 0;
-}
-
-void arch_teardown_hwirq(unsigned int irq) { }
-#endif
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
deleted file mode 100644
index 93931a46625b..000000000000
--- a/arch/tile/kernel/jump_label.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * jump label TILE-Gx support
- */
-
-#include <linux/jump_label.h>
-#include <linux/memory.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/cpu.h>
-
-#include <asm/cacheflush.h>
-#include <asm/insn.h>
-
-#ifdef HAVE_JUMP_LABEL
-
-static void __jump_label_transform(struct jump_entry *e,
- enum jump_label_type type)
-{
- tilegx_bundle_bits opcode;
- /* Operate on writable kernel text mapping. */
- unsigned long pc_wr = ktext_writable_addr(e->code);
-
- if (type == JUMP_LABEL_JMP)
- opcode = tilegx_gen_branch(e->code, e->target, false);
- else
- opcode = NOP();
-
- *(tilegx_bundle_bits *)pc_wr = opcode;
- /* Make sure the instruction write above has been issued to memory. */
- smp_wmb();
-}
-
-void arch_jump_label_transform(struct jump_entry *e,
- enum jump_label_type type)
-{
- mutex_lock(&text_mutex);
-
- __jump_label_transform(e, type);
- flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
-
- mutex_unlock(&text_mutex);
-}
-
-__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
- enum jump_label_type type)
-{
- __jump_label_transform(e, type);
-}
-
-#endif /* HAVE_JUMP_LABEL */
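For context, the hooks above are what the generic static-key machinery invokes when a key flips. A consumer-side sketch using the generic API (not tile-specific; update_stats is a made-up example):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(stats_enabled);

    extern void update_stats(void);

    void hot_path(void)
    {
            /* Compiles to a patchable nop/branch bundle on TILE-Gx. */
            if (static_branch_unlikely(&stats_enabled))
                    update_stats();
    }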
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
deleted file mode 100644
index d4eb5fb2df9d..000000000000
--- a/arch/tile/kernel/kgdb.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx KGDB support.
- */
-
-#include <linux/ptrace.h>
-#include <linux/kgdb.h>
-#include <linux/kdebug.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/sched/task_stack.h>
-
-#include <asm/cacheflush.h>
-
-static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
-static unsigned long stepped_addr;
-static tile_bundle_bits stepped_instr;
-
-struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
- { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
- { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
- { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
- { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
- { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
- { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
- { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
- { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
- { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
- { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
- { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
- { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
- { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
- { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
- { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
- { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
- { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
- { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
- { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
- { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
- { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
- { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
- { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
- { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
- { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
- { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
- { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
- { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
- { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
- { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
- { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
- { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
- { "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
- { "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
- { "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
- { "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
- { "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
- { "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
- { "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
- { "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
- { "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
- { "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
- { "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
- { "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
- { "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
- { "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
- { "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
- { "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
- { "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
- { "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
- { "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
- { "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
- { "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
- { "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
- { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
- { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
- { "sn", GDB_SIZEOF_REG, -1},
- { "idn0", GDB_SIZEOF_REG, -1},
- { "idn1", GDB_SIZEOF_REG, -1},
- { "udn0", GDB_SIZEOF_REG, -1},
- { "udn1", GDB_SIZEOF_REG, -1},
- { "udn2", GDB_SIZEOF_REG, -1},
- { "udn3", GDB_SIZEOF_REG, -1},
- { "zero", GDB_SIZEOF_REG, -1},
- { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
- { "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
-};
-
-char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
-{
- if (regno >= DBG_MAX_REG_NUM || regno < 0)
- return NULL;
-
- if (dbg_reg_def[regno].offset != -1)
- memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
- dbg_reg_def[regno].size);
- else
- memset(mem, 0, dbg_reg_def[regno].size);
- return dbg_reg_def[regno].name;
-}
-
-int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
-{
- if (regno >= DBG_MAX_REG_NUM || regno < 0)
- return -EINVAL;
-
- if (dbg_reg_def[regno].offset != -1)
- memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
- dbg_reg_def[regno].size);
- return 0;
-}
-
-/*
- * Similar to pt_regs_to_gdb_regs() except that the process is sleeping, so
- * we may not be able to get all the info.
- */
-void
-sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
-{
- struct pt_regs *thread_regs;
- const int NGPRS = TREG_LAST_GPR + 1;
-
- if (task == NULL)
- return;
-
- thread_regs = task_pt_regs(task);
- memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
- memset(&gdb_regs[NGPRS], 0,
- (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
- gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
- gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
-}
-
-void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
-{
- regs->pc = pc;
-}
-
-static void kgdb_call_nmi_hook(void *ignored)
-{
- kgdb_nmicallback(raw_smp_processor_id(), NULL);
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
- local_irq_enable();
- smp_call_function(kgdb_call_nmi_hook, NULL, 0);
- local_irq_disable();
-}
-
-/*
- * Convert a kernel address to the writable kernel text mapping.
- */
-static unsigned long writable_address(unsigned long addr)
-{
- unsigned long ret = 0;
-
- if (core_kernel_text(addr))
- ret = ktext_writable_addr(addr);
- else if (is_module_text_address(addr))
- ret = addr;
- else
- pr_err("Unknown virtual address 0x%lx\n", addr);
-
- return ret;
-}
-
-/*
- * Calculate the new PC after a single step.
- */
-static unsigned long get_step_address(struct pt_regs *regs)
-{
- int src_reg;
- int jump_off;
- int br_off;
- unsigned long addr;
- unsigned int opcode;
- tile_bundle_bits bundle;
-
- /* Move to the next instruction by default. */
- addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
- bundle = *(unsigned long *)instruction_pointer(regs);
-
- /* 0: X mode, Otherwise: Y mode. */
- if (bundle & TILEGX_BUNDLE_MODE_MASK) {
- if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
- get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) {
- opcode = get_UnaryOpcodeExtension_Y1(bundle);
-
- switch (opcode) {
- case JALR_UNARY_OPCODE_Y1:
- case JALRP_UNARY_OPCODE_Y1:
- case JR_UNARY_OPCODE_Y1:
- case JRP_UNARY_OPCODE_Y1:
- src_reg = get_SrcA_Y1(bundle);
- dbg_get_reg(src_reg, &addr, regs);
- break;
- }
- }
- } else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
- if (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) {
- opcode = get_UnaryOpcodeExtension_X1(bundle);
-
- switch (opcode) {
- case JALR_UNARY_OPCODE_X1:
- case JALRP_UNARY_OPCODE_X1:
- case JR_UNARY_OPCODE_X1:
- case JRP_UNARY_OPCODE_X1:
- src_reg = get_SrcA_X1(bundle);
- dbg_get_reg(src_reg, &addr, regs);
- break;
- }
- }
- } else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
- opcode = get_JumpOpcodeExtension_X1(bundle);
-
- switch (opcode) {
- case JAL_JUMP_OPCODE_X1:
- case J_JUMP_OPCODE_X1:
- jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
- addr = regs->pc +
- (jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
- break;
- }
- } else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
- br_off = 0;
- opcode = get_BrType_X1(bundle);
-
- switch (opcode) {
- case BEQZT_BRANCH_OPCODE_X1:
- case BEQZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) == 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BGEZT_BRANCH_OPCODE_X1:
- case BGEZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) >= 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BGTZT_BRANCH_OPCODE_X1:
- case BGTZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) > 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BLBCT_BRANCH_OPCODE_X1:
- case BLBC_BRANCH_OPCODE_X1:
- if (!(get_SrcA_X1(bundle) & 1))
- br_off = get_BrOff_X1(bundle);
- break;
- case BLBST_BRANCH_OPCODE_X1:
- case BLBS_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) & 1)
- br_off = get_BrOff_X1(bundle);
- break;
- case BLEZT_BRANCH_OPCODE_X1:
- case BLEZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) <= 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BLTZT_BRANCH_OPCODE_X1:
- case BLTZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) < 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BNEZT_BRANCH_OPCODE_X1:
- case BNEZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) != 0)
- br_off = get_BrOff_X1(bundle);
- break;
- }
-
- if (br_off != 0) {
- br_off = sign_extend(br_off, 17);
- addr = regs->pc +
- (br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
- }
- }
-
- return addr;
-}
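get_step_address() leans on a sign_extend() helper for the 17-bit branch and 27-bit jump displacements. A self-contained model of that operation, assuming the second argument is the field width (the deleted header defined the real one):

    /* Sign-extend the low "bits" bits of value (illustrative model). */
    static long sign_extend_model(unsigned long value, int bits)
    {
            unsigned long sign = 1UL << (bits - 1);

            value &= (1UL << bits) - 1;     /* keep only the field */
            return (long)((value ^ sign) - sign);
    }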
-
-/*
- * Replace the next instruction after the current instruction with a
- * breakpoint instruction.
- */
-static void do_single_step(struct pt_regs *regs)
-{
- unsigned long addr_wr;
-
- /* Determine where the target instruction will send us to. */
- stepped_addr = get_step_address(regs);
- probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
- BREAK_INSTR_SIZE);
-
- addr_wr = writable_address(stepped_addr);
- probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
- BREAK_INSTR_SIZE);
- smp_wmb();
- flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
-}
-
-static void undo_single_step(struct pt_regs *regs)
-{
- unsigned long addr_wr;
-
- if (stepped_instr == 0)
- return;
-
- addr_wr = writable_address(stepped_addr);
- probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
- BREAK_INSTR_SIZE);
- stepped_instr = 0;
- smp_wmb();
- flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
-}
-
-/*
- * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
- * then try to fall into the debugger.
- */
-static int
-kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
-{
- int ret;
- unsigned long flags;
- struct die_args *args = (struct die_args *)ptr;
- struct pt_regs *regs = args->regs;
-
-#ifdef CONFIG_KPROBES
- /*
- * Return immediately if the kprobes fault notifier has set
- * DIE_PAGE_FAULT.
- */
- if (cmd == DIE_PAGE_FAULT)
- return NOTIFY_DONE;
-#endif /* CONFIG_KPROBES */
-
- switch (cmd) {
- case DIE_BREAK:
- case DIE_COMPILED_BPT:
- break;
- case DIE_SSTEPBP:
- local_irq_save(flags);
- kgdb_handle_exception(0, SIGTRAP, 0, regs);
- local_irq_restore(flags);
- return NOTIFY_STOP;
- default:
- /* Userspace events, ignore. */
- if (user_mode(regs))
- return NOTIFY_DONE;
- }
-
- local_irq_save(flags);
- ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
- local_irq_restore(flags);
- if (ret)
- return NOTIFY_DONE;
-
- return NOTIFY_STOP;
-}
-
-static struct notifier_block kgdb_notifier = {
- .notifier_call = kgdb_notify,
-};
-
-/*
- * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- * @vector: The error vector of the exception that happened.
- * @signo: The signal number of the exception that happened.
- * @err_code: The error code of the exception that happened.
- * @remcom_in_buffer: The buffer of the packet we have read.
- * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- * @regs: The &struct pt_regs of the current process.
- *
- * This function MUST handle the 'c' and 's' command packets,
- * as well as packets to set / remove a hardware breakpoint, if used.
- * If there are additional packets which the hardware needs to handle,
- * they are handled here. The code should return -1 if it wants to
- * process more packets, and a %0 or %1 if it wants to exit from the
- * kgdb callback.
- */
-int kgdb_arch_handle_exception(int vector, int signo, int err_code,
- char *remcom_in_buffer, char *remcom_out_buffer,
- struct pt_regs *regs)
-{
- char *ptr;
- unsigned long address;
-
- /* Undo any stepping we may have done. */
- undo_single_step(regs);
-
- switch (remcom_in_buffer[0]) {
- case 'c':
- case 's':
- case 'D':
- case 'k':
- /*
- * Try to read the optional parameter; pc is unchanged if none is given.
- * If this was a compiled-in breakpoint, we need to move
- * to the next instruction or we will just breakpoint
- * over and over again.
- */
- ptr = &remcom_in_buffer[1];
- if (kgdb_hex2long(&ptr, &address))
- regs->pc = address;
- else if (*(unsigned long *)regs->pc == compiled_bpt)
- regs->pc += BREAK_INSTR_SIZE;
-
- if (remcom_in_buffer[0] == 's') {
- do_single_step(regs);
- kgdb_single_step = 1;
- atomic_set(&kgdb_cpu_doing_single_step,
- raw_smp_processor_id());
- } else
- atomic_set(&kgdb_cpu_doing_single_step, -1);
-
- return 0;
- }
-
- return -1; /* this means that we do not want to exit from the handler */
-}
-
-struct kgdb_arch arch_kgdb_ops;
-
-/*
- * kgdb_arch_init - Perform any architecture specific initialization.
- *
- * This function will handle the initialization of any architecture
- * specific callbacks.
- */
-int kgdb_arch_init(void)
-{
- tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;
-
- memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
- return register_die_notifier(&kgdb_notifier);
-}
-
-/*
- * kgdb_arch_exit - Perform any architecture specific uninitialization.
- *
- * This function will handle the uninitialization of any architecture
- * specific callbacks, for dynamic registration and unregistration.
- */
-void kgdb_arch_exit(void)
-{
- unregister_die_notifier(&kgdb_notifier);
-}
-
-int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
-{
- int err;
- unsigned long addr_wr = writable_address(bpt->bpt_addr);
-
- if (addr_wr == 0)
- return -1;
-
- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
- smp_wmb();
- flush_icache_range((unsigned long)bpt->bpt_addr,
- (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
- return err;
-}
-
-int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
-{
- int err;
- unsigned long addr_wr = writable_address(bpt->bpt_addr);
-
- if (addr_wr == 0)
- return -1;
-
- err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
- BREAK_INSTR_SIZE);
- smp_wmb();
- flush_icache_range((unsigned long)bpt->bpt_addr,
- (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
- return err;
-}
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
deleted file mode 100644
index c68694bb1ad2..000000000000
--- a/arch/tile/kernel/kprobes.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * arch/tile/kernel/kprobes.c
- * Kprobes on TILE-Gx
- *
- * Some portions copied from the MIPS version.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- * Copyright 2006 Sony Corp.
- * Copyright 2010 Cavium Networks
- *
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kprobes.h>
-#include <linux/kdebug.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-
-#include <arch/opcode.h>
-
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
-tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
-
-/*
- * Check whether the instruction is a branch or jump, or whether
- * executing it has different results depending on where it is
- * executed (e.g. lnk).
- */
-static int __kprobes insn_has_control(kprobe_opcode_t insn)
-{
- if (get_Mode(insn) != 0) { /* Y-format bundle */
- if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
- get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
- return 0;
-
- switch (get_UnaryOpcodeExtension_Y1(insn)) {
- case JALRP_UNARY_OPCODE_Y1:
- case JALR_UNARY_OPCODE_Y1:
- case JRP_UNARY_OPCODE_Y1:
- case JR_UNARY_OPCODE_Y1:
- case LNK_UNARY_OPCODE_Y1:
- return 1;
- default:
- return 0;
- }
- }
-
- switch (get_Opcode_X1(insn)) {
- case BRANCH_OPCODE_X1: /* branch instructions */
- case JUMP_OPCODE_X1: /* jump instructions: j and jal */
- return 1;
-
- case RRR_0_OPCODE_X1: /* other jump instructions */
- if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
- return 0;
- switch (get_UnaryOpcodeExtension_X1(insn)) {
- case JALRP_UNARY_OPCODE_X1:
- case JALR_UNARY_OPCODE_X1:
- case JRP_UNARY_OPCODE_X1:
- case JR_UNARY_OPCODE_X1:
- case LNK_UNARY_OPCODE_X1:
- return 1;
- default:
- return 0;
- }
- default:
- return 0;
- }
-}
-
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
-{
- unsigned long addr = (unsigned long)p->addr;
-
- if (addr & (sizeof(kprobe_opcode_t) - 1))
- return -EINVAL;
-
- if (insn_has_control(*p->addr)) {
- pr_notice("Kprobes for control instructions are not supported\n");
- return -EINVAL;
- }
-
- /* insn: must be on special executable page on tile. */
- p->ainsn.insn = get_insn_slot();
- if (!p->ainsn.insn)
- return -ENOMEM;
-
- /*
- * In the kprobe->ainsn.insn[] array we store the original
- * instruction at index zero and a break trap instruction at
- * index one.
- */
- memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
- p->ainsn.insn[1] = breakpoint2_insn;
- p->opcode = *p->addr;
-
- return 0;
-}
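The slot prepared above is the standard out-of-line single-step arrangement: the original bundle is executed from the slot, and the trap bundle after it bounces control straight back into the kprobes machinery. As a compact sketch using the file's own constants:

    /* Fill a single-step slot: original bundle, then a post-step trap. */
    static void fill_ss_slot(kprobe_opcode_t *slot, kprobe_opcode_t orig)
    {
            slot[0] = orig;                            /* stepped out of line */
            slot[1] = TILEGX_BPT_BUNDLE | DIE_SSTEPBP; /* == breakpoint2_insn */
    }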
-
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
- unsigned long addr_wr;
-
- /* Operate on writable kernel text mapping. */
- addr_wr = ktext_writable_addr(p->addr);
-
- if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
- sizeof(breakpoint_insn)))
- pr_err("%s: failed to enable kprobe\n", __func__);
-
- smp_wmb();
- flush_insn_slot(p);
-}
-
-void __kprobes arch_disarm_kprobe(struct kprobe *kp)
-{
- unsigned long addr_wr;
-
- /* Operate on writable kernel text mapping. */
- addr_wr = ktext_writable_addr(kp->addr);
-
- if (probe_kernel_write((void *)addr_wr, &kp->opcode,
- sizeof(kp->opcode)))
-		pr_err("%s: failed to disable kprobe\n", __func__);
-
- smp_wmb();
- flush_insn_slot(kp);
-}
-
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
- if (p->ainsn.insn) {
- free_insn_slot(p->ainsn.insn, 0);
- p->ainsn.insn = NULL;
- }
-}
-
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
- kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
-}
-
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
- kcb->kprobe_status = kcb->prev_kprobe.status;
- kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
-}
-
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- __this_cpu_write(current_kprobe, p);
- kcb->kprobe_saved_pc = regs->pc;
-}
-
-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
-{
- /* Single step inline if the instruction is a break. */
- if (p->opcode == breakpoint_insn ||
- p->opcode == breakpoint2_insn)
- regs->pc = (unsigned long)p->addr;
- else
- regs->pc = (unsigned long)&p->ainsn.insn[0];
-}
-
-static int __kprobes kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *p;
- int ret = 0;
- kprobe_opcode_t *addr;
- struct kprobe_ctlblk *kcb;
-
- addr = (kprobe_opcode_t *)regs->pc;
-
- /*
- * We don't want to be preempted for the entire
- * duration of kprobe processing.
- */
- preempt_disable();
- kcb = get_kprobe_ctlblk();
-
- /* Check we're not actually recursing. */
- if (kprobe_running()) {
- p = get_kprobe(addr);
- if (p) {
- if (kcb->kprobe_status == KPROBE_HIT_SS &&
- p->ainsn.insn[0] == breakpoint_insn) {
- goto no_kprobe;
- }
- /*
- * We have reentered the kprobe_handler(), since
- * another probe was hit while within the handler.
- * We here save the original kprobes variables and
- * just single step on the instruction of the new probe
- * without calling any user handlers.
- */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
- kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
- return 1;
- } else {
- if (*addr != breakpoint_insn) {
- /*
- * The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate.
- */
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
- }
- goto no_kprobe;
- }
-
- p = get_kprobe(addr);
- if (!p) {
- if (*addr != breakpoint_insn) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- */
- ret = 1;
- }
- /* Not one of ours: let kernel handle it. */
- goto no_kprobe;
- }
-
- set_current_kprobe(p, regs, kcb);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
- if (p->pre_handler && p->pre_handler(p, regs)) {
- /* Handler has already set things up, so skip ss setup. */
- return 1;
- }
-
-ss_probe:
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
-
-no_kprobe:
- preempt_enable_no_resched();
- return ret;
-}
-
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction that has been replaced by the breakpoint. To avoid the
- * SMP problems that can occur when we temporarily put back the
- * original opcode to single-step, we single-stepped a copy of the
- * instruction. The address of this copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * breakpoint trap.
- */
-static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- unsigned long orig_pc = kcb->kprobe_saved_pc;
- regs->pc = orig_pc + 8;
-}
-
-static inline int post_kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (!cur)
- return 0;
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
- resume_execution(cur, regs, kcb);
-
- /* Restore back the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- goto out;
- }
- reset_current_kprobe();
-out:
- preempt_enable_no_resched();
-
- return 1;
-}
-
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- /*
- * We are here because the instruction being single
- * stepped caused a page fault. We reset the current
- * kprobe and the ip points back to the probe address
- * and allow the page fault handler to continue as a
- * normal page fault.
- */
- resume_execution(cur, regs, kcb);
- reset_current_kprobe();
- preempt_enable_no_resched();
- }
- return 0;
-}
-
-/*
- * Wrapper routine for handling exceptions.
- */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
- switch (val) {
- case DIE_BREAK:
- if (kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_SSTEPBP:
- if (post_kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_PAGE_FAULT:
- /* kprobe_running() needs smp_processor_id(). */
- preempt_disable();
-
- if (kprobe_running()
- && kprobe_fault_handler(args->regs, args->trapnr))
- ret = NOTIFY_STOP;
- preempt_enable();
- break;
- default:
- break;
- }
- return ret;
-}
-
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = regs->sp;
-
- memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
- regs->pc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
- asm volatile(
- "bpt\n\t"
- ".globl jprobe_return_end\n"
- "jprobe_return_end:\n");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->pc >= (unsigned long)jprobe_return &&
- regs->pc <= (unsigned long)jprobe_return_end) {
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
- preempt_enable_no_resched();
-
- return 1;
- }
- return 0;
-}
-
-/*
- * Function return probe trampoline:
- * - init_kprobes() establishes a probepoint here
- * - When the probed function returns, this probe causes the
- * handlers to fire
- */
-static void __used kretprobe_trampoline_holder(void)
-{
- asm volatile(
- "nop\n\t"
- ".global kretprobe_trampoline\n"
- "kretprobe_trampoline:\n\t"
- "nop\n\t"
- : : : "memory");
-}
-
-void kretprobe_trampoline(void);
-
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
-{
- ri->ret_addr = (kprobe_opcode_t *) regs->lr;
-
- /* Replace the return addr with trampoline addr */
- regs->lr = (unsigned long)kretprobe_trampoline;
-}
-
-/*
- * Called when the probe at kretprobe trampoline is hit.
- */
-static int __kprobes trampoline_probe_handler(struct kprobe *p,
- struct pt_regs *regs)
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * a return probe installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address) {
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
- instruction_pointer(regs) = orig_ret_address;
-
- reset_current_kprobe();
- kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
-}
-
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
-{
- if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
- return 1;
-
- return 0;
-}
-
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *)kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-
-int __init arch_init_kprobes(void)
-{
- register_kprobe(&trampoline_p);
- return 0;
-}
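
For reference, the file above implemented only the architecture-specific half of
kprobes; clients use the generic <linux/kprobes.h> API, which is unchanged on the
remaining architectures. A minimal sketch of such a client module follows; the
probed symbol name is purely illustrative, not part of the removed code:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    /* Fires before the probed instruction is single-stepped. */
    static int sample_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("kprobe hit at %p\n", p->addr);
            return 0;       /* continue with normal single-step handling */
    }

    static struct kprobe sample_kp = {
            .symbol_name = "do_sys_open",   /* illustrative target */
            .pre_handler = sample_pre_handler,
    };

    static int __init sample_init(void)
    {
            return register_kprobe(&sample_kp);
    }

    static void __exit sample_exit(void)
    {
            unregister_kprobe(&sample_kp);
    }

    module_init(sample_init);
    module_exit(sample_exit);
    MODULE_LICENSE("GPL");
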
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
deleted file mode 100644
index 008aa2faef55..000000000000
--- a/arch/tile/kernel/machine_kexec.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * based on machine_kexec.c from other architectures in linux-2.6.18
- */
-
-#include <linux/mm.h>
-#include <linux/kexec.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/cpumask.h>
-#include <linux/kernel.h>
-#include <linux/elf.h>
-#include <linux/highmem.h>
-#include <linux/mmu_context.h>
-#include <linux/io.h>
-#include <linux/timex.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/checksum.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-#include <hv/hypervisor.h>
-
-
-/*
- * These definitions are not in elf.h or any other kernel include.
- * They are needed below by the little boot-notes parser that
- * extracts the command line so we can pass it to the hypervisor.
- */
-struct Elf32_Bhdr {
- Elf32_Word b_signature;
- Elf32_Word b_size;
- Elf32_Half b_checksum;
- Elf32_Half b_records;
-};
-#define ELF_BOOT_MAGIC 0x0E1FB007
-#define EBN_COMMAND_LINE 0x00000004
-#define roundupsz(X) (((X) + 3) & ~3)
-
-/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
-
-
-void machine_shutdown(void)
-{
- /*
- * Normally we would stop all the other processors here, but
- * the check in machine_kexec_prepare below ensures we'll only
- * get this far if we've been booted with "nosmp" on the
- * command line or without CONFIG_SMP so there's nothing to do
- * here (for now).
- */
-}
-
-void machine_crash_shutdown(struct pt_regs *regs)
-{
- /*
- * Cannot happen. This type of kexec is disabled on this
- * architecture (and enforced in machine_kexec_prepare below).
- */
-}
-
-
-int machine_kexec_prepare(struct kimage *image)
-{
- if (num_online_cpus() > 1) {
- pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
- __func__);
- return -ENOSYS;
- }
- if (image->type != KEXEC_TYPE_DEFAULT) {
- pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
- __func__, image->type);
- return -ENOSYS;
- }
- return 0;
-}
-
-void machine_kexec_cleanup(struct kimage *image)
-{
- /*
- * We did nothing in machine_kexec_prepare,
- * so we have nothing to do here.
- */
-}
-
-/*
- * If we can find elf boot notes on this page, return the command
- * line. Otherwise, silently return null. Somewhat kludgy, but no
- * good way to do this without significantly rearchitecting the
- * architecture-independent kexec code.
- */
-
-static unsigned char *kexec_bn2cl(void *pg)
-{
- struct Elf32_Bhdr *bhdrp;
- Elf32_Nhdr *nhdrp;
- unsigned char *desc;
- unsigned char *command_line;
- __sum16 csum;
-
- bhdrp = (struct Elf32_Bhdr *) pg;
-
- /*
- * This routine is invoked for every source page, so make
- * sure to quietly ignore every impossible page.
- */
- if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
- bhdrp->b_size > PAGE_SIZE)
- return 0;
-
- /*
- * If we get a checksum mismatch, warn with the checksum
- * so we can diagnose better.
- */
- csum = ip_compute_csum(pg, bhdrp->b_size);
- if (csum != 0) {
- pr_warn("%s: bad checksum %#x (size %d)\n",
- __func__, csum, bhdrp->b_size);
- return 0;
- }
-
- nhdrp = (Elf32_Nhdr *) (bhdrp + 1);
-
- while (nhdrp->n_type != EBN_COMMAND_LINE) {
-
- desc = (unsigned char *) (nhdrp + 1);
- desc += roundupsz(nhdrp->n_descsz);
-
- nhdrp = (Elf32_Nhdr *) desc;
-
- /* still in bounds? */
- if ((unsigned char *) (nhdrp + 1) >
- ((unsigned char *) pg) + bhdrp->b_size) {
-
- pr_info("%s: out of bounds\n", __func__);
- return 0;
- }
- }
-
- command_line = (unsigned char *) (nhdrp + 1);
- desc = command_line;
-
- while (*desc != '\0') {
- desc++;
- if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
- pr_info("%s: ran off end of page\n", __func__);
- return 0;
- }
- }
-
- return command_line;
-}
-
-static void kexec_find_and_set_command_line(struct kimage *image)
-{
- kimage_entry_t *ptr, entry;
-
- unsigned char *command_line = 0;
- unsigned char *r;
- HV_Errno hverr;
-
- for (ptr = &image->head;
- (entry = *ptr) && !(entry & IND_DONE);
- ptr = (entry & IND_INDIRECTION) ?
- phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {
-
- if ((entry & IND_SOURCE)) {
- void *va =
- kmap_atomic_pfn(entry >> PAGE_SHIFT);
- r = kexec_bn2cl(va);
- if (r) {
- command_line = r;
- break;
- }
- kunmap_atomic(va);
- }
- }
-
- if (command_line != 0) {
- pr_info("setting new command line to \"%s\"\n", command_line);
-
- hverr = hv_set_command_line(
- (HV_VirtAddr) command_line, strlen(command_line));
- kunmap_atomic(command_line);
- } else {
- pr_info("%s: no command line found; making empty\n", __func__);
- hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
- }
- if (hverr)
- pr_warn("%s: hv_set_command_line returned error: %d\n",
- __func__, hverr);
-}
-
-/*
- * The kexec code range-checks all its PAs, so to avoid having it run
- * amok and allocate memory and then sequester it from every other
- * controller, we force it to come from controller zero. We also
- * disable the oom-killer since if we do end up running out of memory,
- * that almost certainly won't help.
- */
-struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
-{
- gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
- return alloc_pages_node(0, gfp_mask, order);
-}
-
-/*
- * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
- * For tilepro, PAGE_OFFSET is used since this is the largest possible value
- * for tilepro, while for tilegx, we limit it to the entire middle-level page
- * table, which we assume has been allocated and is undoubtedly large enough.
- */
-#ifndef __tilegx__
-#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
-#else
-#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
-#endif
-
-static void setup_quasi_va_is_pa(void)
-{
- HV_PTE pte;
- unsigned long i;
-
- /*
- * Flush our TLB to prevent conflicts between the previous contents
- * and the new stuff we're about to add.
- */
- local_flush_tlb_all();
-
- /*
-	 * Set up VA == PA mappings, at least up to
-	 * QUASI_VA_IS_PA_ADDR_RANGE.  Note that here we assume the
-	 * level-1 page table entries are defined by HPAGE_SIZE.
- */
- pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
- for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
- unsigned long vaddr = i << HPAGE_SHIFT;
- pgd_t *pgd = pgd_offset(current->mm, vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
- pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
- unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
-
- if (pfn_valid(pfn))
- __set_pte(ptep, pfn_pte(pfn, pte));
- }
-}
-
-
-void machine_kexec(struct kimage *image)
-{
- void *reboot_code_buffer;
- pte_t *ptep;
- void (*rnk)(unsigned long, void *, unsigned long)
- __noreturn;
-
- /* Mask all interrupts before starting to reboot. */
- interrupt_mask_set_mask(~0ULL);
-
- kexec_find_and_set_command_line(image);
-
- /*
- * Adjust the home caching of the control page to be cached on
- * this cpu, and copy the assembly helper into the control
- * code page, which we map in the vmalloc area.
- */
- homecache_change_page_home(image->control_code_page, 0,
- smp_processor_id());
- reboot_code_buffer = page_address(image->control_code_page);
- BUG_ON(reboot_code_buffer == NULL);
- ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
- __set_pte(ptep, pte_mkexec(*ptep));
- memcpy(reboot_code_buffer, relocate_new_kernel,
- relocate_new_kernel_size);
- __flush_icache_range(
- (unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + relocate_new_kernel_size);
-
- setup_quasi_va_is_pa();
-
- /* now call it */
- rnk = reboot_code_buffer;
- (*rnk)(image->head, reboot_code_buffer, image->start);
-}
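
The kexec_bn2cl() parser above walks a page of ELF boot notes until it finds the
command-line note. The same walk, restated as a self-contained userspace sketch
(struct layouts copied from the code above; the bounds handling is simplified
for illustration):

    #include <stdint.h>
    #include <stddef.h>

    #define ELF_BOOT_MAGIC   0x0E1FB007
    #define EBN_COMMAND_LINE 0x00000004
    #define ROUNDUP4(x)      (((x) + 3) & ~3u)

    struct bhdr { uint32_t b_signature, b_size; uint16_t b_checksum, b_records; };
    struct nhdr { uint32_t n_namesz, n_descsz, n_type; };

    /* Return the command line embedded in a boot-notes page, or NULL. */
    const char *find_cmdline(const void *pg)
    {
            const struct bhdr *b = pg;
            const uint8_t *end = (const uint8_t *)pg + b->b_size;
            const struct nhdr *n = (const struct nhdr *)(b + 1);

            if (b->b_signature != ELF_BOOT_MAGIC)
                    return NULL;
            while (n->n_type != EBN_COMMAND_LINE) {
                    /* Step over the descriptor, padded to 4 bytes, as above. */
                    const uint8_t *desc = (const uint8_t *)(n + 1);

                    n = (const struct nhdr *)(desc + ROUNDUP4(n->n_descsz));
                    if ((const uint8_t *)(n + 1) > end)
                            return NULL;    /* ran off the notes block */
            }
            return (const char *)(n + 1);
    }
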
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
deleted file mode 100644
index 6c6702451962..000000000000
--- a/arch/tile/kernel/mcount_64.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx specific __mcount support
- */
-
-#include <linux/linkage.h>
-#include <asm/ftrace.h>
-
-#define REGSIZE 8
-
- .text
- .global __mcount
-
- .macro MCOUNT_SAVE_REGS
- addli sp, sp, -REGSIZE
- {
- st sp, lr
- addli r29, sp, - (12 * REGSIZE)
- }
- {
- addli sp, sp, - (13 * REGSIZE)
- st r29, sp
- }
- addli r29, r29, REGSIZE
- { st r29, r0; addli r29, r29, REGSIZE }
- { st r29, r1; addli r29, r29, REGSIZE }
- { st r29, r2; addli r29, r29, REGSIZE }
- { st r29, r3; addli r29, r29, REGSIZE }
- { st r29, r4; addli r29, r29, REGSIZE }
- { st r29, r5; addli r29, r29, REGSIZE }
- { st r29, r6; addli r29, r29, REGSIZE }
- { st r29, r7; addli r29, r29, REGSIZE }
- { st r29, r8; addli r29, r29, REGSIZE }
- { st r29, r9; addli r29, r29, REGSIZE }
- { st r29, r10; addli r29, r29, REGSIZE }
- .endm
-
- .macro MCOUNT_RESTORE_REGS
- addli r29, sp, (2 * REGSIZE)
- { ld r0, r29; addli r29, r29, REGSIZE }
- { ld r1, r29; addli r29, r29, REGSIZE }
- { ld r2, r29; addli r29, r29, REGSIZE }
- { ld r3, r29; addli r29, r29, REGSIZE }
- { ld r4, r29; addli r29, r29, REGSIZE }
- { ld r5, r29; addli r29, r29, REGSIZE }
- { ld r6, r29; addli r29, r29, REGSIZE }
- { ld r7, r29; addli r29, r29, REGSIZE }
- { ld r8, r29; addli r29, r29, REGSIZE }
- { ld r9, r29; addli r29, r29, REGSIZE }
- { ld r10, r29; addli lr, sp, (13 * REGSIZE) }
- { ld lr, lr; addli sp, sp, (14 * REGSIZE) }
- .endm
-
- .macro RETURN_BACK
- { move r12, lr; move lr, r10 }
- jrp r12
- .endm
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
- .align 64
-STD_ENTRY(__mcount)
-__mcount:
- j ftrace_stub
-STD_ENDPROC(__mcount)
-
- .align 64
-STD_ENTRY(ftrace_caller)
- MCOUNT_SAVE_REGS
-
- /* arg1: self return address */
- /* arg2: parent's return address */
- /* arg3: ftrace_ops */
- /* arg4: regs (but make it NULL) */
- { move r0, lr; moveli r2, hw2_last(function_trace_op) }
- { move r1, r10; shl16insli r2, r2, hw1(function_trace_op) }
- { movei r3, 0; shl16insli r2, r2, hw0(function_trace_op) }
-	ld r2, r2
-
- .global ftrace_call
-ftrace_call:
- /*
- * a placeholder for the call to a real tracing function, i.e.
- * ftrace_trace_function()
- */
- nop
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .global ftrace_graph_call
-ftrace_graph_call:
- /*
- * a placeholder for the call to a real tracing function, i.e.
- * ftrace_graph_caller()
- */
- nop
-#endif
- MCOUNT_RESTORE_REGS
- .global ftrace_stub
-ftrace_stub:
- RETURN_BACK
-STD_ENDPROC(ftrace_caller)
-
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
- .align 64
-STD_ENTRY(__mcount)
- {
- moveli r11, hw2_last(ftrace_trace_function)
- moveli r13, hw2_last(ftrace_stub)
- }
- {
- shl16insli r11, r11, hw1(ftrace_trace_function)
- shl16insli r13, r13, hw1(ftrace_stub)
- }
- {
- shl16insli r11, r11, hw0(ftrace_trace_function)
- shl16insli r13, r13, hw0(ftrace_stub)
- }
-
- ld r11, r11
- sub r14, r13, r11
- bnez r14, static_trace
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- moveli r15, hw2_last(ftrace_graph_return)
- shl16insli r15, r15, hw1(ftrace_graph_return)
- shl16insli r15, r15, hw0(ftrace_graph_return)
- ld r15, r15
- sub r15, r15, r13
- bnez r15, ftrace_graph_caller
-
- {
- moveli r16, hw2_last(ftrace_graph_entry)
- moveli r17, hw2_last(ftrace_graph_entry_stub)
- }
- {
- shl16insli r16, r16, hw1(ftrace_graph_entry)
- shl16insli r17, r17, hw1(ftrace_graph_entry_stub)
- }
- {
- shl16insli r16, r16, hw0(ftrace_graph_entry)
- shl16insli r17, r17, hw0(ftrace_graph_entry_stub)
- }
- ld r16, r16
- sub r17, r16, r17
- bnez r17, ftrace_graph_caller
-
-#endif
- RETURN_BACK
-
-static_trace:
- MCOUNT_SAVE_REGS
-
- /* arg1: self return address */
- /* arg2: parent's return address */
- { move r0, lr; move r1, r10 }
-
- /* call ftrace_trace_function() */
- jalr r11
-
- MCOUNT_RESTORE_REGS
-
- .global ftrace_stub
-ftrace_stub:
- RETURN_BACK
-STD_ENDPROC(__mcount)
-
-#endif /* ! CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-STD_ENTRY(ftrace_graph_caller)
-ftrace_graph_caller:
-#ifndef CONFIG_DYNAMIC_FTRACE
- MCOUNT_SAVE_REGS
-#endif
-
- /* arg1: Get the location of the parent's return address */
- addi r0, sp, 12 * REGSIZE
- /* arg2: Get self return address */
- move r1, lr
-
- jal prepare_ftrace_return
-
- MCOUNT_RESTORE_REGS
- RETURN_BACK
-STD_ENDPROC(ftrace_graph_caller)
-
- .global return_to_handler
-return_to_handler:
- MCOUNT_SAVE_REGS
-
- jal ftrace_return_to_handler
- /* restore the real parent address */
- move r11, r0
-
- MCOUNT_RESTORE_REGS
- jr r11
-
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
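
The non-dynamic __mcount above reduces to a guarded indirect call: compare the
installed tracer against ftrace_stub and only call through when a real tracer is
present. A standalone C illustration of that dispatch pattern (names invented
for the demo; the real code must also save and restore the caller's registers,
which is why it is assembly):

    #include <stdio.h>

    /* Stub tracer: "no tracing installed". */
    static void trace_stub(unsigned long self, unsigned long parent)
    {
            (void)self; (void)parent;
    }

    static void (*trace_fn)(unsigned long, unsigned long) = trace_stub;

    static void real_tracer(unsigned long self, unsigned long parent)
    {
            printf("traced: callee=%#lx caller=%#lx\n", self, parent);
    }

    /* What the "sub r14, r13, r11; bnez r14, static_trace" sequence tests. */
    static void mcount_like(unsigned long self, unsigned long parent)
    {
            if (trace_fn != trace_stub)
                    trace_fn(self, parent);
    }

    int main(void)
    {
            mcount_like(0x1000, 0x2000);    /* stub installed: no output */
            trace_fn = real_tracer;
            mcount_like(0x1000, 0x2000);    /* prints the traced line */
            return 0;
    }
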
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
deleted file mode 100644
index 7475af3aacec..000000000000
--- a/arch/tile/kernel/messaging.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/hardirq.h>
-#include <linux/ptrace.h>
-#include <asm/hv_driver.h>
-#include <asm/irq_regs.h>
-#include <asm/traps.h>
-#include <hv/hypervisor.h>
-#include <arch/interrupts.h>
-
-/* All messages are stored here */
-static DEFINE_PER_CPU(HV_MsgState, msg_state);
-
-void init_messaging(void)
-{
- /* Allocate storage for messages in kernel space */
- HV_MsgState *state = this_cpu_ptr(&msg_state);
- int rc = hv_register_message_state(state);
- if (rc != HV_OK)
- panic("hv_register_message_state: error %d", rc);
-
- /* Make sure downcall interrupts will be enabled. */
- arch_local_irq_unmask(INT_INTCTRL_K);
-}
-
-void hv_message_intr(struct pt_regs *regs, int intnum)
-{
- /*
- * We enter with interrupts disabled and leave them disabled,
- * to match expectations of called functions (e.g.
- * do_ccupdate_local() in mm/slab.c). This is also consistent
- * with normal call entry for device interrupts.
- */
-
- int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
- HV_RcvMsgInfo rmi;
- int nmsgs = 0;
-
- /* Track time spent here in an interrupt context */
- struct pt_regs *old_regs = set_irq_regs(regs);
- irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: less than 1/8th stack free? */
- {
- long sp = stack_pointer - (long) current_thread_info();
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("%s: stack overflow: %ld\n",
- __func__, sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
-
- while (1) {
- HV_MsgState *state = this_cpu_ptr(&msg_state);
- rmi = hv_receive_message(*state, (HV_VirtAddr) message,
- sizeof(message));
- if (rmi.msglen == 0)
- break;
-
- if (rmi.msglen < 0)
- panic("hv_receive_message failed: %d", rmi.msglen);
-
- ++nmsgs;
-
- if (rmi.source == HV_MSG_TILE) {
- int tag;
-
- /* we just send tags for now */
- BUG_ON(rmi.msglen != sizeof(int));
-
- tag = message[0];
-#ifdef CONFIG_SMP
- evaluate_message(message[0]);
-#else
- panic("Received IPI message %d in UP mode", tag);
-#endif
- } else if (rmi.source == HV_MSG_INTR) {
- HV_IntrMsg *him = (HV_IntrMsg *)message;
- struct hv_driver_cb *cb =
- (struct hv_driver_cb *)him->intarg;
- cb->callback(cb, him->intdata);
- __this_cpu_inc(irq_stat.irq_hv_msg_count);
- }
- }
-
- /*
- * We shouldn't have gotten a message downcall with no
- * messages available.
- */
- if (nmsgs == 0)
- panic("Message downcall invoked with no messages!");
-
- /*
- * Track time spent against the current process again and
- * process any softirqs if they are waiting.
- */
- irq_exit();
- set_irq_regs(old_regs);
-}
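
hv_message_intr() above drains the hypervisor message queue until it reports
empty and dispatches on the message source. A standalone sketch of that loop
shape (the queue, sources and payloads are stand-ins, not the hypervisor ABI):

    #include <stdio.h>

    enum source { MSG_NONE, MSG_TILE, MSG_INTR };
    struct msg { enum source src; int payload; };

    static struct msg queue[] = {
            { MSG_TILE, 42 }, { MSG_INTR, 7 }, { MSG_NONE, 0 },
    };
    static int head;

    static struct msg receive(void) { return queue[head++]; }

    int main(void)
    {
            int nmsgs = 0;

            for (;;) {
                    struct msg m = receive();

                    if (m.src == MSG_NONE)          /* rmi.msglen == 0 above */
                            break;
                    nmsgs++;
                    if (m.src == MSG_TILE)
                            printf("IPI tag %d\n", m.payload);
                    else
                            printf("driver callback, intdata %d\n", m.payload);
            }
            if (nmsgs == 0)
                    printf("spurious downcall\n");  /* the panic above */
            return 0;
    }
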
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
deleted file mode 100644
index 09233fbe7801..000000000000
--- a/arch/tile/kernel/module.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Based on i386 version, copyright (C) 2001 Rusty Russell.
- */
-
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <asm/pgtable.h>
-#include <asm/homecache.h>
-#include <arch/opcode.h>
-
-#ifdef MODULE_DEBUG
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
-/*
- * Allocate some address space in the range MEM_MODULE_START to
- * MEM_MODULE_END and populate it with memory.
- */
-void *module_alloc(unsigned long size)
-{
- struct page **pages;
- pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC);
- struct vm_struct *area;
- int i = 0;
- int npages;
-
- npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (pages == NULL)
- return NULL;
- for (; i < npages; ++i) {
- pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!pages[i])
- goto free_pages;
- }
-
- area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
- if (!area)
- goto free_pages;
- area->nr_pages = npages;
- area->pages = pages;
-
- if (map_vm_area(area, prot_rwx, pages)) {
- vunmap(area->addr);
- goto free_pages;
- }
-
- return area->addr;
- free_pages:
- while (--i >= 0)
- __free_page(pages[i]);
- kfree(pages);
- return NULL;
-}
-
-
-/* Free memory returned from module_alloc */
-void module_memfree(void *module_region)
-{
- vfree(module_region);
-
- /* Globally flush the L1 icache. */
- flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
- 0, 0, 0, NULL, NULL, 0);
-
- /*
- * FIXME: Add module_arch_freeing_init to trim exception
- * table entries.
- */
-}
-
-#ifdef __tilegx__
-/*
- * Validate that the high 16 bits of "value" is just the sign-extension of
- * the low 48 bits.
- */
-static int validate_hw2_last(long value, struct module *me)
-{
- if (((value << 16) >> 16) != value) {
- pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
- me->name, value);
- return 0;
- }
- return 1;
-}
-
-/*
- * Validate that "value" isn't too big to hold in a JumpOff relocation.
- */
-static int validate_jumpoff(long value)
-{
- /* Determine size of jump offset. */
- int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));
-
- /* Check to see if it fits into the relocation slot. */
- long f = get_JumpOff_X1(create_JumpOff_X1(value));
- f = (f << shift) >> shift;
-
- return f == value;
-}
-#endif
-
-int apply_relocate_add(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- unsigned int i;
- Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
- Elf_Sym *sym;
- u64 *location;
- unsigned long value;
-
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /*
- * This is the symbol it is referring to.
- * Note that all undefined symbols have been resolved.
- */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_R_SYM(rel[i].r_info);
- value = sym->st_value + rel[i].r_addend;
-
- switch (ELF_R_TYPE(rel[i].r_info)) {
-
-#ifdef __LITTLE_ENDIAN
-# define MUNGE(func) \
- (*location = ((*location & ~func(-1)) | func(value)))
-#else
-/*
- * Instructions are always little-endian, so when we read them as data,
- * we have to swap them around before and after modifying them.
- */
-# define MUNGE(func) \
- (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
-#endif
-
-#ifndef __tilegx__
- case R_TILE_32:
- *(uint32_t *)location = value;
- break;
- case R_TILE_IMM16_X0_HA:
- value = (value + 0x8000) >> 16;
- /*FALLTHROUGH*/
- case R_TILE_IMM16_X0_LO:
- MUNGE(create_Imm16_X0);
- break;
- case R_TILE_IMM16_X1_HA:
- value = (value + 0x8000) >> 16;
- /*FALLTHROUGH*/
- case R_TILE_IMM16_X1_LO:
- MUNGE(create_Imm16_X1);
- break;
- case R_TILE_JOFFLONG_X1:
- value -= (unsigned long) location; /* pc-relative */
- value = (long) value >> 3; /* count by instrs */
- MUNGE(create_JOffLong_X1);
- break;
-#else
- case R_TILEGX_64:
- *location = value;
- break;
- case R_TILEGX_IMM16_X0_HW2_LAST:
- if (!validate_hw2_last(value, me))
- return -ENOEXEC;
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X0_HW1:
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X0_HW0:
- MUNGE(create_Imm16_X0);
- break;
- case R_TILEGX_IMM16_X1_HW2_LAST:
- if (!validate_hw2_last(value, me))
- return -ENOEXEC;
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X1_HW1:
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X1_HW0:
- MUNGE(create_Imm16_X1);
- break;
- case R_TILEGX_JUMPOFF_X1:
- value -= (unsigned long) location; /* pc-relative */
- value = (long) value >> 3; /* count by instrs */
- if (!validate_jumpoff(value)) {
- pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
- me->name,
- sym->st_value + rel[i].r_addend,
- rel[i].r_offset, location);
- return -ENOEXEC;
- }
- MUNGE(create_JumpOff_X1);
- break;
-#endif
-
-#undef MUNGE
-
- default:
- pr_err("module %s: Unknown relocation: %d\n",
- me->name, (int) ELF_R_TYPE(rel[i].r_info));
- return -ENOEXEC;
- }
- }
- return 0;
-}
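
The MUNGE() macro above relies on the encoders returning, for an argument of -1,
a mask of exactly the bits the operand field occupies, so a field can be
replaced without disturbing the rest of the bundle. A standalone illustration
with an invented 8-bit field at bits [23:16] (the real create_Imm16_X0() and
friends live in <arch/opcode.h>):

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Invented encoder for the demo: an 8-bit immediate at bits [23:16]. */
    static uint64_t create_imm8(long v)
    {
            return ((uint64_t)(v & 0xff)) << 16;
    }

    /* MUNGE(func): func(-1) is a mask of exactly the field's bits, so the
     * field can be cleared and replaced without touching the rest. */
    static uint64_t munge(uint64_t insn, long value)
    {
            return (insn & ~create_imm8(-1)) | create_imm8(value);
    }

    int main(void)
    {
            uint64_t insn = 0x1122334455667788ull;

            printf("%016" PRIx64 "\n", munge(insn, 0xab)); /* 1122334455ab7788 */
            return 0;
    }
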
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
deleted file mode 100644
index 6a1efe5543fa..000000000000
--- a/arch/tile/kernel/pci-dma.c
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-
-/* Generic DMA mapping functions: */
-
-/*
- * Allocate what Linux calls "coherent" memory. On TILEPro this is
- * uncached memory; on TILE-Gx it is hash-for-home memory.
- */
-#ifdef __tilepro__
-#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
-#else
-#define PAGE_HOME_DMA PAGE_HOME_HASH
-#endif
-
-static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- u64 dma_mask = (dev && dev->coherent_dma_mask) ?
- dev->coherent_dma_mask : DMA_BIT_MASK(32);
- int node = dev ? dev_to_node(dev) : 0;
- int order = get_order(size);
- struct page *pg;
- dma_addr_t addr;
-
- gfp |= __GFP_ZERO;
-
- /*
- * If the mask specifies that the memory be in the first 4 GB, then
- * we force the allocation to come from the DMA zone. We also
- * force the node to 0 since that's the only node where the DMA
- * zone isn't empty. If the mask size is smaller than 32 bits, we
- * may still not be able to guarantee a suitable memory address, in
- * which case we will return NULL. But such devices are uncommon.
- */
- if (dma_mask <= DMA_BIT_MASK(32)) {
- gfp |= GFP_DMA32;
- node = 0;
- }
-
- pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
- if (pg == NULL)
- return NULL;
-
- addr = page_to_phys(pg);
- if (addr + size > dma_mask) {
- __homecache_free_pages(pg, order);
- return NULL;
- }
-
- *dma_handle = addr;
-
- return page_address(pg);
-}
-
-/*
- * Free memory that was allocated with tile_dma_alloc_coherent.
- */
-static void tile_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- homecache_free_pages((unsigned long)vaddr, get_order(size));
-}
-
-/*
- * The map routines "map" the specified address range for DMA
- * accesses. The memory belongs to the device after this call is
- * issued, until it is unmapped with dma_unmap_single.
- *
- * We don't need to do any mapping, we just flush the address range
- * out of the cache and return a DMA address.
- *
- * The unmap routines do whatever is necessary before the processor
- * accesses the memory again, and must be called before the driver
- * touches the memory. We can get away with a cache invalidate if we
- * can count on nothing having been touched.
- */
-
-/* Set up a single page for DMA access. */
-static void __dma_prep_page(struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- /*
- * Flush the page from cache if necessary.
- * On tilegx, data is delivered to hash-for-home L3; on tilepro,
- * data is delivered direct to memory.
- *
- * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
- * this to be a "flush" not a "finv" and keep some of the
- * state in cache across the DMA operation, but it doesn't seem
- * worth creating the necessary flush_buffer_xxx() infrastructure.
- */
- int home = page_home(page);
- switch (home) {
- case PAGE_HOME_HASH:
-#ifdef __tilegx__
- return;
-#endif
- break;
- case PAGE_HOME_UNCACHED:
-#ifdef __tilepro__
- return;
-#endif
- break;
- case PAGE_HOME_IMMUTABLE:
- /* Should be going to the device only. */
- BUG_ON(direction == DMA_FROM_DEVICE ||
- direction == DMA_BIDIRECTIONAL);
- return;
- case PAGE_HOME_INCOHERENT:
- /* Incoherent anyway, so no need to work hard here. */
- return;
- default:
- BUG_ON(home < 0 || home >= NR_CPUS);
- break;
- }
- homecache_finv_page(page);
-
-#ifdef DEBUG_ALIGNMENT
- /* Warn if the region isn't cacheline aligned. */
- if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
- pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
- PFN_PHYS(page_to_pfn(page)) + offset, size);
-#endif
-}
-
-/* Make the page ready to be read by the core. */
-static void __dma_complete_page(struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
-#ifdef __tilegx__
- switch (page_home(page)) {
- case PAGE_HOME_HASH:
- /* I/O device delivered data the way the cpu wanted it. */
- break;
- case PAGE_HOME_INCOHERENT:
- /* Incoherent anyway, so no need to work hard here. */
- break;
- case PAGE_HOME_IMMUTABLE:
- /* Extra read-only copies are not a problem. */
- break;
- default:
- /* Flush the bogus hash-for-home I/O entries to memory. */
- homecache_finv_map_page(page, PAGE_HOME_HASH);
- break;
- }
-#endif
-}
-
-static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
- unsigned long offset = dma_addr & (PAGE_SIZE - 1);
- size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));
-
- while (size != 0) {
- __dma_prep_page(page, offset, bytes, direction);
- size -= bytes;
- ++page;
- offset = 0;
- bytes = min((size_t)PAGE_SIZE, size);
- }
-}
-
-static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
- unsigned long offset = dma_addr & (PAGE_SIZE - 1);
- size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));
-
- while (size != 0) {
- __dma_complete_page(page, offset, bytes, direction);
- size -= bytes;
- ++page;
- offset = 0;
- bytes = min((size_t)PAGE_SIZE, size);
- }
-}
-
-static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
-
- WARN_ON(nents == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
- __dma_prep_pa_range(sg->dma_address, sg->length, direction);
- }
-
- return nents;
-}
-
-static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
- __dma_complete_pa_range(sg->dma_address, sg->length,
- direction);
- }
-}
-
-static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- BUG_ON(offset + size > PAGE_SIZE);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_prep_page(page, offset, size, direction);
-
- return page_to_pa(page) + offset;
-}
-
-static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
- dma_address & (PAGE_SIZE - 1), size, direction);
-}
-
-static void tile_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- __dma_complete_pa_range(dma_handle, size, direction);
-}
-
-static void tile_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- __dma_prep_pa_range(dma_handle, size, direction);
-}
-
-static void tile_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_cpu(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static void tile_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_device(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static const struct dma_map_ops tile_default_dma_map_ops = {
- .alloc = tile_dma_alloc_coherent,
- .free = tile_dma_free_coherent,
- .map_page = tile_dma_map_page,
- .unmap_page = tile_dma_unmap_page,
- .map_sg = tile_dma_map_sg,
- .unmap_sg = tile_dma_unmap_sg,
- .sync_single_for_cpu = tile_dma_sync_single_for_cpu,
- .sync_single_for_device = tile_dma_sync_single_for_device,
- .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
- .sync_sg_for_device = tile_dma_sync_sg_for_device,
-};
-
-const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
-EXPORT_SYMBOL(tile_dma_map_ops);
-
-/* Generic PCI DMA mapping functions */
-
-static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- int node = dev_to_node(dev);
- int order = get_order(size);
- struct page *pg;
- dma_addr_t addr;
-
- gfp |= __GFP_ZERO;
-
- pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
- if (pg == NULL)
- return NULL;
-
- addr = page_to_phys(pg);
-
- *dma_handle = addr + get_dma_offset(dev);
-
- return page_address(pg);
-}
-
-/*
- * Free memory that was allocated with tile_pci_dma_alloc_coherent.
- */
-static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- homecache_free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
-
- WARN_ON(nents == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- __dma_prep_pa_range(sg->dma_address, sg->length, direction);
-
- sg->dma_address = sg->dma_address + get_dma_offset(dev);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- }
-
- return nents;
-}
-
-static void tile_pci_dma_unmap_sg(struct device *dev,
- struct scatterlist *sglist, int nents,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- __dma_complete_pa_range(sg->dma_address, sg->length,
- direction);
- }
-}
-
-static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- BUG_ON(offset + size > PAGE_SIZE);
- __dma_prep_page(page, offset, size, direction);
-
- return page_to_pa(page) + offset + get_dma_offset(dev);
-}
-
-static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- dma_address -= get_dma_offset(dev);
-
- __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
- dma_address & (PAGE_SIZE - 1), size, direction);
-}
-
-static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- dma_handle -= get_dma_offset(dev);
-
- __dma_complete_pa_range(dma_handle, size, direction);
-}
-
-static void tile_pci_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction
- direction)
-{
- dma_handle -= get_dma_offset(dev);
-
- __dma_prep_pa_range(dma_handle, size, direction);
-}
-
-static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist,
- int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_cpu(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static void tile_pci_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist,
- int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_device(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static const struct dma_map_ops tile_pci_default_dma_map_ops = {
- .alloc = tile_pci_dma_alloc_coherent,
- .free = tile_pci_dma_free_coherent,
- .map_page = tile_pci_dma_map_page,
- .unmap_page = tile_pci_dma_unmap_page,
- .map_sg = tile_pci_dma_map_sg,
- .unmap_sg = tile_pci_dma_unmap_sg,
- .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
- .sync_single_for_device = tile_pci_dma_sync_single_for_device,
- .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
- .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-};
-
-const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
-EXPORT_SYMBOL(gx_pci_dma_map_ops);
-
-/* PCI DMA mapping functions for legacy PCI devices */
-
-#ifdef CONFIG_SWIOTLB
-static const struct dma_map_ops pci_hybrid_dma_ops = {
- .alloc = swiotlb_alloc,
- .free = swiotlb_free,
- .map_page = tile_pci_dma_map_page,
- .unmap_page = tile_pci_dma_unmap_page,
- .map_sg = tile_pci_dma_map_sg,
- .unmap_sg = tile_pci_dma_unmap_sg,
- .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
- .sync_single_for_device = tile_pci_dma_sync_single_for_device,
- .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
- .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-};
-
-const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &swiotlb_dma_ops;
-const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
-#else
-const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
-#endif
-EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
-EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
-
-int dma_set_mask(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to hybrid, with the consistent memory DMA space limited
- * to 32-bit. For 32-bit capable devices, limit the streaming DMA
- * address range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64) &&
- dma_ops == gx_legacy_pci_dma_map_ops)
- set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to full capability for both streams and consistent
- * memory access. For 32-bit capable devices, limit the consistent
- * memory DMA range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64))
- set_dma_ops(dev, gx_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dma_supported(dev, mask))
- return -EIO;
- dev->coherent_dma_mask = mask;
- return 0;
-}
-EXPORT_SYMBOL(dma_set_coherent_mask);
-#endif
-
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-/*
- * The generic dma_get_required_mask() uses the highest physical address
- * (max_pfn) to hint to PCI drivers whether a 32-bit or 64-bit DMA
- * configuration is appropriate.  Since TILE-Gx has an I/O TLB/MMU,
- * DMA is not limited by the physical memory space and can use the
- * full 64-bit PCI address space, so we always return the 64-bit
- * DMA mask here and let any device with that capability use 64-bit
- * DMA.  The device driver still has the option to use 32-bit DMA if
- * the device is not capable of 64-bit DMA.
- */
-u64 dma_get_required_mask(struct device *dev)
-{
- return DMA_BIT_MASK(64);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
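
From a driver's point of view, the ops promotion above is driven by ordinary
mask negotiation. A hedged sketch of a hypothetical probe-path helper
exercising it (the function name and fallback policy are illustrative, not part
of the removed code):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/pci.h>

    /* Hypothetical probe helper: the 64-bit/32-bit negotiation below is
     * what triggers the dma_ops promotion implemented above. */
    static int sample_setup_dma(struct pci_dev *pdev)
    {
            void *buf;
            dma_addr_t handle;

            /* Prefer 64-bit streaming DMA; fall back to 32-bit. */
            if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
                dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                    return -EIO;
            if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
                    return -EIO;

            buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
            return 0;
    }
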
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
deleted file mode 100644
index bbf81579b1f8..000000000000
--- a/arch/tile/kernel/pci.c
+++ /dev/null
@@ -1,592 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/export.h>
-
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/byteorder.h>
-#include <asm/hv_driver.h>
-#include <hv/drv_pcie_rc_intf.h>
-
-
-/*
- * Initialization flow and process
- * -------------------------------
- *
- * This file contains the routines to search for PCI buses,
- * enumerate the buses, and configure any attached devices.
- *
- * There are two entry points here:
- * 1) tile_pci_init
- * This sets up the pci_controller structs, and opens the
- * FDs to the hypervisor. This is called from setup_arch() early
- * in the boot process.
- * 2) pcibios_init
- * This probes the PCI bus(es) for any attached hardware. It's
- * called by subsys_initcall. All of the real work is done by the
- * generic Linux PCI layer.
- *
- */
-
-static int pci_probe = 1;
-
-/*
- * This flag tells whether the platform is TILEmpower, which needs
- * special configuration for the PLX switch chip.
- */
-int __ro_after_init tile_plx_gen1;
-
-static struct pci_controller controllers[TILE_NUM_PCIE];
-static int num_controllers;
-static int pci_scan_flags[TILE_NUM_PCIE];
-
-static struct pci_ops tile_cfg_ops;
-
-
-/*
- * Open a FD to the hypervisor PCI device.
- *
- * controller_id is the controller number; config_type is 0 or 1 for
- * config0 or config1 operations.
- */
-static int tile_pcie_open(int controller_id, int config_type)
-{
- char filename[32];
- int fd;
-
- sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
-
- fd = hv_dev_open((HV_VirtAddr)filename, 0);
-
- return fd;
-}
-
-
-/*
- * Get the IRQ numbers from the HV and set up the handlers for them.
- */
-static int tile_init_irqs(int controller_id, struct pci_controller *controller)
-{
- char filename[32];
- int fd;
- int ret;
- int x;
- struct pcie_rc_config rc_config;
-
- sprintf(filename, "pcie/%d/ctl", controller_id);
- fd = hv_dev_open((HV_VirtAddr)filename, 0);
- if (fd < 0) {
- pr_err("PCI: hv_dev_open(%s) failed\n", filename);
- return -1;
- }
- ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
- sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
- hv_dev_close(fd);
- if (ret != sizeof(rc_config)) {
- pr_err("PCI: wanted %zd bytes, got %d\n",
- sizeof(rc_config), ret);
- return -1;
- }
- /* Record irq_base so that we can map INTx to IRQ # later. */
- controller->irq_base = rc_config.intr;
-
- for (x = 0; x < 4; x++)
- tile_irq_activate(rc_config.intr + x,
- TILE_IRQ_HW_CLEAR);
-
- if (rc_config.plx_gen1)
- controller->plx_gen1 = 1;
-
- return 0;
-}
-
-/*
- * First initialization entry point, called from setup_arch().
- *
- * Find valid controllers and fill in pci_controller structs for each
- * of them.
- *
- * Returns the number of controllers discovered.
- */
-int __init tile_pci_init(void)
-{
- int i;
-
- if (!pci_probe) {
- pr_info("PCI: disabled by boot argument\n");
- return 0;
- }
-
- pr_info("PCI: Searching for controllers...\n");
-
-	/* Re-init the number of PCIe controllers to support hot-plug. */
- num_controllers = 0;
-
- /* Do any configuration we need before using the PCIe */
-
- for (i = 0; i < TILE_NUM_PCIE; i++) {
- /*
-			 * Only probe controllers that pcibios_init() has not
-			 * already scanned, so that PCIe hot-plug can rescan.
- */
- if (pci_scan_flags[i] == 0) {
- int hv_cfg_fd0 = -1;
- int hv_cfg_fd1 = -1;
- int hv_mem_fd = -1;
- char name[32];
- struct pci_controller *controller;
-
- /*
- * Open the fd to the HV. If it fails then this
- * device doesn't exist.
- */
- hv_cfg_fd0 = tile_pcie_open(i, 0);
- if (hv_cfg_fd0 < 0)
- continue;
- hv_cfg_fd1 = tile_pcie_open(i, 1);
- if (hv_cfg_fd1 < 0) {
- pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
- i);
- goto err_cont;
- }
-
- sprintf(name, "pcie/%d/mem", i);
- hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
- if (hv_mem_fd < 0) {
- pr_err("PCI: Could not open mem fd to HV!\n");
- goto err_cont;
- }
-
- pr_info("PCI: Found PCI controller #%d\n", i);
-
- controller = &controllers[i];
-
- controller->index = i;
- controller->hv_cfg_fd[0] = hv_cfg_fd0;
- controller->hv_cfg_fd[1] = hv_cfg_fd1;
- controller->hv_mem_fd = hv_mem_fd;
- controller->last_busno = 0xff;
- controller->ops = &tile_cfg_ops;
-
- num_controllers++;
- continue;
-
-err_cont:
- if (hv_cfg_fd0 >= 0)
- hv_dev_close(hv_cfg_fd0);
- if (hv_cfg_fd1 >= 0)
- hv_dev_close(hv_cfg_fd1);
- if (hv_mem_fd >= 0)
- hv_dev_close(hv_mem_fd);
- continue;
- }
- }
-
- /*
- * Before using the PCIe, see if we need to do any platform-specific
- * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
- */
- for (i = 0; i < num_controllers; i++) {
- struct pci_controller *controller = &controllers[i];
-
- if (controller->plx_gen1)
- tile_plx_gen1 = 1;
- }
-
- return num_controllers;
-}
-
-/*
- * (pin - 1) converts from the PCI standard's [1:4] convention to
- * a normal [0:3] range.
- */
-static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct pci_controller *controller =
- (struct pci_controller *)dev->sysdata;
- return (pin - 1) + controller->irq_base;
-}
-
-
-static void fixup_read_and_payload_sizes(void)
-{
- struct pci_dev *dev = NULL;
- int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
- int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
- u16 new_values;
-
- /* Scan for the smallest maximum payload size. */
- for_each_pci_dev(dev) {
- if (!pci_is_pcie(dev))
- continue;
-
- if (dev->pcie_mpss < smallest_max_payload)
- smallest_max_payload = dev->pcie_mpss;
- }
-
- /* Now, set the max_payload_size for all devices to that value. */
- new_values = max_read_size | (smallest_max_payload << 5);
- for_each_pci_dev(dev)
- pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
- new_values);
-}
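
For illustration, the new_values word above packs the two standard PCI
Express Device Control fields, each of which encodes a size of
128 << code bytes (payload size in bits 7:5, hence the << 5; read
request size in bits 14:12). A worked decode, assuming
smallest_max_payload stays at its initial value of 0x1:

	smallest_max_payload = 0x1          -> 128 << 1 = 256-byte payload
	PCI_EXP_DEVCTL_READRQ_512B = 0x2000 -> 128 << 2 = 512-byte reads
	new_values = 0x2000 | (0x1 << 5)    =  0x2020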
-
-
-/*
- * Second PCI initialization entry point, called by subsys_initcall.
- *
- * The controllers have been set up by the time we get here, by a call to
- * tile_pci_init.
- */
-int __init pcibios_init(void)
-{
- struct pci_host_bridge *bridge;
- int i;
-
- pr_info("PCI: Probing PCI hardware\n");
-
- /*
- * Delay a bit in case devices aren't ready. Some devices are
- * known to require at least 20ms here, but we use a more
- * conservative value.
- */
- msleep(250);
-
- /* Scan all of the recorded PCI controllers. */
- for (i = 0; i < TILE_NUM_PCIE; i++) {
-		/*
-		 * Do the real pcibios init ops only if the controller was
-		 * initialized successfully by tile_pci_init() and has not
-		 * yet been initialized by pcibios_init(), to support PCIe
-		 * hot-plug.
-		 */
- if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
- struct pci_controller *controller = &controllers[i];
- struct pci_bus *bus;
- LIST_HEAD(resources);
-
- if (tile_init_irqs(i, controller)) {
- pr_err("PCI: Could not initialize IRQs\n");
- continue;
- }
-
- pr_info("PCI: initializing controller #%d\n", i);
-
- pci_add_resource(&resources, &ioport_resource);
- pci_add_resource(&resources, &iomem_resource);
-
- bridge = pci_alloc_host_bridge(0);
- if (!bridge)
- break;
-
- list_splice_init(&resources, &bridge->windows);
- bridge->dev.parent = NULL;
- bridge->sysdata = controller;
- bridge->busnr = 0;
- bridge->ops = controller->ops;
- bridge->swizzle_irq = pci_common_swizzle;
- bridge->map_irq = tile_map_irq;
-
- pci_scan_root_bus_bridge(bridge);
- bus = bridge->bus;
- controller->root_bus = bus;
- controller->last_busno = bus->busn_res.end;
- }
- }
-
- /*
- * This comes from the generic Linux PCI driver.
- *
- * It allocates all of the resources (I/O memory, etc)
- * associated with the devices read in above.
- */
- pci_assign_unassigned_resources();
-
- /* Configure the max_read_size and max_payload_size values. */
- fixup_read_and_payload_sizes();
-
- /* Record the I/O resources in the PCI controller structure. */
- for (i = 0; i < TILE_NUM_PCIE; i++) {
-		/*
-		 * Do the real pcibios init ops only if the controller was
-		 * initialized successfully by tile_pci_init() and has not
-		 * yet been initialized by pcibios_init(), to support PCIe
-		 * hot-plug.
-		 */
- if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
- struct pci_bus *root_bus = controllers[i].root_bus;
- struct pci_bus *next_bus;
- struct pci_dev *dev;
-
- pci_bus_add_devices(root_bus);
-
- list_for_each_entry(dev, &root_bus->devices, bus_list) {
- /*
-			 * Find the PCI host controller, i.e. the first
-			 * bridge.
- */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
- (PCI_SLOT(dev->devfn) == 0)) {
- next_bus = dev->subordinate;
- controllers[i].mem_resources[0] =
- *next_bus->resource[0];
- controllers[i].mem_resources[1] =
- *next_bus->resource[1];
- controllers[i].mem_resources[2] =
- *next_bus->resource[2];
-
- /* Setup flags. */
- pci_scan_flags[i] = 1;
-
- break;
- }
- }
- }
- }
-
- return 0;
-}
-subsys_initcall(pcibios_init);
-
-void pcibios_set_master(struct pci_dev *dev)
-{
- /* No special bus mastering setup handling. */
-}
-
-/* Process any "pci=" kernel boot arguments. */
-char *__init pcibios_setup(char *str)
-{
- if (!strcmp(str, "off")) {
- pci_probe = 0;
- return NULL;
- }
- return str;
-}
-
-/*
- * Enable memory and/or address decoding, as appropriate, for the
- * device described by the 'dev' struct.
- *
- * This is called from the generic PCI layer, and can be called
- * for bridges or endpoints.
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- u8 header_type;
- int i;
- struct resource *r;
-
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * For bridges, we enable both memory and I/O decoding
-		 * in all cases.
- */
- cmd |= PCI_COMMAND_IO;
- cmd |= PCI_COMMAND_MEMORY;
- } else {
- /*
- * For endpoints, we enable memory and/or I/O decoding
-		 * only if they have a resource of that type.
- */
- for (i = 0; i < 6; i++) {
- r = &dev->resource[i];
- if (r->flags & IORESOURCE_UNSET) {
- pr_err("PCI: Device %s not available because of resource collisions\n",
- pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- }
-
- /*
- * We only write the command if it changed.
- */
- if (cmd != old_cmd)
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- return 0;
-}
-
-/****************************************************************
- *
- * Tile PCI config space read/write routines
- *
- ****************************************************************/
-
-/*
- * These are the normal read and write ops; they are expanded with
- * macros from pci_bus_read_config_byte() etc.
- *
- * devfn is the combined PCI slot & function.
- *
- * offset is in bytes, from the start of config space for the
- * specified bus & slot.
- */
-
-static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 *val)
-{
- struct pci_controller *controller = bus->sysdata;
- int busnum = bus->number & 0xff;
- int slot = (devfn >> 3) & 0x1f;
- int function = devfn & 0x7;
- u32 addr;
- int config_mode = 1;
-
- /*
- * There is no bridge between the Tile and bus 0, so we
- * use config0 to talk to bus 0.
- *
- * If we're talking to a bus other than zero then we
- * must have found a bridge.
- */
- if (busnum == 0) {
- /*
- * We fake an empty slot for (busnum == 0) && (slot > 0),
- * since there is only one slot on bus 0.
- */
- if (slot) {
- *val = 0xFFFFFFFF;
- return 0;
- }
- config_mode = 0;
- }
-
- addr = busnum << 20; /* Bus in 27:20 */
- addr |= slot << 15; /* Slot (device) in 19:15 */
-	addr |= function << 12;		/* Function in 14:12 */
-	addr |= (offset & 0xFFF);	/* Byte address in 11:0 */
-
- return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
- (HV_VirtAddr)(val), size, addr);
-}
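
As a worked example of the address packing above, with illustrative
values of bus 1, slot 2, function 3 and offset 0x10:

	addr = (1 << 20) | (2 << 15) | (3 << 12) | 0x10;	/* = 0x113010 */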
-
-
-/*
- * See tile_cfg_read() for relevant comments.
- * Note that "val" is the value to write, not a pointer to that value.
- */
-static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 val)
-{
- struct pci_controller *controller = bus->sysdata;
- int busnum = bus->number & 0xff;
- int slot = (devfn >> 3) & 0x1f;
- int function = devfn & 0x7;
- u32 addr;
- int config_mode = 1;
- HV_VirtAddr valp = (HV_VirtAddr)&val;
-
- /*
- * For bus 0 slot 0 we use config 0 accesses.
- */
- if (busnum == 0) {
- /*
- * We fake an empty slot for (busnum == 0) && (slot > 0),
- * since there is only one slot on bus 0.
- */
- if (slot)
- return 0;
- config_mode = 0;
- }
-
- addr = busnum << 20; /* Bus in 27:20 */
- addr |= slot << 15; /* Slot (device) in 19:15 */
-	addr |= function << 12;		/* Function in 14:12 */
-	addr |= (offset & 0xFFF);	/* Byte address in 11:0 */
-
-#ifdef __BIG_ENDIAN
- /* Point to the correct part of the 32-bit "val". */
- valp += 4 - size;
-#endif
-
- return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
- valp, size, addr);
-}
-
-
-static struct pci_ops tile_cfg_ops = {
- .read = tile_cfg_read,
- .write = tile_cfg_write,
-};
-
-
-/*
- * In the following, each PCI controller's mem_resources[1]
- * represents its (non-prefetchable) PCI memory resource.
- * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
- * prefetchable PCI memory resources, respectively.
- * For more details, see pci_setup_bridge() in setup-bus.c.
- * By comparing the target PCI memory address against the
- * end address of controller 0, we can determine the controller
- * that should accept the PCI memory access.
- */
-#define TILE_READ(size, type) \
-type _tile_read##size(unsigned long addr) \
-{ \
- type val; \
- int idx = 0; \
- if (addr > controllers[0].mem_resources[1].end && \
- addr > controllers[0].mem_resources[2].end) \
- idx = 1; \
- if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
- (HV_VirtAddr)(&val), sizeof(type), addr)) \
- pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
- sizeof(type), addr); \
- return val; \
-} \
-EXPORT_SYMBOL(_tile_read##size)
-
-TILE_READ(b, u8);
-TILE_READ(w, u16);
-TILE_READ(l, u32);
-TILE_READ(q, u64);
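
For reference, each TILE_READ() instantiation above expands mechanically
to a small accessor; TILE_READ(b, u8), for example, generates:

	u8 _tile_readb(unsigned long addr)
	{
		u8 val;
		int idx = 0;
		if (addr > controllers[0].mem_resources[1].end &&
		    addr > controllers[0].mem_resources[2].end)
			idx = 1;
		if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,
				 (HV_VirtAddr)(&val), sizeof(u8), addr))
			pr_err("PCI: read %zd bytes at 0x%lX failed\n",
			       sizeof(u8), addr);
		return val;
	}
	EXPORT_SYMBOL(_tile_readb);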
-
-#define TILE_WRITE(size, type) \
-void _tile_write##size(type val, unsigned long addr) \
-{ \
- int idx = 0; \
- if (addr > controllers[0].mem_resources[1].end && \
- addr > controllers[0].mem_resources[2].end) \
- idx = 1; \
- if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
- (HV_VirtAddr)(&val), sizeof(type), addr)) \
- pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
- sizeof(type), addr); \
-} \
-EXPORT_SYMBOL(_tile_write##size)
-
-TILE_WRITE(b, u8);
-TILE_WRITE(w, u16);
-TILE_WRITE(l, u32);
-TILE_WRITE(q, u64);
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
deleted file mode 100644
index 9aa238ac7b35..000000000000
--- a/arch/tile/kernel/pci_gx.c
+++ /dev/null
@@ -1,1592 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/mmzone.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/msi.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/ctype.h>
-
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/byteorder.h>
-
-#include <gxio/iorpc_globals.h>
-#include <gxio/kiorpc.h>
-#include <gxio/trio.h>
-#include <gxio/iorpc_trio.h>
-#include <hv/drv_trio_intf.h>
-
-#include <arch/sim.h>
-
-/*
- * This file contains the routines to search for PCI buses,
- * enumerate the buses, and configure any attached devices.
- */
-
-#define DEBUG_PCI_CFG 0
-
-#if DEBUG_PCI_CFG
-#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
- pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
- size, val, bus, dev, func, offset & 0xFFF);
-#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
- pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
- size, val, bus, dev, func, offset & 0xFFF);
-#else
-#define TRACE_CFG_WR(...)
-#define TRACE_CFG_RD(...)
-#endif
-
-static int pci_probe = 1;
-
-/* Information on the PCIe RC ports configuration. */
-static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
-
-/*
- * On some platforms with one or more Gx endpoint ports, we need to
- * delay the PCIe RC port probe for a few seconds to work around
- * a HW PCIe link-training bug. The exact delay is specified with
- * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
- * where T is the TRIO instance number, P is the port number and S is
- * the delay in seconds. If the argument is specified, but the delay is
- * not provided, the value will be DEFAULT_RC_DELAY.
- */
-static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
-
-/* Default number of seconds that the PCIe RC port probe can be delayed. */
-#define DEFAULT_RC_DELAY 10
-
-/* The PCI I/O space size in each PCI domain. */
-#define IO_SPACE_SIZE 0x10000
-
-/* Provide shorter versions of some very long constant names. */
-#define AUTO_CONFIG_RC \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
-#define AUTO_CONFIG_RC_G1 \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
-#define AUTO_CONFIG_EP \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
-#define AUTO_CONFIG_EP_G1 \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1
-
-/* Array of the PCIe ports configuration info obtained from the BIB. */
-struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];
-
-/* Number of configured TRIO instances. */
-int num_trio_shims;
-
-/* All drivers share the TRIO contexts defined here. */
-gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
-
-/* Pointer to an array of PCIe RC controllers. */
-struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
-int num_rc_controllers;
-
-static struct pci_ops tile_cfg_ops;
-
-/* Mask of CPUs that should receive PCIe interrupts. */
-static struct cpumask intr_cpus_map;
-
-/*
- * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
- * For now, we simply send interrupts to non-dataplane CPUs.
- * We may implement methods to allow the user to specify the target CPUs,
- * e.g. via boot arguments.
- */
-static int tile_irq_cpu(int irq)
-{
- unsigned int count;
- int i = 0;
- int cpu;
-
- count = cpumask_weight(&intr_cpus_map);
- if (unlikely(count == 0)) {
- pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
- return irq % (smp_height * smp_width);
- }
-
- count = irq % count;
- for_each_cpu(cpu, &intr_cpus_map) {
- if (i++ == count)
- break;
- }
- return cpu;
-}
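
A quick worked example of the round-robin above, with a hypothetical
mask: if intr_cpus_map contains CPUs {1, 2, 5, 6}, then for irq = 10
the weight is 4, count becomes 10 % 4 = 2, and the loop breaks at the
third CPU in mask order, returning CPU 5.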
-
-/* Open a file descriptor to the TRIO shim. */
-static int tile_pcie_open(int trio_index)
-{
- gxio_trio_context_t *context = &trio_contexts[trio_index];
- int ret;
- int mac;
-
- /* This opens a file descriptor to the TRIO shim. */
- ret = gxio_trio_init(context, trio_index);
- if (ret < 0)
- goto gxio_trio_init_failure;
-
- /* Allocate an ASID for the kernel. */
- ret = gxio_trio_alloc_asids(context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
- trio_index);
- goto asid_alloc_failure;
- }
-
- context->asid = ret;
-
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
- /*
- * Alloc a PIO region for config access, shared by all MACs per TRIO.
-	 * This shouldn't fail since the kernel is supposed to be the first
- * client of the TRIO's PIO regions.
- */
- ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
- trio_index);
- goto pio_alloc_failure;
- }
-
- context->pio_cfg_index = ret;
-
- /*
- * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
- * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
- */
- ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
- 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
- if (ret < 0) {
- pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
- trio_index);
- goto pio_alloc_failure;
- }
-#endif
-
- /* Get the properties of the PCIe ports on this TRIO instance. */
- ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
- if (ret < 0) {
- pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
- ret, trio_index);
- goto get_port_property_failure;
- }
-
- context->mmio_base_mac =
- iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
- if (context->mmio_base_mac == NULL) {
- pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
- ret, trio_index);
- ret = -ENOMEM;
-
- goto trio_mmio_mapping_failure;
- }
-
-	/* Check the port strap state, which will override the BIB setting. */
- for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
- TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
- unsigned int reg_offset;
-
- /* Ignore ports that are not specified in the BIB. */
- if (!pcie_ports[trio_index].ports[mac].allow_rc &&
- !pcie_ports[trio_index].ports[mac].allow_ep)
- continue;
-
- reg_offset =
- (TRIO_PCIE_INTFC_PORT_CONFIG <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- port_config.word =
- __gxio_mmio_read(context->mmio_base_mac + reg_offset);
-
- if (port_config.strap_state != AUTO_CONFIG_RC &&
- port_config.strap_state != AUTO_CONFIG_RC_G1) {
- /*
- * If this is really intended to be an EP port, record
- * it so that the endpoint driver will know about it.
- */
- if (port_config.strap_state == AUTO_CONFIG_EP ||
- port_config.strap_state == AUTO_CONFIG_EP_G1)
- pcie_ports[trio_index].ports[mac].allow_ep = 1;
- }
- }
-
- return ret;
-
-trio_mmio_mapping_failure:
-get_port_property_failure:
-asid_alloc_failure:
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
-pio_alloc_failure:
-#endif
- hv_dev_close(context->fd);
-gxio_trio_init_failure:
- context->fd = -1;
-
- return ret;
-}
-
-static int __init tile_trio_init(void)
-{
- int i;
-
- /* We loop over all the TRIO shims. */
- for (i = 0; i < TILEGX_NUM_TRIO; i++) {
- if (tile_pcie_open(i) < 0)
- continue;
- num_trio_shims++;
- }
-
- return 0;
-}
-postcore_initcall(tile_trio_init);
-
-static void tilegx_legacy_irq_ack(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
-}
-
-static void tilegx_legacy_irq_mask(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
-}
-
-static void tilegx_legacy_irq_unmask(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
-}
-
-static struct irq_chip tilegx_legacy_irq_chip = {
- .name = "tilegx_legacy_irq",
- .irq_ack = tilegx_legacy_irq_ack,
- .irq_mask = tilegx_legacy_irq_mask,
- .irq_unmask = tilegx_legacy_irq_unmask,
-
- /* TBD: support set_affinity. */
-};
-
-/*
- * This is a wrapper around the kernel level-triggered interrupt
- * handler handle_level_irq() for PCI legacy interrupts. The TRIO
- * is configured such that only INTx Assert interrupts are proxied
- * to Linux; this wrapper calls handle_level_irq() and then clears
- * the MAC INTx Assert status bit associated with the interrupt.
- */
-static void trio_handle_level_irq(struct irq_desc *desc)
-{
- struct pci_controller *controller = irq_desc_get_handler_data(desc);
- gxio_trio_context_t *trio_context = controller->trio;
- uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
- int mac = controller->mac;
- unsigned int reg_offset;
- uint64_t level_mask;
-
- handle_level_irq(desc);
-
- /*
- * Clear the INTx Level status, otherwise future interrupts are
- * not sent.
- */
- reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;
-
- __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
-}
-
-/*
- * Create kernel irqs and set up the handlers for the legacy interrupts.
- * Also do some minimal initialization for MSI support.
- */
-static int tile_init_irqs(struct pci_controller *controller)
-{
- int i;
- int j;
- int irq;
- int result;
-
- cpumask_copy(&intr_cpus_map, cpu_online_mask);
-
-
- for (i = 0; i < 4; i++) {
- gxio_trio_context_t *context = controller->trio;
- int cpu;
-
- /* Ask the kernel to allocate an IRQ. */
- irq = irq_alloc_hwirq(-1);
- if (!irq) {
- pr_err("PCI: no free irq vectors, failed for %d\n", i);
- goto free_irqs;
- }
- controller->irq_intx_table[i] = irq;
-
- /* Distribute the 4 IRQs to different tiles. */
- cpu = tile_irq_cpu(irq);
-
- /* Configure the TRIO intr binding for this IRQ. */
- result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
- cpu_y(cpu), KERNEL_PL,
- irq, controller->mac, i);
- if (result < 0) {
- pr_err("PCI: MAC intx config failed for %d\n", i);
-
- goto free_irqs;
- }
-
- /* Register the IRQ handler with the kernel. */
- irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
- trio_handle_level_irq);
- irq_set_chip_data(irq, (void *)(uint64_t)i);
- irq_set_handler_data(irq, controller);
- }
-
- return 0;
-
-free_irqs:
- for (j = 0; j < i; j++)
- irq_free_hwirq(controller->irq_intx_table[j]);
-
- return -1;
-}
-
-/*
- * Return 1 if the port is strapped to operate in RC mode.
- */
-static int
-strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
-{
- TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
- unsigned int reg_offset;
-
- /* Check the port configuration. */
- reg_offset =
- (TRIO_PCIE_INTFC_PORT_CONFIG <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
- port_config.word =
- __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);
-
- if (port_config.strap_state == AUTO_CONFIG_RC ||
- port_config.strap_state == AUTO_CONFIG_RC_G1)
- return 1;
- else
- return 0;
-}
-
-/*
- * Find valid controllers and fill in pci_controller structs for each
- * of them.
- *
- * Return the number of controllers discovered.
- */
-int __init tile_pci_init(void)
-{
- int ctl_index = 0;
- int i, j;
-
- if (!pci_probe) {
- pr_info("PCI: disabled by boot argument\n");
- return 0;
- }
-
- pr_info("PCI: Searching for controllers...\n");
-
- if (num_trio_shims == 0 || sim_is_simulator())
- return 0;
-
- /*
- * Now determine which PCIe ports are configured to operate in RC
- * mode. There is a difference in the port configuration capability
- * between the Gx36 and Gx72 devices.
- *
- * The Gx36 has configuration capability for each of the 3 PCIe
- * interfaces (disable, auto endpoint, auto RC, etc.).
- * On the Gx72, you can only select one of the 3 PCIe interfaces per
- * TRIO to train automatically. Further, the allowable training modes
- * are reduced to four options (auto endpoint, auto RC, stream x1,
- * stream x4).
- *
-	 * A Gx36 port operates in RC mode only if the Board Information
-	 * Block allows it to be in RC mode and the hardware strapping
-	 * pins are set to RC mode.
- *
- * For Gx72 ports, the port will operate in RC mode if either of the
- * following is true:
- * 1. It is allowed to be in RC mode by the Board Information Block,
- * and the BIB doesn't allow the EP mode.
- * 2. It is allowed to be in either the RC or the EP mode by the BIB,
- * and the hardware strapping pin is set to RC mode.
- */
- for (i = 0; i < TILEGX_NUM_TRIO; i++) {
- gxio_trio_context_t *context = &trio_contexts[i];
-
- if (context->fd < 0)
- continue;
-
- for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
- int is_rc = 0;
-
- if (pcie_ports[i].is_gx72 &&
- pcie_ports[i].ports[j].allow_rc) {
- if (!pcie_ports[i].ports[j].allow_ep ||
- strapped_for_rc(context, j))
- is_rc = 1;
- } else if (pcie_ports[i].ports[j].allow_rc &&
- strapped_for_rc(context, j)) {
- is_rc = 1;
- }
- if (is_rc) {
- pcie_rc[i][j] = 1;
- num_rc_controllers++;
- }
- }
- }
-
- /* Return if no PCIe ports are configured to operate in RC mode. */
- if (num_rc_controllers == 0)
- return 0;
-
- /* Set the TRIO pointer and MAC index for each PCIe RC port. */
- for (i = 0; i < TILEGX_NUM_TRIO; i++) {
- for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
- if (pcie_rc[i][j]) {
- pci_controllers[ctl_index].trio =
- &trio_contexts[i];
- pci_controllers[ctl_index].mac = j;
- pci_controllers[ctl_index].trio_index = i;
- ctl_index++;
- if (ctl_index == num_rc_controllers)
- goto out;
- }
- }
- }
-
-out:
- /* Configure each PCIe RC port. */
- for (i = 0; i < num_rc_controllers; i++) {
-
- /* Configure the PCIe MAC to run in RC mode. */
- struct pci_controller *controller = &pci_controllers[i];
-
- controller->index = i;
- controller->ops = &tile_cfg_ops;
-
- controller->io_space.start = PCIBIOS_MIN_IO +
- (i * IO_SPACE_SIZE);
- controller->io_space.end = controller->io_space.start +
- IO_SPACE_SIZE - 1;
- BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
- controller->io_space.flags = IORESOURCE_IO;
- snprintf(controller->io_space_name,
- sizeof(controller->io_space_name),
- "PCI I/O domain %d", i);
- controller->io_space.name = controller->io_space_name;
-
- /*
- * The PCI memory resource is located above the PA space.
- * For every host bridge, the BAR window or the MMIO aperture
-		 * is in the range [3GB, 4GB - 1] of a 4GB space beyond the
- * PA space.
- */
- controller->mem_offset = TILE_PCI_MEM_START +
- (i * TILE_PCI_BAR_WINDOW_TOP);
- controller->mem_space.start = controller->mem_offset +
- TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
- controller->mem_space.end = controller->mem_offset +
- TILE_PCI_BAR_WINDOW_TOP - 1;
- controller->mem_space.flags = IORESOURCE_MEM;
- snprintf(controller->mem_space_name,
- sizeof(controller->mem_space_name),
- "PCI mem domain %d", i);
- controller->mem_space.name = controller->mem_space_name;
- }
-
- return num_rc_controllers;
-}
-
-/*
- * (pin - 1) converts from the PCI standard's [1:4] convention to
- * a normal [0:3] range.
- */
-static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
-{
- struct pci_controller *controller =
- (struct pci_controller *)dev->sysdata;
- return controller->irq_intx_table[pin - 1];
-}
-
-static void fixup_read_and_payload_sizes(struct pci_controller *controller)
-{
- gxio_trio_context_t *trio_context = controller->trio;
- struct pci_bus *root_bus = controller->root_bus;
- TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
- TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
- unsigned int reg_offset;
- struct pci_bus *child;
- int mac;
- int err;
-
- mac = controller->mac;
-
- /* Set our max read request size to be 4KB. */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_CONTROL <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
- dev_control.max_read_req_sz = 5;
- __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
- dev_control.word);
-
- /*
- * Set the max payload size supported by this Gx PCIe MAC.
- * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
- * experiments have shown that setting MPS to 256 yields the
- * best performance.
- */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_CAP <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
- rc_dev_cap.mps_sup = 1;
- __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
- rc_dev_cap.word);
-
- /* Configure PCI Express MPS setting. */
- list_for_each_entry(child, &root_bus->children, node)
- pcie_bus_configure_settings(child);
-
- /*
-	 * Set the mac_config register in TRIO based on the MPS/MRS of the link.
- */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_CONTROL <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
-
- err = gxio_trio_set_mps_mrs(trio_context,
- dev_control.max_payload_size,
- dev_control.max_read_req_sz,
- mac);
- if (err < 0) {
- pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
- mac, controller->trio_index);
- }
-}
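
Both register fields written above use the standard PCIe encoding of
128 << code bytes: max_read_req_sz = 5 selects the 4KB maximum read
request size mentioned in the comment, and the mps_sup code of 1
selects a 256-byte maximum payload.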
-
-static int setup_pcie_rc_delay(char *str)
-{
- unsigned long delay = 0;
- unsigned long trio_index;
- unsigned long mac;
-
- if (str == NULL || !isdigit(*str))
- return -EINVAL;
- trio_index = simple_strtoul(str, (char **)&str, 10);
- if (trio_index >= TILEGX_NUM_TRIO)
- return -EINVAL;
-
- if (*str != ',')
- return -EINVAL;
-
- str++;
- if (!isdigit(*str))
- return -EINVAL;
- mac = simple_strtoul(str, (char **)&str, 10);
- if (mac >= TILEGX_TRIO_PCIES)
- return -EINVAL;
-
- if (*str != '\0') {
- if (*str != ',')
- return -EINVAL;
-
- str++;
- if (!isdigit(*str))
- return -EINVAL;
- delay = simple_strtoul(str, (char **)&str, 10);
- }
-
- rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
- return 0;
-}
-early_param("pcie_rc_delay", setup_pcie_rc_delay);
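
Combining the parser with the format described near the top of this
file, a hypothetical boot argument of

	pcie_rc_delay=0,2,15

delays the RC port probe of MAC 2 on TRIO 0 by 15 seconds, while

	pcie_rc_delay=0,2

omits the delay field and falls back to DEFAULT_RC_DELAY (10 seconds).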
-
-/* PCI initialization entry point, called by subsys_initcall. */
-int __init pcibios_init(void)
-{
- resource_size_t offset;
- LIST_HEAD(resources);
- int next_busno;
- struct pci_host_bridge *bridge;
- int i;
-
- tile_pci_init();
-
- if (num_rc_controllers == 0)
- return 0;
-
- /*
- * Delay a bit in case devices aren't ready. Some devices are
- * known to require at least 20ms here, but we use a more
- * conservative value.
- */
- msleep(250);
-
- /* Scan all of the recorded PCI controllers. */
- for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
- struct pci_controller *controller = &pci_controllers[i];
- gxio_trio_context_t *trio_context = controller->trio;
- TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
- TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
- struct pci_bus *bus;
- unsigned int reg_offset;
- unsigned int class_code_revision;
- int trio_index;
- int mac;
- int ret;
-
- if (trio_context->fd < 0)
- continue;
-
- trio_index = controller->trio_index;
- mac = controller->mac;
-
- /*
- * Check for PCIe link-up status to decide if we need
- * to force the link to come up.
- */
- reg_offset =
- (TRIO_PCIE_INTFC_PORT_STATUS <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- port_status.word =
- __gxio_mmio_read(trio_context->mmio_base_mac +
- reg_offset);
- if (!port_status.dl_up) {
- if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
- rc_delay[trio_index][mac], mac,
- trio_index);
- msleep(rc_delay[trio_index][mac] * 1000);
- }
- ret = gxio_trio_force_rc_link_up(trio_context, mac);
- if (ret < 0)
- pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
- mac, trio_index);
- }
-
- pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
- i, trio_index, controller->mac);
-
- /* Delay the bus probe if needed. */
- if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
- rc_delay[trio_index][mac], mac, trio_index);
- msleep(rc_delay[trio_index][mac] * 1000);
- } else {
- /*
- * Wait a bit here because some EP devices
- * take longer to come up.
- */
- msleep(1000);
- }
-
- /* Check for PCIe link-up status again. */
- port_status.word =
- __gxio_mmio_read(trio_context->mmio_base_mac +
- reg_offset);
- if (!port_status.dl_up) {
- if (pcie_ports[trio_index].ports[mac].removable) {
- pr_info("PCI: link is down, MAC %d on TRIO %d\n",
- mac, trio_index);
- pr_info("This is expected if no PCIe card is connected to this link\n");
- } else
- pr_err("PCI: link is down, MAC %d on TRIO %d\n",
- mac, trio_index);
- continue;
- }
-
- /*
-		 * Ensure that the link can come out of the L1 power-down state.
- * Strictly speaking, this is needed only in the case of
- * heavy RC-initiated DMAs.
- */
- reg_offset =
- (TRIO_PCIE_INTFC_TX_FIFO_CTL <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
- tx_fifo_ctl.word =
- __gxio_mmio_read(trio_context->mmio_base_mac +
- reg_offset);
- tx_fifo_ctl.min_p_credits = 0;
- __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
- tx_fifo_ctl.word);
-
- /*
-		 * Change the device ID so that the Linux bus crawl doesn't confuse
- * the internal bridge with any Tilera endpoints.
- */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
- (TILERA_GX36_RC_DEV_ID <<
- TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
- TILERA_VENDOR_ID);
-
- /* Set the internal P2P bridge class code. */
- reg_offset =
- (TRIO_PCIE_RC_REVISION_ID <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- class_code_revision =
- __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
- class_code_revision = (class_code_revision & 0xff) |
- (PCI_CLASS_BRIDGE_PCI << 16);
-
- __gxio_mmio_write32(trio_context->mmio_base_mac +
- reg_offset, class_code_revision);
-
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
-
- /* Map in the MMIO space for the PIO region. */
- offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
- (((unsigned long long)mac) <<
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
-
-#else
-
- /* Alloc a PIO region for PCI config access per MAC. */
- ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
- mac, trio_index);
-
- continue;
- }
-
- trio_context->pio_cfg_index[mac] = ret;
-
- /* For PIO CFG, the bus_address_hi parameter is 0. */
- ret = gxio_trio_init_pio_region_aux(trio_context,
- trio_context->pio_cfg_index[mac],
- mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
- if (ret < 0) {
- pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
- mac, trio_index);
-
- continue;
- }
-
- offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
- (((unsigned long long)mac) <<
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
-
-#endif
-
- /*
- * To save VMALLOC space, we take advantage of the fact that
-		 * bit 29 in the PIO CFG address format is reserved as 0. With
-		 * TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being 30,
-		 * this cuts VMALLOC space usage from 1GB to 512MB per MAC.
- */
- trio_context->mmio_base_pio_cfg[mac] =
- iorpc_ioremap(trio_context->fd, offset, (1UL <<
- (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
- if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
- pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
- mac, trio_index);
-
- continue;
- }
-
- /* Initialize the PCIe interrupts. */
- if (tile_init_irqs(controller)) {
- pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
- mac, trio_index);
-
- continue;
- }
-
- /*
- * The PCI memory resource is located above the PA space.
- * The memory range for the PCI root bus should not overlap
- * with the physical RAM.
- */
- pci_add_resource_offset(&resources, &controller->mem_space,
- controller->mem_offset);
- pci_add_resource(&resources, &controller->io_space);
- controller->first_busno = next_busno;
-
- bridge = pci_alloc_host_bridge(0);
- if (!bridge)
- break;
-
- list_splice_init(&resources, &bridge->windows);
- bridge->dev.parent = NULL;
- bridge->sysdata = controller;
- bridge->busnr = next_busno;
- bridge->ops = controller->ops;
- bridge->swizzle_irq = pci_common_swizzle;
- bridge->map_irq = tile_map_irq;
-
- pci_scan_root_bus_bridge(bridge);
- bus = bridge->bus;
- controller->root_bus = bus;
- next_busno = bus->busn_res.end + 1;
- }
-
- /*
- * This comes from the generic Linux PCI driver.
- *
- * It allocates all of the resources (I/O memory, etc)
- * associated with the devices read in above.
- */
- pci_assign_unassigned_resources();
-
- /* Record the I/O resources in the PCI controller structure. */
- for (i = 0; i < num_rc_controllers; i++) {
- struct pci_controller *controller = &pci_controllers[i];
- gxio_trio_context_t *trio_context = controller->trio;
- struct pci_bus *root_bus = pci_controllers[i].root_bus;
- int ret;
- int j;
-
- /*
- * Skip controllers that are not properly initialized or
- * have down links.
- */
- if (root_bus == NULL)
- continue;
-
- /* Configure the max_payload_size values for this domain. */
- fixup_read_and_payload_sizes(controller);
-
- /* Alloc a PIO region for PCI memory access for each RC port. */
- ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-
- controller->pio_mem_index = ret;
-
- /*
-		 * For PIO MEM, the bus_address_hi parameter is hard-coded to 0
- * because we always assign 32-bit PCI bus BAR ranges.
- */
- ret = gxio_trio_init_pio_region_aux(trio_context,
- controller->pio_mem_index,
- controller->mac,
- 0,
- 0);
- if (ret < 0) {
- pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-
-#ifdef CONFIG_TILE_PCI_IO
- /*
- * Alloc a PIO region for PCI I/O space access for each RC port.
- */
- ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-
- controller->pio_io_index = ret;
-
- /*
-		 * For PIO IO, the bus_address_hi parameter is hard-coded to 0
- * because PCI I/O address space is 32-bit.
- */
- ret = gxio_trio_init_pio_region_aux(trio_context,
- controller->pio_io_index,
- controller->mac,
- 0,
- HV_TRIO_PIO_FLAG_IO_SPACE);
- if (ret < 0) {
- pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-#endif
-
- /*
- * Configure a Mem-Map region for each memory controller so
- * that Linux can map all of its PA space to the PCI bus.
- * Use the IOMMU to handle hash-for-home memory.
- */
- for_each_online_node(j) {
- unsigned long start_pfn = node_start_pfn[j];
- unsigned long end_pfn = node_end_pfn[j];
- unsigned long nr_pages = end_pfn - start_pfn;
-
- ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
- 0);
- if (ret < 0) {
- pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
- controller->trio_index, controller->mac,
- j);
-
- goto alloc_mem_map_failed;
- }
-
- controller->mem_maps[j] = ret;
-
- /*
- * Initialize the Mem-Map and the I/O MMU so that all
- * the physical memory can be accessed by the endpoint
- * devices. The base bus address is set to the base CPA
- * of this memory controller plus an offset (see pci.h).
- * The region's base VA is set to the base CPA. The
- * I/O MMU table essentially translates the CPA to
- * the real PA. Implicitly, for node 0, we create
- * a separate Mem-Map region that serves as the inbound
- * window for legacy 32-bit devices. This is a direct
- * map of the low 4GB CPA space.
- */
- ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
- controller->mem_maps[j],
- start_pfn << PAGE_SHIFT,
- nr_pages << PAGE_SHIFT,
- trio_context->asid,
- controller->mac,
- (start_pfn << PAGE_SHIFT) +
- TILE_PCI_MEM_MAP_BASE_OFFSET,
- j,
- GXIO_TRIO_ORDER_MODE_UNORDERED);
- if (ret < 0) {
- pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
- controller->trio_index, controller->mac,
- j);
-
- goto alloc_mem_map_failed;
- }
- continue;
-
-alloc_mem_map_failed:
- break;
- }
-
- pci_bus_add_devices(root_bus);
- }
-
- return 0;
-}
-subsys_initcall(pcibios_init);
-
-/* Process any "pci=" kernel boot arguments. */
-char *__init pcibios_setup(char *str)
-{
- if (!strcmp(str, "off")) {
- pci_probe = 0;
- return NULL;
- }
- return str;
-}
-
-/*
- * Called for each device after PCI setup is done.
- * We initialize the PCI device capabilities conservatively, assuming that
- * all devices can only address the 32-bit DMA space. The exception here is
- * that the device dma_offset is set to the value that matches the 64-bit
- * capable devices. This is OK because dma_offset is not used by legacy
- * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
- * This implementation matches the kernel design of setting PCI devices'
- * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
- * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
- */
-static void pcibios_fixup_final(struct pci_dev *pdev)
-{
- set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
- set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
- pdev->dev.archdata.max_direct_dma_addr =
- TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
- pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
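
Given the conservative 32-bit default installed above, a 64-bit-capable
endpoint driver would typically widen its masks in its probe routine. A
minimal sketch using the generic DMA API (illustrative only, not part
of this port):

	/* Opt in to 64-bit streaming and coherent DMA if the HW allows. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		dev_warn(&pdev->dev, "64-bit DMA unavailable, keeping 32-bit mask\n");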
-
-/* Map a PCI MMIO bus address into VA space. */
-void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
-{
- struct pci_controller *controller = NULL;
- resource_size_t bar_start;
- resource_size_t bar_end;
- resource_size_t offset;
- resource_size_t start;
- resource_size_t end;
- int trio_fd;
- int i;
-
- start = phys_addr;
- end = phys_addr + size - 1;
-
- /*
-	 * By searching for phys_addr in each controller's mem_space, we can
- * determine the controller that should accept the PCI memory access.
- */
- for (i = 0; i < num_rc_controllers; i++) {
- /*
- * Skip controllers that are not properly initialized or
- * have down links.
- */
- if (pci_controllers[i].root_bus == NULL)
- continue;
-
- bar_start = pci_controllers[i].mem_space.start;
- bar_end = pci_controllers[i].mem_space.end;
-
- if ((start >= bar_start) && (end <= bar_end)) {
- controller = &pci_controllers[i];
- break;
- }
- }
-
- if (controller == NULL)
- return NULL;
-
- trio_fd = controller->trio->fd;
-
- /* Convert the resource start to the bus address offset. */
- start = phys_addr - controller->mem_offset;
-
- offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;
-
- /* We need to keep the PCI bus address's in-page offset in the VA. */
- return iorpc_ioremap(trio_fd, offset, size) +
- (start & (PAGE_SIZE - 1));
-}
-EXPORT_SYMBOL(ioremap);
-
-#ifdef CONFIG_TILE_PCI_IO
-/* Map a PCI I/O address into VA space. */
-void __iomem *ioport_map(unsigned long port, unsigned int size)
-{
- struct pci_controller *controller = NULL;
- resource_size_t bar_start;
- resource_size_t bar_end;
- resource_size_t offset;
- resource_size_t start;
- resource_size_t end;
- int trio_fd;
- int i;
-
- start = port;
- end = port + size - 1;
-
- /*
-	 * By searching for the port in each controller's io_space, we can
- * determine the controller that should accept the PCI I/O access.
- */
- for (i = 0; i < num_rc_controllers; i++) {
- /*
- * Skip controllers that are not properly initialized or
- * have down links.
- */
- if (pci_controllers[i].root_bus == NULL)
- continue;
-
- bar_start = pci_controllers[i].io_space.start;
- bar_end = pci_controllers[i].io_space.end;
-
- if ((start >= bar_start) && (end <= bar_end)) {
- controller = &pci_controllers[i];
- break;
- }
- }
-
- if (controller == NULL)
- return NULL;
-
- trio_fd = controller->trio->fd;
-
- /* Convert the resource start to the bus address offset. */
- port -= controller->io_space.start;
-
- offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;
-
- /* We need to keep the PCI bus address's in-page offset in the VA. */
- return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-EXPORT_SYMBOL(ioport_unmap);
-#endif
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
- iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iounmap);
-
-/****************************************************************
- *
- * Tile PCI config space read/write routines
- *
- ****************************************************************/
-
-/*
- * These are the normal read and write ops; they are expanded with
- * macros from pci_bus_read_config_byte() etc.
- *
- * devfn is the combined PCI device & function.
- *
- * offset is in bytes, from the start of config space for the
- * specified bus & device.
- */
-static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 *val)
-{
- struct pci_controller *controller = bus->sysdata;
- gxio_trio_context_t *trio_context = controller->trio;
- int busnum = bus->number & 0xff;
- int device = PCI_SLOT(devfn);
- int function = PCI_FUNC(devfn);
- int config_type = 1;
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
- void *mmio_addr;
-
- /*
-	 * Map all accesses to the local device on the root bus into the
- * MMIO space of the MAC. Accesses to the downstream devices
- * go to the PIO space.
- */
- if (pci_is_root_bus(bus)) {
- if (device == 0) {
- /*
-			 * This is the internal downstream P2P bridge;
-			 * access it directly.
- */
- unsigned int reg_offset;
-
- reg_offset = ((offset & 0xFFF) <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
- << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (controller->mac <<
- TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- mmio_addr = trio_context->mmio_base_mac + reg_offset;
-
- goto valid_device;
-
- } else {
- /*
- * We fake an empty device for (device > 0),
- * since there is only one device on bus 0.
- */
- goto invalid_device;
- }
- }
-
- /*
- * Accesses to the directly attached device have to be
- * sent as type-0 configs.
- */
- if (busnum == (controller->first_busno + 1)) {
- /*
-	 * There is only one device off our built-in P2P bridge.
- */
- if (device != 0)
- goto invalid_device;
-
- config_type = 0;
- }
-
- cfg_addr.word = 0;
- cfg_addr.reg_addr = (offset & 0xFFF);
- cfg_addr.fn = function;
- cfg_addr.dev = device;
- cfg_addr.bus = busnum;
- cfg_addr.type = config_type;
-
- /*
- * Note that we don't set the mac field in cfg_addr because the
- * mapping is per port.
- */
- mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
- cfg_addr.word;
-
-valid_device:
-
- switch (size) {
- case 4:
- *val = __gxio_mmio_read32(mmio_addr);
- break;
-
- case 2:
- *val = __gxio_mmio_read16(mmio_addr);
- break;
-
- case 1:
- *val = __gxio_mmio_read8(mmio_addr);
- break;
-
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
- TRACE_CFG_RD(size, *val, busnum, device, function, offset);
-
- return 0;
-
-invalid_device:
-
- switch (size) {
- case 4:
- *val = 0xFFFFFFFF;
- break;
-
- case 2:
- *val = 0xFFFF;
- break;
-
- case 1:
- *val = 0xFF;
- break;
-
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
- return 0;
-}
-
-
-/*
- * See tile_cfg_read() for relevant comments.
- * Note that "val" is the value to write, not a pointer to that value.
- */
-static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 val)
-{
- struct pci_controller *controller = bus->sysdata;
- gxio_trio_context_t *trio_context = controller->trio;
- int busnum = bus->number & 0xff;
- int device = PCI_SLOT(devfn);
- int function = PCI_FUNC(devfn);
- int config_type = 1;
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
- void *mmio_addr;
- u32 val_32 = (u32)val;
- u16 val_16 = (u16)val;
- u8 val_8 = (u8)val;
-
- /*
-	 * Map all accesses to the local device on the root bus into the
- * MMIO space of the MAC. Accesses to the downstream devices
- * go to the PIO space.
- */
- if (pci_is_root_bus(bus)) {
- if (device == 0) {
- /*
-			 * This is the internal downstream P2P bridge;
-			 * access it directly.
- */
- unsigned int reg_offset;
-
- reg_offset = ((offset & 0xFFF) <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
- << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
- (controller->mac <<
- TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- mmio_addr = trio_context->mmio_base_mac + reg_offset;
-
- goto valid_device;
-
- } else {
- /*
- * We fake an empty device for (device > 0),
- * since there is only one device on bus 0.
- */
- goto invalid_device;
- }
- }
-
- /*
- * Accesses to the directly attached device have to be
- * sent as type-0 configs.
- */
- if (busnum == (controller->first_busno + 1)) {
- /*
-		 * There is only one device off our built-in P2P bridge.
- */
- if (device != 0)
- goto invalid_device;
-
- config_type = 0;
- }
-
- cfg_addr.word = 0;
- cfg_addr.reg_addr = (offset & 0xFFF);
- cfg_addr.fn = function;
- cfg_addr.dev = device;
- cfg_addr.bus = busnum;
- cfg_addr.type = config_type;
-
- /*
- * Note that we don't set the mac field in cfg_addr because the
- * mapping is per port.
- */
- mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
- cfg_addr.word;
-
-valid_device:
-
- switch (size) {
- case 4:
- __gxio_mmio_write32(mmio_addr, val_32);
- TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
- break;
-
- case 2:
- __gxio_mmio_write16(mmio_addr, val_16);
- TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
- break;
-
- case 1:
- __gxio_mmio_write8(mmio_addr, val_8);
- TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
- break;
-
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
-invalid_device:
-
- return 0;
-}
-
-
-static struct pci_ops tile_cfg_ops = {
- .read = tile_cfg_read,
- .write = tile_cfg_write,
-};
-
-
-/* MSI support starts here. */
-static unsigned int tilegx_msi_startup(struct irq_data *d)
-{
- if (irq_data_get_msi_desc(d))
- pci_msi_unmask_irq(d);
-
- return 0;
-}
-
-static void tilegx_msi_ack(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
-}
-
-static void tilegx_msi_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
-}
-
-static void tilegx_msi_unmask(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
- pci_msi_unmask_irq(d);
-}
-
-static struct irq_chip tilegx_msi_chip = {
- .name = "tilegx_msi",
- .irq_startup = tilegx_msi_startup,
- .irq_ack = tilegx_msi_ack,
- .irq_mask = tilegx_msi_mask,
- .irq_unmask = tilegx_msi_unmask,
-
- /* TBD: support set_affinity. */
-};
-
-int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
-{
- struct pci_controller *controller;
- gxio_trio_context_t *trio_context;
- struct msi_msg msg;
- int default_irq;
- uint64_t mem_map_base;
- uint64_t mem_map_limit;
- u64 msi_addr;
- int mem_map;
- int cpu;
- int irq;
- int ret;
-
- irq = irq_alloc_hwirq(-1);
- if (!irq)
- return -ENOSPC;
-
- /*
- * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
- * devices that are not capable of generating a 64-bit message address.
- * These devices will fall back to using the legacy interrupts.
- * Most PCIe endpoint devices do support 64-bit message addressing.
- */
- if (desc->msi_attrib.is_64 == 0) {
- dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");
-
- ret = -ENOMEM;
- goto is_64_failure;
- }
-
- default_irq = desc->msi_attrib.default_irq;
- controller = irq_get_handler_data(default_irq);
-
- BUG_ON(!controller);
-
- trio_context = controller->trio;
-
- /*
- * Allocate a scatter-queue that will accept the MSI write and
- * trigger the TILE-side interrupts. We use the scatter-queue regions
- * before the mem map regions, because the latter are needed by more
- * applications.
- */
- mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
- if (mem_map >= 0) {
- TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
- .pop = 0,
- .doorbell = 1,
- }};
-
- mem_map += TRIO_NUM_MAP_MEM_REGIONS;
- mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
- mem_map * MEM_MAP_INTR_REGION_SIZE;
- mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
-
- msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
- msg.data = (unsigned int)doorbell_template.word;
- } else {
-		/* SQ regions are exhausted; allocate from Mem-Map regions. */
- mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
- if (mem_map < 0) {
- dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
- desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
- ret = -ENOMEM;
- goto msi_mem_map_alloc_failure;
- }
-
- mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
- mem_map * MEM_MAP_INTR_REGION_SIZE;
- mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
-
- msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
- TRIO_MAP_MEM_REG_INT0;
-
- msg.data = mem_map;
- }
-
- /* We try to distribute different IRQs to different tiles. */
- cpu = tile_irq_cpu(irq);
-
- /*
- * Now call up to the HV to configure the MSI interrupt and
- * set up the IPI binding.
- */
- ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
- KERNEL_PL, irq, controller->mac,
- mem_map, mem_map_base, mem_map_limit,
- trio_context->asid);
- if (ret < 0) {
- dev_info(&pdev->dev, "HV MSI config failed\n");
-
- goto hv_msi_config_failure;
- }
-
- irq_set_msi_desc(irq, desc);
-
- msg.address_hi = msi_addr >> 32;
- msg.address_lo = msi_addr & 0xffffffff;
-
- pci_write_msi_msg(irq, &msg);
- irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
- irq_set_handler_data(irq, controller);
-
- return 0;
-
-hv_msi_config_failure:
- /* Free mem-map */
-msi_mem_map_alloc_failure:
-is_64_failure:
- irq_free_hwirq(irq);
- return ret;
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
- irq_free_hwirq(irq);
-}
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
deleted file mode 100644
index 6394c1ccb68e..000000000000
--- a/arch/tile/kernel/perf_event.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/*
- * Copyright 2014 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- * Perf_events support for Tile processor.
- *
- * This code is based upon the x86 perf event
- * code, which is:
- *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2009 Jaswinder Singh Rajput
- * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
- * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
- * Copyright (C) 2009 Google, Inc., Stephane Eranian
- */
-
-#include <linux/kprobes.h>
-#include <linux/kernel.h>
-#include <linux/kdebug.h>
-#include <linux/mutex.h>
-#include <linux/bitmap.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-#include <linux/atomic.h>
-#include <asm/traps.h>
-#include <asm/stack.h>
-#include <asm/pmc.h>
-#include <hv/hypervisor.h>
-
-#define TILE_MAX_COUNTERS 4
-
-#define PERF_COUNT_0_IDX 0
-#define PERF_COUNT_1_IDX 1
-#define AUX_PERF_COUNT_0_IDX 2
-#define AUX_PERF_COUNT_1_IDX 3
-
-struct cpu_hw_events {
- int n_events;
- struct perf_event *events[TILE_MAX_COUNTERS]; /* counter order */
- struct perf_event *event_list[TILE_MAX_COUNTERS]; /* enabled
- order */
- int assign[TILE_MAX_COUNTERS];
- unsigned long active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)];
- unsigned long used_mask;
-};
-
-/* TILE arch specific performance monitor unit */
-struct tile_pmu {
- const char *name;
- int version;
- const int *hw_events; /* generic hw events table */
- /* generic hw cache events table */
- const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	int (*map_hw_event)(u64);	  /* method used to map
-						  hw events */
-	int (*map_cache_event)(u64);	  /* method used to map
-						  cache events */
-
- u64 max_period; /* max sampling period */
- u64 cntval_mask; /* counter width mask */
- int cntval_bits; /* counter width */
- int max_events; /* max generic hw events
- in map */
-	int num_counters; /* number of base + aux counters */
-	int num_base_counters; /* number of base counters */
-};
-
-DEFINE_PER_CPU(u64, perf_irqs);
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-#define TILE_OP_UNSUPP (-1)
-
-#ifndef __tilegx__
-/* TILEPro hardware events map */
-static const int tile_hw_event_map[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x01, /* ONE */
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x06, /* MP_BUNDLE_RETIRED */
- [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP,
- [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x16, /*
- MP_CONDITIONAL_BRANCH_ISSUED */
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x14, /*
- MP_CONDITIONAL_BRANCH_MISSPREDICT */
- [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP,
-};
-#else
-/* TILEGx hardware events map */
-static const int tile_hw_event_map[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x181, /* ONE */
- [PERF_COUNT_HW_INSTRUCTIONS] = 0xdb, /* INSTRUCTION_BUNDLE */
- [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP,
- [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xd9, /*
- COND_BRANCH_PRED_CORRECT */
- [PERF_COUNT_HW_BRANCH_MISSES] = 0xda, /*
- COND_BRANCH_PRED_INCORRECT */
- [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP,
-};
-#endif
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-
-/*
- * Generalized hw caching related hw_event table, filled
- * in on a per model basis. A value of -1 means
- * 'not supported', any other value means the
- * raw hw_event ID.
- */
-#ifndef __tilegx__
-/* TILEPro hardware cache event map */
-static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x21, /* RD_MISS */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x22, /* WR_MISS */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x12, /* MP_ICACHE_HIT_ISSUED */
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x1d, /* TLB_CNT */
- [C(RESULT_MISS)] = 0x20, /* TLB_EXCEPTION */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x13, /* MP_ITLB_HIT_ISSUED */
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-};
-#else
-/* TILEGx hardware cache event map */
-static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
- /*
- * Like some other architectures (e.g. ARM), the performance
- * counters don't differentiate between read and write
- * accesses/misses, so this isn't strictly correct, but it's the
- * best we can do. Writes and reads get combined.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x44, /* RD_MISS */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x45, /* WR_MISS */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */
- [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */
- [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-};
-#endif
-
-static atomic_t tile_active_events;
-static DEFINE_MUTEX(perf_intr_reserve_mutex);
-
-static int tile_map_hw_event(u64 config);
-static int tile_map_cache_event(u64 config);
-
-static int tile_pmu_handle_irq(struct pt_regs *regs, int fault);
-
-/*
- * To avoid new_raw_count getting larger than prev_raw_count
- * in tile_perf_event_update(), we limit the value of max_period to 2^31 - 1.
- */
-static const struct tile_pmu tilepmu = {
-#ifndef __tilegx__
- .name = "tilepro",
-#else
- .name = "tilegx",
-#endif
- .max_events = ARRAY_SIZE(tile_hw_event_map),
- .map_hw_event = tile_map_hw_event,
- .hw_events = tile_hw_event_map,
- .map_cache_event = tile_map_cache_event,
- .cache_events = &tile_cache_event_map,
- .cntval_bits = 32,
- .cntval_mask = (1ULL << 32) - 1,
- .max_period = (1ULL << 31) - 1,
- .num_counters = TILE_MAX_COUNTERS,
- .num_base_counters = TILE_BASE_COUNTERS,
-};
-
-static const struct tile_pmu *tile_pmu __read_mostly;
-
-/*
- * Check whether perf event is enabled.
- */
-int tile_perf_enabled(void)
-{
- return atomic_read(&tile_active_events) != 0;
-}
-
-/*
- * Read Performance Counters.
- */
-static inline u64 read_counter(int idx)
-{
- u64 val = 0;
-
- /* __insn_mfspr() only takes an immediate argument */
- switch (idx) {
- case PERF_COUNT_0_IDX:
- val = __insn_mfspr(SPR_PERF_COUNT_0);
- break;
- case PERF_COUNT_1_IDX:
- val = __insn_mfspr(SPR_PERF_COUNT_1);
- break;
- case AUX_PERF_COUNT_0_IDX:
- val = __insn_mfspr(SPR_AUX_PERF_COUNT_0);
- break;
- case AUX_PERF_COUNT_1_IDX:
- val = __insn_mfspr(SPR_AUX_PERF_COUNT_1);
- break;
- default:
- WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX ||
- idx < PERF_COUNT_0_IDX);
- }
-
- return val;
-}
-
-/*
- * Write Performance Counters.
- */
-static inline void write_counter(int idx, u64 value)
-{
- /* __insn_mtspr() only takes an immediate argument */
- switch (idx) {
- case PERF_COUNT_0_IDX:
- __insn_mtspr(SPR_PERF_COUNT_0, value);
- break;
- case PERF_COUNT_1_IDX:
- __insn_mtspr(SPR_PERF_COUNT_1, value);
- break;
- case AUX_PERF_COUNT_0_IDX:
- __insn_mtspr(SPR_AUX_PERF_COUNT_0, value);
- break;
- case AUX_PERF_COUNT_1_IDX:
- __insn_mtspr(SPR_AUX_PERF_COUNT_1, value);
- break;
- default:
- WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX ||
- idx < PERF_COUNT_0_IDX);
- }
-}
-
-/*
- * Enable performance event by setting
- * Performance Counter Control registers.
- */
-static inline void tile_pmu_enable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned long cfg, mask;
- int shift, idx = hwc->idx;
-
- /*
- * prevent early activation from tile_pmu_start() in hw_perf_enable
- */
-
- if (WARN_ON_ONCE(idx == -1))
- return;
-
- if (idx < tile_pmu->num_base_counters)
- cfg = __insn_mfspr(SPR_PERF_COUNT_CTL);
- else
- cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL);
-
- switch (idx) {
- case PERF_COUNT_0_IDX:
- case AUX_PERF_COUNT_0_IDX:
- mask = TILE_EVENT_MASK;
- shift = 0;
- break;
- case PERF_COUNT_1_IDX:
- case AUX_PERF_COUNT_1_IDX:
- mask = TILE_EVENT_MASK << 16;
- shift = 16;
- break;
- default:
- WARN_ON_ONCE(idx < PERF_COUNT_0_IDX ||
- idx > AUX_PERF_COUNT_1_IDX);
- return;
- }
-
- /* Clear mask bits to enable the event. */
- cfg &= ~mask;
- cfg |= hwc->config << shift;
-
- if (idx < tile_pmu->num_base_counters)
- __insn_mtspr(SPR_PERF_COUNT_CTL, cfg);
- else
- __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg);
-}
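
The control word layout above packs two counters into one SPR, with the second counter's event field shifted up by 16 bits. Below is a minimal standalone sketch of that packing (plain userspace C, not part of the kernel tree; the field mask value is an assumption for illustration, not the real SPR layout):

/*
 * Sketch of the two-counters-per-control-register packing used by
 * tile_pmu_enable_event().  EVENT_FIELD_MASK is illustrative only.
 */
#include <stdio.h>

#define EVENT_FIELD_MASK 0x7ffUL	/* assumed width of one event field */

static unsigned long set_event_field(unsigned long cfg, int slot,
				     unsigned long event)
{
	int shift = slot ? 16 : 0;

	cfg &= ~(EVENT_FIELD_MASK << shift);	/* clear the old field */
	return cfg | (event << shift);		/* install the new event */
}

int main(void)
{
	unsigned long cfg = 0;

	cfg = set_event_field(cfg, 0, 0x44);	/* counter 0: RD_MISS */
	cfg = set_event_field(cfg, 1, 0x45);	/* counter 1: WR_MISS */
	printf("cfg = %#lx\n", cfg);		/* prints cfg = 0x450044 */
	return 0;
}
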
-
-/*
- * Disable performance event by clearing
- * Performance Counter Control registers.
- */
-static inline void tile_pmu_disable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned long cfg, mask;
- int idx = hwc->idx;
-
- if (idx == -1)
- return;
-
- if (idx < tile_pmu->num_base_counters)
- cfg = __insn_mfspr(SPR_PERF_COUNT_CTL);
- else
- cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL);
-
- switch (idx) {
- case PERF_COUNT_0_IDX:
- case AUX_PERF_COUNT_0_IDX:
- mask = TILE_PLM_MASK;
- break;
- case PERF_COUNT_1_IDX:
- case AUX_PERF_COUNT_1_IDX:
- mask = TILE_PLM_MASK << 16;
- break;
- default:
- WARN_ON_ONCE(idx < PERF_COUNT_0_IDX ||
- idx > AUX_PERF_COUNT_1_IDX);
- return;
- }
-
- /* Set mask bits to disable the event. */
- cfg |= mask;
-
- if (idx < tile_pmu->num_base_counters)
- __insn_mtspr(SPR_PERF_COUNT_CTL, cfg);
- else
- __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg);
-}
-
-/*
- * Propagate event elapsed time into the generic event.
- * Can only be executed on the CPU where the event is active.
- * Returns the new raw counter value.
- */
-static u64 tile_perf_event_update(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int shift = 64 - tile_pmu->cntval_bits;
- u64 prev_raw_count, new_raw_count;
- u64 oldval;
- int idx = hwc->idx;
- u64 delta;
-
- /*
- * Careful: an NMI might modify the previous event value.
- *
- * Our tactic to handle this is to first atomically read and
- * exchange a new raw count - then add that new-prev delta
- * count to the generic event atomically:
- */
-again:
- prev_raw_count = local64_read(&hwc->prev_count);
- new_raw_count = read_counter(idx);
-
- oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count);
- if (oldval != prev_raw_count)
- goto again;
-
- /*
- * Now we have the new raw value and have updated the prev
- * timestamp already. We can now calculate the elapsed delta
- * (event-)time and add that to the generic event.
- *
- * Careful, not all hw sign-extends above the physical width
- * of the count.
- */
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
-
- local64_add(delta, &event->count);
- local64_sub(delta, &hwc->period_left);
-
- return new_raw_count;
-}
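
The shift arithmetic above is the standard idiom for a PMU whose counters are narrower than 64 bits: both samples are left-aligned so the subtraction wraps correctly at bit 63, then shifted back down. A standalone sketch of the same computation (userspace C, assuming the 32-bit counter width this driver uses):

/*
 * Sketch of the counter-delta computation in tile_perf_event_update(),
 * assuming cntval_bits == 32 as in the tile PMU.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t now, int cntval_bits)
{
	int shift = 64 - cntval_bits;

	/* Left-align both samples so the subtraction wraps at bit 63. */
	uint64_t delta = (now << shift) - (prev << shift);

	return delta >> shift;	/* realign; the result fits in cntval_bits */
}

int main(void)
{
	/* The 32-bit counter wrapped from 0xfffffff0 to 0x10. */
	printf("delta = %llu\n", (unsigned long long)
	       counter_delta(0xfffffff0ULL, 0x10ULL, 32));	/* delta = 32 */
	return 0;
}
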
-
-/*
- * Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the event disabled in hw:
- */
-static int tile_event_set_period(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- s64 left = local64_read(&hwc->period_left);
- s64 period = hwc->sample_period;
- int ret = 0;
-
- /*
- * If we are way outside a reasonable range then just skip forward:
- */
- if (unlikely(left <= -period)) {
- left = period;
- local64_set(&hwc->period_left, left);
- hwc->last_period = period;
- ret = 1;
- }
-
- if (unlikely(left <= 0)) {
- left += period;
- local64_set(&hwc->period_left, left);
- hwc->last_period = period;
- ret = 1;
- }
- if (left > tile_pmu->max_period)
- left = tile_pmu->max_period;
-
- /*
- * The hw event starts counting from this event offset,
-	 * mark it to be able to extract future deltas:
- */
- local64_set(&hwc->prev_count, (u64)-left);
-
- write_counter(idx, (u64)(-left) & tile_pmu->cntval_mask);
-
- perf_event_update_userpage(event);
-
- return ret;
-}
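
Writing (u64)(-left) into the counter means the hardware, counting upward, overflows after exactly `left` events; max_period above caps `left` so the value stays in the counter's range. A standalone check of the arithmetic (userspace C, 32-bit counter as in this driver):

/* Standalone check of the "program the counter with -left" idiom above. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t left = 1000;			/* events until the interrupt */
	uint32_t start = (uint32_t)(-left);	/* (u64)(-left) & cntval_mask */

	/* After 'left' increments, the 32-bit counter wraps to zero. */
	assert((uint32_t)(start + left) == 0);
	return 0;
}
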
-
-/*
- * Stop the event but do not release the PMU counter
- */
-static void tile_pmu_stop(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- if (__test_and_clear_bit(idx, cpuc->active_mask)) {
- tile_pmu_disable_event(event);
- cpuc->events[hwc->idx] = NULL;
- WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
- hwc->state |= PERF_HES_STOPPED;
- }
-
- if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- /*
-	 * Drain the remaining delta count out of an event
- * that we are disabling:
- */
- tile_perf_event_update(event);
- hwc->state |= PERF_HES_UPTODATE;
- }
-}
-
-/*
- * Start an event (without re-assigning counter)
- */
-static void tile_pmu_start(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int idx = event->hw.idx;
-
- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
- return;
-
- if (WARN_ON_ONCE(idx == -1))
- return;
-
- if (flags & PERF_EF_RELOAD) {
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
- tile_event_set_period(event);
- }
-
- event->hw.state = 0;
-
- cpuc->events[idx] = event;
- __set_bit(idx, cpuc->active_mask);
-
- unmask_pmc_interrupts();
-
- tile_pmu_enable_event(event);
-
- perf_event_update_userpage(event);
-}
-
-/*
- * Add a single event to the PMU.
- *
- * The event is added to the group of enabled events
- * but only if it can be scheduled with existing events.
- */
-static int tile_pmu_add(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct hw_perf_event *hwc;
- unsigned long mask;
- int b, max_cnt;
-
- hwc = &event->hw;
-
- /*
- * We are full.
- */
- if (cpuc->n_events == tile_pmu->num_counters)
- return -ENOSPC;
-
- cpuc->event_list[cpuc->n_events] = event;
- cpuc->n_events++;
-
- hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
- if (!(flags & PERF_EF_START))
- hwc->state |= PERF_HES_ARCH;
-
- /*
- * Find first empty counter.
- */
- max_cnt = tile_pmu->num_counters;
- mask = ~cpuc->used_mask;
-
- /* Find next free counter. */
- b = find_next_bit(&mask, max_cnt, 0);
-
- /* Should not happen. */
- if (WARN_ON_ONCE(b == max_cnt))
- return -ENOSPC;
-
- /*
- * Assign counter to event.
- */
- event->hw.idx = b;
- __set_bit(b, &cpuc->used_mask);
-
- /*
- * Start if requested.
- */
- if (flags & PERF_EF_START)
- tile_pmu_start(event, PERF_EF_RELOAD);
-
- return 0;
-}
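
Counter assignment above is first-fit over the used_mask bitmap. A standalone sketch of the same policy (userspace C, with the kernel's find_next_bit()/__set_bit() helpers replaced by open-coded bit tests):

/* Sketch of the first-fit counter allocation in tile_pmu_add(). */
#include <stdio.h>

#define NUM_COUNTERS 4	/* TILE_MAX_COUNTERS */

static int alloc_counter(unsigned long *used_mask)
{
	int b;

	for (b = 0; b < NUM_COUNTERS; b++) {
		if (!(*used_mask & (1UL << b))) {
			*used_mask |= 1UL << b;	/* claim counter b */
			return b;
		}
	}
	return -1;	/* all busy; the driver returns -ENOSPC */
}

int main(void)
{
	unsigned long used = 0;
	int i;

	for (i = 0; i < 5; i++)	/* prints 0 1 2 3 -1 */
		printf("counter %d\n", alloc_counter(&used));
	return 0;
}
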
-
-/*
- * Delete a single event from the PMU.
- *
- * The event is deleted from the group of enabled events.
- * If it is the last event, disable PMU interrupt.
- */
-static void tile_pmu_del(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int i;
-
- /*
- * Remove event from list, compact list if necessary.
- */
- for (i = 0; i < cpuc->n_events; i++) {
- if (cpuc->event_list[i] == event) {
- while (++i < cpuc->n_events)
- cpuc->event_list[i-1] = cpuc->event_list[i];
- --cpuc->n_events;
- cpuc->events[event->hw.idx] = NULL;
- __clear_bit(event->hw.idx, &cpuc->used_mask);
- tile_pmu_stop(event, PERF_EF_UPDATE);
- break;
- }
- }
- /*
- * If there are no events left, then mask PMU interrupt.
- */
- if (cpuc->n_events == 0)
- mask_pmc_interrupts();
- perf_event_update_userpage(event);
-}
-
-/*
- * Propagate event elapsed time into the event.
- */
-static inline void tile_pmu_read(struct perf_event *event)
-{
- tile_perf_event_update(event);
-}
-
-/*
- * Map generic events to Tile PMU.
- */
-static int tile_map_hw_event(u64 config)
-{
- if (config >= tile_pmu->max_events)
- return -EINVAL;
- return tile_pmu->hw_events[config];
-}
-
-/*
- * Map generic hardware cache events to Tile PMU.
- */
-static int tile_map_cache_event(u64 config)
-{
- unsigned int cache_type, cache_op, cache_result;
- int code;
-
- if (!tile_pmu->cache_events)
- return -ENOENT;
-
- cache_type = (config >> 0) & 0xff;
- if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
- return -EINVAL;
-
- cache_op = (config >> 8) & 0xff;
- if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
- return -EINVAL;
-
- cache_result = (config >> 16) & 0xff;
- if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
- return -EINVAL;
-
- code = (*tile_pmu->cache_events)[cache_type][cache_op][cache_result];
- if (code == TILE_OP_UNSUPP)
- return -EINVAL;
-
- return code;
-}
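
The config word decoded above follows perf's generic cache-event encoding: cache type in byte 0, operation in byte 1, result in byte 2. A standalone sketch of the round trip (userspace C; the numeric values shown correspond to the generic perf enums):

/* Sketch of the generic perf cache-event config encoding decoded above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t config = (0ULL << 0)	/* PERF_COUNT_HW_CACHE_L1D */
			| (0ULL << 8)	/* PERF_COUNT_HW_CACHE_OP_READ */
			| (1ULL << 16);	/* PERF_COUNT_HW_CACHE_RESULT_MISS */

	printf("type=%u op=%u result=%u\n",
	       (unsigned)(config & 0xff),
	       (unsigned)((config >> 8) & 0xff),
	       (unsigned)((config >> 16) & 0xff));
	return 0;
}
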
-
-static void tile_event_destroy(struct perf_event *event)
-{
- if (atomic_dec_return(&tile_active_events) == 0)
- release_pmc_hardware();
-}
-
-static int __tile_event_init(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- struct hw_perf_event *hwc = &event->hw;
- int code;
-
- switch (attr->type) {
- case PERF_TYPE_HARDWARE:
- code = tile_pmu->map_hw_event(attr->config);
- break;
- case PERF_TYPE_HW_CACHE:
- code = tile_pmu->map_cache_event(attr->config);
- break;
- case PERF_TYPE_RAW:
- code = attr->config & TILE_EVENT_MASK;
- break;
- default:
- /* Should not happen. */
- return -EOPNOTSUPP;
- }
-
- if (code < 0)
- return code;
-
- hwc->config = code;
- hwc->idx = -1;
-
- if (attr->exclude_user)
- hwc->config |= TILE_CTL_EXCL_USER;
-
- if (attr->exclude_kernel)
- hwc->config |= TILE_CTL_EXCL_KERNEL;
-
- if (attr->exclude_hv)
- hwc->config |= TILE_CTL_EXCL_HV;
-
- if (!hwc->sample_period) {
- hwc->sample_period = tile_pmu->max_period;
- hwc->last_period = hwc->sample_period;
- local64_set(&hwc->period_left, hwc->sample_period);
- }
- event->destroy = tile_event_destroy;
- return 0;
-}
-
-static int tile_event_init(struct perf_event *event)
-{
-	int err = 0;
-	perf_irq_t old_irq_handler = NULL;
-
-	/*
-	 * Check the event type before reserving the PMC hardware, so an
-	 * unsupported type does not leak the tile_active_events count or
-	 * the PMC reservation.
-	 */
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
-		return -ENOENT;
-	}
-
-	if (atomic_inc_return(&tile_active_events) == 1)
-		old_irq_handler = reserve_pmc_hardware(tile_pmu_handle_irq);
-
-	if (old_irq_handler) {
-		pr_warn("PMC hardware busy (reserved by oprofile)\n");
-
-		atomic_dec(&tile_active_events);
-		return -EBUSY;
-	}
-
-	err = __tile_event_init(event);
-	if (err && event->destroy)
-		event->destroy(event);
-	return err;
-}
-
-static struct pmu tilera_pmu = {
- .event_init = tile_event_init,
- .add = tile_pmu_add,
- .del = tile_pmu_del,
-
- .start = tile_pmu_start,
- .stop = tile_pmu_stop,
-
- .read = tile_pmu_read,
-};
-
-/*
- * PMU IRQ handler. The PMU has two interrupts, which share the same handler.
- */
-static int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
-{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct perf_event *event;
- struct hw_perf_event *hwc;
- u64 val;
- unsigned long status;
- int bit;
-
- __this_cpu_inc(perf_irqs);
-
- if (!atomic_read(&tile_active_events))
- return 0;
-
- status = pmc_get_overflow();
- pmc_ack_overflow(status);
-
- for_each_set_bit(bit, &status, tile_pmu->num_counters) {
-
- event = cpuc->events[bit];
-
- if (!event)
- continue;
-
- if (!test_bit(bit, cpuc->active_mask))
- continue;
-
- hwc = &event->hw;
-
- val = tile_perf_event_update(event);
- if (val & (1ULL << (tile_pmu->cntval_bits - 1)))
- continue;
-
- perf_sample_data_init(&data, 0, event->hw.last_period);
- if (!tile_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- tile_pmu_stop(event, 0);
- }
-
- return 0;
-}
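
The top-bit test above filters out counters that have not actually wrapped: a counter programmed with -left stays in the upper half of its range until it crosses zero, so a set top bit means the overflow indication was not for this event. A standalone sketch (userspace C):

/* Sketch of the top-bit "really overflowed?" test in tile_pmu_handle_irq(). */
#include <stdio.h>
#include <stdint.h>

static int really_overflowed(uint64_t val, int cntval_bits)
{
	return !(val & (1ULL << (cntval_bits - 1)));
}

int main(void)
{
	printf("%d\n", really_overflowed(0xfffffc18u, 32));	/* 0: still counting */
	printf("%d\n", really_overflowed(0x00000005u, 32));	/* 1: wrapped */
	return 0;
}
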
-
-static bool __init supported_pmu(void)
-{
- tile_pmu = &tilepmu;
- return true;
-}
-
-int __init init_hw_perf_events(void)
-{
- supported_pmu();
- perf_pmu_register(&tilera_pmu, "cpu", PERF_TYPE_RAW);
- return 0;
-}
-arch_initcall(init_hw_perf_events);
-
-/* Callchain handling code. */
-
-/*
- * Tile specific backtracing code for perf_events.
- */
-static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- struct KBacktraceIterator kbt;
- unsigned int i;
-
- /*
- * Get the address just after the "jalr" instruction that
- * jumps to the handler for a syscall. When we find this
- * address in a backtrace, we silently ignore it, which gives
- * us a one-step backtrace connection from the sys_xxx()
- * function in the kernel to the xxx() function in libc.
- * Otherwise, we lose the ability to properly attribute time
- * from the libc calls to the kernel implementations, since
- * oprofile only considers PCs from backtraces a pair at a time.
- */
- unsigned long handle_syscall_pc = handle_syscall_link_address();
-
- KBacktraceIterator_init(&kbt, NULL, regs);
- kbt.profile = 1;
-
- /*
- * The sample for the pc is already recorded. Now we are adding the
- * address of the callsites on the stack. Our iterator starts
- * with the frame of the (already sampled) call site. If our
- * iterator contained a "return address" field, we could have just
- * used it and wouldn't have needed to skip the first
- * frame. That's in effect what the arm and x86 versions do.
- * Instead we peel off the first iteration to get the equivalent
- * behavior.
- */
-
- if (KBacktraceIterator_end(&kbt))
- return;
- KBacktraceIterator_next(&kbt);
-
- /*
-	 * Limit the stack depth to 16 frames each for user and kernel
-	 * space, i.e. 32 stack frames in total.
- */
- for (i = 0; i < 16; ++i) {
- unsigned long pc;
- if (KBacktraceIterator_end(&kbt))
- break;
- pc = kbt.it.pc;
- if (pc != handle_syscall_pc)
- perf_callchain_store(entry, pc);
- KBacktraceIterator_next(&kbt);
- }
-}
-
-void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- perf_callchain(entry, regs);
-}
-
-void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- perf_callchain(entry, regs);
-}
diff --git a/arch/tile/kernel/pmc.c b/arch/tile/kernel/pmc.c
deleted file mode 100644
index 81cf8743a3f3..000000000000
--- a/arch/tile/kernel/pmc.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright 2014 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/atomic.h>
-
-#include <asm/processor.h>
-#include <asm/pmc.h>
-
-perf_irq_t perf_irq = NULL;
-int handle_perf_interrupt(struct pt_regs *regs, int fault)
-{
- int retval;
-
- if (!perf_irq)
- panic("Unexpected PERF_COUNT interrupt %d\n", fault);
-
- retval = perf_irq(regs, fault);
- return retval;
-}
-
-/* Reserve PMC hardware if it is available. */
-perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq)
-{
- return cmpxchg(&perf_irq, NULL, new_perf_irq);
-}
-EXPORT_SYMBOL(reserve_pmc_hardware);
-
-/* Release PMC hardware. */
-void release_pmc_hardware(void)
-{
- perf_irq = NULL;
-}
-EXPORT_SYMBOL(release_pmc_hardware);
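
reserve_pmc_hardware() uses a single cmpxchg() as the whole reservation protocol: the first caller swings perf_irq from NULL to its handler and gets NULL back; later callers get the current owner and must back off. A standalone sketch of the same protocol using C11 atomics (userspace; the handler type is simplified):

/* Sketch of the cmpxchg()-based single-owner reservation above. */
#include <stdatomic.h>
#include <stdio.h>

typedef int (*perf_irq_t)(void);

static _Atomic(perf_irq_t) perf_irq;	/* NULL means "free" */

static perf_irq_t reserve(perf_irq_t new_handler)
{
	perf_irq_t expected = NULL;

	if (atomic_compare_exchange_strong(&perf_irq, &expected, new_handler))
		return NULL;	/* reservation succeeded */
	return expected;	/* busy: the current owner is returned */
}

static int my_handler(void) { return 0; }

int main(void)
{
	printf("first:  %s\n", reserve(my_handler) ? "busy" : "ok");
	printf("second: %s\n", reserve(my_handler) ? "busy" : "ok");
	return 0;
}
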
-
-
-/*
- * Get the current overflow status of each base performance counter
- * and each auxiliary performance counter.
- */
-unsigned long pmc_get_overflow(void)
-{
- unsigned long status;
-
- /*
- * merge base+aux into a single vector
- */
- status = __insn_mfspr(SPR_PERF_COUNT_STS);
- status |= __insn_mfspr(SPR_AUX_PERF_COUNT_STS) << TILE_BASE_COUNTERS;
- return status;
-}
-
-/*
- * Clear the status bit for each counter whose bit is set in "status";
- * the hardware clears a status bit when it is written with a one.
- */
-void pmc_ack_overflow(unsigned long status)
-{
- /*
- * clear overflow status by writing ones
- */
- __insn_mtspr(SPR_PERF_COUNT_STS, status);
- __insn_mtspr(SPR_AUX_PERF_COUNT_STS, status >> TILE_BASE_COUNTERS);
-}
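
pmc_get_overflow() and pmc_ack_overflow() fold the base and auxiliary status registers into one bit vector, with the aux bits shifted up by TILE_BASE_COUNTERS, so callers can iterate a single bitmap. A standalone sketch of the merge and split (userspace C; the SPR reads are replaced with example values, and TILE_BASE_COUNTERS is 2 as in this port):

/* Sketch of the base/aux overflow-vector merge in pmc_get_overflow(). */
#include <stdio.h>

#define TILE_BASE_COUNTERS 2

int main(void)
{
	unsigned long base = 0x1;	/* base counter 0 overflowed */
	unsigned long aux  = 0x2;	/* aux counter 1 overflowed */
	unsigned long status = base | (aux << TILE_BASE_COUNTERS);

	printf("merged %#lx, base part %#lx, aux part %#lx\n", status,
	       status & ((1UL << TILE_BASE_COUNTERS) - 1),
	       status >> TILE_BASE_COUNTERS);	/* 0x9, 0x1, 0x2 */
	return 0;
}
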
-
-/*
- * The perf count interrupts are masked and unmasked explicitly,
- * and only here. The normal irq_enable() does not enable them,
- * and irq_disable() does not disable them. That lets these
- * routines drive the perf count interrupts orthogonally.
- *
- * We also mask the perf count interrupts on entry to the perf count
- * interrupt handler in assembly code, and by default unmask them
- * again (with interrupt critical section protection) just before
- * returning from the interrupt. If the perf count handler returns
- * a non-zero error code, then we don't re-enable them before returning.
- *
- * For Pro, we rely on both interrupts being in the same word to update
- * them atomically so we never have one enabled and one disabled.
- */
-
-#if CHIP_HAS_SPLIT_INTR_MASK()
-# if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32
-# error Fix assumptions about which word PERF_COUNT interrupts are in
-# endif
-#endif
-
-static inline unsigned long long pmc_mask(void)
-{
- unsigned long long mask = 1ULL << INT_PERF_COUNT;
- mask |= 1ULL << INT_AUX_PERF_COUNT;
- return mask;
-}
-
-void unmask_pmc_interrupts(void)
-{
- interrupt_mask_reset_mask(pmc_mask());
-}
-
-void mask_pmc_interrupts(void)
-{
- interrupt_mask_set_mask(pmc_mask());
-}
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
deleted file mode 100644
index 7983e9868df6..000000000000
--- a/arch/tile/kernel/proc.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/smp.h>
-#include <linux/seq_file.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/timex.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/hardirq.h>
-#include <linux/hugetlb.h>
-#include <linux/mman.h>
-#include <asm/unaligned.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/homecache.h>
-#include <asm/hardwall.h>
-#include <arch/chip.h>
-
-
-/*
- * Support /proc/cpuinfo
- */
-
-#define cpu_to_ptr(n) ((void *)((long)(n)+1))
-#define ptr_to_cpu(p) ((long)(p) - 1)
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- int n = ptr_to_cpu(v);
-
- if (n == 0) {
- seq_printf(m, "cpu count\t: %d\n", num_online_cpus());
- seq_printf(m, "cpu list\t: %*pbl\n",
- cpumask_pr_args(cpu_online_mask));
- seq_printf(m, "model name\t: %s\n", chip_model);
- seq_printf(m, "flags\t\t:\n"); /* nothing for now */
- seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n",
- get_clock_rate() / 1000000,
- (get_clock_rate() % 1000000));
- seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100);
- }
-
-#ifdef CONFIG_SMP
- if (!cpu_online(n))
- return 0;
-#endif
-
- seq_printf(m, "processor\t: %d\n", n);
-
- /* Print only num_online_cpus() blank lines total. */
- if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids)
- seq_printf(m, "\n");
-
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
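
The cpu_to_ptr()/ptr_to_cpu() pair above offsets the CPU number by one because seq_file treats a NULL iterator as end-of-sequence, so CPU 0 must not encode to NULL. A standalone check of the encoding (userspace C):

/* Sketch of the +1 pointer encoding used by the cpuinfo iterator above. */
#include <assert.h>

#define cpu_to_ptr(n) ((void *)((long)(n) + 1))
#define ptr_to_cpu(p) ((long)(p) - 1)

int main(void)
{
	assert(cpu_to_ptr(0) != (void *)0);	/* cpu 0 stays representable */
	assert(ptr_to_cpu(cpu_to_ptr(5)) == 5);	/* the mapping round-trips */
	return 0;
}
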
-
-/*
- * Support /proc/tile directory
- */
-
-static int __init proc_tile_init(void)
-{
- struct proc_dir_entry *root = proc_mkdir("tile", NULL);
- if (root == NULL)
- return 0;
-
- proc_tile_hardwall_init(root);
-
- return 0;
-}
-
-arch_initcall(proc_tile_init);
-
-/*
- * Support /proc/sys/tile directory
- */
-
-static struct ctl_table unaligned_subtable[] = {
- {
- .procname = "enabled",
- .data = &unaligned_fixup,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "printk",
- .data = &unaligned_printk,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "count",
- .data = &unaligned_fixup_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {}
-};
-
-static struct ctl_table unaligned_table[] = {
- {
- .procname = "unaligned_fixup",
- .mode = 0555,
- .child = unaligned_subtable
- },
- {}
-};
-
-static struct ctl_path tile_path[] = {
- { .procname = "tile" },
- { }
-};
-
-static int __init proc_sys_tile_init(void)
-{
- register_sysctl_paths(tile_path, unaligned_table);
- return 0;
-}
-
-arch_initcall(proc_sys_tile_init);
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
deleted file mode 100644
index f0a0e18e4dfb..000000000000
--- a/arch/tile/kernel/process.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/preempt.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/elfcore.h>
-#include <linux/tick.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/compat.h>
-#include <linux/nmi.h>
-#include <linux/syscalls.h>
-#include <linux/kernel.h>
-#include <linux/tracehook.h>
-#include <linux/signal.h>
-#include <linux/delay.h>
-#include <linux/context_tracking.h>
-#include <asm/stack.h>
-#include <asm/switch_to.h>
-#include <asm/homecache.h>
-#include <asm/syscalls.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-#include <linux/uaccess.h>
-#ifdef CONFIG_HARDWALL
-#include <asm/hardwall.h>
-#endif
-#include <arch/chip.h>
-#include <arch/abi.h>
-#include <arch/sim_def.h>
-
-/*
- * Support the (x86-style) "idle=poll" option, which prefers low latency
- * when leaving the idle loop over low power while in the idle loop,
- * e.g. if we have one thread per core and we want to get threads out
- * of futex waits fast.
- */
-static int __init idle_setup(char *str)
-{
- if (!str)
- return -EINVAL;
-
- if (!strcmp(str, "poll")) {
- pr_info("using polling idle threads\n");
- cpu_idle_poll_ctrl(true);
- return 0;
- } else if (!strcmp(str, "halt")) {
- return 0;
- }
- return -1;
-}
-early_param("idle", idle_setup);
-
-void arch_cpu_idle(void)
-{
- __this_cpu_write(irq_stat.idle_timestamp, jiffies);
- _cpu_idle();
-}
-
-/*
- * Release a thread_info structure
- */
-void arch_release_thread_stack(unsigned long *stack)
-{
- struct thread_info *info = (void *)stack;
- struct single_step_state *step_state = info->step_state;
-
- if (step_state) {
-
- /*
- * FIXME: we don't munmap step_state->buffer
- * because the mm_struct for this process (info->task->mm)
- * has already been zeroed in exit_mm(). Keeping a
- * reference to it here seems like a bad move, so this
- * means we can't munmap() the buffer, and therefore if we
- * ptrace multiple threads in a process, we will slowly
- * leak user memory. (Note that as soon as the last
- * thread in a process dies, we will reclaim all user
- * memory including single-step buffers in the usual way.)
- * We should either assign a kernel VA to this buffer
- * somehow, or we should associate the buffer(s) with the
- * mm itself so we can clean them up that way.
- */
- kfree(step_state);
- }
-}
-
-static void save_arch_state(struct thread_struct *t);
-
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
-{
- struct pt_regs *childregs = task_pt_regs(p);
- unsigned long ksp;
- unsigned long *callee_regs;
-
- /*
- * Set up the stack and stack pointer appropriately for the
- * new child to find itself woken up in __switch_to().
- * The callee-saved registers must be on the stack to be read;
- * the new task will then jump to assembly support to handle
- * calling schedule_tail(), etc., and (for userspace tasks)
- * returning to the context set up in the pt_regs.
- */
- ksp = (unsigned long) childregs;
- ksp -= C_ABI_SAVE_AREA_SIZE; /* interrupt-entry save area */
- ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
- ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
- callee_regs = (unsigned long *)ksp;
- ksp -= C_ABI_SAVE_AREA_SIZE; /* __switch_to() save area */
- ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
- p->thread.ksp = ksp;
-
- /* Record the pid of the task that created this one. */
- p->thread.creator_pid = current->pid;
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- /* kernel thread */
- memset(childregs, 0, sizeof(struct pt_regs));
- memset(&callee_regs[2], 0,
- (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
- callee_regs[0] = sp; /* r30 = function */
- callee_regs[1] = arg; /* r31 = arg */
- p->thread.pc = (unsigned long) ret_from_kernel_thread;
- return 0;
- }
-
- /*
- * Start new thread in ret_from_fork so it schedules properly
- * and then return from interrupt like the parent.
- */
- p->thread.pc = (unsigned long) ret_from_fork;
-
- /*
- * Do not clone step state from the parent; each thread
- * must make its own lazily.
- */
- task_thread_info(p)->step_state = NULL;
-
-#ifdef __tilegx__
- /*
- * Do not clone unalign jit fixup from the parent; each thread
- * must allocate its own on demand.
- */
- task_thread_info(p)->unalign_jit_base = NULL;
-#endif
-
- /*
- * Copy the registers onto the kernel stack so the
- * return-from-interrupt code will reload it into registers.
- */
- *childregs = *current_pt_regs();
- childregs->regs[0] = 0; /* return value is zero */
- if (sp)
- childregs->sp = sp; /* override with new user stack pointer */
- memcpy(callee_regs, &childregs->regs[CALLEE_SAVED_FIRST_REG],
- CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
-
- /* Save user stack top pointer so we can ID the stack vm area later. */
- p->thread.usp0 = childregs->sp;
-
- /*
- * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
- * which is passed in as arg #5 to sys_clone().
- */
- if (clone_flags & CLONE_SETTLS)
- childregs->tp = childregs->regs[4];
-
-
-#if CHIP_HAS_TILE_DMA()
- /*
- * No DMA in the new thread. We model this on the fact that
- * fork() clears the pending signals, alarms, and aio for the child.
- */
- memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
- memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
-#endif
-
- /* New thread has its miscellaneous processor state bits clear. */
- p->thread.proc_status = 0;
-
-#ifdef CONFIG_HARDWALL
- /* New thread does not own any networks. */
- memset(&p->thread.hardwall[0], 0,
- sizeof(struct hardwall_task) * HARDWALL_TYPES);
-#endif
-
-
- /*
- * Start the new thread with the current architecture state
- * (user interrupt masks, etc.).
- */
- save_arch_state(&p->thread);
-
- return 0;
-}
-
-int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
-{
- task_thread_info(tsk)->align_ctl = val;
- return 0;
-}
-
-int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
-{
- return put_user(task_thread_info(tsk)->align_ctl,
- (unsigned int __user *)adr);
-}
-
-static struct task_struct corrupt_current = { .comm = "<corrupt>" };
-
-/*
- * Return "current" if it looks plausible, or else a pointer to a dummy.
- * This can be helpful if we are just trying to emit a clean panic.
- */
-struct task_struct *validate_current(void)
-{
- struct task_struct *tsk = current;
- if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
- (high_memory && (void *)tsk > high_memory) ||
- ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
- pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
- tsk = &corrupt_current;
- }
- return tsk;
-}
-
-/* Take and return the pointer to the previous task, for schedule_tail(). */
-struct task_struct *sim_notify_fork(struct task_struct *prev)
-{
- struct task_struct *tsk = current;
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
- (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
- (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
- return prev;
-}
-
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- struct pt_regs *ptregs = task_pt_regs(tsk);
- elf_core_copy_regs(regs, ptregs);
- return 1;
-}
-
-#if CHIP_HAS_TILE_DMA()
-
-/* Allow user processes to access the DMA SPRs */
-void grant_dma_mpls(void)
-{
-#if CONFIG_KERNEL_PL == 2
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
-#else
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
-#endif
-}
-
-/* Forbid user processes from accessing the DMA SPRs */
-void restrict_dma_mpls(void)
-{
-#if CONFIG_KERNEL_PL == 2
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
-#else
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
-#endif
-}
-
-/* Pause the DMA engine, then save off its state registers. */
-static void save_tile_dma_state(struct tile_dma_state *dma)
-{
- unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
- unsigned long post_suspend_state;
-
- /* If we're running, suspend the engine. */
- if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
-
- /*
- * Wait for the engine to idle, then save regs. Note that we
- * want to record the "running" bit from before suspension,
- * and the "done" bit from after, so that we can properly
- * distinguish a case where the user suspended the engine from
- * the case where the kernel suspended as part of the context
- * swap.
- */
- do {
- post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
- } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);
-
- dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
- dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
- dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
- dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
- dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
- dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
- dma->byte = __insn_mfspr(SPR_DMA_BYTE);
- dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
- (post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
-}
-
-/* Restart a DMA that was running before we were context-switched out. */
-static void restore_tile_dma_state(struct thread_struct *t)
-{
- const struct tile_dma_state *dma = &t->tile_dma_state;
-
- /*
- * The only way to restore the done bit is to run a zero
- * length transaction.
- */
- if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
- !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
- __insn_mtspr(SPR_DMA_BYTE, 0);
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
- while (__insn_mfspr(SPR_DMA_USER_STATUS) &
- SPR_DMA_STATUS__BUSY_MASK)
- ;
- }
-
- __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
- __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
- __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
- __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
- __insn_mtspr(SPR_DMA_STRIDE, dma->strides);
- __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
- __insn_mtspr(SPR_DMA_BYTE, dma->byte);
-
- /*
- * Restart the engine if we were running and not done.
- * Clear a pending async DMA fault that we were waiting on return
- * to user space to execute, since we expect the DMA engine
- * to regenerate those faults for us now. Note that we don't
- * try to clear the TIF_ASYNC_TLB flag, since it's relatively
- * harmless if set, and it covers both DMA and the SN processor.
- */
- if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
- t->dma_async_tlb.fault_num = 0;
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
- }
-}
-
-#endif
-
-static void save_arch_state(struct thread_struct *t)
-{
-#if CHIP_HAS_SPLIT_INTR_MASK()
- t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
- ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
-#else
- t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
-#endif
- t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
- t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
- t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
- t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
- t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
- t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
- t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
- t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
-#endif
- t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
-#if CHIP_HAS_DSTREAM_PF()
- t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
-#endif
-}
-
-static void restore_arch_state(const struct thread_struct *t)
-{
-#if CHIP_HAS_SPLIT_INTR_MASK()
- __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
- __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
-#else
- __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
-#endif
- __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
- __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
- __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
- __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
-#endif
- __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
-#if CHIP_HAS_DSTREAM_PF()
- __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
-#endif
-}
-
-
-void _prepare_arch_switch(struct task_struct *next)
-{
-#if CHIP_HAS_TILE_DMA()
- struct tile_dma_state *dma = &current->thread.tile_dma_state;
- if (dma->enabled)
- save_tile_dma_state(dma);
-#endif
-}
-
-
-struct task_struct *__sched _switch_to(struct task_struct *prev,
- struct task_struct *next)
-{
- /* DMA state is already saved; save off other arch state. */
- save_arch_state(&prev->thread);
-
-#if CHIP_HAS_TILE_DMA()
- /*
- * Restore DMA in new task if desired.
- * Note that it is only safe to restart here since interrupts
- * are disabled, so we can't take any DMATLB miss or access
- * interrupts before we have finished switching stacks.
- */
- if (next->thread.tile_dma_state.enabled) {
- restore_tile_dma_state(&next->thread);
- grant_dma_mpls();
- } else {
- restrict_dma_mpls();
- }
-#endif
-
- /* Restore other arch state. */
- restore_arch_state(&next->thread);
-
-#ifdef CONFIG_HARDWALL
- /* Enable or disable access to the network registers appropriately. */
- hardwall_switch_tasks(prev, next);
-#endif
-
- /* Notify the simulator of task exit. */
- if (unlikely(prev->state == TASK_DEAD))
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
- (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
-
- /*
- * Switch kernel SP, PC, and callee-saved registers.
- * In the context of the new task, return the old task pointer
- * (i.e. the task that actually called __switch_to).
- * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
- */
- return __switch_to(prev, next, next_current_ksp0(next));
-}
-
-/*
- * This routine is called on return from interrupt if any of the
- * TIF_ALLWORK_MASK flags are set in thread_info->flags. It is
- * entered with interrupts disabled so we don't miss an event that
- * modified the thread_info flags. We loop until all the tested flags
- * are clear. Note that the function is called on certain conditions
- * that are not listed in the loop condition here (e.g. SINGLESTEP)
- * which guarantees we will do those things once, and redo them if any
- * of the other work items is re-done, but won't continue looping if
- * all the other work is done.
- */
-void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
-{
- if (WARN_ON(!user_mode(regs)))
- return;
-
- do {
- local_irq_enable();
-
- if (thread_info_flags & _TIF_NEED_RESCHED)
- schedule();
-
-#if CHIP_HAS_TILE_DMA()
- if (thread_info_flags & _TIF_ASYNC_TLB)
- do_async_page_fault(regs);
-#endif
-
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
-
- local_irq_disable();
- thread_info_flags = READ_ONCE(current_thread_info()->flags);
-
- } while (thread_info_flags & _TIF_WORK_MASK);
-
- if (thread_info_flags & _TIF_SINGLESTEP) {
- single_step_once(regs);
-#ifndef __tilegx__
- /*
- * FIXME: on tilepro, since we enable interrupts in
- * this routine, it's possible that we miss a signal
- * or other asynchronous event.
- */
- local_irq_disable();
-#endif
- }
-
- user_enter();
-}
-
-unsigned long get_wchan(struct task_struct *p)
-{
- struct KBacktraceIterator kbt;
-
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
- for (KBacktraceIterator_init(&kbt, p, NULL);
- !KBacktraceIterator_end(&kbt);
- KBacktraceIterator_next(&kbt)) {
- if (!in_sched_functions(kbt.it.pc))
- return kbt.it.pc;
- }
-
- return 0;
-}
-
-/* Flush thread state. */
-void flush_thread(void)
-{
- /* Nothing */
-}
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(struct task_struct *tsk)
-{
-#ifdef CONFIG_HARDWALL
- /*
- * Remove the task from the list of tasks that are associated
- * with any live hardwalls. (If the task that is exiting held
- * the last reference to a hardwall fd, it would already have
- * been released and deactivated at this point.)
- */
- hardwall_deactivate_all(tsk);
-#endif
-}
-
-void tile_show_regs(struct pt_regs *regs)
-{
- int i;
-#ifdef __tilegx__
- for (i = 0; i < 17; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
- i, regs->regs[i], i+18, regs->regs[i+18],
- i+36, regs->regs[i+36]);
- pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
- regs->regs[17], regs->regs[35], regs->tp);
- pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
-#else
- for (i = 0; i < 13; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
- " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
- i, regs->regs[i], i+14, regs->regs[i+14],
- i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
- pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
- regs->regs[13], regs->tp, regs->sp, regs->lr);
-#endif
- pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld flags:%s%s%s%s\n",
- regs->pc, regs->ex1, regs->faultnum,
- is_compat_task() ? " compat" : "",
- (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
- !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
- (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
-}
-
-void show_regs(struct pt_regs *regs)
-{
- struct KBacktraceIterator kbt;
-
- show_regs_print_info(KERN_DEFAULT);
- tile_show_regs(regs);
-
- KBacktraceIterator_init(&kbt, NULL, regs);
- tile_show_stack(&kbt);
-}
-
-#ifdef __tilegx__
-void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
-{
- struct cpumask mask;
- HV_Coord tile;
- unsigned int timeout;
- int cpu;
- HV_NMI_Info info[NR_CPUS];
-
- /* Tentatively dump stack on remote tiles via NMI. */
- timeout = 100;
- cpumask_copy(&mask, in_mask);
- while (!cpumask_empty(&mask) && timeout) {
- for_each_cpu(cpu, &mask) {
- tile.x = cpu_x(cpu);
- tile.y = cpu_y(cpu);
- info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
- if (info[cpu].result == HV_NMI_RESULT_OK)
- cpumask_clear_cpu(cpu, &mask);
- }
-
- mdelay(10);
- touch_softlockup_watchdog();
- timeout--;
- }
-
- /* Warn about cpus stuck in ICS. */
- if (!cpumask_empty(&mask)) {
- for_each_cpu(cpu, &mask) {
-
- /* Clear the bit as if nmi_cpu_backtrace() ran. */
- cpumask_clear_cpu(cpu, in_mask);
-
- switch (info[cpu].result) {
- case HV_NMI_RESULT_FAIL_ICS:
- pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
- cpu, info[cpu].pc);
- break;
- case HV_NMI_RESULT_FAIL_HV:
- pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
- cpu);
- break;
- case HV_ENOSYS:
- WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
- break;
- default: /* should not happen */
- pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
- cpu, info[cpu].result, info[cpu].pc);
- break;
- }
- }
- }
-}
-
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
-{
- nmi_trigger_cpumask_backtrace(mask, exclude_self,
- nmi_raise_cpu_backtrace);
-}
-#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
deleted file mode 100644
index d516d61751c2..000000000000
--- a/arch/tile/kernel/ptrace.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Copied from i386: Ross Biro 1/23/92
- */
-
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/kprobes.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <linux/regset.h>
-#include <linux/elf.h>
-#include <linux/tracehook.h>
-#include <linux/context_tracking.h>
-#include <linux/sched/task_stack.h>
-
-#include <asm/traps.h>
-#include <arch/chip.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-void user_enable_single_step(struct task_struct *child)
-{
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-void user_disable_single_step(struct task_struct *child)
-{
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-/*
- * Called by kernel/ptrace.c when detaching.
- */
-void ptrace_disable(struct task_struct *child)
-{
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
- /*
- * These two are currently unused, but will be set by arch_ptrace()
- * and used in the syscall assembly when we do support them.
- */
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-}
-
-/*
- * Get registers from task and ready the result for userspace.
- * Note that we localize the API issues to getregs() and putregs() at
- * some cost in performance, e.g. we need a full pt_regs copy for
- * PEEKUSR, and two copies for POKEUSR. But in general we expect
- * GETREGS/PUTREGS to be the API of choice anyway.
- */
-static char *getregs(struct task_struct *child, struct pt_regs *uregs)
-{
- *uregs = *task_pt_regs(child);
-
- /* Set up flags ABI bits. */
- uregs->flags = 0;
-#ifdef CONFIG_COMPAT
- if (task_thread_info(child)->status & TS_COMPAT)
- uregs->flags |= PT_FLAGS_COMPAT;
-#endif
-
- return (char *)uregs;
-}
-
-/* Put registers back to task. */
-static void putregs(struct task_struct *child, struct pt_regs *uregs)
-{
- struct pt_regs *regs = task_pt_regs(child);
-
- /* Don't allow overwriting the kernel-internal flags word. */
- uregs->flags = regs->flags;
-
- /* Only allow setting the ICS bit in the ex1 word. */
- uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));
-
- *regs = *uregs;
-}
-
-enum tile_regset {
- REGSET_GPR,
-};
-
-static int tile_gpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- struct pt_regs regs;
-
- getregs(target, &regs);
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs, 0,
- sizeof(regs));
-}
-
-static int tile_gpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
- struct pt_regs regs = *task_pt_regs(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
- sizeof(regs));
- if (ret)
- return ret;
-
- putregs(target, &regs);
-
- return 0;
-}
-
-static const struct user_regset tile_user_regset[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
- .size = sizeof(elf_greg_t),
- .align = sizeof(elf_greg_t),
- .get = tile_gpr_get,
- .set = tile_gpr_set,
- },
-};
-
-static const struct user_regset_view tile_user_regset_view = {
- .name = CHIP_ARCH_NAME,
- .e_machine = ELF_ARCH,
- .ei_osabi = ELF_OSABI,
- .regsets = tile_user_regset,
- .n = ARRAY_SIZE(tile_user_regset),
-};
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
- return &tile_user_regset_view;
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- unsigned long __user *datap = (long __user __force *)data;
- unsigned long tmp;
- long ret = -EIO;
- char *childreg;
- struct pt_regs copyregs;
-
- switch (request) {
-
- case PTRACE_PEEKUSR: /* Read register from pt_regs. */
- if (addr >= PTREGS_SIZE)
- break;
- childreg = getregs(child, &copyregs) + addr;
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
- if (addr & (sizeof(compat_long_t)-1))
- break;
- ret = put_user(*(compat_long_t *)childreg,
- (compat_long_t __user *)datap);
- } else
-#endif
- {
- if (addr & (sizeof(long)-1))
- break;
- ret = put_user(*(long *)childreg, datap);
- }
- break;
-
- case PTRACE_POKEUSR: /* Write register in pt_regs. */
- if (addr >= PTREGS_SIZE)
- break;
- childreg = getregs(child, &copyregs) + addr;
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
- if (addr & (sizeof(compat_long_t)-1))
- break;
- *(compat_long_t *)childreg = data;
- } else
-#endif
- {
- if (addr & (sizeof(long)-1))
- break;
- *(long *)childreg = data;
- }
- putregs(child, &copyregs);
- ret = 0;
- break;
-
- case PTRACE_GETREGS: /* Get all registers from the child. */
- ret = copy_regset_to_user(child, &tile_user_regset_view,
- REGSET_GPR, 0,
- sizeof(struct pt_regs), datap);
- break;
-
- case PTRACE_SETREGS: /* Set all registers in the child. */
- ret = copy_regset_from_user(child, &tile_user_regset_view,
- REGSET_GPR, 0,
- sizeof(struct pt_regs), datap);
- break;
-
- case PTRACE_GETFPREGS: /* Get the child FPU state. */
- case PTRACE_SETFPREGS: /* Set the child FPU state. */
- break;
-
- case PTRACE_SETOPTIONS:
- /* Support TILE-specific ptrace options. */
- BUILD_BUG_ON(PTRACE_O_MASK_TILE & PTRACE_O_MASK);
- tmp = data & PTRACE_O_MASK_TILE;
- data &= ~PTRACE_O_MASK_TILE;
- ret = ptrace_request(child, request, addr, data);
- if (ret == 0) {
- unsigned int flags = child->ptrace;
- flags &= ~(PTRACE_O_MASK_TILE << PT_OPT_FLAG_SHIFT);
- flags |= (tmp << PT_OPT_FLAG_SHIFT);
- child->ptrace = flags;
- }
- break;
-
- default:
-#ifdef CONFIG_COMPAT
- if (task_thread_info(current)->status & TS_COMPAT) {
- ret = compat_ptrace_request(child, request,
- addr, data);
- break;
- }
-#endif
- ret = ptrace_request(child, request, addr, data);
- break;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-/* Not used; we handle compat issues in arch_ptrace() directly. */
-long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
- compat_ulong_t addr, compat_ulong_t data)
-{
- BUG();
-}
-#endif
-
-int do_syscall_trace_enter(struct pt_regs *regs)
-{
- u32 work = READ_ONCE(current_thread_info()->flags);
-
- if ((work & _TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
- regs->regs[TREG_SYSCALL_NR] = -1;
- return -1;
- }
-
- if (secure_computing(NULL) == -1)
- return -1;
-
- if (work & _TIF_SYSCALL_TRACEPOINT)
- trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
-
- return regs->regs[TREG_SYSCALL_NR];
-}
-
-void do_syscall_trace_exit(struct pt_regs *regs)
-{
- long errno;
-
- /*
- * The standard tile calling convention returns the value (or negative
- * errno) in r0, and zero (or positive errno) in r1.
- * It saves a couple of cycles on the hot path to do this work in
- * registers only as we return, rather than updating the in-memory
-	 * struct pt_regs.
- */
- errno = (long) regs->regs[0];
- if (errno < 0 && errno > -4096)
- regs->regs[1] = -errno;
- else
- regs->regs[1] = 0;
-
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
-
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, regs->regs[0]);
-}
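
The r0/r1 split computed above is the tile syscall-return convention: r0 carries the value or negative errno, r1 carries zero or the positive errno. A standalone sketch of the mapping (userspace C; -2 stands in for -ENOENT):

/* Sketch of the tile r0/r1 syscall-return convention above. */
#include <stdio.h>

int main(void)
{
	long r0_vals[] = { 42, -2 /* -ENOENT */ };
	int i;

	for (i = 0; i < 2; i++) {
		long r0 = r0_vals[i];
		long r1 = (r0 < 0 && r0 > -4096) ? -r0 : 0;

		printf("r0=%ld r1=%ld\n", r0, r1);	/* 42/0, then -2/2 */
	}
	return 0;
}
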
-
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
-{
- struct siginfo info;
-
- memset(&info, 0, sizeof(info));
- info.si_signo = SIGTRAP;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *) regs->pc;
-
- /* Send us the fakey SIGTRAP */
- force_sig_info(SIGTRAP, &info, tsk);
-}
-
-/* Handle synthetic interrupt delivered only by the simulator. */
-void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
-{
- send_sigtrap(current, regs);
-}
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
deleted file mode 100644
index 6c5d2c070a12..000000000000
--- a/arch/tile/kernel/reboot.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/stddef.h>
-#include <linux/reboot.h>
-#include <linux/smp.h>
-#include <linux/pm.h>
-#include <linux/export.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <hv/hypervisor.h>
-
-#ifndef CONFIG_SMP
-#define smp_send_stop()
-#endif
-
-void machine_halt(void)
-{
- arch_local_irq_disable_all();
- smp_send_stop();
- hv_halt();
-}
-
-void machine_power_off(void)
-{
- arch_local_irq_disable_all();
- smp_send_stop();
- hv_power_off();
-}
-
-void machine_restart(char *cmd)
-{
- arch_local_irq_disable_all();
- smp_send_stop();
- hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
-}
-
-/* No interesting distinction to be made here. */
-void (*pm_power_off)(void) = NULL;
-EXPORT_SYMBOL(pm_power_off);
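
machine_power_off() above never consults pm_power_off: the hypervisor call covers every case on tile, so the hook stays NULL and is exported only so that generic code and drivers can test or fill it. On platforms where a board-specific driver does own the power rail, the usual pattern is the sketch below (board_poweroff and board_pm_init are hypothetical names, not anything the tile port provided):

    #include <linux/init.h>
    #include <linux/pm.h>

    /* Hypothetical board-specific power-off routine. */
    static void board_poweroff(void)
    {
            /* poke the PMIC / power-controller register here */
    }

    static int __init board_pm_init(void)
    {
            if (!pm_power_off)      /* don't clobber an earlier handler */
                    pm_power_off = board_poweroff;
            return 0;
    }
    late_initcall(board_pm_init);
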
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
deleted file mode 100644
index 542cae17a93a..000000000000
--- a/arch/tile/kernel/regs_32.S
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
-#include <arch/spr_def.h>
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-
-/*
- * See <asm/switch_to.h>; called with prev and next task_struct pointers.
- * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
- *
- * We want to save pc/sp in "prev", and get the new pc/sp from "next".
- * We also need to save all the callee-saved registers on the stack.
- *
- * Intel enables/disables access to the hardware cycle counter in
- * seccomp (secure computing) environments if necessary, based on
- * has_secure_computing(). We might want to do this at some point,
- * though it would require virtualizing the other SPRs under WORLD_ACCESS.
- *
- * Since we're saving to the stack, we omit sp from this list.
- * And for parallels with other architectures, we save lr separately,
- * in the thread_struct itself (as the "pc" field).
- *
- * This code also needs to be aligned with process.c copy_thread()
- */
-
-#if CALLEE_SAVED_REGS_COUNT != 24
-# error Mismatch between <asm/switch_to.h> and kernel/entry.S
-#endif
-#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)
-
-#define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 }
-#define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 }
-#define FOR_EACH_CALLEE_SAVED_REG(f) \
- f(r30); f(r31); \
- f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
- f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
- f(r48); f(r49); f(r50); f(r51); f(r52);
-
-STD_ENTRY_SECTION(__switch_to, .sched.text)
- {
- move r10, sp
- sw sp, lr
- addi sp, sp, -FRAME_SIZE
- }
- {
- addi r11, sp, 4
- addi r12, sp, 8
- }
- {
- sw r11, r10
- addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
- }
- {
- lw r13, r4 /* Load new sp to a temp register early. */
- addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
- }
- FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
- {
- sw r3, sp
- addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- sw r3, lr
- addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- lw lr, r4
- addi r12, r13, 8
- }
- {
- /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
- move sp, r13
- mtspr SPR_SYSTEM_SAVE_K_0, r2
- }
- FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
-.L__switch_to_pc:
- {
- addi sp, sp, FRAME_SIZE
- jrp lr /* r0 is still valid here, so return it */
- }
- STD_ENDPROC(__switch_to)
-
-/* Return a suitable address for the backtracer for suspended threads */
-STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
- lnk r0
- {
- addli r0, r0, .L__switch_to_pc - .
- jrp lr
- }
- STD_ENDPROC(get_switch_to_pc)
-
-STD_ENTRY(get_pt_regs)
- .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
- r8, r9, r10, r11, r12, r13, r14, r15, \
- r16, r17, r18, r19, r20, r21, r22, r23, \
- r24, r25, r26, r27, r28, r29, r30, r31, \
- r32, r33, r34, r35, r36, r37, r38, r39, \
- r40, r41, r42, r43, r44, r45, r46, r47, \
- r48, r49, r50, r51, r52, tp, sp
- {
- sw r0, \reg
- addi r0, r0, 4
- }
- .endr
- {
- sw r0, lr
- addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
- }
- lnk r1
- {
- sw r0, r1
- addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r1, INTERRUPT_CRITICAL_SECTION
- shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
- ori r1, r1, KERNEL_PL
- {
- sw r0, r1
- addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- }
- {
- sw r0, zero /* clear faultnum */
- addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
- }
- {
- sw r0, zero /* clear orig_r0 */
- addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */
- }
- jrp lr
- STD_ENDPROC(get_pt_regs)
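
The header comment above pins down the whole contract of __switch_to: the outgoing task's kernel stack pointer is stored in thread.ksp (the TASK_STRUCT_THREAD_KSP_OFFSET stores), its resume address (lr) in thread.pc, and the callee-saved registers r30-r52 are parked in a FRAME_SIZE frame on the kernel stack. Expressed as data rather than assembly, the state exchanged looks roughly like this sketch (abbreviated names, not the real thread_struct declaration):

    /* Sketch of the per-task state __switch_to exchanges (32-bit). */
    struct tile_thread_sketch {
            unsigned long ksp;      /* kernel sp at switch-out */
            unsigned long pc;       /* saved lr: where the task resumes */
    };

    /*
     * Frame built below that sp: (2 + CALLEE_SAVED_REGS_COUNT) * 4 bytes,
     * holding the caller's sp for backtraces plus r30..r52.
     */
    struct tile_switch_frame_sketch {
            unsigned long caller_sp;        /* old sp, for the backtracer */
            unsigned long callee_saved[23]; /* r30..r52 */
    };
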
diff --git a/arch/tile/kernel/regs_64.S b/arch/tile/kernel/regs_64.S
deleted file mode 100644
index bbffcc6f340f..000000000000
--- a/arch/tile/kernel/regs_64.S
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
-#include <arch/spr_def.h>
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-
-/*
- * See <asm/switch_to.h>; called with prev and next task_struct pointers.
- * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
- *
- * We want to save pc/sp in "prev", and get the new pc/sp from "next".
- * We also need to save all the callee-saved registers on the stack.
- *
- * Intel enables/disables access to the hardware cycle counter in
- * seccomp (secure computing) environments if necessary, based on
- * has_secure_computing(). We might want to do this at some point,
- * though it would require virtualizing the other SPRs under WORLD_ACCESS.
- *
- * Since we're saving to the stack, we omit sp from this list.
- * And for parallels with other architectures, we save lr separately,
- * in the thread_struct itself (as the "pc" field).
- *
- * This code also needs to be aligned with process.c copy_thread()
- */
-
-#if CALLEE_SAVED_REGS_COUNT != 24
-# error Mismatch between <asm/switch_to.h> and kernel/entry.S
-#endif
-#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)
-
-#define SAVE_REG(r) { st r12, r; addi r12, r12, 8 }
-#define LOAD_REG(r) { ld r, r12; addi r12, r12, 8 }
-#define FOR_EACH_CALLEE_SAVED_REG(f) \
- f(r30); f(r31); \
- f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
- f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
- f(r48); f(r49); f(r50); f(r51); f(r52);
-
-STD_ENTRY_SECTION(__switch_to, .sched.text)
- {
- move r10, sp
- st sp, lr
- }
- {
- addli r11, sp, -FRAME_SIZE + 8
- addli sp, sp, -FRAME_SIZE
- }
- {
- st r11, r10
- addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
- }
- {
- ld r13, r4 /* Load new sp to a temp register early. */
- addi r12, sp, 16
- }
- FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
- addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
- {
- st r3, sp
- addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- st r3, lr
- addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- ld lr, r4
- addi r12, r13, 16
- }
- {
- /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
- move sp, r13
- mtspr SPR_SYSTEM_SAVE_K_0, r2
- }
- FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
-.L__switch_to_pc:
- {
- addli sp, sp, FRAME_SIZE
- jrp lr /* r0 is still valid here, so return it */
- }
- STD_ENDPROC(__switch_to)
-
-/* Return a suitable address for the backtracer for suspended threads */
-STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
- lnk r0
- {
- addli r0, r0, .L__switch_to_pc - .
- jrp lr
- }
- STD_ENDPROC(get_switch_to_pc)
-
-STD_ENTRY(get_pt_regs)
- .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
- r8, r9, r10, r11, r12, r13, r14, r15, \
- r16, r17, r18, r19, r20, r21, r22, r23, \
- r24, r25, r26, r27, r28, r29, r30, r31, \
- r32, r33, r34, r35, r36, r37, r38, r39, \
- r40, r41, r42, r43, r44, r45, r46, r47, \
- r48, r49, r50, r51, r52, tp, sp
- {
- st r0, \reg
- addi r0, r0, 8
- }
- .endr
- {
- st r0, lr
- addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
- }
- lnk r1
- {
- st r0, r1
- addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r1, INTERRUPT_CRITICAL_SECTION
- shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
- ori r1, r1, KERNEL_PL
- {
- st r0, r1
- addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- }
- {
- st r0, zero /* clear faultnum */
- addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
- }
- {
- st r0, zero /* clear orig_r0 */
- addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */
- }
- jrp lr
- STD_ENDPROC(get_pt_regs)
diff --git a/arch/tile/kernel/relocate_kernel_32.S b/arch/tile/kernel/relocate_kernel_32.S
deleted file mode 100644
index e44fbcf8cbd5..000000000000
--- a/arch/tile/kernel/relocate_kernel_32.S
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * copy new kernel into place and then call hv_reexec
- *
- */
-
-#include <linux/linkage.h>
-#include <arch/chip.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-#undef RELOCATE_NEW_KERNEL_VERBOSE
-
-STD_ENTRY(relocate_new_kernel)
-
- move r30, r0 /* page list */
- move r31, r1 /* address of page we are on */
- move r32, r2 /* start address of new kernel */
-
- shri r1, r1, PAGE_SHIFT
- addi r1, r1, 1
- shli sp, r1, PAGE_SHIFT
- addi sp, sp, -8
- /* we now have a stack (whether we need one or not) */
-
- moveli r40, lo16(hv_console_putc)
- auli r40, r40, ha16(hv_console_putc)
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'r'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'n'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'k'
- jalr r40
-
- moveli r0, '\n'
- jalr r40
-#endif
-
- /*
- * Throughout this code r30 is a pointer to the element of the page
- * list we are working on.
- *
- * Normally we get to the next element of the page list by
- * incrementing r30 by four. The exception is if the element
- * on the page list is an IND_INDIRECTION in which case we use
- * the element with the low bits masked off as the new value
- * of r30.
- *
- * To get this started, we need the value passed to us (which
- * will always be an IND_INDIRECTION) in memory somewhere with
- * r30 pointing at it. To do that, we push the value passed
- * to us on the stack and make r30 point to it.
- */
-
- sw sp, r30
- move r30, sp
- addi sp, sp, -8
-
- /*
- * On TILEPro, we need to flush all tiles' caches, since we may
- * have been doing hash-for-home caching there. Note that we
- * must do this _after_ we're completely done modifying any memory
- * other than our output buffer (which we know is locally cached).
- * We want the caches to be fully clean when we do the reexec,
- * because the hypervisor is going to do this flush again at that
- * point, and we don't want that second flush to overwrite any memory.
- */
- {
- move r0, zero /* cache_pa */
- move r1, zero
- }
- {
- auli r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */
- movei r3, -1 /* cache_cpumask; -1 means all client tiles */
- }
- {
- move r4, zero /* tlb_va */
- move r5, zero /* tlb_length */
- }
- {
- move r6, zero /* tlb_pgsize */
- move r7, zero /* tlb_cpumask */
- }
- {
- move r8, zero /* asids */
- moveli r20, lo16(hv_flush_remote)
- }
- {
- move r9, zero /* asidcount */
- auli r20, r20, ha16(hv_flush_remote)
- }
-
- jalr r20
-
- /* r33 is destination pointer, default to zero */
-
- moveli r33, 0
-
-.Lloop: lw r10, r30
-
- andi r9, r10, 0xf /* low 4 bits tell us what type it is */
- xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
-
- seqi r0, r9, 0x1 /* IND_DESTINATION */
- bzt r0, .Ltry2
-
- move r33, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'd'
- jalr r40
-#endif
-
- addi r30, r30, 4
- j .Lloop
-
-.Ltry2:
- seqi r0, r9, 0x2 /* IND_INDIRECTION */
- bzt r0, .Ltry4
-
- move r30, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'i'
- jalr r40
-#endif
-
- j .Lloop
-
-.Ltry4:
- seqi r0, r9, 0x4 /* IND_DONE */
- bzt r0, .Ltry8
-
- mf
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'D'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-
- move r0, r32
- moveli r1, 0 /* arg to hv_reexec is 64 bits */
-
- moveli r41, lo16(hv_reexec)
- auli r41, r41, ha16(hv_reexec)
-
- jalr r41
-
- /* we should not get here */
-
- moveli r0, '?'
- jalr r40
- moveli r0, '\n'
- jalr r40
-
- j .Lhalt
-
-.Ltry8: seqi r0, r9, 0x8 /* IND_SOURCE */
- bz r0, .Lerr /* unknown type */
-
- /* copy page at r10 to page at r33 */
-
- move r11, r33
-
- moveli r0, lo16(PAGE_SIZE)
- auli r0, r0, ha16(PAGE_SIZE)
- add r33, r33, r0
-
- /* copy word at r10 to word at r11 until r11 equals r33 */
-
- /* We know page size must be multiple of 16, so we can unroll
- * 16 times safely without any edge case checking.
- *
- * Issue a flush of the destination every 16 words to avoid
- * incoherence when starting the new kernel. (Now this is
- * just good paranoia because the hv_reexec call will also
- * take care of this.)
- */
-
-1:
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0 }
- { flush r11 ; addi r11, r11, 4 }
-
- seq r0, r33, r11
- bzt r0, 1b
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 's'
- jalr r40
-#endif
-
- addi r30, r30, 4
- j .Lloop
-
-
-.Lerr: moveli r0, 'e'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, '\n'
- jalr r40
-.Lhalt:
- moveli r41, lo16(hv_halt)
- auli r41, r41, ha16(hv_halt)
-
- jalr r41
- STD_ENDPROC(relocate_new_kernel)
-
- .section .rodata,"a"
-
- .globl relocate_new_kernel_size
-relocate_new_kernel_size:
- .long .Lend_relocate_new_kernel - relocate_new_kernel
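
The .Lloop dispatch above is the standard kexec page-list walk: each entry carries a type tag in its low four bits (IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8, the values tested in the assembly) and a page address in the remaining bits. The same state machine rendered in C, as a sketch only — copy_page_words(), reexec() and halt() are hypothetical stand-ins for the unrolled copy loop and the hv_reexec/hv_halt calls, and PAGE_SIZE is a placeholder for the value from <asm/page.h>:

    #include <stdint.h>

    #define IND_DESTINATION 0x1
    #define IND_INDIRECTION 0x2
    #define IND_DONE        0x4
    #define IND_SOURCE      0x8
    #define PAGE_SIZE       0x10000     /* placeholder page size */

    static void copy_page_words(uintptr_t dst, uintptr_t src) { (void)dst; (void)src; }
    static void reexec(uintptr_t start) { (void)start; }
    static void halt(void) { }

    /* Sketch of the list walk relocate_new_kernel performs. */
    static void walk_kexec_list(uintptr_t *entry, uintptr_t start)
    {
            uintptr_t dest = 0;

            for (;;) {
                    uintptr_t e = *entry;
                    uintptr_t page = e & ~(uintptr_t)0xf;

                    switch (e & 0xf) {
                    case IND_DESTINATION:   /* set the copy target */
                            dest = page;
                            entry++;
                            break;
                    case IND_INDIRECTION:   /* chain to the next list page */
                            entry = (uintptr_t *)page;
                            break;
                    case IND_SOURCE:        /* copy one page, advance */
                            copy_page_words(dest, page);
                            dest += PAGE_SIZE;
                            entry++;
                            break;
                    case IND_DONE:          /* hand off to the new kernel */
                            reexec(start);
                            return;
                    default:                /* unknown tag: give up */
                            halt();
                            return;
                    }
            }
    }
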
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
deleted file mode 100644
index d9d8cf6176e8..000000000000
--- a/arch/tile/kernel/relocate_kernel_64.S
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * copy new kernel into place and then call hv_reexec
- *
- */
-
-#include <linux/linkage.h>
-#include <arch/chip.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-#undef RELOCATE_NEW_KERNEL_VERBOSE
-
-STD_ENTRY(relocate_new_kernel)
-
- move r30, r0 /* page list */
- move r31, r1 /* address of page we are on */
- move r32, r2 /* start address of new kernel */
-
- shrui r1, r1, PAGE_SHIFT
- addi r1, r1, 1
- shli sp, r1, PAGE_SHIFT
- addi sp, sp, -8
- /* we now have a stack (whether we need one or not) */
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r40, hw2_last(hv_console_putc)
- shl16insli r40, r40, hw1(hv_console_putc)
- shl16insli r40, r40, hw0(hv_console_putc)
-
- moveli r0, 'r'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'n'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'k'
- jalr r40
-
- moveli r0, '\n'
- jalr r40
-#endif
-
- /*
- * Throughout this code r30 is a pointer to the element of the page
- * list we are working on.
- *
- * Normally we get to the next element of the page list by
- * incrementing r30 by eight. The exception is if the element
- * on the page list is an IND_INDIRECTION in which case we use
- * the element with the low bits masked off as the new value
- * of r30.
- *
- * To get this started, we need the value passed to us (which
- * will always be an IND_INDIRECTION) in memory somewhere with
- * r30 pointing at it. To do that, we push the value passed
- * to us on the stack and make r30 point to it.
- */
-
- st sp, r30
- move r30, sp
- addi sp, sp, -16
-
- /*
- * On TILE-GX, we need to flush all tiles' caches, since we may
- * have been doing hash-for-home caching there. Note that we
- * must do this _after_ we're completely done modifying any memory
- * other than our output buffer (which we know is locally cached).
- * We want the caches to be fully clean when we do the reexec,
- * because the hypervisor is going to do this flush again at that
- * point, and we don't want that second flush to overwrite any memory.
- */
- {
- move r0, zero /* cache_pa */
- moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
- }
- {
- shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
- movei r2, -1 /* cache_cpumask; -1 means all client tiles */
- }
- {
- shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2) /* cache_control */
- move r3, zero /* tlb_va */
- }
- {
- move r4, zero /* tlb_length */
- move r5, zero /* tlb_pgsize */
- }
- {
- move r6, zero /* tlb_cpumask */
- move r7, zero /* asids */
- }
- {
- moveli r20, hw2_last(hv_flush_remote)
- move r8, zero /* asidcount */
- }
- shl16insli r20, r20, hw1(hv_flush_remote)
- shl16insli r20, r20, hw0(hv_flush_remote)
-
- jalr r20
-
- /* r33 is destination pointer, default to zero */
-
- moveli r33, 0
-
-.Lloop: ld r10, r30
-
- andi r9, r10, 0xf /* low 4 bits tell us what type it is */
- xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
-
- cmpeqi r0, r9, 0x1 /* IND_DESTINATION */
- beqzt r0, .Ltry2
-
- move r33, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'd'
- jalr r40
-#endif
-
- addi r30, r30, 8
- j .Lloop
-
-.Ltry2:
- cmpeqi r0, r9, 0x2 /* IND_INDIRECTION */
- beqzt r0, .Ltry4
-
- move r30, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'i'
- jalr r40
-#endif
-
- j .Lloop
-
-.Ltry4:
- cmpeqi r0, r9, 0x4 /* IND_DONE */
- beqzt r0, .Ltry8
-
- mf
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'D'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-
- move r0, r32
-
- moveli r41, hw2_last(hv_reexec)
- shl16insli r41, r41, hw1(hv_reexec)
- shl16insli r41, r41, hw0(hv_reexec)
-
- jalr r41
-
- /* we should not get here */
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, '?'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-
- j .Lhalt
-
-.Ltry8: cmpeqi r0, r9, 0x8 /* IND_SOURCE */
- beqz r0, .Lerr /* unknown type */
-
- /* copy page at r10 to page at r33 */
-
- move r11, r33
-
- moveli r0, hw2_last(PAGE_SIZE)
- shl16insli r0, r0, hw1(PAGE_SIZE)
- shl16insli r0, r0, hw0(PAGE_SIZE)
- add r33, r33, r0
-
- /* copy word at r10 to word at r11 until r11 equals r33 */
-
- /* We know page size must be multiple of 8, so we can unroll
- * 8 times safely without any edge case checking.
- *
- * Issue a flush of the destination every 8 words to avoid
- * incoherence when starting the new kernel. (Now this is
- * just good paranoia because the hv_reexec call will also
- * take care of this.)
- */
-
-1:
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0 }
- { flush r11 ; addi r11, r11, 8 }
-
- cmpeq r0, r33, r11
- beqzt r0, 1b
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 's'
- jalr r40
-#endif
-
- addi r30, r30, 8
- j .Lloop
-
-
-.Lerr:
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'e'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-.Lhalt:
- moveli r41, hw2_last(hv_halt)
- shl16insli r41, r41, hw1(hv_halt)
- shl16insli r41, r41, hw0(hv_halt)
-
- jalr r41
- STD_ENDPROC(relocate_new_kernel)
-
- .section .rodata,"a"
-
- .globl relocate_new_kernel_size
-relocate_new_kernel_size:
- .long .Lend_relocate_new_kernel - relocate_new_kernel
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
deleted file mode 100644
index eb4e198f6f93..000000000000
--- a/arch/tile/kernel/setup.c
+++ /dev/null
@@ -1,1743 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mmzone.h>
-#include <linux/bootmem.h>
-#include <linux/module.h>
-#include <linux/node.h>
-#include <linux/cpu.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/kexec.h>
-#include <linux/pci.h>
-#include <linux/swiotlb.h>
-#include <linux/initrd.h>
-#include <linux/io.h>
-#include <linux/highmem.h>
-#include <linux/smp.h>
-#include <linux/timex.h>
-#include <linux/hugetlb.h>
-#include <linux/start_kernel.h>
-#include <linux/screen_info.h>
-#include <linux/tick.h>
-#include <asm/setup.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-#include <hv/hypervisor.h>
-#include <arch/interrupts.h>
-
-/* <linux/smp.h> doesn't provide this definition. */
-#ifndef CONFIG_SMP
-#define setup_max_cpus 1
-#endif
-
-static inline int ABS(int x) { return x >= 0 ? x : -x; }
-
-/* Chip information */
-char chip_model[64] __ro_after_init;
-
-#ifdef CONFIG_VT
-struct screen_info screen_info;
-#endif
-
-struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-
-/* Information on the NUMA nodes that we compute early */
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
-unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
-unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
-unsigned long __initdata node_free_pfn[MAX_NUMNODES];
-
-static unsigned long __initdata node_percpu[MAX_NUMNODES];
-
-/*
- * per-CPU stack and boot info.
- */
-DEFINE_PER_CPU(unsigned long, boot_sp) =
- (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA;
-
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
-#else
-/*
- * The variable must be __initdata since it references __init code.
- * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
- */
-unsigned long __initdata boot_pc = (unsigned long)start_kernel;
-#endif
-
-#ifdef CONFIG_HIGHMEM
-/* Page frame index of end of lowmem on each controller. */
-unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
-
-/* Number of pages that can be mapped into lowmem. */
-static unsigned long __initdata mappable_physpages;
-#endif
-
-/* Data on which physical memory controller corresponds to which NUMA node */
-int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
-
-#ifdef CONFIG_HIGHMEM
-/* Map information from VAs to PAs */
-unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
- __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
-EXPORT_SYMBOL(pbase_map);
-
-/* Map information from PAs to VAs */
-void *vbase_map[NR_PA_HIGHBIT_VALUES]
- __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
-EXPORT_SYMBOL(vbase_map);
-#endif
-
-/* Node number as a function of the high PA bits */
-int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
-EXPORT_SYMBOL(highbits_to_node);
-
-static unsigned int __initdata maxmem_pfn = -1U;
-static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
- [0 ... MAX_NUMNODES-1] = -1U
-};
-static nodemask_t __initdata isolnodes;
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
-enum { DEFAULT_PCI_RESERVE_MB = 64 };
-static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
-unsigned long __initdata pci_reserve_start_pfn = -1U;
-unsigned long __initdata pci_reserve_end_pfn = -1U;
-#endif
-
-static int __init setup_maxmem(char *str)
-{
- unsigned long long maxmem;
- if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
- return -EINVAL;
-
- maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
- pr_info("Forcing RAM used to no more than %dMB\n",
- maxmem_pfn >> (20 - PAGE_SHIFT));
- return 0;
-}
-early_param("maxmem", setup_maxmem);
-
-static int __init setup_maxnodemem(char *str)
-{
- char *endp;
- unsigned long long maxnodemem;
- unsigned long node;
-
- node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
- if (node >= MAX_NUMNODES || *endp != ':')
- return -EINVAL;
-
- maxnodemem = memparse(endp+1, NULL);
- maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
- (HPAGE_SHIFT - PAGE_SHIFT);
- pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
- node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
- return 0;
-}
-early_param("maxnodemem", setup_maxnodemem);
-
-struct memmap_entry {
- u64 addr; /* start of memory segment */
- u64 size; /* size of memory segment */
-};
-static struct memmap_entry memmap_map[64];
-static int memmap_nr;
-
-static void add_memmap_region(u64 addr, u64 size)
-{
- if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
- pr_err("Ooops! Too many entries in the memory map!\n");
- return;
- }
- memmap_map[memmap_nr].addr = addr;
- memmap_map[memmap_nr].size = size;
- memmap_nr++;
-}
-
-static int __init setup_memmap(char *p)
-{
- char *oldp;
- u64 start_at, mem_size;
-
- if (!p)
- return -EINVAL;
-
- if (!strncmp(p, "exactmap", 8)) {
- pr_err("\"memmap=exactmap\" not valid on tile\n");
- return 0;
- }
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- if (*p == '@') {
- pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
- } else if (*p == '#') {
- pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
- } else if (*p == '$') {
- start_at = memparse(p+1, &p);
- add_memmap_region(start_at, mem_size);
- } else {
- if (mem_size == 0)
- return -EINVAL;
- maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
- (HPAGE_SHIFT - PAGE_SHIFT);
- }
- return *p == '\0' ? 0 : -EINVAL;
-}
-early_param("memmap", setup_memmap);
-
-static int __init setup_mem(char *str)
-{
- return setup_maxmem(str);
-}
-early_param("mem", setup_mem); /* compatibility with x86 */
-
-static int __init setup_isolnodes(char *str)
-{
- if (str == NULL || nodelist_parse(str, isolnodes) != 0)
- return -EINVAL;
-
- pr_info("Set isolnodes value to '%*pbl'\n",
- nodemask_pr_args(&isolnodes));
- return 0;
-}
-early_param("isolnodes", setup_isolnodes);
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
-static int __init setup_pci_reserve(char* str)
-{
- if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 ||
- pci_reserve_mb > 3 * 1024)
- return -EINVAL;
-
- pr_info("Reserving %dMB for PCIE root complex mappings\n",
- pci_reserve_mb);
- return 0;
-}
-early_param("pci_reserve", setup_pci_reserve);
-#endif
-
-#ifndef __tilegx__
-/*
- * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
- * This can be used to increase (or decrease) the vmalloc area.
- */
-static int __init parse_vmalloc(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;
-
- /* See validate_va() for more on this test. */
- if ((long)_VMALLOC_START >= 0)
- early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
- VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
-
- return 0;
-}
-early_param("vmalloc", parse_vmalloc);
-#endif
-
-#ifdef CONFIG_HIGHMEM
-/*
- * Determine for each controller where its lowmem is mapped and how much of
- * it is mapped there. On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_START, so in principle we could
- * start our data mappings higher up, but for now we don't bother, to avoid
- * additional confusion.
- *
- * One question is whether, on systems with more than 768 MB and
- * controllers of different sizes, to map in a proportionate amount of
- * each one, or to try to map the same amount from each controller.
- * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
- * respectively, do we map 256MB from each, or do we map 128 MB, 512
- * MB, and 128 MB respectively?) For now we use a proportionate
- * solution like the latter.
- *
- * The VA/PA mapping demands that we align our decisions at 16 MB
- * boundaries so that we can rapidly convert VA to PA.
- */
-static void *__init setup_pa_va_mapping(void)
-{
- unsigned long curr_pages = 0;
- unsigned long vaddr = PAGE_OFFSET;
- nodemask_t highonlynodes = isolnodes;
- int i, j;
-
- memset(pbase_map, -1, sizeof(pbase_map));
- memset(vbase_map, -1, sizeof(vbase_map));
-
- /* Node zero cannot be isolated for LOWMEM purposes. */
- node_clear(0, highonlynodes);
-
- /* Count up the number of pages on non-highonlynodes controllers. */
- mappable_physpages = 0;
- for_each_online_node(i) {
- if (!node_isset(i, highonlynodes))
- mappable_physpages +=
- node_end_pfn[i] - node_start_pfn[i];
- }
-
- for_each_online_node(i) {
- unsigned long start = node_start_pfn[i];
- unsigned long end = node_end_pfn[i];
- unsigned long size = end - start;
- unsigned long vaddr_end;
-
- if (node_isset(i, highonlynodes)) {
- /* Mark this controller as having no lowmem. */
- node_lowmem_end_pfn[i] = start;
- continue;
- }
-
- curr_pages += size;
- if (mappable_physpages > MAXMEM_PFN) {
- vaddr_end = PAGE_OFFSET +
- (((u64)curr_pages * MAXMEM_PFN /
- mappable_physpages)
- << PAGE_SHIFT);
- } else {
- vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
- }
- for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
- unsigned long this_pfn =
- start + (j << HUGETLB_PAGE_ORDER);
- pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
- if (vbase_map[__pfn_to_highbits(this_pfn)] ==
- (void *)-1)
- vbase_map[__pfn_to_highbits(this_pfn)] =
- (void *)(vaddr & HPAGE_MASK);
- }
- node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
- BUG_ON(node_lowmem_end_pfn[i] > end);
- }
-
- /* Return highest address of any mapped memory. */
- return (void *)vaddr;
-}
-#endif /* CONFIG_HIGHMEM */
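
The proportionate policy the comment above settles on reduces to a single division: each controller's lowmem window ends at PAGE_OFFSET plus its cumulative share of MAXMEM_PFN, scaled by its fraction of all mappable pages (the loop then advances in HPAGE_SIZE steps, so shares land on huge-page boundaries). For the 256MB/1GB/256MB example with 768MB of mappable lowmem, that yields 128MB, 512MB and 128MB. The core arithmetic, extracted as a stand-alone sketch:

    /*
     * Sketch: cumulative lowmem end VA for a controller, as computed in
     * setup_pa_va_mapping() above.  curr_pages is the running page total
     * up to and including this controller.
     */
    static unsigned long lowmem_end_va(unsigned long curr_pages,
                                       unsigned long mappable_physpages,
                                       unsigned long maxmem_pfn,
                                       unsigned long page_offset,
                                       unsigned int page_shift)
    {
            if (mappable_physpages > maxmem_pfn)    /* scale down */
                    return page_offset +
                           (unsigned long)(((unsigned long long)curr_pages *
                                            maxmem_pfn / mappable_physpages)
                                           << page_shift);
            return page_offset + (curr_pages << page_shift);
    }
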
-
-/*
- * Register our most important memory mappings with the debug stub.
- *
- * This is up to 4 mappings for lowmem, one mapping per memory
- * controller, plus one for our text segment.
- */
-static void store_permanent_mappings(void)
-{
- int i;
-
- for_each_online_node(i) {
- HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
- HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
-#else
- HV_PhysAddr high_mapped_pa = node_end_pfn[i];
-#endif
-
- unsigned long pages = high_mapped_pa - node_start_pfn[i];
- HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
- hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
- }
-
- hv_store_mapping((HV_VirtAddr)_text,
- (uint32_t)(_einittext - _text), 0);
-}
-
-/*
- * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
- * and node_online_map, doing suitable sanity-checking.
- * Also set min_low_pfn, max_low_pfn, and max_pfn.
- */
-static void __init setup_memory(void)
-{
- int i, j;
- int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
-#ifdef CONFIG_HIGHMEM
- long highmem_pages;
-#endif
-#ifndef __tilegx__
- int cap;
-#endif
-#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
- long lowmem_pages;
-#endif
- unsigned long physpages = 0;
-
- /* We are using a char to hold the cpu_2_node[] mapping */
- BUILD_BUG_ON(MAX_NUMNODES > 127);
-
- /* Discover the ranges of memory available to us */
- for (i = 0; ; ++i) {
- unsigned long start, size, end, highbits;
- HV_PhysAddrRange range = hv_inquire_physical(i);
- if (range.size == 0)
- break;
-#ifdef CONFIG_FLATMEM
- if (i > 0) {
- pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
- range.size, range.start + range.size);
- continue;
- }
-#endif
-#ifndef __tilegx__
- if ((unsigned long)range.start) {
- pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
-#endif
- if ((range.start & (HPAGE_SIZE-1)) != 0 ||
- (range.size & (HPAGE_SIZE-1)) != 0) {
- unsigned long long start_pa = range.start;
- unsigned long long orig_size = range.size;
- range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
- range.size -= (range.start - start_pa);
- range.size &= HPAGE_MASK;
- pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
- start_pa, start_pa + orig_size,
- range.start, range.start + range.size);
- }
- highbits = __pa_to_highbits(range.start);
- if (highbits >= NR_PA_HIGHBIT_VALUES) {
- pr_err("PA high bits too high: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
- if (highbits_seen[highbits]) {
- pr_err("Range overlaps in high bits: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
- highbits_seen[highbits] = 1;
- if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
- int max_size = maxnodemem_pfn[i];
- if (max_size > 0) {
- pr_err("Maxnodemem reduced node %d to %d pages\n",
- i, max_size);
- range.size = PFN_PHYS(max_size);
- } else {
- pr_err("Maxnodemem disabled node %d\n", i);
- continue;
- }
- }
- if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
- int max_size = maxmem_pfn - physpages;
- if (max_size > 0) {
- pr_err("Maxmem reduced node %d to %d pages\n",
- i, max_size);
- range.size = PFN_PHYS(max_size);
- } else {
- pr_err("Maxmem disabled node %d\n", i);
- continue;
- }
- }
- if (i >= MAX_NUMNODES) {
- pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
- i, range.size, range.size + range.start);
- continue;
- }
-
- start = range.start >> PAGE_SHIFT;
- size = range.size >> PAGE_SHIFT;
- end = start + size;
-
-#ifndef __tilegx__
- if (((HV_PhysAddr)end << PAGE_SHIFT) !=
- (range.start + range.size)) {
- pr_err("PAs too high to represent: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
-#endif
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- /*
- * Blocks that overlap the pci reserved region must
- * have enough space to hold the maximum percpu data
- * region at the top of the range. If there isn't
- * enough space above the reserved region, just
- * truncate the node.
- */
- if (start <= pci_reserve_start_pfn &&
- end > pci_reserve_start_pfn) {
- unsigned int per_cpu_size =
- __per_cpu_end - __per_cpu_start;
- unsigned int percpu_pages =
- NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
- if (end < pci_reserve_end_pfn + percpu_pages) {
- end = pci_reserve_start_pfn;
- pr_err("PCI mapping region reduced node %d to %ld pages\n",
- i, end - start);
- }
- }
-#endif
-
- for (j = __pfn_to_highbits(start);
- j <= __pfn_to_highbits(end - 1); j++)
- highbits_to_node[j] = i;
-
- node_start_pfn[i] = start;
- node_end_pfn[i] = end;
- node_controller[i] = range.controller;
- physpages += size;
- max_pfn = end;
-
- /* Mark node as online */
- node_set(i, node_online_map);
- node_set(i, node_possible_map);
- }
-
-#ifndef __tilegx__
- /*
- * For 4KB pages, mem_map "struct page" data is 1% of the size
- * of the physical memory, so can be quite big (640 MB for
- * four 16G zones). These structures must be mapped in
- * lowmem, and since we currently cap out at about 768 MB,
- * it's impractical to try to use this much address space.
- * For now, arbitrarily cap the amount of physical memory
- * we're willing to use at 8 million pages (32GB of 4KB pages).
- */
- cap = 8 * 1024 * 1024; /* 8 million pages */
- if (physpages > cap) {
- int num_nodes = num_online_nodes();
- int cap_each = cap / num_nodes;
- unsigned long dropped_pages = 0;
- for (i = 0; i < num_nodes; ++i) {
- int size = node_end_pfn[i] - node_start_pfn[i];
- if (size > cap_each) {
- dropped_pages += (size - cap_each);
- node_end_pfn[i] = node_start_pfn[i] + cap_each;
- }
- }
- physpages -= dropped_pages;
- pr_warn("Only using %ldMB memory - ignoring %ldMB\n",
- physpages >> (20 - PAGE_SHIFT),
- dropped_pages >> (20 - PAGE_SHIFT));
- pr_warn("Consider using a larger page size\n");
- }
-#endif
-
- /* Heap starts just above the last loaded address. */
- min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);
-
-#ifdef CONFIG_HIGHMEM
- /* Find where we map lowmem from each controller. */
- high_memory = setup_pa_va_mapping();
-
- /* Set max_low_pfn based on what node 0 can directly address. */
- max_low_pfn = node_lowmem_end_pfn[0];
-
- lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
- MAXMEM_PFN : mappable_physpages;
- highmem_pages = (long) (physpages - lowmem_pages);
-
- pr_notice("%ldMB HIGHMEM available\n",
- pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
- pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
-#else
- /* Set max_low_pfn based on what node 0 can directly address. */
- max_low_pfn = node_end_pfn[0];
-
-#ifndef __tilegx__
- if (node_end_pfn[0] > MAXMEM_PFN) {
- pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20);
- pr_warn("Use a HIGHMEM enabled kernel\n");
- max_low_pfn = MAXMEM_PFN;
- max_pfn = MAXMEM_PFN;
- node_end_pfn[0] = MAXMEM_PFN;
- } else {
- pr_notice("%ldMB memory available\n",
- pages_to_mb(node_end_pfn[0]));
- }
- for (i = 1; i < MAX_NUMNODES; ++i) {
- node_start_pfn[i] = 0;
- node_end_pfn[i] = 0;
- }
- high_memory = __va(node_end_pfn[0]);
-#else
- lowmem_pages = 0;
- for (i = 0; i < MAX_NUMNODES; ++i) {
- int pages = node_end_pfn[i] - node_start_pfn[i];
- lowmem_pages += pages;
- if (pages)
- high_memory = pfn_to_kaddr(node_end_pfn[i]);
- }
- pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
-#endif
-#endif
-}
-
-/*
- * On 32-bit machines, we only put bootmem on the low controller,
- * since PAs > 4GB can't be used in bootmem. In principle one could
- * imagine, e.g., multiple 1 GB controllers all of which could support
- * bootmem, but in practice using controllers this small isn't a
- * particularly interesting scenario, so we just keep it simple and
- * use only the first controller for bootmem on 32-bit machines.
- */
-static inline int node_has_bootmem(int nid)
-{
-#ifdef CONFIG_64BIT
- return 1;
-#else
- return nid == 0;
-#endif
-}
-
-static inline unsigned long alloc_bootmem_pfn(int nid,
- unsigned long size,
- unsigned long goal)
-{
- void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
- PAGE_SIZE, goal);
- unsigned long pfn = kaddr_to_pfn(kva);
- BUG_ON(goal && PFN_PHYS(pfn) != goal);
- return pfn;
-}
-
-static void __init setup_bootmem_allocator_node(int i)
-{
- unsigned long start, end, mapsize, mapstart;
-
- if (node_has_bootmem(i)) {
- NODE_DATA(i)->bdata = &bootmem_node_data[i];
- } else {
- /* Share controller zero's bdata for now. */
- NODE_DATA(i)->bdata = &bootmem_node_data[0];
- return;
- }
-
- /* Skip up to after the bss in node 0. */
- start = (i == 0) ? min_low_pfn : node_start_pfn[i];
-
- /* Only lowmem, if we're a HIGHMEM build. */
-#ifdef CONFIG_HIGHMEM
- end = node_lowmem_end_pfn[i];
-#else
- end = node_end_pfn[i];
-#endif
-
- /* No memory here. */
- if (end == start)
- return;
-
- /* Figure out where the bootmem bitmap is located. */
- mapsize = bootmem_bootmap_pages(end - start);
- if (i == 0) {
- /* Use some space right before the heap on node 0. */
- mapstart = start;
- start += mapsize;
- } else {
- /* Allocate bitmap on node 0 to avoid page table issues. */
- mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
- }
-
- /* Initialize a node. */
- init_bootmem_node(NODE_DATA(i), mapstart, start, end);
-
- /* Free all the space back into the allocator. */
- free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- /*
- * Throw away any memory aliased by the PCI region.
- */
- if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
- start = max(pci_reserve_start_pfn, start);
- end = min(pci_reserve_end_pfn, end);
- reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
- BOOTMEM_EXCLUSIVE);
- }
-#endif
-}
-
-static void __init setup_bootmem_allocator(void)
-{
- int i;
- for (i = 0; i < MAX_NUMNODES; ++i)
- setup_bootmem_allocator_node(i);
-
- /* Reserve any memory excluded by "memmap" arguments. */
- for (i = 0; i < memmap_nr; ++i) {
- struct memmap_entry *m = &memmap_map[i];
- reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- /* Make sure the initrd memory region is not modified. */
- if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
- BOOTMEM_EXCLUSIVE)) {
- pr_crit("The initrd memory region has been polluted. Disabling it.\n");
- initrd_start = 0;
- initrd_end = 0;
- } else {
- /*
- * Translate initrd_start & initrd_end from PA to VA for
- * future access.
- */
- initrd_start += PAGE_OFFSET;
- initrd_end += PAGE_OFFSET;
- }
- }
-#endif
-
-#ifdef CONFIG_KEXEC
- if (crashk_res.start != crashk_res.end)
- reserve_bootmem(crashk_res.start, resource_size(&crashk_res),
- BOOTMEM_DEFAULT);
-#endif
-}
-
-void *__init alloc_remap(int nid, unsigned long size)
-{
- int pages = node_end_pfn[nid] - node_start_pfn[nid];
- void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
- BUG_ON(size != pages * sizeof(struct page));
- memset(map, 0, size);
- return map;
-}
-
-static int __init percpu_size(void)
-{
- int size = __per_cpu_end - __per_cpu_start;
- size += PERCPU_MODULE_RESERVE;
- size += PERCPU_DYNAMIC_EARLY_SIZE;
- if (size < PCPU_MIN_UNIT_SIZE)
- size = PCPU_MIN_UNIT_SIZE;
- size = roundup(size, PAGE_SIZE);
-
- /* In several places we assume the per-cpu data fits on a huge page. */
- BUG_ON(kdata_huge && size > HPAGE_SIZE);
- return size;
-}
-
-static void __init zone_sizes_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES] = { 0 };
- int size = percpu_size();
- int num_cpus = smp_height * smp_width;
- const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
-
- int i;
-
- for (i = 0; i < num_cpus; ++i)
- node_percpu[cpu_to_node(i)] += size;
-
- for_each_online_node(i) {
- unsigned long start = node_start_pfn[i];
- unsigned long end = node_end_pfn[i];
-#ifdef CONFIG_HIGHMEM
- unsigned long lowmem_end = node_lowmem_end_pfn[i];
-#else
- unsigned long lowmem_end = end;
-#endif
- int memmap_size = (end - start) * sizeof(struct page);
- node_free_pfn[i] = start;
-
- /*
- * Set aside pages for per-cpu data and the mem_map array.
- *
- * Since the per-cpu data requires special homecaching,
- * if we are in kdata_huge mode, we put it at the end of
- * the lowmem region. If we're not in kdata_huge mode,
- * we take the per-cpu pages from the bottom of the
- * controller, since that avoids fragmenting a huge page
- * that users might want. We always take the memmap
- * from the bottom of the controller, since with
- * kdata_huge that lets it be under a huge TLB entry.
- *
- * If the user has requested isolnodes for a controller,
- * though, there'll be no lowmem, so we just alloc_bootmem
- * the memmap. There will be no percpu memory either.
- */
- if (i != 0 && node_isset(i, isolnodes)) {
- node_memmap_pfn[i] =
- alloc_bootmem_pfn(0, memmap_size, 0);
- BUG_ON(node_percpu[i] != 0);
- } else if (node_has_bootmem(start)) {
- unsigned long goal = 0;
- node_memmap_pfn[i] =
- alloc_bootmem_pfn(i, memmap_size, 0);
- if (kdata_huge)
- goal = PFN_PHYS(lowmem_end) - node_percpu[i];
- if (node_percpu[i])
- node_percpu_pfn[i] =
- alloc_bootmem_pfn(i, node_percpu[i],
- goal);
- } else {
- /* In non-bootmem zones, just reserve some pages. */
- node_memmap_pfn[i] = node_free_pfn[i];
- node_free_pfn[i] += PFN_UP(memmap_size);
- if (!kdata_huge) {
- node_percpu_pfn[i] = node_free_pfn[i];
- node_free_pfn[i] += PFN_UP(node_percpu[i]);
- } else {
- node_percpu_pfn[i] =
- lowmem_end - PFN_UP(node_percpu[i]);
- }
- }
-
-#ifdef CONFIG_HIGHMEM
- if (start > lowmem_end) {
- zones_size[ZONE_NORMAL] = 0;
- zones_size[ZONE_HIGHMEM] = end - start;
- } else {
- zones_size[ZONE_NORMAL] = lowmem_end - start;
- zones_size[ZONE_HIGHMEM] = end - lowmem_end;
- }
-#else
- zones_size[ZONE_NORMAL] = end - start;
-#endif
-
- if (start < dma_end) {
- zones_size[ZONE_DMA32] = min(zones_size[ZONE_NORMAL],
- dma_end - start);
- zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA32];
- } else {
- zones_size[ZONE_DMA32] = 0;
- }
-
- /* Take zone metadata from controller 0 if we're isolnode. */
- if (node_isset(i, isolnodes))
- NODE_DATA(i)->bdata = &bootmem_node_data[0];
-
- free_area_init_node(i, zones_size, start, NULL);
- printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
- PFN_UP(node_percpu[i]));
-
- /* Track the type of memory on each node */
- if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA32])
- node_set_state(i, N_NORMAL_MEMORY);
-#ifdef CONFIG_HIGHMEM
- if (end != start)
- node_set_state(i, N_HIGH_MEMORY);
-#endif
-
- node_set_online(i);
- }
-}
-
-#ifdef CONFIG_NUMA
-
-/* which logical CPUs are on which nodes */
-struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
-EXPORT_SYMBOL(node_2_cpu_mask);
-
-/* which node each logical CPU is on */
-char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
-EXPORT_SYMBOL(cpu_2_node);
-
-/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
-static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
-{
- if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
- return -1;
- else
- return cpu_to_node(cpu);
-}
-
-/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
-static int __init node_neighbors(int node, int cpu,
- struct cpumask *unbound_cpus)
-{
- int neighbors = 0;
- int w = smp_width;
- int h = smp_height;
- int x = cpu % w;
- int y = cpu / w;
- if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
- ++neighbors;
- if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
- ++neighbors;
- if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
- ++neighbors;
- if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
- ++neighbors;
- return neighbors;
-}
-
-static void __init setup_numa_mapping(void)
-{
- u8 distance[MAX_NUMNODES][NR_CPUS];
- HV_Coord coord;
- int cpu, node, cpus, i, x, y;
- int num_nodes = num_online_nodes();
- struct cpumask unbound_cpus;
- nodemask_t default_nodes;
-
- cpumask_clear(&unbound_cpus);
-
- /* Get set of nodes we will use for defaults */
- nodes_andnot(default_nodes, node_online_map, isolnodes);
- if (nodes_empty(default_nodes)) {
- BUG_ON(!node_isset(0, node_online_map));
- pr_err("Forcing NUMA node zero available as a default node\n");
- node_set(0, default_nodes);
- }
-
- /* Populate the distance[] array */
- memset(distance, -1, sizeof(distance));
- cpu = 0;
- for (coord.y = 0; coord.y < smp_height; ++coord.y) {
- for (coord.x = 0; coord.x < smp_width;
- ++coord.x, ++cpu) {
- BUG_ON(cpu >= nr_cpu_ids);
- if (!cpu_possible(cpu)) {
- cpu_2_node[cpu] = -1;
- continue;
- }
- for_each_node_mask(node, default_nodes) {
- HV_MemoryControllerInfo info =
- hv_inquire_memory_controller(
- coord, node_controller[node]);
- distance[node][cpu] =
- ABS(info.coord.x) + ABS(info.coord.y);
- }
- cpumask_set_cpu(cpu, &unbound_cpus);
- }
- }
- cpus = cpu;
-
- /*
- * Round-robin through the NUMA nodes until all the cpus are
- * assigned. We could be more clever here (e.g. create four
- * sorted linked lists on the same set of cpu nodes, and pull
- * off them in round-robin sequence, removing from all four
- * lists each time) but given the relatively small numbers
- * involved, O(n^2) seems OK for a one-time cost.
- */
- node = first_node(default_nodes);
- while (!cpumask_empty(&unbound_cpus)) {
- int best_cpu = -1;
- int best_distance = INT_MAX;
- for (cpu = 0; cpu < cpus; ++cpu) {
- if (cpumask_test_cpu(cpu, &unbound_cpus)) {
- /*
- * Compute metric, which is how much
- * closer the cpu is to this memory
- * controller than the others, shifted
- * up, and then the number of
- * neighbors already in the node as an
- * epsilon adjustment to try to keep
- * the nodes compact.
- */
- int d = distance[node][cpu] * num_nodes;
- for_each_node_mask(i, default_nodes) {
- if (i != node)
- d -= distance[i][cpu];
- }
- d *= 8; /* allow space for epsilon */
- d -= node_neighbors(node, cpu, &unbound_cpus);
- if (d < best_distance) {
- best_cpu = cpu;
- best_distance = d;
- }
- }
- }
- BUG_ON(best_cpu < 0);
- cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
- cpu_2_node[best_cpu] = node;
- cpumask_clear_cpu(best_cpu, &unbound_cpus);
- node = next_node_in(node, default_nodes);
- }
-
- /* Print out node assignments and set defaults for disabled cpus */
- cpu = 0;
- for (y = 0; y < smp_height; ++y) {
- printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
- for (x = 0; x < smp_width; ++x, ++cpu) {
- if (cpu_to_node(cpu) < 0) {
- pr_cont(" -");
- cpu_2_node[cpu] = first_node(default_nodes);
- } else {
- pr_cont(" %d", cpu_to_node(cpu));
- }
- }
- pr_cont("\n");
- }
-}
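
The greedy loop above scores each unbound cpu exactly as its inline comment promises: the distance to the current controller is weighted up by the node count, the distances to every other controller are subtracted (so relative closeness wins, not absolute distance), and the result is scaled by 8 so the count of already-bound neighbors can serve as a small compactness epsilon. The metric on its own, as a sketch:

    /*
     * Sketch of the per-cpu score from setup_numa_mapping(); lower is
     * better.  dist[] holds this cpu's distance to each node's memory
     * controller; neighbors counts adjacent tiles already bound to "node".
     */
    static int numa_score(const unsigned char *dist, int node,
                          int num_nodes, int neighbors)
    {
            int i, d = dist[node] * num_nodes;

            for (i = 0; i < num_nodes; i++)
                    if (i != node)
                            d -= dist[i];

            return d * 8 - neighbors;       /* epsilon keeps nodes compact */
    }
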
-
-static struct cpu cpu_devices[NR_CPUS];
-
-static int __init topology_init(void)
-{
- int i;
-
- for_each_online_node(i)
- register_one_node(i);
-
- for (i = 0; i < smp_height * smp_width; ++i)
- register_cpu(&cpu_devices[i], i);
-
- return 0;
-}
-
-subsys_initcall(topology_init);
-
-#else /* !CONFIG_NUMA */
-
-#define setup_numa_mapping() do { } while (0)
-
-#endif /* CONFIG_NUMA */
-
-/*
- * Initialize hugepage support on this cpu. We do this on all cores
- * early in boot: before argument parsing for the boot cpu, and after
- * argument parsing but before the init functions run on the secondaries.
- * So the values we set up here in the hypervisor may be overridden on
- * the boot cpu as arguments are parsed.
- */
-static void init_super_pages(void)
-{
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
- int i;
- for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
- hv_set_pte_super_shift(i, huge_shift[i]);
-#endif
-}
-
-/**
- * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
- * @boot: Is this the boot cpu?
- *
- * Called from setup_arch() on the boot cpu, or online_secondary().
- */
-void setup_cpu(int boot)
-{
- /* The boot cpu sets up its permanent mappings much earlier. */
- if (!boot)
- store_permanent_mappings();
-
- /* Allow asynchronous TLB interrupts. */
-#if CHIP_HAS_TILE_DMA()
- arch_local_irq_unmask(INT_DMATLB_MISS);
- arch_local_irq_unmask(INT_DMATLB_ACCESS);
-#endif
-#ifdef __tilegx__
- arch_local_irq_unmask(INT_SINGLE_STEP_K);
-#endif
-
- /*
- * Allow user access to many generic SPRs, like the cycle
- * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
- */
- __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);
-
-#if CHIP_HAS_SN()
- /* Static network is not restricted. */
- __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
-#endif
-
- /*
- * Set the MPL for interrupt control 0 & 1 to the corresponding
- * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
- * SPRs, as well as the interrupt mask.
- */
- __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
- __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
-
- /* Initialize IRQ support for this cpu. */
- setup_irq_regs();
-
-#ifdef CONFIG_HARDWALL
- /* Reset the network state on this cpu. */
- reset_network_state();
-#endif
-
- init_super_pages();
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-
-static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs";
-
-static int __init setup_initramfs_file(char *str)
-{
- if (str == NULL)
- return -EINVAL;
- strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
- set_initramfs_file = 1;
-
- return 0;
-}
-early_param("initramfs_file", setup_initramfs_file);
-
-/*
- * We look for a file called "initramfs" in the hvfs. If there is one, we
- * allocate some memory for it and it will be unpacked to the initramfs.
- * If it's compressed, the initrd code will uncompress it first.
- */
-static void __init load_hv_initrd(void)
-{
- HV_FS_StatInfo stat;
- int fd, rc;
- void *initrd;
-
- /* If initrd has already been set, skip initramfs file in hvfs. */
- if (initrd_start)
- return;
-
- fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
- if (fd == HV_ENOENT) {
- if (set_initramfs_file) {
- pr_warn("No such hvfs initramfs file '%s'\n",
- initramfs_file);
- return;
- } else {
- /* Try old backwards-compatible name. */
- fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
- if (fd == HV_ENOENT)
- return;
- }
- }
- BUG_ON(fd < 0);
- stat = hv_fs_fstat(fd);
- BUG_ON(stat.size < 0);
- if (stat.flags & HV_FS_ISDIR) {
- pr_warn("Ignoring hvfs file '%s': it's a directory\n",
- initramfs_file);
- return;
- }
- initrd = alloc_bootmem_pages(stat.size);
- rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
- if (rc != stat.size) {
- pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
- stat.size, initramfs_file, rc);
- free_initrd_mem((unsigned long) initrd, stat.size);
- return;
- }
- initrd_start = (unsigned long) initrd;
- initrd_end = initrd_start + stat.size;
-}
-
-void __init free_initrd_mem(unsigned long begin, unsigned long end)
-{
- free_bootmem_late(__pa(begin), end - begin);
-}
-
-static int __init setup_initrd(char *str)
-{
- char *endp;
- unsigned long initrd_size;
-
- initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
- if (initrd_size == 0 || *endp != '@')
- return -EINVAL;
-
- initrd_start = simple_strtoul(endp+1, &endp, 0);
- if (initrd_start == 0)
- return -EINVAL;
-
- initrd_end = initrd_start + initrd_size;
-
- return 0;
-}
-early_param("initrd", setup_initrd);
-
-#else
-static inline void load_hv_initrd(void) {}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static void __init validate_hv(void)
-{
- /*
- * It may already be too late, but let's check our built-in
- * configuration against what the hypervisor is providing.
- */
- unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
- int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
- int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
- HV_ASIDRange asid_range;
-
-#ifndef CONFIG_SMP
- HV_Topology topology = hv_inquire_topology();
- BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
- if (topology.width != 1 || topology.height != 1) {
- pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n",
- topology.width, topology.height);
- }
-#endif
-
- if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
- early_panic("Hypervisor glue size %ld is too big!\n",
- glue_size);
- if (hv_page_size != PAGE_SIZE)
- early_panic("Hypervisor page size %#x != our %#lx\n",
- hv_page_size, PAGE_SIZE);
- if (hv_hpage_size != HPAGE_SIZE)
- early_panic("Hypervisor huge page size %#x != our %#lx\n",
- hv_hpage_size, HPAGE_SIZE);
-
-#ifdef CONFIG_SMP
- /*
- * Some hypervisor APIs take a pointer to a bitmap array
- * whose size is at least the number of cpus on the chip.
- * We use a struct cpumask for this, so it must be big enough.
- */
- if ((smp_height * smp_width) > nr_cpu_ids)
- early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n",
- smp_height, smp_width, nr_cpu_ids);
-#endif
-
- /*
- * Check that we're using allowed ASIDs, and initialize the
- * various asid variables to their appropriate initial states.
- */
- asid_range = hv_inquire_asid(0);
- min_asid = asid_range.start;
- __this_cpu_write(current_asid, min_asid);
- max_asid = asid_range.start + asid_range.size - 1;
-
- if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
- sizeof(chip_model)) < 0) {
- pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
- strlcpy(chip_model, "unknown", sizeof(chip_model));
- }
-}
-
-static void __init validate_va(void)
-{
-#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
- /*
- * Similarly, make sure we're only using allowed VAs.
- * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
- * and 0 .. KERNEL_HIGH_VADDR.
- * In addition, make sure we CAN'T use the end of memory, since
- * we use the last chunk of each pgd for the pgd_list.
- */
- int i, user_kernel_ok = 0;
- unsigned long max_va = 0;
- unsigned long list_va =
- ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
-
- for (i = 0; ; ++i) {
- HV_VirtAddrRange range = hv_inquire_virtual(i);
- if (range.size == 0)
- break;
- if (range.start <= MEM_USER_INTRPT &&
- range.start + range.size >= MEM_HV_START)
- user_kernel_ok = 1;
- if (range.start == 0)
- max_va = range.size;
- BUG_ON(range.start + range.size > list_va);
- }
- if (!user_kernel_ok)
- early_panic("Hypervisor not configured for user/kernel VAs\n");
- if (max_va == 0)
- early_panic("Hypervisor not configured for low VAs\n");
- if (max_va < KERNEL_HIGH_VADDR)
- early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
- max_va, KERNEL_HIGH_VADDR);
-
- /* Kernel PCs must have their high bit set; see intvec.S. */
- if ((long)VMALLOC_START >= 0)
- early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n"
- "Reconfigure the kernel with smaller VMALLOC_RESERVE\n",
- VMALLOC_START);
-#endif
-}
-
-/*
- * cpu_lotar_map lists all the cpus that are valid for the supervisor
- * to cache data on at a page level, i.e. what cpus can be placed in
- * the LOTAR field of a PTE. It is equivalent to the set of possible
- * cpus plus any other cpus that are willing to share their cache.
- * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
- */
-struct cpumask __ro_after_init cpu_lotar_map;
-EXPORT_SYMBOL(cpu_lotar_map);
-
-/*
- * hash_for_home_map lists all the tiles that hash-for-home data
- * will be cached on.  Note that this may include tiles that are not
- * valid for this supervisor to use otherwise (e.g. if a hypervisor
- * device is being shared between multiple supervisors).
- * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
- */
-struct cpumask hash_for_home_map;
-EXPORT_SYMBOL(hash_for_home_map);
-
-/*
- * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
- * flush on our behalf. It is set to cpu_possible_mask OR'ed with
- * hash_for_home_map, and it is what should be passed to
- * hv_flush_remote() to flush all caches. Note that if there are
- * dedicated hypervisor driver tiles that have authorized use of their
- * cache, those tiles will only appear in cpu_lotar_map, NOT in
- * cpu_cacheable_map, as they are a special case.
- */
-struct cpumask __ro_after_init cpu_cacheable_map;
-EXPORT_SYMBOL(cpu_cacheable_map);
-
-static __initdata struct cpumask disabled_map;
-
-static int __init disabled_cpus(char *str)
-{
- int boot_cpu = smp_processor_id();
-
- if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
- return -EINVAL;
- if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
- pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
- cpumask_clear_cpu(boot_cpu, &disabled_map);
- }
- return 0;
-}
-
-early_param("disabled_cpus", disabled_cpus);
-
-void __init print_disabled_cpus(void)
-{
- if (!cpumask_empty(&disabled_map))
- pr_info("CPUs not available for Linux: %*pbl\n",
- cpumask_pr_args(&disabled_map));
-}
-
-static void __init setup_cpu_maps(void)
-{
- struct cpumask hv_disabled_map, cpu_possible_init;
- int boot_cpu = smp_processor_id();
- int cpus, i, rc;
-
- /* Learn which cpus are allowed by the hypervisor. */
- rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
- (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
- sizeof(cpu_cacheable_map));
- if (rc < 0)
- early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
- if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
- early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);
-
- /* Compute the cpus disabled by the hvconfig file. */
- cpumask_complement(&hv_disabled_map, &cpu_possible_init);
-
- /* Include them with the cpus disabled by "disabled_cpus". */
- cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);
-
- /*
- * Disable every cpu after "setup_max_cpus". But don't mark
- * as disabled the cpus that are outside of our initial rectangle,
- * since that turns out to be confusing.
- */
- cpus = 1; /* this cpu */
- cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */
- for (i = 0; cpus < setup_max_cpus; ++i)
- if (!cpumask_test_cpu(i, &disabled_map))
- ++cpus;
- for (; i < smp_height * smp_width; ++i)
- cpumask_set_cpu(i, &disabled_map);
- cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
- for (i = smp_height * smp_width; i < NR_CPUS; ++i)
- cpumask_clear_cpu(i, &disabled_map);
-
- /*
- * Setup cpu_possible map as every cpu allocated to us, minus
- * the results of any "disabled_cpus" settings.
- */
- cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
- init_cpu_possible(&cpu_possible_init);
-
- /* Learn which cpus are valid for LOTAR caching. */
- rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
- (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
- sizeof(cpu_lotar_map));
- if (rc < 0) {
- pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
- cpu_lotar_map = *cpu_possible_mask;
- }
-
- /* Retrieve set of CPUs used for hash-for-home caching */
- rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
- (HV_VirtAddr) hash_for_home_map.bits,
- sizeof(hash_for_home_map));
- if (rc < 0)
- early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
- cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-}
-
-
-static int __init dataplane(char *str)
-{
- pr_warn("WARNING: dataplane support disabled in this kernel\n");
- return 0;
-}
-
-early_param("dataplane", dataplane);
-
-#ifdef CONFIG_NO_HZ_FULL
-/* Warn if hypervisor shared cpus are marked as nohz_full. */
-static int __init check_nohz_full_cpus(void)
-{
- struct cpumask shared;
- int cpu;
-
- if (hv_inquire_tiles(HV_INQ_TILES_SHARED,
- (HV_VirtAddr) shared.bits, sizeof(shared)) < 0) {
- pr_warn("WARNING: No support for inquiring hv shared tiles\n");
- return 0;
- }
- for_each_cpu(cpu, &shared) {
- if (tick_nohz_full_cpu(cpu))
- pr_warn("WARNING: nohz_full cpu %d receives hypervisor interrupts!\n",
- cpu);
- }
- return 0;
-}
-arch_initcall(check_nohz_full_cpus);
-#endif
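As an illustration with made-up numbers: booting with nohz_full=2-5 on a system where the hypervisor reported tile 3 in HV_INQ_TILES_SHARED would have triggered the warning above for cpu 3, since a shared tile keeps receiving hypervisor interrupts and can never be fully tickless.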
-
-#ifdef CONFIG_CMDLINE_BOOL
-static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
-#endif
-
-void __init setup_arch(char **cmdline_p)
-{
- int len;
-
-#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
- len = hv_get_command_line((HV_VirtAddr) boot_command_line,
- COMMAND_LINE_SIZE);
- if (boot_command_line[0])
- pr_warn("WARNING: ignoring dynamic command line \"%s\"\n",
- boot_command_line);
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- char *hv_cmdline;
-#if defined(CONFIG_CMDLINE_BOOL)
- if (builtin_cmdline[0]) {
- int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
- COMMAND_LINE_SIZE);
- if (builtin_len < COMMAND_LINE_SIZE-1)
- boot_command_line[builtin_len++] = ' ';
- hv_cmdline = &boot_command_line[builtin_len];
- len = COMMAND_LINE_SIZE - builtin_len;
- } else
-#endif
- {
- hv_cmdline = boot_command_line;
- len = COMMAND_LINE_SIZE;
- }
- len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
- if (len < 0 || len > COMMAND_LINE_SIZE)
- early_panic("hv_get_command_line failed: %d\n", len);
-#endif
-
- *cmdline_p = boot_command_line;
-
- /* Set disabled_map and setup_max_cpus very early */
- parse_early_param();
-
- /* Make sure the kernel is compatible with the hypervisor. */
- validate_hv();
- validate_va();
-
- setup_cpu_maps();
-
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- /*
- * Initialize the PCI structures. This is done before memory
- * setup so that we know whether or not a pci_reserve region
- * is necessary.
- */
- if (tile_pci_init() == 0)
- pci_reserve_mb = 0;
-
- /* PCI systems reserve a region just below 4GB for mapping iomem. */
- pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
- pci_reserve_start_pfn = pci_reserve_end_pfn -
- (pci_reserve_mb << (20 - PAGE_SHIFT));
-#endif
-
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
-
- setup_memory();
- store_permanent_mappings();
- setup_bootmem_allocator();
-
- /*
- * NOTE: before this point _nobody_ is allowed to allocate
- * any memory using the bootmem allocator.
- */
-
-#ifdef CONFIG_SWIOTLB
- swiotlb_init(0);
-#endif
-
- paging_init();
- setup_numa_mapping();
- zone_sizes_init();
- set_page_homes();
- setup_cpu(1);
- setup_clock();
- load_hv_initrd();
-}
-
-
-/*
- * Set up per-cpu memory.
- */
-
-unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
-static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
-
-/*
- * As the percpu code allocates pages, we return the pages from the
- * end of the node for the specified cpu.
- */
-static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
-{
- int nid = cpu_to_node(cpu);
- unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];
-
- BUG_ON(size % PAGE_SIZE != 0);
- pfn_offset[nid] += size / PAGE_SIZE;
- BUG_ON(node_percpu[nid] < size);
- node_percpu[nid] -= size;
- if (percpu_pfn[cpu] == 0)
- percpu_pfn[cpu] = pfn;
- return pfn_to_kaddr(pfn);
-}
-
-/*
- * Pages reserved for percpu memory are not freeable, and in any case we are
- * on a short path to panic() in setup_per_cpu_areas() at this point anyway.
- */
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
-}
-
-/*
- * Set up vmalloc page tables using bootmem for the percpu code.
- */
-static void __init pcpu_fc_populate_pte(unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- BUG_ON(pgd_addr_invalid(addr));
- if (addr < VMALLOC_START || addr >= VMALLOC_END)
- panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
- addr, VMALLOC_START, VMALLOC_END);
-
- pgd = swapper_pg_dir + pgd_index(addr);
- pud = pud_offset(pgd, addr);
- BUG_ON(!pud_present(*pud));
- pmd = pmd_offset(pud, addr);
- if (pmd_present(*pmd)) {
- BUG_ON(pmd_huge_page(*pmd));
- } else {
- pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
- HV_PAGE_TABLE_ALIGN, 0);
- pmd_populate_kernel(&init_mm, pmd, pte);
- }
-}
-
-void __init setup_per_cpu_areas(void)
-{
- struct page *pg;
- unsigned long delta, pfn, lowmem_va;
- unsigned long size = percpu_size();
- char *ptr;
- int rc, cpu, i;
-
- rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
- pcpu_fc_free, pcpu_fc_populate_pte);
- if (rc < 0)
- panic("Cannot initialize percpu area (err=%d)", rc);
-
- delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
- for_each_possible_cpu(cpu) {
- __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
-
- /* finv the copy out of cache so we can change homecache */
- ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
- __finv_buffer(ptr, size);
- pfn = percpu_pfn[cpu];
-
- /* Rewrite the page tables to cache on that cpu */
- pg = pfn_to_page(pfn);
- for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
-
- /* Update the vmalloc mapping and page home. */
- unsigned long addr = (unsigned long)ptr + i;
- pte_t *ptep = virt_to_kpte(addr);
- pte_t pte = *ptep;
- BUG_ON(pfn != pte_pfn(pte));
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
- pte = set_remote_cache_cpu(pte, cpu);
- set_pte_at(&init_mm, addr, ptep, pte);
-
- /* Update the lowmem mapping for consistency. */
- lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
- ptep = virt_to_kpte(lowmem_va);
- if (pte_huge(*ptep)) {
- printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
- lowmem_va);
- shatter_pmd((pmd_t *)ptep);
- ptep = virt_to_kpte(lowmem_va);
- BUG_ON(pte_huge(*ptep));
- }
- BUG_ON(pfn != pte_pfn(*ptep));
- set_pte_at(&init_mm, lowmem_va, ptep, pte);
- }
- }
-
- /* Set our thread pointer appropriately. */
- set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);
-
- /* Make sure the finv's have completed. */
- mb_incoherent();
-
- /* Flush the TLB so we reference it properly from here on out. */
- local_flush_tlb_all();
-}
-
-static struct resource data_resource = {
- .name = "Kernel data",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-static struct resource code_resource = {
- .name = "Kernel code",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-/*
- * On Pro, we reserve all resources above 4GB so that PCI won't try to put
- * mappings above 4GB.
- */
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
-static struct resource* __init
-insert_non_bus_resource(void)
-{
- struct resource *res =
- kzalloc(sizeof(struct resource), GFP_ATOMIC);
- if (!res)
- return NULL;
- res->name = "Non-Bus Physical Address Space";
- res->start = (1ULL << 32);
- res->end = -1LL;
- res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
- if (insert_resource(&iomem_resource, res)) {
- kfree(res);
- return NULL;
- }
- return res;
-}
-#endif
-
-static struct resource* __init
-insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
-{
- struct resource *res =
- kzalloc(sizeof(struct resource), GFP_ATOMIC);
- if (!res)
- return NULL;
- res->start = start_pfn << PAGE_SHIFT;
- res->end = (end_pfn << PAGE_SHIFT) - 1;
- res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
- if (reserved) {
- res->name = "Reserved";
- } else {
- res->name = "System RAM";
- res->flags |= IORESOURCE_SYSRAM;
- }
- if (insert_resource(&iomem_resource, res)) {
- kfree(res);
- return NULL;
- }
- return res;
-}
-
-/*
- * Request address space for all standard resources
- *
- * If the system includes PCI root complex drivers, we need to create
- * a window just below 4GB where PCI BARs can be mapped.
- */
-static int __init request_standard_resources(void)
-{
- int i;
- enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- insert_non_bus_resource();
-#endif
-
- for_each_online_node(i) {
- u64 start_pfn = node_start_pfn[i];
- u64 end_pfn = node_end_pfn[i];
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- if (start_pfn <= pci_reserve_start_pfn &&
- end_pfn > pci_reserve_start_pfn) {
- if (end_pfn > pci_reserve_end_pfn)
- insert_ram_resource(pci_reserve_end_pfn,
- end_pfn, 0);
- end_pfn = pci_reserve_start_pfn;
- }
-#endif
- insert_ram_resource(start_pfn, end_pfn, 0);
- }
-
- code_resource.start = __pa(_text - CODE_DELTA);
- code_resource.end = __pa(_etext - CODE_DELTA)-1;
- data_resource.start = __pa(_sdata);
- data_resource.end = __pa(_end)-1;
-
- insert_resource(&iomem_resource, &code_resource);
- insert_resource(&iomem_resource, &data_resource);
-
- /* Mark any "memmap" regions busy for the resource manager. */
- for (i = 0; i < memmap_nr; ++i) {
- struct memmap_entry *m = &memmap_map[i];
- insert_ram_resource(PFN_DOWN(m->addr),
- PFN_UP(m->addr + m->size - 1), 1);
- }
-
-#ifdef CONFIG_KEXEC
- insert_resource(&iomem_resource, &crashk_res);
-#endif
-
- return 0;
-}
-
-subsys_initcall(request_standard_resources);
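To make the PCI carve-out above concrete with a made-up node layout (4KB pages): pci_reserve_end_pfn is 1 << 20 (the 4GB line), and pci_reserve_mb = 64 gives

    pci_reserve_start_pfn = 0x100000 - (64 << 8) = 0xfc000

so a node spanning pfns 0xf0000..0x110000 is registered as "System RAM" in two pieces, 0xf0000..0xfc000 and 0x100000..0x110000, leaving the 64MB window 0xfc000..0x100000 unregistered for PCI BAR mappings.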
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
deleted file mode 100644
index f2bf557bb005..000000000000
--- a/arch/tile/kernel/signal.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <linux/compat.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/sigframe.h>
-#include <asm/syscalls.h>
-#include <asm/vdso.h>
-#include <arch/interrupts.h>
-
-#define DEBUG_SIG 0
-
-/*
- * Do a signal return; undo the signal stack.
- */
-
-int restore_sigcontext(struct pt_regs *regs,
- struct sigcontext __user *sc)
-{
- int err;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- /*
- * Enforce that sigcontext is like pt_regs, and doesn't mess
- * up our stack alignment rules.
- */
- BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
- BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
- err = __copy_from_user(regs, sc, sizeof(*regs));
-
- /* Ensure that the PL is always set to USER_PL. */
- regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
-
- regs->faultnum = INT_SWINT_1_SIGRETURN;
-
- return err;
-}
-
-void signal_fault(const char *type, struct pt_regs *regs,
- void __user *frame, int sig)
-{
- trace_unhandled_signal(type, regs, (unsigned long)frame, SIGSEGV);
- force_sigsegv(sig, current);
-}
-
-/* The assembly shim for this function arranges to ignore the return value. */
-SYSCALL_DEFINE0(rt_sigreturn)
-{
- struct pt_regs *regs = current_pt_regs();
- struct rt_sigframe __user *frame =
- (struct rt_sigframe __user *)(regs->sp);
- sigset_t set;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
- goto badframe;
-
- if (restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return 0;
-
-badframe:
- signal_fault("bad sigreturn frame", regs, frame, 0);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-
-int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
-{
- return __copy_to_user(sc, regs, sizeof(*regs));
-}
-
-/*
- * Determine which stack to use.
- */
-static inline void __user *get_sigframe(struct k_sigaction *ka,
- struct pt_regs *regs,
- size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = regs->sp;
-
- /*
- * If we are on the alternate signal stack and would overflow
- * it, don't. Return an always-bogus address instead so we
- * will die with SIGSEGV.
- */
- if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
- return (void __user __force *)-1UL;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(sp) == 0)
- sp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- sp -= frame_size;
- /*
- * Align the stack pointer according to the TILE ABI,
- * i.e. so that on function entry (sp & 15) == 0.
- */
- sp &= -16UL;
- return (void __user *) sp;
-}
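The closing mask is the usual power-of-two alignment trick: -16UL is ~0xfUL, so the AND clears the low four bits. For a hypothetical incoming sp:

    0x7fff1238 & ~0xfUL == 0x7fff1230    /* (sp & 15) == 0, per the ABI */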
-
-static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- unsigned long restorer;
- struct rt_sigframe __user *frame;
- int err = 0, sig = ksig->sig;
-
- frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto err;
-
- /* Always write at least the signal number for the stack backtracer. */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- /* At sigreturn time, restore the callee-save registers too. */
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- regs->flags |= PT_FLAGS_RESTORE_REGS;
- } else {
- err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
- }
-
- /* Create the ucontext. */
- err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(NULL, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto err;
-
- restorer = VDSO_SYM(&__vdso_rt_sigreturn);
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- restorer = (unsigned long) ksig->ka.sa.sa_restorer;
-
- /*
- * Set up registers for signal handler.
- * Registers that we don't modify keep the value they had from
- * user-space at the time we took the signal.
- * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
- * since some things rely on this (e.g. glibc's debug/segfault.c).
- */
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
- regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
- regs->sp = (unsigned long) frame;
- regs->lr = restorer;
- regs->regs[0] = (unsigned long) sig;
- regs->regs[1] = (unsigned long) &frame->info;
- regs->regs[2] = (unsigned long) &frame->uc;
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- return 0;
-
-err:
- trace_unhandled_signal("bad sigreturn frame", regs,
- (unsigned long)frame, SIGSEGV);
- return -EFAULT;
-}
-
-/*
- * OK, we're invoking a handler
- */
-
-static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
-{
- sigset_t *oldset = sigmask_to_save();
- int ret;
-
- /* Are we from a system call? */
- if (regs->faultnum == INT_SWINT_1) {
- /* If so, check system call restarting.. */
- switch (regs->regs[0]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->regs[0] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
- regs->regs[0] = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- /* Reload caller-saves to restore r0..r5 and r10. */
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- regs->regs[0] = regs->orig_r0;
- regs->pc -= 8;
- }
- }
-
- /* Set up the stack frame */
-#ifdef CONFIG_COMPAT
- if (is_compat_task())
- ret = compat_setup_rt_frame(ksig, oldset, regs);
- else
-#endif
- ret = setup_rt_frame(ksig, oldset, regs);
-
- signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init, even with a SIGKILL, even by
- * mistake.
- */
-void do_signal(struct pt_regs *regs)
-{
- struct ksignal ksig;
-
- /*
- * i386 will check if we're coming from kernel mode and bail out
- * here. In my experience this just turns weird crashes into
- * weird spin-hangs. But if we find a case where this seems
- * helpful, we can reinstate the check on "!user_mode(regs)".
- */
-
- if (get_signal(&ksig)) {
- /* Whee! Actually deliver the signal. */
- handle_signal(&ksig, regs);
- goto done;
- }
-
- /* Did we come from a system call? */
- if (regs->faultnum == INT_SWINT_1) {
- /* Restart the system call - no handlers present */
- switch (regs->regs[0]) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- regs->regs[0] = regs->orig_r0;
- regs->pc -= 8;
- break;
-
- case -ERESTART_RESTARTBLOCK:
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall;
- regs->pc -= 8;
- break;
- }
- }
-
- /* If there's no signal to deliver, just put the saved sigmask back. */
- restore_saved_sigmask();
-
-done:
- /* Avoid double syscall restart if there are nested signals. */
- regs->faultnum = INT_SWINT_1_SIGRETURN;
-}
-
-int show_unhandled_signals = 1;
-
-static int __init crashinfo(char *str)
-{
- const char *word;
-
- if (*str == '\0')
- show_unhandled_signals = 2;
- else if (*str != '=' || kstrtoint(++str, 0, &show_unhandled_signals) != 0)
- return 0;
-
- switch (show_unhandled_signals) {
- case 0:
- word = "No";
- break;
- case 1:
- word = "One-line";
- break;
- default:
- word = "Detailed";
- break;
- }
- pr_info("%s crash reports will be generated on the console\n", word);
- return 1;
-}
-__setup("crashinfo", crashinfo);
-
-static void dump_mem(void __user *address)
-{
- void __user *addr;
- enum { region_size = 256, bytes_per_line = 16 };
- int i, j, k;
- int found_readable_mem = 0;
-
- if (!access_ok(VERIFY_READ, address, 1)) {
- pr_err("Not dumping at address 0x%lx (kernel address)\n",
- (unsigned long)address);
- return;
- }
-
- addr = (void __user *)
- (((unsigned long)address & -bytes_per_line) - region_size/2);
- if (addr > address)
- addr = NULL;
- for (i = 0; i < region_size;
- addr += bytes_per_line, i += bytes_per_line) {
- unsigned char buf[bytes_per_line];
- char line[100];
- if (copy_from_user(buf, addr, bytes_per_line))
- continue;
- if (!found_readable_mem) {
- pr_err("Dumping memory around address 0x%lx:\n",
- (unsigned long)address);
- found_readable_mem = 1;
- }
- j = sprintf(line, REGFMT ":", (unsigned long)addr);
- for (k = 0; k < bytes_per_line; ++k)
- j += sprintf(&line[j], " %02x", buf[k]);
- pr_err("%s\n", line);
- }
- if (!found_readable_mem)
- pr_err("No readable memory around address 0x%lx\n",
- (unsigned long)address);
-}
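Each emitted line follows the sprintf() calls above: a REGFMT-formatted address, a colon, then sixteen hex bytes. Roughly (hypothetical address width and contents):

    0x00012340: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f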
-
-void trace_unhandled_signal(const char *type, struct pt_regs *regs,
- unsigned long address, int sig)
-{
- struct task_struct *tsk = current;
-
- if (show_unhandled_signals == 0)
- return;
-
- /* If the signal is handled, don't show it here. */
- if (!is_global_init(tsk)) {
- void __user *handler =
- tsk->sighand->action[sig-1].sa.sa_handler;
- if (handler != SIG_IGN && handler != SIG_DFL)
- return;
- }
-
- /* Rate-limit the one-line output, not the detailed output. */
- if (show_unhandled_signals <= 1 && !printk_ratelimit())
- return;
-
- printk("%s%s[%d]: %s at %lx pc "REGFMT" signal %d",
- task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, task_pid_nr(tsk), type, address, regs->pc, sig);
-
- print_vma_addr(KERN_CONT " in ", regs->pc);
-
- printk(KERN_CONT "\n");
-
- if (show_unhandled_signals > 1) {
- switch (sig) {
- case SIGILL:
- case SIGFPE:
- case SIGSEGV:
- case SIGBUS:
- pr_err("User crash: signal %d, trap %ld, address 0x%lx\n",
- sig, regs->faultnum, address);
- show_regs(regs);
- dump_mem((void __user *)address);
- break;
- default:
- pr_err("User crash: signal %d, trap %ld\n",
- sig, regs->faultnum);
- break;
- }
- }
-}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
deleted file mode 100644
index 479d8033a801..000000000000
--- a/arch/tile/kernel/single_step.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * A code-rewriter that enables instruction single-stepping.
- */
-
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/thread_info.h>
-#include <linux/uaccess.h>
-#include <linux/mman.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/prctl.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-#include <arch/abi.h>
-#include <arch/spr_def.h>
-#include <arch/opcode.h>
-
-
-#ifndef __tilegx__ /* Hardware support for single step unavailable. */
-
-#define signExtend17(val) sign_extend((val), 17)
-#define TILE_X1_MASK (0xffffffffULL << 31)
-
-enum mem_op {
- MEMOP_NONE,
- MEMOP_LOAD,
- MEMOP_STORE,
- MEMOP_LOAD_POSTINCR,
- MEMOP_STORE_POSTINCR
-};
-
-static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
- s32 offset)
-{
- tilepro_bundle_bits result;
-
- /* mask out the old offset */
- tilepro_bundle_bits mask = create_BrOff_X1(-1);
- result = n & (~mask);
-
- /* or in the new offset */
- result |= create_BrOff_X1(offset);
-
- return result;
-}
-
-static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
- int src)
-{
- tilepro_bundle_bits result;
- tilepro_bundle_bits op;
-
- result = n & (~TILE_X1_MASK);
-
- op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
- create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
- create_Dest_X1(dest) |
- create_SrcB_X1(TREG_ZERO) |
- create_SrcA_X1(src) ;
-
- result |= op;
- return result;
-}
-
-static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
-{
- return move_X1(n, TREG_ZERO, TREG_ZERO);
-}
-
-static inline tilepro_bundle_bits addi_X1(
- tilepro_bundle_bits n, int dest, int src, int imm)
-{
- n &= ~TILE_X1_MASK;
-
- n |= (create_SrcA_X1(src) |
- create_Dest_X1(dest) |
- create_Imm8_X1(imm) |
- create_S_X1(0) |
- create_Opcode_X1(IMM_0_OPCODE_X1) |
- create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
-
- return n;
-}
-
-static tilepro_bundle_bits rewrite_load_store_unaligned(
- struct single_step_state *state,
- tilepro_bundle_bits bundle,
- struct pt_regs *regs,
- enum mem_op mem_op,
- int size, int sign_ext)
-{
- unsigned char __user *addr;
- int val_reg, addr_reg, err, val;
- int align_ctl;
-
- align_ctl = unaligned_fixup;
- switch (task_thread_info(current)->align_ctl) {
- case PR_UNALIGN_NOPRINT:
- align_ctl = 1;
- break;
- case PR_UNALIGN_SIGBUS:
- align_ctl = 0;
- break;
- }
-
- /* Get address and value registers */
- if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
- addr_reg = get_SrcA_Y2(bundle);
- val_reg = get_SrcBDest_Y2(bundle);
- } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
- addr_reg = get_SrcA_X1(bundle);
- val_reg = get_Dest_X1(bundle);
- } else {
- addr_reg = get_SrcA_X1(bundle);
- val_reg = get_SrcB_X1(bundle);
- }
-
- /*
- * If registers are not GPRs, don't try to handle it.
- *
- * FIXME: we could handle non-GPR loads by getting the real value
- * from memory, writing it to the single step buffer, using a
- * temp_reg to hold a pointer to that memory, then executing that
- * instruction and resetting temp_reg. For non-GPR stores, it's a
- * little trickier; we could use the single step buffer for that
- * too, but we'd have to add some more state bits so that we could
- * call back in here to copy that value to the real target. For
- * now, we just handle the simple case.
- */
- if ((val_reg >= PTREGS_NR_GPRS &&
- (val_reg != TREG_ZERO ||
- mem_op == MEMOP_LOAD ||
- mem_op == MEMOP_LOAD_POSTINCR)) ||
- addr_reg >= PTREGS_NR_GPRS)
- return bundle;
-
- /* If it's aligned, don't handle it specially */
- addr = (void __user *)regs->regs[addr_reg];
- if (((unsigned long)addr % size) == 0)
- return bundle;
-
- /*
- * Return SIGBUS with the unaligned address, if requested.
- * Note that we return SIGBUS even for completely invalid addresses
- * as long as they are in fact unaligned; this matches what the
- * tilepro hardware would be doing, if it could provide us with the
- * actual bad address in an SPR, which it doesn't.
- */
- if (align_ctl == 0) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = addr;
-
- trace_unhandled_signal("unaligned trap", regs,
- (unsigned long)addr, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return (tilepro_bundle_bits) 0;
- }
-
- /* Handle unaligned load/store */
- if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
- unsigned short val_16;
- switch (size) {
- case 2:
- err = copy_from_user(&val_16, addr, sizeof(val_16));
- val = sign_ext ? ((short)val_16) : val_16;
- break;
- case 4:
- err = copy_from_user(&val, addr, sizeof(val));
- break;
- default:
- BUG();
- }
- if (err == 0) {
- state->update_reg = val_reg;
- state->update_value = val;
- state->update = 1;
- }
- } else {
- unsigned short val_16;
- val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
- switch (size) {
- case 2:
- val_16 = val;
- err = copy_to_user(addr, &val_16, sizeof(val_16));
- break;
- case 4:
- err = copy_to_user(addr, &val, sizeof(val));
- break;
- default:
- BUG();
- }
- }
-
- if (err) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = addr;
-
- trace_unhandled_signal("bad address for unaligned fixup", regs,
- (unsigned long)addr, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return (tilepro_bundle_bits) 0;
- }
-
- if (unaligned_printk || unaligned_fixup_count == 0) {
- pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
- current->pid, current->comm, regs->pc,
- mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
- "load" : "store",
- (unsigned long)addr);
- if (!unaligned_printk) {
-#define P pr_info
-P("\n");
-P("Unaligned fixups in the kernel will slow your application considerably.\n");
-P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
-P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
-P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
-P("access will become a SIGBUS you can debug. No further warnings will be\n");
-P("shown so as to avoid additional slowdown, but you can track the number\n");
-P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
-P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
-P("\n");
-#undef P
- }
- }
- ++unaligned_fixup_count;
-
- if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
- /* Convert the Y2 instruction to a prefetch. */
- bundle &= ~(create_SrcBDest_Y2(-1) |
- create_Opcode_Y2(-1));
- bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
- create_Opcode_Y2(LW_OPCODE_Y2));
- /* Replace the load postincr with an addi */
- } else if (mem_op == MEMOP_LOAD_POSTINCR) {
- bundle = addi_X1(bundle, addr_reg, addr_reg,
- get_Imm8_X1(bundle));
- /* Replace the store postincr with an addi */
- } else if (mem_op == MEMOP_STORE_POSTINCR) {
- bundle = addi_X1(bundle, addr_reg, addr_reg,
- get_Dest_Imm8_X1(bundle));
- } else {
- /* Convert the X1 instruction to a nop. */
- bundle &= ~(create_Opcode_X1(-1) |
- create_UnShOpcodeExtension_X1(-1) |
- create_UnOpcodeExtension_X1(-1));
- bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
- create_UnShOpcodeExtension_X1(
- UN_0_SHUN_0_OPCODE_X1) |
- create_UnOpcodeExtension_X1(
- NOP_UN_0_SHUN_0_OPCODE_X1));
- }
-
- return bundle;
-}
-
-/*
- * Called after execve() has started the new image. This allows us
- * to reset the info state.  Note that the mmap'ed memory, if there
- * was any, has already been unmapped by the exec.
- */
-void single_step_execve(void)
-{
- struct thread_info *ti = current_thread_info();
- kfree(ti->step_state);
- ti->step_state = NULL;
-}
-
-/*
- * single_step_once() - entry point when single stepping has been triggered.
- * @regs: The machine register state
- *
- * When we arrive at this routine via a trampoline, the single step
- * engine copies the executing bundle to the single step buffer.
- * If the instruction is a condition branch, then the target is
- * reset to one past the next instruction. If the instruction
- * sets the lr, then that is noted. If the instruction is a jump
- * or call, then the new target pc is preserved and the current
- * bundle instruction set to null.
- *
- * The necessary post-single-step rewriting information is stored in
- * single_step_state.  We use data segment values because the
- * stack will be rewound when we run the rewritten single-stepped
- * instruction.
- */
-void single_step_once(struct pt_regs *regs)
-{
- extern tilepro_bundle_bits __single_step_ill_insn;
- extern tilepro_bundle_bits __single_step_j_insn;
- extern tilepro_bundle_bits __single_step_addli_insn;
- extern tilepro_bundle_bits __single_step_auli_insn;
- struct thread_info *info = (void *)current_thread_info();
- struct single_step_state *state = info->step_state;
- int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
- tilepro_bundle_bits __user *buffer, *pc;
- tilepro_bundle_bits bundle;
- int temp_reg;
- int target_reg = TREG_LR;
- int err;
- enum mem_op mem_op = MEMOP_NONE;
- int size = 0, sign_ext = 0; /* happy compiler */
- int align_ctl;
-
- align_ctl = unaligned_fixup;
- switch (task_thread_info(current)->align_ctl) {
- case PR_UNALIGN_NOPRINT:
- align_ctl = 1;
- break;
- case PR_UNALIGN_SIGBUS:
- align_ctl = 0;
- break;
- }
-
- asm(
-" .pushsection .rodata.single_step\n"
-" .align 8\n"
-" .globl __single_step_ill_insn\n"
-"__single_step_ill_insn:\n"
-" ill\n"
-" .globl __single_step_addli_insn\n"
-"__single_step_addli_insn:\n"
-" { nop; addli r0, zero, 0 }\n"
-" .globl __single_step_auli_insn\n"
-"__single_step_auli_insn:\n"
-" { nop; auli r0, r0, 0 }\n"
-" .globl __single_step_j_insn\n"
-"__single_step_j_insn:\n"
-" j .\n"
-" .popsection\n"
- );
-
- /*
- * Enable interrupts here to allow touching userspace and the like.
- * The callers expect this: do_trap() already has interrupts
- * enabled, and do_work_pending() handles functions that enable
- * interrupts internally.
- */
- local_irq_enable();
-
- if (state == NULL) {
-		/* allocate the per-thread single-step state structure */
- state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
- if (state == NULL) {
- pr_err("Out of kernel memory trying to single-step\n");
- return;
- }
-
- /* allocate a cache line of writable, executable memory */
- buffer = (void __user *) vm_mmap(NULL, 0, 64,
- PROT_EXEC | PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- 0);
-
- if (IS_ERR((void __force *)buffer)) {
- kfree(state);
- pr_err("Out of kernel pages trying to single-step\n");
- return;
- }
-
- state->buffer = buffer;
- state->is_enabled = 0;
-
- info->step_state = state;
-
- /* Validate our stored instruction patterns */
- BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
- ADDLI_OPCODE_X1);
- BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
- AULI_OPCODE_X1);
- BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
- BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
- BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
- }
-
- /*
- * If we are returning from a syscall, we still haven't hit the
- * "ill" for the swint1 instruction. So back the PC up to be
- * pointing at the swint1, but we'll actually return directly
- * back to the "ill" so we come back in via SIGILL as if we
- * had "executed" the swint1 without ever being in kernel space.
- */
- if (regs->faultnum == INT_SWINT_1)
- regs->pc -= 8;
-
- pc = (tilepro_bundle_bits __user *)(regs->pc);
- if (get_user(bundle, pc) != 0) {
- pr_err("Couldn't read instruction at %p trying to step\n", pc);
- return;
- }
-
- /* We'll follow the instruction with 2 ill op bundles */
- state->orig_pc = (unsigned long)pc;
- state->next_pc = (unsigned long)(pc + 1);
- state->branch_next_pc = 0;
- state->update = 0;
-
- if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
- /* two wide, check for control flow */
- int opcode = get_Opcode_X1(bundle);
-
- switch (opcode) {
- /* branches */
- case BRANCH_OPCODE_X1:
- {
- s32 offset = signExtend17(get_BrOff_X1(bundle));
-
- /*
- * For branches, we use a rewriting trick to let the
- * hardware evaluate whether the branch is taken or
- * untaken. We record the target offset and then
- * rewrite the branch instruction to target 1 insn
- * ahead if the branch is taken. We then follow the
- * rewritten branch with two bundles, each containing
- * an "ill" instruction. The supervisor examines the
- * pc after the single step code is executed, and if
- * the pc is the first ill instruction, then the
- * branch (if any) was not taken. If the pc is the
- * second ill instruction, then the branch was
- * taken. The new pc is computed for these cases, and
- * inserted into the registers for the thread. If
- * the pc is the start of the single step code, then
- * an exception or interrupt was taken before the
- * code started processing, and the same "original"
- * pc is restored. This change, different from the
- * original implementation, has the advantage of
- * executing a single user instruction.
- */
- state->branch_next_pc = (unsigned long)(pc + offset);
-
- /* rewrite branch offset to go forward one bundle */
- bundle = set_BrOff_X1(bundle, 2);
- }
- break;
-
- /* jumps */
- case JALB_OPCODE_X1:
- case JALF_OPCODE_X1:
- state->update = 1;
- state->next_pc =
- (unsigned long) (pc + get_JOffLong_X1(bundle));
- break;
-
- case JB_OPCODE_X1:
- case JF_OPCODE_X1:
- state->next_pc =
- (unsigned long) (pc + get_JOffLong_X1(bundle));
- bundle = nop_X1(bundle);
- break;
-
- case SPECIAL_0_OPCODE_X1:
- switch (get_RRROpcodeExtension_X1(bundle)) {
- /* jump-register */
- case JALRP_SPECIAL_0_OPCODE_X1:
- case JALR_SPECIAL_0_OPCODE_X1:
- state->update = 1;
- state->next_pc =
- regs->regs[get_SrcA_X1(bundle)];
- break;
-
- case JRP_SPECIAL_0_OPCODE_X1:
- case JR_SPECIAL_0_OPCODE_X1:
- state->next_pc =
- regs->regs[get_SrcA_X1(bundle)];
- bundle = nop_X1(bundle);
- break;
-
- case LNK_SPECIAL_0_OPCODE_X1:
- state->update = 1;
- target_reg = get_Dest_X1(bundle);
- break;
-
- /* stores */
- case SH_SPECIAL_0_OPCODE_X1:
- mem_op = MEMOP_STORE;
- size = 2;
- break;
-
- case SW_SPECIAL_0_OPCODE_X1:
- mem_op = MEMOP_STORE;
- size = 4;
- break;
- }
- break;
-
- /* loads and iret */
- case SHUN_0_OPCODE_X1:
- if (get_UnShOpcodeExtension_X1(bundle) ==
- UN_0_SHUN_0_OPCODE_X1) {
- switch (get_UnOpcodeExtension_X1(bundle)) {
- case LH_UN_0_SHUN_0_OPCODE_X1:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 1;
- break;
-
- case LH_U_UN_0_SHUN_0_OPCODE_X1:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 0;
- break;
-
- case LW_UN_0_SHUN_0_OPCODE_X1:
- mem_op = MEMOP_LOAD;
- size = 4;
- break;
-
- case IRET_UN_0_SHUN_0_OPCODE_X1:
- {
- unsigned long ex0_0 = __insn_mfspr(
- SPR_EX_CONTEXT_0_0);
- unsigned long ex0_1 = __insn_mfspr(
- SPR_EX_CONTEXT_0_1);
- /*
- * Special-case it if we're iret'ing
- * to PL0 again. Otherwise just let
- * it run and it will generate SIGILL.
- */
- if (EX1_PL(ex0_1) == USER_PL) {
- state->next_pc = ex0_0;
- regs->ex1 = ex0_1;
- bundle = nop_X1(bundle);
- }
- }
- }
- }
- break;
-
- /* postincrement operations */
- case IMM_0_OPCODE_X1:
- switch (get_ImmOpcodeExtension_X1(bundle)) {
- case LWADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_LOAD_POSTINCR;
- size = 4;
- break;
-
- case LHADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_LOAD_POSTINCR;
- size = 2;
- sign_ext = 1;
- break;
-
- case LHADD_U_IMM_0_OPCODE_X1:
- mem_op = MEMOP_LOAD_POSTINCR;
- size = 2;
- sign_ext = 0;
- break;
-
- case SWADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_STORE_POSTINCR;
- size = 4;
- break;
-
- case SHADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_STORE_POSTINCR;
- size = 2;
- break;
-
- default:
- break;
- }
- break;
- }
-
- if (state->update) {
- /*
- * Get an available register. We start with a
- * bitmask with 1's for available registers.
- * We truncate to the low 32 registers since
- * we are guaranteed to have set bits in the
- * low 32 bits, then use ctz to pick the first.
- */
- u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
- (1ULL << get_SrcA_X0(bundle)) |
- (1ULL << get_SrcB_X0(bundle)) |
- (1ULL << target_reg));
- temp_reg = __builtin_ctz(mask);
- state->update_reg = temp_reg;
- state->update_value = regs->regs[temp_reg];
- regs->regs[temp_reg] = (unsigned long) (pc+1);
- regs->flags |= PT_FLAGS_RESTORE_REGS;
- bundle = move_X1(bundle, target_reg, temp_reg);
- }
- } else {
- int opcode = get_Opcode_Y2(bundle);
-
- switch (opcode) {
- /* loads */
- case LH_OPCODE_Y2:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 1;
- break;
-
- case LH_U_OPCODE_Y2:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 0;
- break;
-
- case LW_OPCODE_Y2:
- mem_op = MEMOP_LOAD;
- size = 4;
- break;
-
- /* stores */
- case SH_OPCODE_Y2:
- mem_op = MEMOP_STORE;
- size = 2;
- break;
-
- case SW_OPCODE_Y2:
- mem_op = MEMOP_STORE;
- size = 4;
- break;
- }
- }
-
- /*
- * Check if we need to rewrite an unaligned load/store.
- * Returning zero is a special value meaning we generated a signal.
- */
- if (mem_op != MEMOP_NONE && align_ctl >= 0) {
- bundle = rewrite_load_store_unaligned(state, bundle, regs,
- mem_op, size, sign_ext);
- if (bundle == 0)
- return;
- }
-
- /* write the bundle to our execution area */
- buffer = state->buffer;
- err = __put_user(bundle, buffer++);
-
- /*
- * If we're really single-stepping, we take an INT_ILL after.
- * If we're just handling an unaligned access, we can just
- * jump directly back to where we were in user code.
- */
- if (is_single_step) {
- err |= __put_user(__single_step_ill_insn, buffer++);
- err |= __put_user(__single_step_ill_insn, buffer++);
- } else {
- long delta;
-
- if (state->update) {
- /* We have some state to update; do it inline */
- int ha16;
- bundle = __single_step_addli_insn;
- bundle |= create_Dest_X1(state->update_reg);
- bundle |= create_Imm16_X1(state->update_value);
- err |= __put_user(bundle, buffer++);
- bundle = __single_step_auli_insn;
- bundle |= create_Dest_X1(state->update_reg);
- bundle |= create_SrcA_X1(state->update_reg);
- ha16 = (state->update_value + 0x8000) >> 16;
- bundle |= create_Imm16_X1(ha16);
- err |= __put_user(bundle, buffer++);
- state->update = 0;
- }
-
- /* End with a jump back to the next instruction */
- delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
- (unsigned long)buffer) >>
- TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
- bundle = __single_step_j_insn;
- bundle |= create_JOffLong_X1(delta);
- err |= __put_user(bundle, buffer++);
- }
-
- if (err) {
- pr_err("Fault when writing to single-step buffer\n");
- return;
- }
-
- /*
- * Flush the buffer.
- * We do a local flush only, since this is a thread-specific buffer.
- */
- __flush_icache_range((unsigned long)state->buffer,
- (unsigned long)buffer);
-
- /* Indicate enabled */
- state->is_enabled = is_single_step;
- regs->pc = (unsigned long)state->buffer;
-
- /* Fault immediately if we are coming back from a syscall. */
- if (regs->faultnum == INT_SWINT_1)
- regs->pc += 8;
-}
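A sketch of the inline update arithmetic above, assuming addli sign-extends its 16-bit immediate (which is what the +0x8000 bias in ha16 compensates for). For a hypothetical update_value of 0x12348765:

    addli r, zero, 0x8765   /* r = 0xffff8765 (sign-extended)       */
    auli  r, r, 0x1235      /* ha16 = (0x12348765 + 0x8000) >> 16   */
                            /* r = 0xffff8765 + 0x12350000          */
                            /*   = 0x12348765 (mod 2^32)            */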
-
-#else
-
-static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
-
-
-/*
- * Called directly on the occasion of an interrupt.
- *
- * If the process doesn't have single step set, then we use this as an
- * opportunity to turn single step off.
- *
- * It has been mentioned that we could conditionally turn off single stepping
- * on each entry into the kernel and rely on single_step_once to turn it
- * on for the processes that matter (as we already do), but this
- * implementation is somewhat more efficient in that we muck with registers
- * once on a bum interrupt rather than on every entry into the kernel.
- *
- * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
- * so we have to run through this process again before we can say that an
- * instruction has executed.
- *
- * swint will set CANCELED, but it's a legitimate instruction. Fortunately
- * it changes the PC. If it hasn't changed, then we know that the interrupt
- * wasn't generated by swint and we'll need to run this process again before
- * we can say an instruction has executed.
- *
- * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
- * on with our lives.
- */
-
-void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
-{
- unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
- struct thread_info *info = (void *)current_thread_info();
- int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
- unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
-
- if (is_single_step == 0) {
- __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
-
- } else if ((*ss_pc != regs->pc) ||
- (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
-
- control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
- control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
- __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
- send_sigtrap(current, regs);
- }
-}
-
-
-/*
- * Called from need_singlestep. Set up the control registers and the enable
- * register, then return back.
- */
-
-void single_step_once(struct pt_regs *regs)
-{
- unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
- unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
-
- *ss_pc = regs->pc;
- control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
- control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
- __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
- __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
-}
-
-void single_step_execve(void)
-{
- /* Nothing */
-}
-
-#endif /* !__tilegx__ */
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
deleted file mode 100644
index 94a62e1197ce..000000000000
--- a/arch/tile/kernel/smp.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE SMP support routines.
- */
-
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/irq_work.h>
-#include <linux/module.h>
-#include <asm/cacheflush.h>
-#include <asm/homecache.h>
-
-/*
- * We write to width and height with a single store in head_NN.S,
- * so make the variable aligned to "long".
- */
-HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
-EXPORT_SYMBOL(smp_topology);
-
-#if CHIP_HAS_IPI()
-static unsigned long __iomem *ipi_mappings[NR_CPUS];
-#endif
-
-/* Does messaging work correctly to the local cpu? */
-bool self_interrupt_ok;
-
-/*
- * Top-level send_IPI*() functions to send messages to other cpus.
- */
-
-/* Set by smp_send_stop() to avoid recursive panics. */
-static int stopping_cpus;
-
-static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
-{
- int sent = 0;
- while (sent < nrecip) {
- int rc = hv_send_message(recip, nrecip,
- (HV_VirtAddr)&tag, sizeof(tag));
- if (rc < 0) {
- if (!stopping_cpus) /* avoid recursive panic */
- panic("hv_send_message returned %d", rc);
- break;
- }
- WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
- sent += rc;
- }
-}
-
-void send_IPI_single(int cpu, int tag)
-{
- HV_Recipient recip = {
- .y = cpu / smp_width,
- .x = cpu % smp_width,
- .state = HV_TO_BE_SENT
- };
- __send_IPI_many(&recip, 1, tag);
-}
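The recipient fields map the linear cpu number onto the tile grid; on a hypothetical 8-wide mesh (smp_width == 8), cpu 10 becomes

    .y = 10 / 8 = 1,   .x = 10 % 8 = 2

i.e. the third tile in the second row.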
-
-void send_IPI_many(const struct cpumask *mask, int tag)
-{
- HV_Recipient recip[NR_CPUS];
- int cpu;
- int nrecip = 0;
- int my_cpu = smp_processor_id();
- for_each_cpu(cpu, mask) {
- HV_Recipient *r;
- BUG_ON(cpu == my_cpu);
- r = &recip[nrecip++];
- r->y = cpu / smp_width;
- r->x = cpu % smp_width;
- r->state = HV_TO_BE_SENT;
- }
- __send_IPI_many(recip, nrecip, tag);
-}
-
-void send_IPI_allbutself(int tag)
-{
- struct cpumask mask;
- cpumask_copy(&mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &mask);
- send_IPI_many(&mask, tag);
-}
-
-/*
- * Functions related to starting/stopping cpus.
- */
-
-/* Handler to start the current cpu. */
-static void smp_start_cpu_interrupt(void)
-{
- get_irq_regs()->pc = start_cpu_function_addr;
-}
-
-/* Handler to stop the current cpu. */
-static void smp_stop_cpu_interrupt(void)
-{
- arch_local_irq_disable_all();
- set_cpu_online(smp_processor_id(), 0);
- for (;;)
- asm("nap; nop");
-}
-
-/* This function calls the 'stop' function on all other CPUs in the system. */
-void smp_send_stop(void)
-{
- stopping_cpus = 1;
- send_IPI_allbutself(MSG_TAG_STOP_CPU);
-}
-
-/* On panic, just wait; we may get an smp_send_stop() later on. */
-void panic_smp_self_stop(void)
-{
- while (1)
- asm("nap; nop");
-}
-
-/*
- * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
- */
-void evaluate_message(int tag)
-{
- switch (tag) {
- case MSG_TAG_START_CPU: /* Start up a cpu */
- smp_start_cpu_interrupt();
- break;
-
- case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPU's */
- smp_stop_cpu_interrupt();
- break;
-
- case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
- generic_smp_call_function_interrupt();
- break;
-
- case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
- generic_smp_call_function_single_interrupt();
- break;
-
- case MSG_TAG_IRQ_WORK: /* Invoke IRQ work */
- irq_work_run();
- break;
-
- default:
- panic("Unknown IPI message tag %d", tag);
- break;
- }
-}
-
-
-/*
- * flush_icache_range() code uses smp_call_function().
- */
-
-struct ipi_flush {
- unsigned long start;
- unsigned long end;
-};
-
-static void ipi_flush_icache_range(void *info)
-{
- struct ipi_flush *flush = (struct ipi_flush *) info;
- __flush_icache_range(flush->start, flush->end);
-}
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
- struct ipi_flush flush = { start, end };
-
- /* If invoked with irqs disabled, we can not issue IPIs. */
- if (irqs_disabled())
- flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
- NULL, NULL, 0);
- else {
- preempt_disable();
- on_each_cpu(ipi_flush_icache_range, &flush, 1);
- preempt_enable();
- }
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
- if (arch_irq_work_has_interrupt())
- send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
-}
-#endif
-
-
-/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
-static irqreturn_t handle_reschedule_ipi(int irq, void *token)
-{
- __this_cpu_inc(irq_stat.irq_resched_count);
- scheduler_ipi();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction resched_action = {
- .handler = handle_reschedule_ipi,
- .name = "resched",
- .dev_id = handle_reschedule_ipi /* unique token */,
-};
-
-void __init ipi_init(void)
-{
- int cpu = smp_processor_id();
- HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu),
- .state = HV_TO_BE_SENT };
- int tag = MSG_TAG_CALL_FUNCTION_SINGLE;
-
- /*
- * Test if we can message ourselves for arch_irq_work_raise.
- * This functionality is only available in the Tilera hypervisor
- * in versions 4.3.4 and following.
- */
- if (hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)) == 1)
- self_interrupt_ok = true;
- else
- pr_warn("Older hypervisor: disabling fast irq_work_raise\n");
-
-#if CHIP_HAS_IPI()
- /* Map IPI trigger MMIO addresses. */
- for_each_possible_cpu(cpu) {
- HV_Coord tile;
- HV_PTE pte;
- unsigned long offset;
-
- tile.x = cpu_x(cpu);
- tile.y = cpu_y(cpu);
- if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
- panic("Failed to initialize IPI for cpu %d\n", cpu);
-
- offset = PFN_PHYS(pte_pfn(pte));
- ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
- }
-#endif
-
- /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
- tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
- BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
-}
-
-#if CHIP_HAS_IPI()
-
-void smp_send_reschedule(int cpu)
-{
- WARN_ON(cpu_is_offline(cpu));
-
- /*
- * We just want to do an MMIO store. The traditional writeq()
- * functions aren't really correct here, since they're always
- * directed at the PCI shim. For now, just do a raw store,
- * casting away the __iomem attribute.
- */
- ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
-}
-
-#else
-
-void smp_send_reschedule(int cpu)
-{
- HV_Coord coord;
-
- WARN_ON(cpu_is_offline(cpu));
-
- coord.y = cpu_y(cpu);
- coord.x = cpu_x(cpu);
- hv_trigger_ipi(coord, IRQ_RESCHEDULE);
-}
-
-#endif /* CHIP_HAS_IPI() */
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
deleted file mode 100644
index 869c22e57561..000000000000
--- a/arch/tile/kernel/smpboot.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
-#include <linux/kernel_stat.h>
-#include <linux/bootmem.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/percpu.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-
-/* State of each CPU. */
-static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-/* The messaging code jumps to this pointer during boot-up */
-unsigned long start_cpu_function_addr;
-
-/* Called very early during startup to mark boot cpu as online */
-void __init smp_prepare_boot_cpu(void)
-{
- int cpu = smp_processor_id();
- set_cpu_online(cpu, 1);
- set_cpu_present(cpu, 1);
- __this_cpu_write(cpu_state, CPU_ONLINE);
-
- init_messaging();
-}
-
-static void start_secondary(void);
-
-/*
- * Called at the top of init() to launch all the other CPUs.
- * They run free to complete their initialization and then wait
- * until they get an IPI from the boot cpu to come online.
- */
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- long rc;
- int cpu, cpu_count;
- int boot_cpu = smp_processor_id();
-
- current_thread_info()->cpu = boot_cpu;
-
- /*
- * Pin this task to the boot CPU while we bring up the others,
- * just to make sure we don't uselessly migrate as they come up.
- */
- rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
- if (rc != 0)
- pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);
-
- /* Print information about disabled and dataplane cpus. */
- print_disabled_cpus();
-
- /*
- * Tell the messaging subsystem how to respond to the
- * startup message. We use a level of indirection to avoid
- * confusing the linker with the fact that the messaging
- * subsystem is calling __init code.
- */
- start_cpu_function_addr = (unsigned long) &online_secondary;
-
- /* Set up thread context for all new processors. */
- cpu_count = 1;
- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
- struct task_struct *idle;
-
- if (cpu == boot_cpu)
- continue;
-
- if (!cpu_possible(cpu)) {
- /*
- * Make this processor do nothing on boot.
- * Note that we don't give the boot_pc function
- * a stack, so it has to be assembly code.
- */
- per_cpu(boot_sp, cpu) = 0;
- per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
- continue;
- }
-
- /* Create a new idle thread to run start_secondary() */
- idle = fork_idle(cpu);
- if (IS_ERR(idle))
- panic("failed fork for CPU %d", cpu);
- idle->thread.pc = (unsigned long) start_secondary;
-
- /* Make this thread the boot thread for this processor */
- per_cpu(boot_sp, cpu) = task_ksp0(idle);
- per_cpu(boot_pc, cpu) = idle->thread.pc;
-
- ++cpu_count;
- }
- BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));
-
- /* Fire up the other tiles, if any */
- init_cpu_present(cpu_possible_mask);
- if (cpumask_weight(cpu_present_mask) > 1) {
- mb(); /* make sure all data is visible to new processors */
- hv_start_all_tiles();
- }
-}
-
-static __initdata struct cpumask init_affinity;
-
-static __init int reset_init_affinity(void)
-{
- long rc = sched_setaffinity(current->pid, &init_affinity);
- if (rc != 0)
- pr_warn("couldn't reset init affinity (%ld)\n", rc);
- return 0;
-}
-late_initcall(reset_init_affinity);
-
-static struct cpumask cpu_started;
-
-/*
- * Activate a secondary processor. Very minimal; don't add anything
- * to this path without knowing what you're doing, since SMP booting
- * is pretty fragile.
- */
-static void start_secondary(void)
-{
- int cpuid;
-
- preempt_disable();
-
- cpuid = smp_processor_id();
-
- /* Set our thread pointer appropriately. */
- set_my_cpu_offset(__per_cpu_offset[cpuid]);
-
- /*
-	 * In large machines even this will slow us down, since we
-	 * will be contending for the printk spinlock.
- */
- /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
-
- /* Initialize the current asid for our first page table. */
- __this_cpu_write(current_asid, min_asid);
-
- /* Set up this thread as another owner of the init_mm */
- mmgrab(&init_mm);
- current->active_mm = &init_mm;
- if (current->mm)
- BUG();
- enter_lazy_tlb(&init_mm, current);
-
- /* Allow hypervisor messages to be received */
- init_messaging();
- local_irq_enable();
-
- /* Indicate that we're ready to come up. */
- /* Must not do this before we're ready to receive messages */
- if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
- pr_warn("CPU#%d already started!\n", cpuid);
- for (;;)
- local_irq_enable();
- }
-
- smp_nap();
-}
-
-/*
- * Bring a secondary processor online.
- */
-void online_secondary(void)
-{
- /*
- * low-memory mappings have been cleared, flush them from
- * the local TLBs too.
- */
- local_flush_tlb();
-
- BUG_ON(in_interrupt());
-
- /* This must be done before setting cpu_online_mask */
- wmb();
-
- notify_cpu_starting(smp_processor_id());
-
- set_cpu_online(smp_processor_id(), 1);
- __this_cpu_write(cpu_state, CPU_ONLINE);
-
- /* Set up tile-specific state for this cpu. */
- setup_cpu(0);
-
- /* Set up tile-timer clock-event device on this cpu */
- setup_tile_timer();
-
- cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-}
-
-int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-	/* Wait up to 5s total for all CPUs to come online */
- static int timeout;
- for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
- if (timeout >= 50000) {
- pr_info("skipping unresponsive cpu%d\n", cpu);
- local_irq_enable();
- return -EIO;
- }
- udelay(100);
- }
-
- local_irq_enable();
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-
- /* Unleash the CPU! */
- send_IPI_single(cpu, MSG_TAG_START_CPU);
- while (!cpumask_test_cpu(cpu, cpu_online_mask))
- cpu_relax();
- return 0;
-}
-
-static void panic_start_cpu(void)
-{
- panic("Received a MSG_START_CPU IPI after boot finished.");
-}
-
-void __init smp_cpus_done(unsigned int max_cpus)
-{
- int cpu, next, rc;
-
- /* Reset the response to a (now illegal) MSG_START_CPU IPI. */
- start_cpu_function_addr = (unsigned long) &panic_start_cpu;
-
- cpumask_copy(&init_affinity, cpu_online_mask);
-
- /*
- * Pin ourselves to a single cpu in the initial affinity set
- * so that kernel mappings for the rootfs are not in the dataplane,
-	 * if set, and to avoid unnecessary migration during bringup.
- * Use the last cpu just in case the whole chip has been
- * isolated from the scheduler, to keep init away from likely
- * more useful user code. This also ensures that work scheduled
- * via schedule_delayed_work() in the init routines will land
- * on this cpu.
- */
- for (cpu = cpumask_first(&init_affinity);
- (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
- cpu = next)
- ;
- rc = sched_setaffinity(current->pid, cpumask_of(cpu));
- if (rc != 0)
- pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
-}
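
__cpu_up() above bounds its wait with a simple poll loop: 50000 iterations of udelay(100) is a 5-second budget, and because `timeout` is static that budget is shared across all secondary CPUs rather than reset per CPU. A user-space sketch of the same bounded-poll pattern, assuming a flag that the real code sets asynchronously (names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile bool cpu_started;	/* set asynchronously in real code */

/* Poll with a bounded wait: 50000 * 100us = 5s, as in __cpu_up(). */
static int wait_for_cpu(void)
{
	static int timeout;	/* shared budget, as in the original */

	for (; !cpu_started; timeout++) {
		if (timeout >= 50000) {
			fprintf(stderr, "skipping unresponsive cpu\n");
			return -1;
		}
		usleep(100);
	}
	return 0;
}

int main(void)
{
	cpu_started = true;	/* pretend the secondary checked in */
	return wait_for_cpu() ? 1 : 0;
}
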
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
deleted file mode 100644
index 94ecbc6676e5..000000000000
--- a/arch/tile/kernel/stack.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task_stack.h>
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/module.h>
-#include <linux/pfn.h>
-#include <linux/kallsyms.h>
-#include <linux/stacktrace.h>
-#include <linux/uaccess.h>
-#include <linux/mmzone.h>
-#include <linux/dcache.h>
-#include <linux/fs.h>
-#include <linux/hardirq.h>
-#include <linux/string.h>
-#include <asm/backtrace.h>
-#include <asm/page.h>
-#include <asm/ucontext.h>
-#include <asm/switch_to.h>
-#include <asm/sigframe.h>
-#include <asm/stack.h>
-#include <asm/vdso.h>
-#include <arch/abi.h>
-#include <arch/interrupts.h>
-
-#define KBT_ONGOING 0 /* Backtrace still ongoing */
-#define KBT_DONE 1 /* Backtrace cleanly completed */
-#define KBT_RUNNING 2 /* Can't run backtrace on a running task */
-#define KBT_LOOP 3 /* Backtrace entered a loop */
-
-/* Is address on the specified kernel stack? */
-static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
-{
- ulong kstack_base = (ulong) kbt->task->stack;
- if (kstack_base == 0) /* corrupt task pointer; just follow stack... */
- return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
- return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
-}
-
-/* Callback for backtracer; basically a glorified memcpy */
-static bool read_memory_func(void *result, unsigned long address,
- unsigned int size, void *vkbt)
-{
- int retval;
- struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
-
- if (address == 0)
- return 0;
- if (__kernel_text_address(address)) {
- /* OK to read kernel code. */
- } else if (address >= PAGE_OFFSET) {
- /* We only tolerate kernel-space reads of this task's stack */
- if (!in_kernel_stack(kbt, address))
- return 0;
- } else if (!kbt->is_current) {
- return 0; /* can't read from other user address spaces */
- }
- pagefault_disable();
- retval = __copy_from_user_inatomic(result,
- (void __user __force *)address,
- size);
- pagefault_enable();
- return (retval == 0);
-}
-
-/* Return a pt_regs pointer for a valid fault handler frame */
-static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
-{
- char fault[64];
- unsigned long sp = kbt->it.sp;
- struct pt_regs *p;
-
- if (sp % sizeof(long) != 0)
- return NULL;
- if (!in_kernel_stack(kbt, sp))
- return NULL;
- if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
- return NULL;
- p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
- if (kbt->verbose) { /* else we aren't going to use it */
- if (p->faultnum == INT_SWINT_1 ||
- p->faultnum == INT_SWINT_1_SIGRETURN)
- snprintf(fault, sizeof(fault),
- "syscall %ld", p->regs[TREG_SYSCALL_NR]);
- else
- snprintf(fault, sizeof(fault),
- "interrupt %ld", p->faultnum);
- }
- if (EX1_PL(p->ex1) == KERNEL_PL &&
- __kernel_text_address(p->pc) &&
- in_kernel_stack(kbt, p->sp) &&
- p->sp >= sp) {
- if (kbt->verbose)
- pr_err(" <%s while in kernel mode>\n", fault);
- } else if (user_mode(p) &&
- p->sp < PAGE_OFFSET && p->sp != 0) {
- if (kbt->verbose)
- pr_err(" <%s while in user mode>\n", fault);
- } else {
- if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
- pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
- p->pc, p->sp, p->ex1);
- return NULL;
- }
- if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
- return NULL;
- return p;
-}
-
-/* Is the iterator pointing to a sigreturn trampoline? */
-static int is_sigreturn(struct KBacktraceIterator *kbt)
-{
- return kbt->task->mm &&
- (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
- (ulong)&__vdso_rt_sigreturn));
-}
-
-/* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
- struct rt_sigframe* kframe)
-{
- BacktraceIterator *b = &kbt->it;
-
- if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
- b->sp % sizeof(long) == 0) {
- int retval;
- pagefault_disable();
- retval = __copy_from_user_inatomic(
- kframe, (void __user __force *)b->sp,
- sizeof(*kframe));
- pagefault_enable();
- if (retval != 0 ||
- (unsigned int)(kframe->info.si_signo) >= _NSIG)
- return NULL;
- if (kbt->verbose) {
- pr_err(" <received signal %d>\n",
- kframe->info.si_signo);
- }
- return (struct pt_regs *)&kframe->uc.uc_mcontext;
- }
- return NULL;
-}
-
-static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
-{
- struct pt_regs *p;
- struct rt_sigframe kframe;
-
- p = valid_fault_handler(kbt);
- if (p == NULL)
- p = valid_sigframe(kbt, &kframe);
- if (p == NULL)
- return 0;
- backtrace_init(&kbt->it, read_memory_func, kbt,
- p->pc, p->lr, p->sp, p->regs[52]);
- kbt->new_context = 1;
- return 1;
-}
-
-/* Find a frame that isn't a sigreturn, if there is one. */
-static int KBacktraceIterator_next_item_inclusive(
- struct KBacktraceIterator *kbt)
-{
- for (;;) {
- do {
- if (!is_sigreturn(kbt))
- return KBT_ONGOING;
- } while (backtrace_next(&kbt->it));
-
- if (!KBacktraceIterator_restart(kbt))
- return KBT_DONE;
- }
-}
-
-/*
- * If the current sp is on a page different than what we recorded
- * as the top-of-kernel-stack last time we context switched, we have
- * probably blown the stack, and nothing is going to work out well.
- * If we can at least get out a warning, that may help the debug,
- * though we probably won't be able to backtrace into the code that
- * actually did the recursive damage.
- */
-static void validate_stack(struct pt_regs *regs)
-{
- int cpu = raw_smp_processor_id();
- unsigned long ksp0 = get_current_ksp0();
- unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
- unsigned long sp = stack_pointer;
-
- if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
- pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
- " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
- cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
- }
-
- else if (sp < ksp0_base + sizeof(struct thread_info)) {
- pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
- " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
- cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
- }
-}
-
-void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
- struct task_struct *t, struct pt_regs *regs)
-{
- unsigned long pc, lr, sp, r52;
- int is_current;
-
- /*
- * Set up callback information. We grab the kernel stack base
- * so we will allow reads of that address range.
- */
- is_current = (t == NULL || t == current);
- kbt->is_current = is_current;
- if (is_current)
- t = validate_current();
- kbt->task = t;
- kbt->verbose = 0; /* override in caller if desired */
- kbt->profile = 0; /* override in caller if desired */
- kbt->end = KBT_ONGOING;
- kbt->new_context = 1;
- if (is_current)
- validate_stack(regs);
-
- if (regs == NULL) {
- if (is_current || t->state == TASK_RUNNING) {
- /* Can't do this; we need registers */
- kbt->end = KBT_RUNNING;
- return;
- }
- pc = get_switch_to_pc();
- lr = t->thread.pc;
- sp = t->thread.ksp;
- r52 = 0;
- } else {
- pc = regs->pc;
- lr = regs->lr;
- sp = regs->sp;
- r52 = regs->regs[52];
- }
-
- backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
- kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
-}
-EXPORT_SYMBOL(KBacktraceIterator_init);
-
-int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
-{
- return kbt->end != KBT_ONGOING;
-}
-EXPORT_SYMBOL(KBacktraceIterator_end);
-
-void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
-{
- unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
- kbt->new_context = 0;
- if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
- kbt->end = KBT_DONE;
- return;
- }
- kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
- if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
- /* Trapped in a loop; give up. */
- kbt->end = KBT_LOOP;
- }
-}
-EXPORT_SYMBOL(KBacktraceIterator_next);
-
-static void describe_addr(struct KBacktraceIterator *kbt,
- unsigned long address,
- int have_mmap_sem, char *buf, size_t bufsize)
-{
- struct vm_area_struct *vma;
- size_t namelen, remaining;
- unsigned long size, offset, adjust;
- char *p, *modname;
- const char *name;
- int rc;
-
- /*
- * Look one byte back for every caller frame (i.e. those that
- * aren't a new context) so we look up symbol data for the
- * call itself, not the following instruction, which may be on
- * a different line (or in a different function).
- */
- adjust = !kbt->new_context;
- address -= adjust;
-
- if (address >= PAGE_OFFSET) {
- /* Handle kernel symbols. */
- BUG_ON(bufsize < KSYM_NAME_LEN);
- name = kallsyms_lookup(address, &size, &offset,
- &modname, buf);
- if (name == NULL) {
- buf[0] = '\0';
- return;
- }
- namelen = strlen(buf);
- remaining = (bufsize - 1) - namelen;
- p = buf + namelen;
- rc = snprintf(p, remaining, "+%#lx/%#lx ",
- offset + adjust, size);
- if (modname && rc < remaining)
- snprintf(p + rc, remaining - rc, "[%s] ", modname);
- buf[bufsize-1] = '\0';
- return;
- }
-
- /* If we don't have the mmap_sem, we can't show any more info. */
- buf[0] = '\0';
- if (!have_mmap_sem)
- return;
-
- /* Find vma info. */
- vma = find_vma(kbt->task->mm, address);
- if (vma == NULL || address < vma->vm_start) {
- snprintf(buf, bufsize, "[unmapped address] ");
- return;
- }
-
- if (vma->vm_file) {
- p = file_path(vma->vm_file, buf, bufsize);
- if (IS_ERR(p))
- p = "?";
- name = kbasename(p);
- } else {
- name = "anon";
- }
-
- /* Generate a string description of the vma info. */
- namelen = strlen(name);
- remaining = (bufsize - 1) - namelen;
- memmove(buf, name, namelen);
- snprintf(buf + namelen, remaining, "[%lx+%lx] ",
- vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-/*
- * Avoid possible crash recursion during backtrace. If it happens,
- * it is easy to lose the actual root cause of the failure, so we
- * put a simple guard on all the backtrace loops.
- */
-static bool start_backtrace(void)
-{
- if (current_thread_info()->in_backtrace) {
- pr_err("Backtrace requested while in backtrace!\n");
- return false;
- }
- current_thread_info()->in_backtrace = true;
- return true;
-}
-
-static void end_backtrace(void)
-{
- current_thread_info()->in_backtrace = false;
-}
-
-/*
- * This method wraps the backtracer's more generic support.
- * It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() are architecture-independent entry points.
- */
-void tile_show_stack(struct KBacktraceIterator *kbt)
-{
- int i;
- int have_mmap_sem = 0;
-
- if (!start_backtrace())
- return;
- kbt->verbose = 1;
- i = 0;
- for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
- char namebuf[KSYM_NAME_LEN+100];
- unsigned long address = kbt->it.pc;
-
- /*
- * Try to acquire the mmap_sem as we pass into userspace.
-		 * If we're in an interrupt context, don't even try, since
-		 * it's not safe to call e.g. d_path() from an interrupt:
-		 * it uses spin locks without disabling interrupts.
- * Note we test "kbt->task == current", not "kbt->is_current",
- * since we're checking that "current" will work in d_path().
- */
- if (kbt->task == current && address < PAGE_OFFSET &&
- !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
- have_mmap_sem =
- down_read_trylock(&kbt->task->mm->mmap_sem);
- }
-
- describe_addr(kbt, address, have_mmap_sem,
- namebuf, sizeof(namebuf));
-
- pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
- i++, address, namebuf, (unsigned long)(kbt->it.sp));
-
- if (i >= 100) {
- pr_err("Stack dump truncated (%d frames)\n", i);
- break;
- }
- }
- if (kbt->end == KBT_LOOP)
- pr_err("Stack dump stopped; next frame identical to this one\n");
- if (have_mmap_sem)
- up_read(&kbt->task->mm->mmap_sem);
- end_backtrace();
-}
-EXPORT_SYMBOL(tile_show_stack);
-
-static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
- ulong pc, ulong lr, ulong sp, ulong r52)
-{
- memset(regs, 0, sizeof(struct pt_regs));
- regs->pc = pc;
- regs->lr = lr;
- regs->sp = sp;
- regs->regs[52] = r52;
- return regs;
-}
-
-/* Deprecated function currently only used by kernel_double_fault(). */
-void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
-{
- struct KBacktraceIterator kbt;
- struct pt_regs regs;
-
- regs_to_pt_regs(&regs, pc, lr, sp, r52);
- KBacktraceIterator_init(&kbt, NULL, &regs);
- tile_show_stack(&kbt);
-}
-
-/* This is called from KBacktraceIterator_init_current() */
-void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
- ulong lr, ulong sp, ulong r52)
-{
- struct pt_regs regs;
- KBacktraceIterator_init(kbt, NULL,
- regs_to_pt_regs(&regs, pc, lr, sp, r52));
-}
-
-/*
- * Called from sched_show_task() with task != NULL, or dump_stack()
- * with task == NULL. The esp argument is always NULL.
- */
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
- struct KBacktraceIterator kbt;
- if (task == NULL || task == current) {
- KBacktraceIterator_init_current(&kbt);
- KBacktraceIterator_next(&kbt); /* don't show first frame */
- } else {
- KBacktraceIterator_init(&kbt, task, NULL);
- }
- tile_show_stack(&kbt);
-}
-
-#ifdef CONFIG_STACKTRACE
-
-/* Support generic Linux stack API too */
-
-static void save_stack_trace_common(struct task_struct *task,
- struct pt_regs *regs,
- bool user,
- struct stack_trace *trace)
-{
- struct KBacktraceIterator kbt;
- int skip = trace->skip;
- int i = 0;
-
- if (!start_backtrace())
- goto done;
- if (regs != NULL) {
- KBacktraceIterator_init(&kbt, NULL, regs);
- } else if (task == NULL || task == current) {
- KBacktraceIterator_init_current(&kbt);
- skip++; /* don't show KBacktraceIterator_init_current */
- } else {
- KBacktraceIterator_init(&kbt, task, NULL);
- }
- for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
- if (skip) {
- --skip;
- continue;
- }
- if (i >= trace->max_entries ||
- (!user && kbt.it.pc < PAGE_OFFSET))
- break;
- trace->entries[i++] = kbt.it.pc;
- }
- end_backtrace();
-done:
- if (i < trace->max_entries)
- trace->entries[i++] = ULONG_MAX;
- trace->nr_entries = i;
-}
-
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
-{
- save_stack_trace_common(task, NULL, false, trace);
-}
-EXPORT_SYMBOL(save_stack_trace_tsk);
-
-void save_stack_trace(struct stack_trace *trace)
-{
- save_stack_trace_common(NULL, NULL, false, trace);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- save_stack_trace_common(NULL, regs, false, trace);
-}
-
-void save_stack_trace_user(struct stack_trace *trace)
-{
- /* Trace user stack if we are not a kernel thread. */
- if (current->mm)
- save_stack_trace_common(NULL, task_pt_regs(current),
- true, trace);
- else if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
-#endif
-
-/* In entry.S */
-EXPORT_SYMBOL(KBacktraceIterator_init_current);
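
The KBacktraceIterator functions above follow a plain cursor protocol: initialize, loop while not at the end, advance with next, exactly as tile_show_stack() does. A toy user-space cursor with the same loop shape (the frame data here is fabricated for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Toy cursor over a fixed list of PCs, mirroring the init/end/next
 * protocol of KBacktraceIterator. */
struct frame_iter {
	const unsigned long *pcs;
	int idx;
	int count;
};

static void iter_init(struct frame_iter *it, const unsigned long *pcs, int n)
{
	it->pcs = pcs;
	it->idx = 0;
	it->count = n;
}

static bool iter_end(const struct frame_iter *it)
{
	return it->idx >= it->count;
}

static void iter_next(struct frame_iter *it)
{
	it->idx++;
}

int main(void)
{
	const unsigned long pcs[] = { 0x1000, 0x1040, 0x10c0 };
	struct frame_iter it;
	int i = 0;

	for (iter_init(&it, pcs, 3); !iter_end(&it); iter_next(&it))
		printf(" frame %d: 0x%lx\n", i++, it.pcs[it.idx]);
	return 0;
}
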
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
deleted file mode 100644
index c7418dcbbb08..000000000000
--- a/arch/tile/kernel/sys.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/TILE
- * platform.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/mempolicy.h>
-#include <linux/binfmts.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <linux/signal.h>
-#include <asm/syscalls.h>
-#include <asm/pgtable.h>
-#include <asm/homecache.h>
-#include <asm/cachectl.h>
-#include <asm/byteorder.h>
-#include <arch/chip.h>
-
-SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
- unsigned long, flags)
-{
- /* DCACHE is not particularly effective if not bound to one cpu. */
- if (flags & DCACHE)
- homecache_evict(cpumask_of(raw_smp_processor_id()));
-
- if (flags & ICACHE)
- flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
- 0, 0, 0, NULL, NULL, 0);
- return 0;
-}
-
-/*
- * Syscalls that pass 64-bit values on 32-bit systems normally
- * pass them as (low,high) word packed into the immediately adjacent
- * registers. If the low word naturally falls on an even register,
- * our ABI makes it work correctly; if not, we adjust it here.
- * Handling it here means we don't have to fix uclibc AND glibc AND
- * any other standard libcs we want to support.
- */
-
-#if !defined(__tilegx__) || defined(CONFIG_COMPAT)
-
-#ifdef __BIG_ENDIAN
-#define SYSCALL_PAIR(name) u32 name ## _hi, u32 name ## _lo
-#else
-#define SYSCALL_PAIR(name) u32 name ## _lo, u32 name ## _hi
-#endif
-
-ssize_t sys32_readahead(int fd, SYSCALL_PAIR(offset), u32 count)
-{
- return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
-}
-
-int sys32_fadvise64_64(int fd, SYSCALL_PAIR(offset),
- SYSCALL_PAIR(len), int advice)
-{
- return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
- ((loff_t)len_hi << 32) | len_lo, advice);
-}
-
-#endif /* 32-bit syscall wrappers */
-
-/* Note: used by the compat code even in 64-bit Linux. */
-SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags,
- unsigned long, fd, unsigned long, off_4k)
-{
-#define PAGE_ADJUST (PAGE_SHIFT - 12)
- if (off_4k & ((1 << PAGE_ADJUST) - 1))
- return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- off_4k >> PAGE_ADJUST);
-}
-
-#ifdef __tilegx__
-SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, offset)
-{
- if (offset & ((1 << PAGE_SHIFT) - 1))
- return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- offset >> PAGE_SHIFT);
-}
-#endif
-
-
-/* Provide the actual syscall-number-to-call mapping. */
-#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
-
-#ifndef __tilegx__
-/* See comments at the top of the file. */
-#define sys_fadvise64_64 sys32_fadvise64_64
-#define sys_readahead sys32_readahead
-#endif
-
-/* Call the assembly trampolines where necessary. */
-#undef sys_rt_sigreturn
-#define sys_rt_sigreturn _sys_rt_sigreturn
-#define sys_clone _sys_clone
-
-/*
- * Note that we can't include <linux/unistd.h> here since the header
- * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
- */
-void *sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
-};
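
The SYSCALL_PAIR() wrappers above exist because a 64-bit argument arrives in two 32-bit registers, and which register holds the low word depends on endianness; once the two halves are named _lo and _hi, the merge is always the same shift-and-or. A self-contained sketch of that merge (the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit offset from two 32-bit register words, as the
 * sys32_readahead()/sys32_fadvise64_64() wrappers above do. */
static int64_t pair_to_off(uint32_t lo, uint32_t hi)
{
	return ((int64_t)hi << 32) | lo;
}

int main(void)
{
	/* 5 GiB split across two registers: (lo, hi) = (0x40000000, 0x1) */
	printf("%lld\n", (long long)pair_to_off(0x40000000u, 0x1u));
	return 0;
}
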
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
deleted file mode 100644
index b09456a3d77a..000000000000
--- a/arch/tile/kernel/sysfs.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * /sys entry support.
- */
-
-#include <linux/device.h>
-#include <linux/cpu.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/stat.h>
-#include <hv/hypervisor.h>
-
-/* Return a string queried from the hypervisor, truncated to page size. */
-static ssize_t get_hv_confstr(char *page, int query)
-{
- ssize_t n = hv_confstr(query, (unsigned long)page, PAGE_SIZE - 1);
- n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1) - 1;
- if (n)
- page[n++] = '\n';
- page[n] = '\0';
- return n;
-}
-
-static ssize_t chip_width_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sprintf(page, "%u\n", smp_width);
-}
-static DEVICE_ATTR_RO(chip_width);
-
-static ssize_t chip_height_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sprintf(page, "%u\n", smp_height);
-}
-static DEVICE_ATTR_RO(chip_height);
-
-static ssize_t chip_serial_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
-}
-static DEVICE_ATTR_RO(chip_serial);
-
-static ssize_t chip_revision_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
-}
-static DEVICE_ATTR_RO(chip_revision);
-
-
-static ssize_t type_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sprintf(page, "tilera\n");
-}
-static DEVICE_ATTR_RO(type);
-
-#define HV_CONF_ATTR(name, conf) \
- static ssize_t name ## _show(struct device *dev, \
- struct device_attribute *attr, \
- char *page) \
- { \
- return get_hv_confstr(page, conf); \
- } \
- static DEVICE_ATTR(name, 0444, name ## _show, NULL);
-
-HV_CONF_ATTR(version, HV_CONFSTR_HV_SW_VER)
-HV_CONF_ATTR(config_version, HV_CONFSTR_HV_CONFIG_VER)
-
-HV_CONF_ATTR(board_part, HV_CONFSTR_BOARD_PART_NUM)
-HV_CONF_ATTR(board_serial, HV_CONFSTR_BOARD_SERIAL_NUM)
-HV_CONF_ATTR(board_revision, HV_CONFSTR_BOARD_REV)
-HV_CONF_ATTR(board_description, HV_CONFSTR_BOARD_DESC)
-HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
-HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
-HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
-HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
-HV_CONF_ATTR(cpumod_part, HV_CONFSTR_CPUMOD_PART_NUM)
-HV_CONF_ATTR(cpumod_serial, HV_CONFSTR_CPUMOD_SERIAL_NUM)
-HV_CONF_ATTR(cpumod_revision, HV_CONFSTR_CPUMOD_REV)
-HV_CONF_ATTR(cpumod_description, HV_CONFSTR_CPUMOD_DESC)
-HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
-
-static struct attribute *board_attrs[] = {
- &dev_attr_board_part.attr,
- &dev_attr_board_serial.attr,
- &dev_attr_board_revision.attr,
- &dev_attr_board_description.attr,
- &dev_attr_mezz_part.attr,
- &dev_attr_mezz_serial.attr,
- &dev_attr_mezz_revision.attr,
- &dev_attr_mezz_description.attr,
- &dev_attr_cpumod_part.attr,
- &dev_attr_cpumod_serial.attr,
- &dev_attr_cpumod_revision.attr,
- &dev_attr_cpumod_description.attr,
- &dev_attr_switch_control.attr,
- NULL
-};
-
-static struct attribute_group board_attr_group = {
- .name = "board",
- .attrs = board_attrs,
-};
-
-
-static struct bin_attribute hvconfig_bin;
-
-static ssize_t
-hvconfig_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- static size_t size;
-
- /* Lazily learn the true size (minus the trailing NUL). */
- if (size == 0)
- size = hv_confstr(HV_CONFSTR_HV_CONFIG, 0, 0) - 1;
-
- /* Check and adjust input parameters. */
- if (off > size)
- return -EINVAL;
- if (count > size - off)
- count = size - off;
-
- if (count) {
- /* Get a copy of the hvc and copy out the relevant portion. */
- char *hvc;
-
- size = off + count;
- hvc = kmalloc(size, GFP_KERNEL);
- if (hvc == NULL)
- return -ENOMEM;
- hv_confstr(HV_CONFSTR_HV_CONFIG, (unsigned long)hvc, size);
- memcpy(buf, hvc + off, count);
- kfree(hvc);
- }
-
- return count;
-}
-
-static ssize_t hv_stats_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- int cpu = dev->id;
- long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
-
- ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
- (unsigned long)page, PAGE_SIZE - 1,
- lotar, 0);
- n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1);
- page[n] = '\0';
- return n;
-}
-
-static ssize_t hv_stats_store(struct device *dev,
- struct device_attribute *attr,
- const char *page,
- size_t count)
-{
- int cpu = dev->id;
- long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
-
- ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS, 0, 0, lotar, 1);
- return n < 0 ? n : count;
-}
-
-static DEVICE_ATTR_RW(hv_stats);
-
-static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
-{
- int err, cpu = dev->id;
-
- if (!cpu_online(cpu))
- return 0;
-
- err = sysfs_create_file(&dev->kobj, &dev_attr_hv_stats.attr);
-
- return err;
-}
-
-static void hv_stats_device_remove(struct device *dev,
- struct subsys_interface *sif)
-{
- int cpu = dev->id;
-
- if (cpu_online(cpu))
- sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
-}
-
-
-static struct subsys_interface hv_stats_interface = {
- .name = "hv_stats",
- .subsys = &cpu_subsys,
- .add_dev = hv_stats_device_add,
- .remove_dev = hv_stats_device_remove,
-};
-
-static int __init create_sysfs_entries(void)
-{
- int err = 0;
-
-#define create_cpu_attr(name) \
- if (!err) \
- err = device_create_file(cpu_subsys.dev_root, &dev_attr_##name);
- create_cpu_attr(chip_width);
- create_cpu_attr(chip_height);
- create_cpu_attr(chip_serial);
- create_cpu_attr(chip_revision);
-
-#define create_hv_attr(name) \
- if (!err) \
- err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name.attr);
- create_hv_attr(type);
- create_hv_attr(version);
- create_hv_attr(config_version);
-
- if (!err)
- err = sysfs_create_group(hypervisor_kobj, &board_attr_group);
-
- if (!err) {
- sysfs_bin_attr_init(&hvconfig_bin);
- hvconfig_bin.attr.name = "hvconfig";
- hvconfig_bin.attr.mode = S_IRUGO;
- hvconfig_bin.read = hvconfig_bin_read;
- hvconfig_bin.size = PAGE_SIZE;
- err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin);
- }
-
- if (!err) {
- /*
- * Don't bother adding the hv_stats files on each CPU if
- * our hypervisor doesn't supply statistics.
- */
- int cpu = raw_smp_processor_id();
- long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
- char dummy;
- ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
- (unsigned long) &dummy, 1,
- lotar, 0);
- if (n >= 0)
- err = subsys_interface_register(&hv_stats_interface);
- }
-
- return err;
-}
-subsys_initcall(create_sysfs_entries);
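
Each show() routine above follows the standard sysfs contract: format at most one page of text into the caller-supplied buffer and return the byte count. A user-space sketch of the chip_width_show() shape (PAGE_SIZE and the function name are stand-ins, not kernel API):

#include <stdio.h>
#include <sys/types.h>

#define PAGE_SIZE 4096

/* Mimic a sysfs show(): format into a page-sized buffer and return
 * the number of bytes written, as chip_width_show() does above. */
static ssize_t chip_width_show_sim(char *page, unsigned int smp_width)
{
	return snprintf(page, PAGE_SIZE, "%u\n", smp_width);
}

int main(void)
{
	char page[PAGE_SIZE];
	ssize_t n = chip_width_show_sim(page, 8);

	fwrite(page, 1, (size_t)n, stdout);
	return 0;
}
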
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c
deleted file mode 100644
index dd7bd1d8563c..000000000000
--- a/arch/tile/kernel/tile-desc_32.c
+++ /dev/null
@@ -1,2605 +0,0 @@
-/* TILEPro opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
-#define BFD_RELOC(x) -1
-
-/* Special registers. */
-#define TREG_LR 55
-#define TREG_SN 56
-#define TREG_ZERO 63
-
-#include <linux/stddef.h>
-#include <asm/tile-desc.h>
-
-const struct tilepro_opcode tilepro_opcodes[395] =
-{
- { "bpt", TILEPRO_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "info", TILEPRO_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
- { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
- },
- { "infol", TILEPRO_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
- { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "j", TILEPRO_OPC_J, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } },
- },
- { "jal", TILEPRO_OPC_JAL, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } },
- },
- { "move", TILEPRO_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 9, 10 }, { 11, 12 }, { 13, 14 }, { 0, } },
- },
- { "move.sn", TILEPRO_OPC_MOVE_SN, 0x3, 2, TREG_SN, 1,
- { { 7, 8 }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "movei", TILEPRO_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
- { { 7, 0 }, { 9, 1 }, { 11, 2 }, { 13, 3 }, { 0, } },
- },
- { "movei.sn", TILEPRO_OPC_MOVEI_SN, 0x3, 2, TREG_SN, 1,
- { { 7, 0 }, { 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "moveli", TILEPRO_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
- { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "moveli.sn", TILEPRO_OPC_MOVELI_SN, 0x3, 2, TREG_SN, 1,
- { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "movelis", TILEPRO_OPC_MOVELIS, 0x3, 2, TREG_SN, 1,
- { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch", TILEPRO_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 15 } },
- },
- { "raise", TILEPRO_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "add", TILEPRO_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "add.sn", TILEPRO_OPC_ADD_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addb", TILEPRO_OPC_ADDB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addb.sn", TILEPRO_OPC_ADDB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addbs_u", TILEPRO_OPC_ADDBS_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addbs_u.sn", TILEPRO_OPC_ADDBS_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addh", TILEPRO_OPC_ADDH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addh.sn", TILEPRO_OPC_ADDH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addhs", TILEPRO_OPC_ADDHS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addhs.sn", TILEPRO_OPC_ADDHS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addi", TILEPRO_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "addi.sn", TILEPRO_OPC_ADDI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addib", TILEPRO_OPC_ADDIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addib.sn", TILEPRO_OPC_ADDIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addih", TILEPRO_OPC_ADDIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addih.sn", TILEPRO_OPC_ADDIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addli", TILEPRO_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addli.sn", TILEPRO_OPC_ADDLI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addlis", TILEPRO_OPC_ADDLIS, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "adds", TILEPRO_OPC_ADDS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "adds.sn", TILEPRO_OPC_ADDS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffb_u", TILEPRO_OPC_ADIFFB_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffb_u.sn", TILEPRO_OPC_ADIFFB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffh", TILEPRO_OPC_ADIFFH, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffh.sn", TILEPRO_OPC_ADIFFH_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "and", TILEPRO_OPC_AND, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "and.sn", TILEPRO_OPC_AND_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "andi", TILEPRO_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "andi.sn", TILEPRO_OPC_ANDI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "auli", TILEPRO_OPC_AULI, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "avgb_u", TILEPRO_OPC_AVGB_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "avgb_u.sn", TILEPRO_OPC_AVGB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "avgh", TILEPRO_OPC_AVGH, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "avgh.sn", TILEPRO_OPC_AVGH_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bbns", TILEPRO_OPC_BBNS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbns.sn", TILEPRO_OPC_BBNS_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbnst", TILEPRO_OPC_BBNST, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbnst.sn", TILEPRO_OPC_BBNST_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbs", TILEPRO_OPC_BBS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbs.sn", TILEPRO_OPC_BBS_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbst", TILEPRO_OPC_BBST, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbst.sn", TILEPRO_OPC_BBST_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgez", TILEPRO_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgez.sn", TILEPRO_OPC_BGEZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgezt", TILEPRO_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgezt.sn", TILEPRO_OPC_BGEZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgz", TILEPRO_OPC_BGZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgz.sn", TILEPRO_OPC_BGZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgzt", TILEPRO_OPC_BGZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgzt.sn", TILEPRO_OPC_BGZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bitx", TILEPRO_OPC_BITX, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "bitx.sn", TILEPRO_OPC_BITX_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "blez", TILEPRO_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blez.sn", TILEPRO_OPC_BLEZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blezt", TILEPRO_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blezt.sn", TILEPRO_OPC_BLEZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blz", TILEPRO_OPC_BLZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blz.sn", TILEPRO_OPC_BLZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blzt", TILEPRO_OPC_BLZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blzt.sn", TILEPRO_OPC_BLZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnz", TILEPRO_OPC_BNZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnz.sn", TILEPRO_OPC_BNZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnzt", TILEPRO_OPC_BNZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnzt.sn", TILEPRO_OPC_BNZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bytex", TILEPRO_OPC_BYTEX, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "bytex.sn", TILEPRO_OPC_BYTEX_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bz", TILEPRO_OPC_BZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bz.sn", TILEPRO_OPC_BZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bzt", TILEPRO_OPC_BZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bzt.sn", TILEPRO_OPC_BZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "clz", TILEPRO_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "clz.sn", TILEPRO_OPC_CLZ_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_32", TILEPRO_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_32.sn", TILEPRO_OPC_CRC32_32_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_8", TILEPRO_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_8.sn", TILEPRO_OPC_CRC32_8_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "ctz", TILEPRO_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "ctz.sn", TILEPRO_OPC_CTZ_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "drain", TILEPRO_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "dtlbpr", TILEPRO_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "dword_align", TILEPRO_OPC_DWORD_ALIGN, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "dword_align.sn", TILEPRO_OPC_DWORD_ALIGN_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "finv", TILEPRO_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "flush", TILEPRO_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "fnop", TILEPRO_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "icoh", TILEPRO_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "ill", TILEPRO_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { }, { 0, } },
- },
- { "inthb", TILEPRO_OPC_INTHB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inthb.sn", TILEPRO_OPC_INTHB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inthh", TILEPRO_OPC_INTHH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inthh.sn", TILEPRO_OPC_INTHH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlb", TILEPRO_OPC_INTLB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlb.sn", TILEPRO_OPC_INTLB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlh", TILEPRO_OPC_INTLH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlh.sn", TILEPRO_OPC_INTLH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inv", TILEPRO_OPC_INV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "iret", TILEPRO_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "jalb", TILEPRO_OPC_JALB, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalf", TILEPRO_OPC_JALF, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalr", TILEPRO_OPC_JALR, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalrp", TILEPRO_OPC_JALRP, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "jb", TILEPRO_OPC_JB, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jf", TILEPRO_OPC_JF, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jr", TILEPRO_OPC_JR, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "jrp", TILEPRO_OPC_JRP, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lb", TILEPRO_OPC_LB, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lb.sn", TILEPRO_OPC_LB_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lb_u", TILEPRO_OPC_LB_U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lb_u.sn", TILEPRO_OPC_LB_U_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd", TILEPRO_OPC_LBADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd.sn", TILEPRO_OPC_LBADD_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd_u", TILEPRO_OPC_LBADD_U, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd_u.sn", TILEPRO_OPC_LBADD_U_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lh", TILEPRO_OPC_LH, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lh.sn", TILEPRO_OPC_LH_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lh_u", TILEPRO_OPC_LH_U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lh_u.sn", TILEPRO_OPC_LH_U_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd", TILEPRO_OPC_LHADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd.sn", TILEPRO_OPC_LHADD_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd_u", TILEPRO_OPC_LHADD_U, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd_u.sn", TILEPRO_OPC_LHADD_U_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lnk", TILEPRO_OPC_LNK, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "lnk.sn", TILEPRO_OPC_LNK_SN, 0x2, 1, TREG_SN, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "lw", TILEPRO_OPC_LW, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lw.sn", TILEPRO_OPC_LW_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lw_na", TILEPRO_OPC_LW_NA, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lw_na.sn", TILEPRO_OPC_LW_NA_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd", TILEPRO_OPC_LWADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd.sn", TILEPRO_OPC_LWADD_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd_na", TILEPRO_OPC_LWADD_NA, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd_na.sn", TILEPRO_OPC_LWADD_NA_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxb_u", TILEPRO_OPC_MAXB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxb_u.sn", TILEPRO_OPC_MAXB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxh", TILEPRO_OPC_MAXH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxh.sn", TILEPRO_OPC_MAXH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxib_u", TILEPRO_OPC_MAXIB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxib_u.sn", TILEPRO_OPC_MAXIB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxih", TILEPRO_OPC_MAXIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxih.sn", TILEPRO_OPC_MAXIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "mf", TILEPRO_OPC_MF, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "mfspr", TILEPRO_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 25 }, { 0, }, { 0, }, { 0, } },
- },
- { "minb_u", TILEPRO_OPC_MINB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minb_u.sn", TILEPRO_OPC_MINB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minh", TILEPRO_OPC_MINH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minh.sn", TILEPRO_OPC_MINH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minib_u", TILEPRO_OPC_MINIB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "minib_u.sn", TILEPRO_OPC_MINIB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "minih", TILEPRO_OPC_MINIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "minih.sn", TILEPRO_OPC_MINIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "mm", TILEPRO_OPC_MM, 0x3, 5, TREG_ZERO, 1,
- { { 7, 8, 16, 26, 27 }, { 9, 10, 17, 28, 29 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnz", TILEPRO_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "mnz.sn", TILEPRO_OPC_MNZ_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzb", TILEPRO_OPC_MNZB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzb.sn", TILEPRO_OPC_MNZB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzh", TILEPRO_OPC_MNZH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzh.sn", TILEPRO_OPC_MNZH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mtspr", TILEPRO_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 30, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_ss", TILEPRO_OPC_MULHH_SS, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhh_ss.sn", TILEPRO_OPC_MULHH_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_su", TILEPRO_OPC_MULHH_SU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_su.sn", TILEPRO_OPC_MULHH_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_uu", TILEPRO_OPC_MULHH_UU, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhh_uu.sn", TILEPRO_OPC_MULHH_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_ss", TILEPRO_OPC_MULHHA_SS, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhha_ss.sn", TILEPRO_OPC_MULHHA_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_su", TILEPRO_OPC_MULHHA_SU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_su.sn", TILEPRO_OPC_MULHHA_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_uu", TILEPRO_OPC_MULHHA_UU, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhha_uu.sn", TILEPRO_OPC_MULHHA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhhsa_uu", TILEPRO_OPC_MULHHSA_UU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhhsa_uu.sn", TILEPRO_OPC_MULHHSA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_ss", TILEPRO_OPC_MULHL_SS, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_ss.sn", TILEPRO_OPC_MULHL_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_su", TILEPRO_OPC_MULHL_SU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_su.sn", TILEPRO_OPC_MULHL_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_us", TILEPRO_OPC_MULHL_US, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_us.sn", TILEPRO_OPC_MULHL_US_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_uu", TILEPRO_OPC_MULHL_UU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_uu.sn", TILEPRO_OPC_MULHL_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_ss", TILEPRO_OPC_MULHLA_SS, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_ss.sn", TILEPRO_OPC_MULHLA_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_su", TILEPRO_OPC_MULHLA_SU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_su.sn", TILEPRO_OPC_MULHLA_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_us", TILEPRO_OPC_MULHLA_US, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_us.sn", TILEPRO_OPC_MULHLA_US_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_uu", TILEPRO_OPC_MULHLA_UU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_uu.sn", TILEPRO_OPC_MULHLA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhlsa_uu", TILEPRO_OPC_MULHLSA_UU, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhlsa_uu.sn", TILEPRO_OPC_MULHLSA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_ss", TILEPRO_OPC_MULLL_SS, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulll_ss.sn", TILEPRO_OPC_MULLL_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_su", TILEPRO_OPC_MULLL_SU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_su.sn", TILEPRO_OPC_MULLL_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_uu", TILEPRO_OPC_MULLL_UU, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulll_uu.sn", TILEPRO_OPC_MULLL_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_ss", TILEPRO_OPC_MULLLA_SS, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mullla_ss.sn", TILEPRO_OPC_MULLLA_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_su", TILEPRO_OPC_MULLLA_SU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_su.sn", TILEPRO_OPC_MULLLA_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_uu", TILEPRO_OPC_MULLLA_UU, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mullla_uu.sn", TILEPRO_OPC_MULLLA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulllsa_uu", TILEPRO_OPC_MULLLSA_UU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulllsa_uu.sn", TILEPRO_OPC_MULLLSA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mvnz", TILEPRO_OPC_MVNZ, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mvnz.sn", TILEPRO_OPC_MVNZ_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mvz", TILEPRO_OPC_MVZ, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mvz.sn", TILEPRO_OPC_MVZ_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mz", TILEPRO_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "mz.sn", TILEPRO_OPC_MZ_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzb", TILEPRO_OPC_MZB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzb.sn", TILEPRO_OPC_MZB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzh", TILEPRO_OPC_MZH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzh.sn", TILEPRO_OPC_MZH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "nap", TILEPRO_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "nop", TILEPRO_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "nor", TILEPRO_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "nor.sn", TILEPRO_OPC_NOR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "or", TILEPRO_OPC_OR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "or.sn", TILEPRO_OPC_OR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "ori", TILEPRO_OPC_ORI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "ori.sn", TILEPRO_OPC_ORI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "packbs_u", TILEPRO_OPC_PACKBS_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packbs_u.sn", TILEPRO_OPC_PACKBS_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhb", TILEPRO_OPC_PACKHB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhb.sn", TILEPRO_OPC_PACKHB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhs", TILEPRO_OPC_PACKHS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhs.sn", TILEPRO_OPC_PACKHS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packlb", TILEPRO_OPC_PACKLB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packlb.sn", TILEPRO_OPC_PACKLB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "pcnt", TILEPRO_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "pcnt.sn", TILEPRO_OPC_PCNT_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "rl", TILEPRO_OPC_RL, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "rl.sn", TILEPRO_OPC_RL_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "rli", TILEPRO_OPC_RLI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "rli.sn", TILEPRO_OPC_RLI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "s1a", TILEPRO_OPC_S1A, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "s1a.sn", TILEPRO_OPC_S1A_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "s2a", TILEPRO_OPC_S2A, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "s2a.sn", TILEPRO_OPC_S2A_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "s3a", TILEPRO_OPC_S3A, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "s3a.sn", TILEPRO_OPC_S3A_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sadab_u", TILEPRO_OPC_SADAB_U, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadab_u.sn", TILEPRO_OPC_SADAB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah", TILEPRO_OPC_SADAH, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah.sn", TILEPRO_OPC_SADAH_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah_u", TILEPRO_OPC_SADAH_U, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah_u.sn", TILEPRO_OPC_SADAH_U_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadb_u", TILEPRO_OPC_SADB_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadb_u.sn", TILEPRO_OPC_SADB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh", TILEPRO_OPC_SADH, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh.sn", TILEPRO_OPC_SADH_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh_u", TILEPRO_OPC_SADH_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh_u.sn", TILEPRO_OPC_SADH_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sb", TILEPRO_OPC_SB, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } },
- },
- { "sbadd", TILEPRO_OPC_SBADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } },
- },
- { "seq", TILEPRO_OPC_SEQ, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "seq.sn", TILEPRO_OPC_SEQ_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqb", TILEPRO_OPC_SEQB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqb.sn", TILEPRO_OPC_SEQB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqh", TILEPRO_OPC_SEQH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqh.sn", TILEPRO_OPC_SEQH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqi", TILEPRO_OPC_SEQI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "seqi.sn", TILEPRO_OPC_SEQI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqib", TILEPRO_OPC_SEQIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqib.sn", TILEPRO_OPC_SEQIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqih", TILEPRO_OPC_SEQIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqih.sn", TILEPRO_OPC_SEQIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sh", TILEPRO_OPC_SH, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } },
- },
- { "shadd", TILEPRO_OPC_SHADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } },
- },
- { "shl", TILEPRO_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "shl.sn", TILEPRO_OPC_SHL_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlb", TILEPRO_OPC_SHLB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlb.sn", TILEPRO_OPC_SHLB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlh", TILEPRO_OPC_SHLH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlh.sn", TILEPRO_OPC_SHLH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shli", TILEPRO_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "shli.sn", TILEPRO_OPC_SHLI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlib", TILEPRO_OPC_SHLIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlib.sn", TILEPRO_OPC_SHLIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlih", TILEPRO_OPC_SHLIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlih.sn", TILEPRO_OPC_SHLIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shr", TILEPRO_OPC_SHR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "shr.sn", TILEPRO_OPC_SHR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrb", TILEPRO_OPC_SHRB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrb.sn", TILEPRO_OPC_SHRB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrh", TILEPRO_OPC_SHRH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrh.sn", TILEPRO_OPC_SHRH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shri", TILEPRO_OPC_SHRI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "shri.sn", TILEPRO_OPC_SHRI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrib", TILEPRO_OPC_SHRIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrib.sn", TILEPRO_OPC_SHRIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrih", TILEPRO_OPC_SHRIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrih.sn", TILEPRO_OPC_SHRIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "slt", TILEPRO_OPC_SLT, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slt.sn", TILEPRO_OPC_SLT_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slt_u", TILEPRO_OPC_SLT_U, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slt_u.sn", TILEPRO_OPC_SLT_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb", TILEPRO_OPC_SLTB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb.sn", TILEPRO_OPC_SLTB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb_u", TILEPRO_OPC_SLTB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb_u.sn", TILEPRO_OPC_SLTB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slte", TILEPRO_OPC_SLTE, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slte.sn", TILEPRO_OPC_SLTE_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slte_u", TILEPRO_OPC_SLTE_U, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slte_u.sn", TILEPRO_OPC_SLTE_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb", TILEPRO_OPC_SLTEB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb.sn", TILEPRO_OPC_SLTEB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb_u", TILEPRO_OPC_SLTEB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb_u.sn", TILEPRO_OPC_SLTEB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh", TILEPRO_OPC_SLTEH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh.sn", TILEPRO_OPC_SLTEH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh_u", TILEPRO_OPC_SLTEH_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh_u.sn", TILEPRO_OPC_SLTEH_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth", TILEPRO_OPC_SLTH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth.sn", TILEPRO_OPC_SLTH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth_u", TILEPRO_OPC_SLTH_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth_u.sn", TILEPRO_OPC_SLTH_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slti", TILEPRO_OPC_SLTI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "slti.sn", TILEPRO_OPC_SLTI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "slti_u", TILEPRO_OPC_SLTI_U, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "slti_u.sn", TILEPRO_OPC_SLTI_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib", TILEPRO_OPC_SLTIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib.sn", TILEPRO_OPC_SLTIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib_u", TILEPRO_OPC_SLTIB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib_u.sn", TILEPRO_OPC_SLTIB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih", TILEPRO_OPC_SLTIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih.sn", TILEPRO_OPC_SLTIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih_u", TILEPRO_OPC_SLTIH_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih_u.sn", TILEPRO_OPC_SLTIH_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sne", TILEPRO_OPC_SNE, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "sne.sn", TILEPRO_OPC_SNE_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneb", TILEPRO_OPC_SNEB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneb.sn", TILEPRO_OPC_SNEB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneh", TILEPRO_OPC_SNEH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneh.sn", TILEPRO_OPC_SNEH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sra", TILEPRO_OPC_SRA, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "sra.sn", TILEPRO_OPC_SRA_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srab", TILEPRO_OPC_SRAB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srab.sn", TILEPRO_OPC_SRAB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srah", TILEPRO_OPC_SRAH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srah.sn", TILEPRO_OPC_SRAH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srai", TILEPRO_OPC_SRAI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "srai.sn", TILEPRO_OPC_SRAI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraib", TILEPRO_OPC_SRAIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraib.sn", TILEPRO_OPC_SRAIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraih", TILEPRO_OPC_SRAIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraih.sn", TILEPRO_OPC_SRAIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sub", TILEPRO_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "sub.sn", TILEPRO_OPC_SUB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subb", TILEPRO_OPC_SUBB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subb.sn", TILEPRO_OPC_SUBB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subbs_u", TILEPRO_OPC_SUBBS_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subbs_u.sn", TILEPRO_OPC_SUBBS_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subh", TILEPRO_OPC_SUBH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subh.sn", TILEPRO_OPC_SUBH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subhs", TILEPRO_OPC_SUBHS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subhs.sn", TILEPRO_OPC_SUBHS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subs", TILEPRO_OPC_SUBS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subs.sn", TILEPRO_OPC_SUBS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sw", TILEPRO_OPC_SW, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } },
- },
- { "swadd", TILEPRO_OPC_SWADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } },
- },
- { "swint0", TILEPRO_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint1", TILEPRO_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint2", TILEPRO_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint3", TILEPRO_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb0", TILEPRO_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb0.sn", TILEPRO_OPC_TBLIDXB0_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb1", TILEPRO_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb1.sn", TILEPRO_OPC_TBLIDXB1_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb2", TILEPRO_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb2.sn", TILEPRO_OPC_TBLIDXB2_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb3", TILEPRO_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb3.sn", TILEPRO_OPC_TBLIDXB3_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tns", TILEPRO_OPC_TNS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "tns.sn", TILEPRO_OPC_TNS_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "wh64", TILEPRO_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "xor", TILEPRO_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "xor.sn", TILEPRO_OPC_XOR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "xori", TILEPRO_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "xori.sn", TILEPRO_OPC_XORI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { NULL, TILEPRO_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
- }
-};
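[Editorial note: each row of the opcode table deleted above follows the layout of the tile disassembler's struct tilepro_opcode, defined elsewhere in the removed sources: mnemonic string, opcode enum, a bit mask of the pipelines that can issue it (X0=0x1, X1=0x2, Y0=0x4, Y1=0x8, Y2=0x10), the operand count, an implicitly written register (TREG_ZERO or TREG_SN), a can-bundle flag, and then per-pipeline lists of indices into the operand-description table. Reading one row from the table above as an illustration:

/* { "xor", TILEPRO_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
 *   { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } } }
 *
 * name, opcode enum, pipes 0xf = X0|X1|Y0|Y1, 3 operands, no implicit
 * write, bundleable; then one {dest, srca, srcb} operand-index triple
 * per pipeline (X0, X1, Y0, Y1, Y2), zeroed for pipes it cannot use.
 */
]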
-#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
-#define CHILD(array_index) (TILEPRO_OPC_NONE + (array_index))
-
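[Editorial note: the two macros above encode the decode tables that follow. BITFIELD(start, size) packs a field's starting bit position into the low 6 bits of an unsigned short and a size-bit mask into the remaining bits; CHILD(array_index) biases a table index by TILEPRO_OPC_NONE so internal links can never collide with real opcode numbers. A minimal helper showing how such a spec selects a fan-out slot (the helper name is illustrative, not from the file):

/* Illustrative only: extract the instruction field named by a
 * BITFIELD() spec from a 64-bit bundle.  The low 6 bits of the spec
 * give the start bit, the upper bits the field mask, so BITFIELD(22, 9)
 * selects bits 22..30 and fans out across 512 table slots. */
static unsigned int fsm_field(unsigned long long bundle, unsigned short spec)
{
	return (unsigned int)(bundle >> (spec & 63)) & (spec >> 6);
}
]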
-static const unsigned short decode_X0_fsm[1153] =
-{
- BITFIELD(22, 9) /* index 0 */,
- CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613),
- CHILD(630), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(663), CHILD(680), CHILD(697),
- CHILD(714), CHILD(746), CHILD(763), CHILD(780), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(873), CHILD(878), CHILD(883), CHILD(903), CHILD(908),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(913),
- CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(953), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(988), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(993), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1076), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(18, 4) /* index 513 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD,
- TILEPRO_OPC_ADIFFB_U, TILEPRO_OPC_ADIFFH, TILEPRO_OPC_AND,
- TILEPRO_OPC_AVGB_U, TILEPRO_OPC_AVGH, TILEPRO_OPC_CRC32_32,
- TILEPRO_OPC_CRC32_8, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH,
- TILEPRO_OPC_INTLB, TILEPRO_OPC_INTLH, TILEPRO_OPC_MAXB_U,
- BITFIELD(18, 4) /* index 530 */,
- TILEPRO_OPC_MAXH, TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB,
- TILEPRO_OPC_MNZH, TILEPRO_OPC_MNZ, TILEPRO_OPC_MULHHA_SS,
- TILEPRO_OPC_MULHHA_SU, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULHHSA_UU,
- TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_SU, TILEPRO_OPC_MULHH_UU,
- TILEPRO_OPC_MULHLA_SS, TILEPRO_OPC_MULHLA_SU, TILEPRO_OPC_MULHLA_US,
- BITFIELD(18, 4) /* index 547 */,
- TILEPRO_OPC_MULHLA_UU, TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_MULHL_SS,
- TILEPRO_OPC_MULHL_SU, TILEPRO_OPC_MULHL_US, TILEPRO_OPC_MULHL_UU,
- TILEPRO_OPC_MULLLA_SS, TILEPRO_OPC_MULLLA_SU, TILEPRO_OPC_MULLLA_UU,
- TILEPRO_OPC_MULLLSA_UU, TILEPRO_OPC_MULLL_SS, TILEPRO_OPC_MULLL_SU,
- TILEPRO_OPC_MULLL_UU, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZB,
- BITFIELD(18, 4) /* index 564 */,
- TILEPRO_OPC_MZH, TILEPRO_OPC_MZ, TILEPRO_OPC_NOR, CHILD(581),
- TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB, TILEPRO_OPC_RL, TILEPRO_OPC_S1A,
- TILEPRO_OPC_S2A, TILEPRO_OPC_S3A, TILEPRO_OPC_SADAB_U, TILEPRO_OPC_SADAH,
- TILEPRO_OPC_SADAH_U, TILEPRO_OPC_SADB_U, TILEPRO_OPC_SADH,
- TILEPRO_OPC_SADH_U,
- BITFIELD(12, 2) /* index 581 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(586),
- BITFIELD(14, 2) /* index 586 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(591),
- BITFIELD(16, 2) /* index 591 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(18, 4) /* index 596 */,
- TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ, TILEPRO_OPC_SHLB,
- TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB, TILEPRO_OPC_SHRH,
- TILEPRO_OPC_SHR, TILEPRO_OPC_SLTB, TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB,
- TILEPRO_OPC_SLTEB_U, TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U,
- TILEPRO_OPC_SLTE,
- BITFIELD(18, 4) /* index 613 */,
- TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT,
- TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE,
- TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB,
- TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB, TILEPRO_OPC_XOR, TILEPRO_OPC_DWORD_ALIGN,
- BITFIELD(18, 3) /* index 630 */,
- CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654),
- CHILD(657), CHILD(660),
- BITFIELD(21, 1) /* index 639 */,
- TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 642 */,
- TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 645 */,
- TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 648 */,
- TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 651 */,
- TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 654 */,
- TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 657 */,
- TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 660 */,
- TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE,
- BITFIELD(18, 4) /* index 663 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN,
- TILEPRO_OPC_ADD_SN, TILEPRO_OPC_ADIFFB_U_SN, TILEPRO_OPC_ADIFFH_SN,
- TILEPRO_OPC_AND_SN, TILEPRO_OPC_AVGB_U_SN, TILEPRO_OPC_AVGH_SN,
- TILEPRO_OPC_CRC32_32_SN, TILEPRO_OPC_CRC32_8_SN, TILEPRO_OPC_INTHB_SN,
- TILEPRO_OPC_INTHH_SN, TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN,
- TILEPRO_OPC_MAXB_U_SN,
- BITFIELD(18, 4) /* index 680 */,
- TILEPRO_OPC_MAXH_SN, TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN,
- TILEPRO_OPC_MNZB_SN, TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN,
- TILEPRO_OPC_MULHHA_SS_SN, TILEPRO_OPC_MULHHA_SU_SN,
- TILEPRO_OPC_MULHHA_UU_SN, TILEPRO_OPC_MULHHSA_UU_SN,
- TILEPRO_OPC_MULHH_SS_SN, TILEPRO_OPC_MULHH_SU_SN, TILEPRO_OPC_MULHH_UU_SN,
- TILEPRO_OPC_MULHLA_SS_SN, TILEPRO_OPC_MULHLA_SU_SN,
- TILEPRO_OPC_MULHLA_US_SN,
- BITFIELD(18, 4) /* index 697 */,
- TILEPRO_OPC_MULHLA_UU_SN, TILEPRO_OPC_MULHLSA_UU_SN,
- TILEPRO_OPC_MULHL_SS_SN, TILEPRO_OPC_MULHL_SU_SN, TILEPRO_OPC_MULHL_US_SN,
- TILEPRO_OPC_MULHL_UU_SN, TILEPRO_OPC_MULLLA_SS_SN, TILEPRO_OPC_MULLLA_SU_SN,
- TILEPRO_OPC_MULLLA_UU_SN, TILEPRO_OPC_MULLLSA_UU_SN,
- TILEPRO_OPC_MULLL_SS_SN, TILEPRO_OPC_MULLL_SU_SN, TILEPRO_OPC_MULLL_UU_SN,
- TILEPRO_OPC_MVNZ_SN, TILEPRO_OPC_MVZ_SN, TILEPRO_OPC_MZB_SN,
- BITFIELD(18, 4) /* index 714 */,
- TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(731),
- TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN,
- TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN,
- TILEPRO_OPC_SADAB_U_SN, TILEPRO_OPC_SADAH_SN, TILEPRO_OPC_SADAH_U_SN,
- TILEPRO_OPC_SADB_U_SN, TILEPRO_OPC_SADH_SN, TILEPRO_OPC_SADH_U_SN,
- BITFIELD(12, 2) /* index 731 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(736),
- BITFIELD(14, 2) /* index 736 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(741),
- BITFIELD(16, 2) /* index 741 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN,
- TILEPRO_OPC_MOVE_SN,
- BITFIELD(18, 4) /* index 746 */,
- TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN, TILEPRO_OPC_SEQ_SN,
- TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN, TILEPRO_OPC_SHL_SN,
- TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN, TILEPRO_OPC_SHR_SN,
- TILEPRO_OPC_SLTB_SN, TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN,
- TILEPRO_OPC_SLTEB_U_SN, TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN,
- TILEPRO_OPC_SLTE_SN,
- BITFIELD(18, 4) /* index 763 */,
- TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN,
- TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN,
- TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN,
- TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN,
- TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN, TILEPRO_OPC_XOR_SN,
- TILEPRO_OPC_DWORD_ALIGN_SN,
- BITFIELD(18, 3) /* index 780 */,
- CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804),
- CHILD(807), CHILD(810),
- BITFIELD(21, 1) /* index 789 */,
- TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 792 */,
- TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 795 */,
- TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 798 */,
- TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 801 */,
- TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 804 */,
- TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 807 */,
- TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 810 */,
- TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(6, 2) /* index 813 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(818),
- BITFIELD(8, 2) /* index 818 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(823),
- BITFIELD(10, 2) /* index 823 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- TILEPRO_OPC_MOVELI_SN,
- BITFIELD(6, 2) /* index 828 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(833),
- BITFIELD(8, 2) /* index 833 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(838),
- BITFIELD(10, 2) /* index 838 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI,
- BITFIELD(0, 2) /* index 843 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(848),
- BITFIELD(2, 2) /* index 848 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(853),
- BITFIELD(4, 2) /* index 853 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(858),
- BITFIELD(6, 2) /* index 858 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(863),
- BITFIELD(8, 2) /* index 863 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(868),
- BITFIELD(10, 2) /* index 868 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL,
- BITFIELD(20, 2) /* index 873 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI,
- BITFIELD(20, 2) /* index 878 */,
- TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MINIB_U,
- TILEPRO_OPC_MINIH,
- BITFIELD(20, 2) /* index 883 */,
- CHILD(888), TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI,
- BITFIELD(6, 2) /* index 888 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(893),
- BITFIELD(8, 2) /* index 893 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(898),
- BITFIELD(10, 2) /* index 898 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(20, 2) /* index 903 */,
- TILEPRO_OPC_SLTIB, TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH,
- TILEPRO_OPC_SLTIH_U,
- BITFIELD(20, 2) /* index 908 */,
- TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(20, 2) /* index 913 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN,
- TILEPRO_OPC_ADDI_SN,
- BITFIELD(20, 2) /* index 918 */,
- TILEPRO_OPC_MAXIB_U_SN, TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MINIB_U_SN,
- TILEPRO_OPC_MINIH_SN,
- BITFIELD(20, 2) /* index 923 */,
- CHILD(928), TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN,
- BITFIELD(6, 2) /* index 928 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(933),
- BITFIELD(8, 2) /* index 933 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(938),
- BITFIELD(10, 2) /* index 938 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN,
- TILEPRO_OPC_MOVEI_SN,
- BITFIELD(20, 2) /* index 943 */,
- TILEPRO_OPC_SLTIB_SN, TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN,
- TILEPRO_OPC_SLTIH_U_SN,
- BITFIELD(20, 2) /* index 948 */,
- TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(20, 2) /* index 953 */,
- TILEPRO_OPC_NONE, CHILD(958), TILEPRO_OPC_XORI, TILEPRO_OPC_NONE,
- BITFIELD(0, 2) /* index 958 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(963),
- BITFIELD(2, 2) /* index 963 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(968),
- BITFIELD(4, 2) /* index 968 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(973),
- BITFIELD(6, 2) /* index 973 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(978),
- BITFIELD(8, 2) /* index 978 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(983),
- BITFIELD(10, 2) /* index 983 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(20, 2) /* index 988 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_XORI_SN,
- TILEPRO_OPC_NONE,
- BITFIELD(17, 5) /* index 993 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLIB, TILEPRO_OPC_SHLIH,
- TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRIB, TILEPRO_OPC_SHRIH, TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SRAIB, TILEPRO_OPC_SRAIH, TILEPRO_OPC_SRAI, CHILD(1026),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(12, 4) /* index 1026 */,
- TILEPRO_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052),
- CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067),
- CHILD(1070), CHILD(1073), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1043 */,
- TILEPRO_OPC_BITX, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1046 */,
- TILEPRO_OPC_BYTEX, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1049 */,
- TILEPRO_OPC_CLZ, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1052 */,
- TILEPRO_OPC_CTZ, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1055 */,
- TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1058 */,
- TILEPRO_OPC_NOP, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1061 */,
- TILEPRO_OPC_PCNT, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1064 */,
- TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1067 */,
- TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1070 */,
- TILEPRO_OPC_TBLIDXB2, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1073 */,
- TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE,
- BITFIELD(17, 5) /* index 1076 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_RLI_SN, TILEPRO_OPC_SHLIB_SN,
- TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_SHRIB_SN,
- TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_SRAIB_SN,
- TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_SRAI_SN, CHILD(1109), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(12, 4) /* index 1109 */,
- TILEPRO_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135),
- CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144),
- CHILD(1147), CHILD(1150), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1126 */,
- TILEPRO_OPC_BITX_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1129 */,
- TILEPRO_OPC_BYTEX_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1132 */,
- TILEPRO_OPC_CLZ_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1135 */,
- TILEPRO_OPC_CTZ_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1138 */,
- TILEPRO_OPC_PCNT_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1141 */,
- TILEPRO_OPC_TBLIDXB0_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1144 */,
- TILEPRO_OPC_TBLIDXB1_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1147 */,
- TILEPRO_OPC_TBLIDXB2_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1150 */,
- TILEPRO_OPC_TBLIDXB3_SN, TILEPRO_OPC_NONE,
-};
-
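[Editorial note: decoding walks one of these FSM arrays from index 0: read the BITFIELD() spec, extract that field from the bundle, and use it to pick a slot among the entries that follow. A slot at or below TILEPRO_OPC_NONE is a final opcode (TILEPRO_OPC_NONE itself lands on the NULL terminator row of the opcode table above), while anything larger is a CHILD() link to another index in the same array — e.g. CHILD(513) at index 0 of decode_X0_fsm jumps past the 512 slots of the 9-bit root fan-out. A sketch of that loop, reusing the fsm_field() helper sketched earlier and assuming the deleted opcode table is tilepro_opcodes[], its name in the removed sources; the file's own lookup routine may differ in detail:

static const struct tilepro_opcode *
decode_sketch(const unsigned short *table, unsigned long long bundle)
{
	int index = 0;

	for (;;) {
		unsigned short spec = table[index];
		unsigned short next = table[index + 1 + fsm_field(bundle, spec)];

		if (next <= TILEPRO_OPC_NONE)
			return &tilepro_opcodes[next]; /* opcode, or NULL row */

		index = next - TILEPRO_OPC_NONE; /* follow CHILD() link */
	}
}
]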
-static const unsigned short decode_X1_fsm[1540] =
-{
- BITFIELD(54, 9) /* index 0 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(513), CHILD(561), CHILD(594), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(641),
- CHILD(689), CHILD(722), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826),
- CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826),
- CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826),
- CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(941), CHILD(950), CHILD(974), CHILD(983), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(992),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1334),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(49, 5) /* index 513 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD,
- TILEPRO_OPC_AND, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH, TILEPRO_OPC_INTLB,
- TILEPRO_OPC_INTLH, TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP,
- TILEPRO_OPC_JR, TILEPRO_OPC_LNK, TILEPRO_OPC_MAXB_U, TILEPRO_OPC_MAXH,
- TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB, TILEPRO_OPC_MNZH,
- TILEPRO_OPC_MNZ, TILEPRO_OPC_MZB, TILEPRO_OPC_MZH, TILEPRO_OPC_MZ,
- TILEPRO_OPC_NOR, CHILD(546), TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB,
- TILEPRO_OPC_RL, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_S3A,
- BITFIELD(43, 2) /* index 546 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(551),
- BITFIELD(45, 2) /* index 551 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(556),
- BITFIELD(47, 2) /* index 556 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(49, 5) /* index 561 */,
- TILEPRO_OPC_SB, TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ,
- TILEPRO_OPC_SHLB, TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB,
- TILEPRO_OPC_SHRH, TILEPRO_OPC_SHR, TILEPRO_OPC_SH, TILEPRO_OPC_SLTB,
- TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB, TILEPRO_OPC_SLTEB_U,
- TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U, TILEPRO_OPC_SLTE,
- TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT,
- TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE,
- TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB,
- TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB,
- BITFIELD(49, 4) /* index 594 */,
- CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626),
- CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 611 */,
- TILEPRO_OPC_SW, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 614 */,
- TILEPRO_OPC_XOR, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 617 */,
- TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 620 */,
- TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 623 */,
- TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 626 */,
- TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 629 */,
- TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 632 */,
- TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 635 */,
- TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 638 */,
- TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE,
- BITFIELD(49, 5) /* index 641 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN,
- TILEPRO_OPC_ADD_SN, TILEPRO_OPC_AND_SN, TILEPRO_OPC_INTHB_SN,
- TILEPRO_OPC_INTHH_SN, TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN,
- TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP, TILEPRO_OPC_JR,
- TILEPRO_OPC_LNK_SN, TILEPRO_OPC_MAXB_U_SN, TILEPRO_OPC_MAXH_SN,
- TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN, TILEPRO_OPC_MNZB_SN,
- TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN, TILEPRO_OPC_MZB_SN,
- TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(674),
- TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN,
- TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN,
- BITFIELD(43, 2) /* index 674 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(679),
- BITFIELD(45, 2) /* index 679 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(684),
- BITFIELD(47, 2) /* index 684 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN,
- TILEPRO_OPC_MOVE_SN,
- BITFIELD(49, 5) /* index 689 */,
- TILEPRO_OPC_SB, TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN,
- TILEPRO_OPC_SEQ_SN, TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN,
- TILEPRO_OPC_SHL_SN, TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN,
- TILEPRO_OPC_SHR_SN, TILEPRO_OPC_SH, TILEPRO_OPC_SLTB_SN,
- TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN, TILEPRO_OPC_SLTEB_U_SN,
- TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN, TILEPRO_OPC_SLTE_SN,
- TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN,
- TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN,
- TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN,
- TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN,
- TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN,
- BITFIELD(49, 4) /* index 722 */,
- CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751),
- CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 739 */,
- TILEPRO_OPC_XOR_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 742 */,
- TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 745 */,
- TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 748 */,
- TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 751 */,
- TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 754 */,
- TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 757 */,
- TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 760 */,
- TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 763 */,
- TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(37, 2) /* index 766 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(771),
- BITFIELD(39, 2) /* index 771 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(776),
- BITFIELD(41, 2) /* index 776 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- TILEPRO_OPC_MOVELI_SN,
- BITFIELD(37, 2) /* index 781 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(786),
- BITFIELD(39, 2) /* index 786 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(791),
- BITFIELD(41, 2) /* index 791 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI,
- BITFIELD(31, 2) /* index 796 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(801),
- BITFIELD(33, 2) /* index 801 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(806),
- BITFIELD(35, 2) /* index 806 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(811),
- BITFIELD(37, 2) /* index 811 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(816),
- BITFIELD(39, 2) /* index 816 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(821),
- BITFIELD(41, 2) /* index 821 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL,
- BITFIELD(31, 4) /* index 826 */,
- TILEPRO_OPC_BZ, TILEPRO_OPC_BZT, TILEPRO_OPC_BNZ, TILEPRO_OPC_BNZT,
- TILEPRO_OPC_BGZ, TILEPRO_OPC_BGZT, TILEPRO_OPC_BGEZ, TILEPRO_OPC_BGEZT,
- TILEPRO_OPC_BLZ, TILEPRO_OPC_BLZT, TILEPRO_OPC_BLEZ, TILEPRO_OPC_BLEZT,
- TILEPRO_OPC_BBS, TILEPRO_OPC_BBST, TILEPRO_OPC_BBNS, TILEPRO_OPC_BBNST,
- BITFIELD(31, 4) /* index 843 */,
- TILEPRO_OPC_BZ_SN, TILEPRO_OPC_BZT_SN, TILEPRO_OPC_BNZ_SN,
- TILEPRO_OPC_BNZT_SN, TILEPRO_OPC_BGZ_SN, TILEPRO_OPC_BGZT_SN,
- TILEPRO_OPC_BGEZ_SN, TILEPRO_OPC_BGEZT_SN, TILEPRO_OPC_BLZ_SN,
- TILEPRO_OPC_BLZT_SN, TILEPRO_OPC_BLEZ_SN, TILEPRO_OPC_BLEZT_SN,
- TILEPRO_OPC_BBS_SN, TILEPRO_OPC_BBST_SN, TILEPRO_OPC_BBNS_SN,
- TILEPRO_OPC_BBNST_SN,
- BITFIELD(51, 3) /* index 860 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI,
- CHILD(869), TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MFSPR,
- BITFIELD(31, 2) /* index 869 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(874),
- BITFIELD(33, 2) /* index 874 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(879),
- BITFIELD(35, 2) /* index 879 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(884),
- BITFIELD(37, 2) /* index 884 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(889),
- BITFIELD(39, 2) /* index 889 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(894),
- BITFIELD(41, 2) /* index 894 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(51, 3) /* index 899 */,
- TILEPRO_OPC_MINIB_U, TILEPRO_OPC_MINIH, TILEPRO_OPC_MTSPR, CHILD(908),
- TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI, TILEPRO_OPC_SLTIB,
- BITFIELD(37, 2) /* index 908 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(913),
- BITFIELD(39, 2) /* index 913 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(918),
- BITFIELD(41, 2) /* index 918 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(51, 3) /* index 923 */,
- TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH, TILEPRO_OPC_SLTIH_U,
- TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_XORI, TILEPRO_OPC_LBADD,
- TILEPRO_OPC_LBADD_U,
- BITFIELD(51, 3) /* index 932 */,
- TILEPRO_OPC_LHADD, TILEPRO_OPC_LHADD_U, TILEPRO_OPC_LWADD,
- TILEPRO_OPC_LWADD_NA, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD,
- TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE,
- BITFIELD(51, 3) /* index 941 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN,
- TILEPRO_OPC_ADDI_SN, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_MAXIB_U_SN,
- TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MFSPR,
- BITFIELD(51, 3) /* index 950 */,
- TILEPRO_OPC_MINIB_U_SN, TILEPRO_OPC_MINIH_SN, TILEPRO_OPC_MTSPR, CHILD(959),
- TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN,
- TILEPRO_OPC_SLTIB_SN,
- BITFIELD(37, 2) /* index 959 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(964),
- BITFIELD(39, 2) /* index 964 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(969),
- BITFIELD(41, 2) /* index 969 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN,
- TILEPRO_OPC_MOVEI_SN,
- BITFIELD(51, 3) /* index 974 */,
- TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN, TILEPRO_OPC_SLTIH_U_SN,
- TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_XORI_SN,
- TILEPRO_OPC_LBADD_SN, TILEPRO_OPC_LBADD_U_SN,
- BITFIELD(51, 3) /* index 983 */,
- TILEPRO_OPC_LHADD_SN, TILEPRO_OPC_LHADD_U_SN, TILEPRO_OPC_LWADD_SN,
- TILEPRO_OPC_LWADD_NA_SN, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD,
- TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE,
- BITFIELD(46, 7) /* index 992 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124),
- CHILD(1124), CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127),
- CHILD(1127), CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130),
- CHILD(1130), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133),
- CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139),
- CHILD(1139), CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142),
- CHILD(1142), CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145),
- CHILD(1145), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148),
- CHILD(1151), CHILD(1242), CHILD(1290), CHILD(1323), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1121 */,
- TILEPRO_OPC_RLI, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1124 */,
- TILEPRO_OPC_SHLIB, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1127 */,
- TILEPRO_OPC_SHLIH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1130 */,
- TILEPRO_OPC_SHLI, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1133 */,
- TILEPRO_OPC_SHRIB, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1136 */,
- TILEPRO_OPC_SHRIH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1139 */,
- TILEPRO_OPC_SHRI, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1142 */,
- TILEPRO_OPC_SRAIB, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1145 */,
- TILEPRO_OPC_SRAIH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1148 */,
- TILEPRO_OPC_SRAI, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1151 */,
- TILEPRO_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169),
- CHILD(1172), CHILD(1175), CHILD(1178),
- BITFIELD(53, 1) /* index 1160 */,
- TILEPRO_OPC_DRAIN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1163 */,
- TILEPRO_OPC_DTLBPR, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1166 */,
- TILEPRO_OPC_FINV, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1169 */,
- TILEPRO_OPC_FLUSH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1172 */,
- TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1175 */,
- TILEPRO_OPC_ICOH, TILEPRO_OPC_NONE,
- BITFIELD(31, 2) /* index 1178 */,
- CHILD(1183), CHILD(1211), CHILD(1239), CHILD(1239),
- BITFIELD(53, 1) /* index 1183 */,
- CHILD(1186), TILEPRO_OPC_NONE,
- BITFIELD(33, 2) /* index 1186 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1191),
- BITFIELD(35, 2) /* index 1191 */,
- TILEPRO_OPC_ILL, CHILD(1196), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(37, 2) /* index 1196 */,
- TILEPRO_OPC_ILL, CHILD(1201), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(39, 2) /* index 1201 */,
- TILEPRO_OPC_ILL, CHILD(1206), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(41, 2) /* index 1206 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_BPT, TILEPRO_OPC_ILL,
- BITFIELD(53, 1) /* index 1211 */,
- CHILD(1214), TILEPRO_OPC_NONE,
- BITFIELD(33, 2) /* index 1214 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1219),
- BITFIELD(35, 2) /* index 1219 */,
- TILEPRO_OPC_ILL, CHILD(1224), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(37, 2) /* index 1224 */,
- TILEPRO_OPC_ILL, CHILD(1229), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(39, 2) /* index 1229 */,
- TILEPRO_OPC_ILL, CHILD(1234), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(41, 2) /* index 1234 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_RAISE, TILEPRO_OPC_ILL,
- BITFIELD(53, 1) /* index 1239 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1242 */,
- CHILD(1251), CHILD(1254), CHILD(1257), CHILD(1275), CHILD(1278),
- CHILD(1281), CHILD(1284), CHILD(1287),
- BITFIELD(53, 1) /* index 1251 */,
- TILEPRO_OPC_INV, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1254 */,
- TILEPRO_OPC_IRET, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1257 */,
- CHILD(1260), TILEPRO_OPC_NONE,
- BITFIELD(31, 2) /* index 1260 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1265),
- BITFIELD(33, 2) /* index 1265 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1270),
- BITFIELD(35, 2) /* index 1270 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH,
- BITFIELD(53, 1) /* index 1275 */,
- TILEPRO_OPC_LB_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1278 */,
- TILEPRO_OPC_LH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1281 */,
- TILEPRO_OPC_LH_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1284 */,
- TILEPRO_OPC_LW, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1287 */,
- TILEPRO_OPC_MF, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1290 */,
- CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311),
- CHILD(1314), CHILD(1317), CHILD(1320),
- BITFIELD(53, 1) /* index 1299 */,
- TILEPRO_OPC_NAP, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1302 */,
- TILEPRO_OPC_NOP, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1305 */,
- TILEPRO_OPC_SWINT0, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1308 */,
- TILEPRO_OPC_SWINT1, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1311 */,
- TILEPRO_OPC_SWINT2, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1314 */,
- TILEPRO_OPC_SWINT3, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1317 */,
- TILEPRO_OPC_TNS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1320 */,
- TILEPRO_OPC_WH64, TILEPRO_OPC_NONE,
- BITFIELD(43, 2) /* index 1323 */,
- CHILD(1328), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(45, 1) /* index 1328 */,
- CHILD(1331), TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1331 */,
- TILEPRO_OPC_LW_NA, TILEPRO_OPC_NONE,
- BITFIELD(46, 7) /* index 1334 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1466),
- CHILD(1466), CHILD(1466), CHILD(1466), CHILD(1469), CHILD(1469),
- CHILD(1469), CHILD(1469), CHILD(1472), CHILD(1472), CHILD(1472),
- CHILD(1472), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1475),
- CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1481),
- CHILD(1481), CHILD(1481), CHILD(1481), CHILD(1484), CHILD(1484),
- CHILD(1484), CHILD(1484), CHILD(1487), CHILD(1487), CHILD(1487),
- CHILD(1487), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1490),
- CHILD(1151), CHILD(1493), CHILD(1517), CHILD(1529), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1463 */,
- TILEPRO_OPC_RLI_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1466 */,
- TILEPRO_OPC_SHLIB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1469 */,
- TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1472 */,
- TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1475 */,
- TILEPRO_OPC_SHRIB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1478 */,
- TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1481 */,
- TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1484 */,
- TILEPRO_OPC_SRAIB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1487 */,
- TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1490 */,
- TILEPRO_OPC_SRAI_SN, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1493 */,
- CHILD(1251), CHILD(1254), CHILD(1502), CHILD(1505), CHILD(1508),
- CHILD(1511), CHILD(1514), CHILD(1287),
- BITFIELD(53, 1) /* index 1502 */,
- TILEPRO_OPC_LB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1505 */,
- TILEPRO_OPC_LB_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1508 */,
- TILEPRO_OPC_LH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1511 */,
- TILEPRO_OPC_LH_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1514 */,
- TILEPRO_OPC_LW_SN, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1517 */,
- CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311),
- CHILD(1314), CHILD(1526), CHILD(1320),
- BITFIELD(53, 1) /* index 1526 */,
- TILEPRO_OPC_TNS_SN, TILEPRO_OPC_NONE,
- BITFIELD(43, 2) /* index 1529 */,
- CHILD(1534), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(45, 1) /* index 1534 */,
- CHILD(1537), TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1537 */,
- TILEPRO_OPC_LW_NA_SN, TILEPRO_OPC_NONE,
-};
-
-static const unsigned short decode_Y0_fsm[168] =
-{
- BITFIELD(27, 4) /* index 0 */,
- TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52),
- CHILD(57), CHILD(62), CHILD(67), TILEPRO_OPC_ADDI, CHILD(72), CHILD(102),
- TILEPRO_OPC_SEQI, CHILD(117), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U,
- BITFIELD(18, 2) /* index 17 */,
- TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB,
- BITFIELD(18, 2) /* index 22 */,
- TILEPRO_OPC_MNZ, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZ,
- BITFIELD(18, 2) /* index 27 */,
- TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR,
- BITFIELD(12, 2) /* index 32 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37),
- BITFIELD(14, 2) /* index 37 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42),
- BITFIELD(16, 2) /* index 42 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(18, 2) /* index 47 */,
- TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA,
- BITFIELD(18, 2) /* index 52 */,
- TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U,
- BITFIELD(18, 2) /* index 57 */,
- TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE,
- BITFIELD(18, 2) /* index 62 */,
- TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_UU, TILEPRO_OPC_MULLL_SS,
- TILEPRO_OPC_MULLL_UU,
- BITFIELD(18, 2) /* index 67 */,
- TILEPRO_OPC_MULHHA_SS, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULLLA_SS,
- TILEPRO_OPC_MULLLA_UU,
- BITFIELD(0, 2) /* index 72 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77),
- BITFIELD(2, 2) /* index 77 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82),
- BITFIELD(4, 2) /* index 82 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87),
- BITFIELD(6, 2) /* index 87 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(92),
- BITFIELD(8, 2) /* index 92 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(97),
- BITFIELD(10, 2) /* index 97 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(6, 2) /* index 102 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(107),
- BITFIELD(8, 2) /* index 107 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(112),
- BITFIELD(10, 2) /* index 112 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(15, 5) /* index 117 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI,
- TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI,
- TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI,
- CHILD(150), CHILD(159), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(12, 3) /* index 150 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_BITX, TILEPRO_OPC_BYTEX, TILEPRO_OPC_CLZ,
- TILEPRO_OPC_CTZ, TILEPRO_OPC_FNOP, TILEPRO_OPC_NOP, TILEPRO_OPC_PCNT,
- BITFIELD(12, 3) /* index 159 */,
- TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_TBLIDXB2,
- TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
-};
-
-static const unsigned short decode_Y1_fsm[140] =
-{
- BITFIELD(59, 4) /* index 0 */,
- TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52),
- CHILD(57), TILEPRO_OPC_ADDI, CHILD(62), CHILD(92), TILEPRO_OPC_SEQI,
- CHILD(107), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(49, 2) /* index 17 */,
- TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB,
- BITFIELD(49, 2) /* index 22 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_MNZ, TILEPRO_OPC_MZ, TILEPRO_OPC_NONE,
- BITFIELD(49, 2) /* index 27 */,
- TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR,
- BITFIELD(43, 2) /* index 32 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37),
- BITFIELD(45, 2) /* index 37 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42),
- BITFIELD(47, 2) /* index 42 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(49, 2) /* index 47 */,
- TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA,
- BITFIELD(49, 2) /* index 52 */,
- TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U,
- BITFIELD(49, 2) /* index 57 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE,
- BITFIELD(31, 2) /* index 62 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(67),
- BITFIELD(33, 2) /* index 67 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(72),
- BITFIELD(35, 2) /* index 72 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77),
- BITFIELD(37, 2) /* index 77 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82),
- BITFIELD(39, 2) /* index 82 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87),
- BITFIELD(41, 2) /* index 87 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(37, 2) /* index 92 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(97),
- BITFIELD(39, 2) /* index 97 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(102),
- BITFIELD(41, 2) /* index 102 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(48, 3) /* index 107 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SRAI, CHILD(116), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 116 */,
- TILEPRO_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(46, 2) /* index 125 */,
- TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(46, 2) /* index 130 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(46, 2) /* index 135 */,
- TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
-};
-
-static const unsigned short decode_Y2_fsm[24] =
-{
- BITFIELD(56, 3) /* index 0 */,
- CHILD(9), TILEPRO_OPC_LB_U, TILEPRO_OPC_LH, TILEPRO_OPC_LH_U,
- TILEPRO_OPC_LW, TILEPRO_OPC_SB, TILEPRO_OPC_SH, TILEPRO_OPC_SW,
- BITFIELD(20, 2) /* index 9 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(14),
- BITFIELD(22, 2) /* index 14 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(19),
- BITFIELD(24, 2) /* index 19 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH,
-};
-
-#undef BITFIELD
-#undef CHILD
-const unsigned short * const
-tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS] =
-{
- decode_X0_fsm,
- decode_X1_fsm,
- decode_Y0_fsm,
- decode_Y1_fsm,
- decode_Y2_fsm
-};
-const struct tilepro_operand tilepro_operands[43] =
-{
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X0, get_Imm8_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X1, get_Imm8_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y0, get_Imm8_Y0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y1, get_Imm8_Y1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X0),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X0, get_Imm16_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X1),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X1, get_Imm16_X1
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_JOFFLONG_X1),
- 29, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_JOffLong_X1, get_JOffLong_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X0, get_SrcA_X0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X1, get_Dest_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y0, get_SrcA_Y0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y1, get_Dest_Y1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y1, get_SrcA_Y1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y2, get_SrcA_Y2
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X0, get_SrcB_X0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X1, get_SrcB_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y0, get_SrcB_Y0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y1, get_SrcB_Y1
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_BROFF_X1),
- 17, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_BrOff_X1, get_BrOff_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE),
- 28, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_JOff_X1, get_JOff_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MF_IMM15_X1),
- 15, 0, 0, 0, 0, 0,
- create_MF_Imm15_X1, get_MF_Imm15_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X0),
- 5, 0, 0, 0, 0, 0,
- create_MMStart_X0, get_MMStart_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X0),
- 5, 0, 0, 0, 0, 0,
- create_MMEnd_X0, get_MMEnd_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X1),
- 5, 0, 0, 0, 0, 0,
- create_MMStart_X1, get_MMStart_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X1),
- 5, 0, 0, 0, 0, 0,
- create_MMEnd_X1, get_MMEnd_X1
- },
- {
- TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MT_IMM15_X1),
- 15, 0, 0, 0, 0, 0,
- create_MT_Imm15_X1, get_MT_Imm15_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X0),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_X0, get_ShAmt_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X1),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_X1, get_ShAmt_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y0),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_Y0, get_ShAmt_Y0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y1),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_Y1, get_ShAmt_Y1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_DEST_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Dest_Imm8_X1, get_Dest_Imm8_X1
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE),
- 10, 1, 0, 0, 1, TILEPRO_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES,
- create_BrOff_SN, get_BrOff_SN
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
- 8, 0, 0, 0, 0, 0,
- create_Imm8_SN, get_Imm8_SN
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_SN, get_Imm8_SN
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 2, 0, 0, 1, 0, 0,
- create_Dest_SN, get_Dest_SN
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 2, 0, 1, 0, 0, 0,
- create_Src_SN, get_Src_SN
- }
-};
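/*
 * Field layout of each tilepro_operand above, inferred from how
 * parse_insn_tilepro() below consumes it (the field names here are
 * assumptions, not the original header): operand type, bfd relocation,
 * bit width (num_bits), an is_signed flag that drives sign extension,
 * src/dest flags, then a pc-relative flag with its log2 scale for
 * branch and jump offsets, and finally the insert/extract accessors
 * (extract is the get_* hook called during decode).
 */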
-
-
-
-
-/* Given a set of bundle bits and a specific pipe, returns which
- * instruction the bundle contains in that pipe.
- */
-const struct tilepro_opcode *
-find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe)
-{
- const unsigned short *table = tilepro_bundle_decoder_fsms[pipe];
- int index = 0;
-
- while (1)
- {
- unsigned short bitspec = table[index];
- unsigned int bitfield =
- ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
-
- unsigned short next = table[index + 1 + bitfield];
- if (next <= TILEPRO_OPC_NONE)
- return &tilepro_opcodes[next];
-
- index = next - TILEPRO_OPC_NONE;
- }
-}
-
-
-int
-parse_insn_tilepro(tilepro_bundle_bits bits,
- unsigned int pc,
- struct tilepro_decoded_instruction
- decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE])
-{
- int num_instructions = 0;
- int pipe;
-
- int min_pipe, max_pipe;
- if ((bits & TILEPRO_BUNDLE_Y_ENCODING_MASK) == 0)
- {
- min_pipe = TILEPRO_PIPELINE_X0;
- max_pipe = TILEPRO_PIPELINE_X1;
- }
- else
- {
- min_pipe = TILEPRO_PIPELINE_Y0;
- max_pipe = TILEPRO_PIPELINE_Y2;
- }
-
- /* For each pipe, find an instruction that fits. */
- for (pipe = min_pipe; pipe <= max_pipe; pipe++)
- {
- const struct tilepro_opcode *opc;
- struct tilepro_decoded_instruction *d;
- int i;
-
- d = &decoded[num_instructions++];
- opc = find_opcode (bits, (tilepro_pipeline)pipe);
- d->opcode = opc;
-
- /* Decode each operand, sign extending, etc. as appropriate. */
- for (i = 0; i < opc->num_operands; i++)
- {
- const struct tilepro_operand *op =
- &tilepro_operands[opc->operands[pipe][i]];
- int opval = op->extract (bits);
- if (op->is_signed)
- {
- /* Sign-extend the operand. */
- int shift = (int)((sizeof(int) * 8) - op->num_bits);
- opval = (opval << shift) >> shift;
- }
-
- /* Adjust PC-relative scaled branch offsets. */
- if (op->type == TILEPRO_OP_TYPE_ADDRESS)
- {
- opval *= TILEPRO_BUNDLE_SIZE_IN_BYTES;
- opval += (int)pc;
- }
-
- /* Record the final value. */
- d->operands[i] = op;
- d->operand_values[i] = opval;
- }
- }
-
- return num_instructions;
-}
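
The tables above form a decision tree that find_opcode() walks: each
BITFIELD() node says which bits of the bundle to extract, the extracted
value selects one of the entries that follow it, and each entry is either
a leaf opcode (any value up to TILEPRO_OPC_NONE) or a CHILD() link to
another node. Below is a minimal, self-contained sketch of that walk. The
BITFIELD() and CHILD() definitions are reconstructed from how
find_opcode() consumes the tables (the low six bits of a node hold the
start bit, the upper bits hold the extraction mask, and child links are
biased by the highest opcode value); the toy opcode enum and table are
invented purely for illustration.

#include <stdio.h>

enum { OPC_A, OPC_B, OPC_NONE };	/* OPC_NONE must be the largest */

#define BITFIELD(start, size) ((start) | (((1u << (size)) - 1) << 6))
#define CHILD(index)          (OPC_NONE + (index))

static const unsigned short fsm[] = {
	BITFIELD(0, 2),			/* index 0: switch on bits [1:0] */
	OPC_A, OPC_B, OPC_NONE, CHILD(5),	/* value 3 descends */
	BITFIELD(2, 1),			/* index 5: switch on bit 2 */
	OPC_B, OPC_A,
};

static int decode(unsigned long long bits)
{
	int index = 0;

	while (1) {
		unsigned short bitspec = fsm[index];
		unsigned int bitfield =
			((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
		unsigned short next = fsm[index + 1 + bitfield];

		if (next <= OPC_NONE)
			return next;		/* leaf: a decoded opcode */
		index = next - OPC_NONE;	/* internal: follow the link */
	}
}

int main(void)
{
	/* 0x0 -> OPC_A; 0x3 descends to index 5 -> OPC_B; 0x7 -> OPC_A */
	printf("%d %d %d\n", decode(0x0), decode(0x3), decode(0x7));
	return 0;
}
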
diff --git a/arch/tile/kernel/tile-desc_64.c b/arch/tile/kernel/tile-desc_64.c
deleted file mode 100644
index 65b5f8aca706..000000000000
--- a/arch/tile/kernel/tile-desc_64.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-/* TILE-Gx opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- *
- *
- *
- */
-
-/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
-#define BFD_RELOC(x) -1
-
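/*
 * In the binutils copy of this table, the define above is instead the
 * token-pasting form described by the comment; as a sketch:
 *
 *	#define BFD_RELOC(x) BFD_RELOC_##x
 *
 * so each BFD_RELOC(...) slot in the operand table names a real bfd
 * relocation enumerator, while defining it to -1 lets the kernel reuse
 * the generated table without any bfd dependency.
 */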
-/* Special registers. */
-#define TREG_LR 55
-#define TREG_SN 56
-#define TREG_ZERO 63
-
-#include <linux/stddef.h>
-#include <asm/tile-desc.h>
-
-const struct tilegx_opcode tilegx_opcodes[334] =
-{
- { "bpt", TILEGX_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "info", TILEGX_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
- { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
- },
- { "infol", TILEGX_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
- { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "move", TILEGX_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 8, 9 }, { 10, 11 }, { 12, 13 }, { 0, } },
- },
- { "movei", TILEGX_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
- { { 6, 0 }, { 8, 1 }, { 10, 2 }, { 12, 3 }, { 0, } },
- },
- { "moveli", TILEGX_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
- { { 6, 4 }, { 8, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch", TILEGX_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_add_l1", TILEGX_OPC_PREFETCH_ADD_L1, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l1_fault", TILEGX_OPC_PREFETCH_ADD_L1_FAULT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l2", TILEGX_OPC_PREFETCH_ADD_L2, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l2_fault", TILEGX_OPC_PREFETCH_ADD_L2_FAULT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l3", TILEGX_OPC_PREFETCH_ADD_L3, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l3_fault", TILEGX_OPC_PREFETCH_ADD_L3_FAULT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_l1", TILEGX_OPC_PREFETCH_L1, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l1_fault", TILEGX_OPC_PREFETCH_L1_FAULT, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l2", TILEGX_OPC_PREFETCH_L2, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l2_fault", TILEGX_OPC_PREFETCH_L2_FAULT, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l3", TILEGX_OPC_PREFETCH_L3, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l3_fault", TILEGX_OPC_PREFETCH_L3_FAULT, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "raise", TILEGX_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "add", TILEGX_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "addi", TILEGX_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "addli", TILEGX_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addx", TILEGX_OPC_ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "addxi", TILEGX_OPC_ADDXI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "addxli", TILEGX_OPC_ADDXLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addxsc", TILEGX_OPC_ADDXSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "and", TILEGX_OPC_AND, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "andi", TILEGX_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "beqz", TILEGX_OPC_BEQZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "beqzt", TILEGX_OPC_BEQZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bfexts", TILEGX_OPC_BFEXTS, 0x1, 4, TREG_ZERO, 1,
- { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bfextu", TILEGX_OPC_BFEXTU, 0x1, 4, TREG_ZERO, 1,
- { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bfins", TILEGX_OPC_BFINS, 0x1, 4, TREG_ZERO, 1,
- { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bgez", TILEGX_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgezt", TILEGX_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgtz", TILEGX_OPC_BGTZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgtzt", TILEGX_OPC_BGTZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbc", TILEGX_OPC_BLBC, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbct", TILEGX_OPC_BLBCT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbs", TILEGX_OPC_BLBS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbst", TILEGX_OPC_BLBST, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blez", TILEGX_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blezt", TILEGX_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bltz", TILEGX_OPC_BLTZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bltzt", TILEGX_OPC_BLTZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnez", TILEGX_OPC_BNEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnezt", TILEGX_OPC_BNEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "clz", TILEGX_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "cmoveqz", TILEGX_OPC_CMOVEQZ, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "cmovnez", TILEGX_OPC_CMOVNEZ, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "cmpeq", TILEGX_OPC_CMPEQ, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpeqi", TILEGX_OPC_CMPEQI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "cmpexch", TILEGX_OPC_CMPEXCH, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "cmpexch4", TILEGX_OPC_CMPEXCH4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "cmples", TILEGX_OPC_CMPLES, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpleu", TILEGX_OPC_CMPLEU, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmplts", TILEGX_OPC_CMPLTS, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpltsi", TILEGX_OPC_CMPLTSI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "cmpltu", TILEGX_OPC_CMPLTU, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpltui", TILEGX_OPC_CMPLTUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "cmpne", TILEGX_OPC_CMPNE, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmul", TILEGX_OPC_CMUL, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmula", TILEGX_OPC_CMULA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulaf", TILEGX_OPC_CMULAF, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulf", TILEGX_OPC_CMULF, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulfr", TILEGX_OPC_CMULFR, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulh", TILEGX_OPC_CMULH, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulhr", TILEGX_OPC_CMULHR, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_32", TILEGX_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_8", TILEGX_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "ctz", TILEGX_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "dblalign", TILEGX_OPC_DBLALIGN, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "dblalign2", TILEGX_OPC_DBLALIGN2, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "dblalign4", TILEGX_OPC_DBLALIGN4, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "dblalign6", TILEGX_OPC_DBLALIGN6, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "drain", TILEGX_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "dtlbpr", TILEGX_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "exch", TILEGX_OPC_EXCH, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "exch4", TILEGX_OPC_EXCH4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_add_flags", TILEGX_OPC_FDOUBLE_ADD_FLAGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_addsub", TILEGX_OPC_FDOUBLE_ADDSUB, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_mul_flags", TILEGX_OPC_FDOUBLE_MUL_FLAGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_pack1", TILEGX_OPC_FDOUBLE_PACK1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_pack2", TILEGX_OPC_FDOUBLE_PACK2, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_sub_flags", TILEGX_OPC_FDOUBLE_SUB_FLAGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_unpack_max", TILEGX_OPC_FDOUBLE_UNPACK_MAX, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_unpack_min", TILEGX_OPC_FDOUBLE_UNPACK_MIN, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchadd", TILEGX_OPC_FETCHADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchadd4", TILEGX_OPC_FETCHADD4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchaddgez", TILEGX_OPC_FETCHADDGEZ, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchaddgez4", TILEGX_OPC_FETCHADDGEZ4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchand", TILEGX_OPC_FETCHAND, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchand4", TILEGX_OPC_FETCHAND4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchor", TILEGX_OPC_FETCHOR, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchor4", TILEGX_OPC_FETCHOR4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "finv", TILEGX_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "flush", TILEGX_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "flushwb", TILEGX_OPC_FLUSHWB, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "fnop", TILEGX_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "fsingle_add1", TILEGX_OPC_FSINGLE_ADD1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_addsub2", TILEGX_OPC_FSINGLE_ADDSUB2, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_mul1", TILEGX_OPC_FSINGLE_MUL1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_mul2", TILEGX_OPC_FSINGLE_MUL2, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_pack1", TILEGX_OPC_FSINGLE_PACK1, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "fsingle_pack2", TILEGX_OPC_FSINGLE_PACK2, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_sub1", TILEGX_OPC_FSINGLE_SUB1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "icoh", TILEGX_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ill", TILEGX_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { }, { 0, } },
- },
- { "inv", TILEGX_OPC_INV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "iret", TILEGX_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "j", TILEGX_OPC_J, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
- },
- { "jal", TILEGX_OPC_JAL, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalr", TILEGX_OPC_JALR, 0xa, 1, TREG_LR, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "jalrp", TILEGX_OPC_JALRP, 0xa, 1, TREG_LR, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "jr", TILEGX_OPC_JR, 0xa, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "jrp", TILEGX_OPC_JRP, 0xa, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "ld", TILEGX_OPC_LD, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld1s", TILEGX_OPC_LD1S, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld1s_add", TILEGX_OPC_LD1S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld1u", TILEGX_OPC_LD1U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld1u_add", TILEGX_OPC_LD1U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld2s", TILEGX_OPC_LD2S, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld2s_add", TILEGX_OPC_LD2S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld2u", TILEGX_OPC_LD2U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld2u_add", TILEGX_OPC_LD2U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld4s", TILEGX_OPC_LD4S, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld4s_add", TILEGX_OPC_LD4S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld4u", TILEGX_OPC_LD4U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld4u_add", TILEGX_OPC_LD4U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld_add", TILEGX_OPC_LD_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldna", TILEGX_OPC_LDNA, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldna_add", TILEGX_OPC_LDNA_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt", TILEGX_OPC_LDNT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1s", TILEGX_OPC_LDNT1S, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1s_add", TILEGX_OPC_LDNT1S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1u", TILEGX_OPC_LDNT1U, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1u_add", TILEGX_OPC_LDNT1U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2s", TILEGX_OPC_LDNT2S, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2s_add", TILEGX_OPC_LDNT2S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2u", TILEGX_OPC_LDNT2U, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2u_add", TILEGX_OPC_LDNT2U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4s", TILEGX_OPC_LDNT4S, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4s_add", TILEGX_OPC_LDNT4S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4u", TILEGX_OPC_LDNT4U, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4u_add", TILEGX_OPC_LDNT4U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt_add", TILEGX_OPC_LDNT_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lnk", TILEGX_OPC_LNK, 0xa, 1, TREG_ZERO, 1,
- { { 0, }, { 8 }, { 0, }, { 12 }, { 0, } },
- },
- { "mf", TILEGX_OPC_MF, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "mfspr", TILEGX_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 27 }, { 0, }, { 0, }, { 0, } },
- },
- { "mm", TILEGX_OPC_MM, 0x1, 4, TREG_ZERO, 1,
- { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mnz", TILEGX_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "mtspr", TILEGX_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 28, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hs_hs", TILEGX_OPC_MUL_HS_HS, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mul_hs_hu", TILEGX_OPC_MUL_HS_HU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hs_ls", TILEGX_OPC_MUL_HS_LS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hs_lu", TILEGX_OPC_MUL_HS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hu_hu", TILEGX_OPC_MUL_HU_HU, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mul_hu_ls", TILEGX_OPC_MUL_HU_LS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hu_lu", TILEGX_OPC_MUL_HU_LU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_ls_ls", TILEGX_OPC_MUL_LS_LS, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mul_ls_lu", TILEGX_OPC_MUL_LS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_lu_lu", TILEGX_OPC_MUL_LU_LU, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_hs_hs", TILEGX_OPC_MULA_HS_HS, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_hs_hu", TILEGX_OPC_MULA_HS_HU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hs_ls", TILEGX_OPC_MULA_HS_LS, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hs_lu", TILEGX_OPC_MULA_HS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hu_hu", TILEGX_OPC_MULA_HU_HU, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_hu_ls", TILEGX_OPC_MULA_HU_LS, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hu_lu", TILEGX_OPC_MULA_HU_LU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_ls_ls", TILEGX_OPC_MULA_LS_LS, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_ls_lu", TILEGX_OPC_MULA_LS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_lu_lu", TILEGX_OPC_MULA_LU_LU, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mulax", TILEGX_OPC_MULAX, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mulx", TILEGX_OPC_MULX, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "nap", TILEGX_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "nop", TILEGX_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "nor", TILEGX_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "or", TILEGX_OPC_OR, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "ori", TILEGX_OPC_ORI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "pcnt", TILEGX_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "revbits", TILEGX_OPC_REVBITS, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "revbytes", TILEGX_OPC_REVBYTES, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "rotl", TILEGX_OPC_ROTL, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "rotli", TILEGX_OPC_ROTLI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shl", TILEGX_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl16insli", TILEGX_OPC_SHL16INSLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "shl1add", TILEGX_OPC_SHL1ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl1addx", TILEGX_OPC_SHL1ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl2add", TILEGX_OPC_SHL2ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl2addx", TILEGX_OPC_SHL2ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl3add", TILEGX_OPC_SHL3ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl3addx", TILEGX_OPC_SHL3ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shli", TILEGX_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shlx", TILEGX_OPC_SHLX, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlxi", TILEGX_OPC_SHLXI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrs", TILEGX_OPC_SHRS, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shrsi", TILEGX_OPC_SHRSI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shru", TILEGX_OPC_SHRU, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shrui", TILEGX_OPC_SHRUI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shrux", TILEGX_OPC_SHRUX, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shruxi", TILEGX_OPC_SHRUXI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "shufflebytes", TILEGX_OPC_SHUFFLEBYTES, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "st", TILEGX_OPC_ST, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st1", TILEGX_OPC_ST1, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st1_add", TILEGX_OPC_ST1_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "st2", TILEGX_OPC_ST2, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st2_add", TILEGX_OPC_ST2_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "st4", TILEGX_OPC_ST4, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st4_add", TILEGX_OPC_ST4_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "st_add", TILEGX_OPC_ST_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt", TILEGX_OPC_STNT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt1", TILEGX_OPC_STNT1, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt1_add", TILEGX_OPC_STNT1_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt2", TILEGX_OPC_STNT2, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt2_add", TILEGX_OPC_STNT2_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt4", TILEGX_OPC_STNT4, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt4_add", TILEGX_OPC_STNT4_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt_add", TILEGX_OPC_STNT_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "sub", TILEGX_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "subx", TILEGX_OPC_SUBX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "subxsc", TILEGX_OPC_SUBXSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "swint0", TILEGX_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint1", TILEGX_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint2", TILEGX_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint3", TILEGX_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb0", TILEGX_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "tblidxb1", TILEGX_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "tblidxb2", TILEGX_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "tblidxb3", TILEGX_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "v1add", TILEGX_OPC_V1ADD, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1addi", TILEGX_OPC_V1ADDI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1adduc", TILEGX_OPC_V1ADDUC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1adiffu", TILEGX_OPC_V1ADIFFU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1avgu", TILEGX_OPC_V1AVGU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpeq", TILEGX_OPC_V1CMPEQ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpeqi", TILEGX_OPC_V1CMPEQI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmples", TILEGX_OPC_V1CMPLES, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpleu", TILEGX_OPC_V1CMPLEU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmplts", TILEGX_OPC_V1CMPLTS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpltsi", TILEGX_OPC_V1CMPLTSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpltu", TILEGX_OPC_V1CMPLTU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpltui", TILEGX_OPC_V1CMPLTUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpne", TILEGX_OPC_V1CMPNE, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpu", TILEGX_OPC_V1DDOTPU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpua", TILEGX_OPC_V1DDOTPUA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpus", TILEGX_OPC_V1DDOTPUS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpusa", TILEGX_OPC_V1DDOTPUSA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotp", TILEGX_OPC_V1DOTP, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpa", TILEGX_OPC_V1DOTPA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpu", TILEGX_OPC_V1DOTPU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpua", TILEGX_OPC_V1DOTPUA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpus", TILEGX_OPC_V1DOTPUS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpusa", TILEGX_OPC_V1DOTPUSA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1int_h", TILEGX_OPC_V1INT_H, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1int_l", TILEGX_OPC_V1INT_L, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1maxu", TILEGX_OPC_V1MAXU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1maxui", TILEGX_OPC_V1MAXUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1minu", TILEGX_OPC_V1MINU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1minui", TILEGX_OPC_V1MINUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mnz", TILEGX_OPC_V1MNZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1multu", TILEGX_OPC_V1MULTU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mulu", TILEGX_OPC_V1MULU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mulus", TILEGX_OPC_V1MULUS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mz", TILEGX_OPC_V1MZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1sadau", TILEGX_OPC_V1SADAU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1sadu", TILEGX_OPC_V1SADU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shl", TILEGX_OPC_V1SHL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shli", TILEGX_OPC_V1SHLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shrs", TILEGX_OPC_V1SHRS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shrsi", TILEGX_OPC_V1SHRSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shru", TILEGX_OPC_V1SHRU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shrui", TILEGX_OPC_V1SHRUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1sub", TILEGX_OPC_V1SUB, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1subuc", TILEGX_OPC_V1SUBUC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2add", TILEGX_OPC_V2ADD, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2addi", TILEGX_OPC_V2ADDI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2addsc", TILEGX_OPC_V2ADDSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2adiffs", TILEGX_OPC_V2ADIFFS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2avgs", TILEGX_OPC_V2AVGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpeq", TILEGX_OPC_V2CMPEQ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpeqi", TILEGX_OPC_V2CMPEQI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmples", TILEGX_OPC_V2CMPLES, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpleu", TILEGX_OPC_V2CMPLEU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmplts", TILEGX_OPC_V2CMPLTS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpltsi", TILEGX_OPC_V2CMPLTSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpltu", TILEGX_OPC_V2CMPLTU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpltui", TILEGX_OPC_V2CMPLTUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpne", TILEGX_OPC_V2CMPNE, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2dotp", TILEGX_OPC_V2DOTP, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2dotpa", TILEGX_OPC_V2DOTPA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2int_h", TILEGX_OPC_V2INT_H, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2int_l", TILEGX_OPC_V2INT_L, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2maxs", TILEGX_OPC_V2MAXS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2maxsi", TILEGX_OPC_V2MAXSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mins", TILEGX_OPC_V2MINS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2minsi", TILEGX_OPC_V2MINSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mnz", TILEGX_OPC_V2MNZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mulfsc", TILEGX_OPC_V2MULFSC, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2muls", TILEGX_OPC_V2MULS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mults", TILEGX_OPC_V2MULTS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mz", TILEGX_OPC_V2MZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2packh", TILEGX_OPC_V2PACKH, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2packl", TILEGX_OPC_V2PACKL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2packuc", TILEGX_OPC_V2PACKUC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sadas", TILEGX_OPC_V2SADAS, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sadau", TILEGX_OPC_V2SADAU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sads", TILEGX_OPC_V2SADS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sadu", TILEGX_OPC_V2SADU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shl", TILEGX_OPC_V2SHL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shli", TILEGX_OPC_V2SHLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shlsc", TILEGX_OPC_V2SHLSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shrs", TILEGX_OPC_V2SHRS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shrsi", TILEGX_OPC_V2SHRSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shru", TILEGX_OPC_V2SHRU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shrui", TILEGX_OPC_V2SHRUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sub", TILEGX_OPC_V2SUB, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2subsc", TILEGX_OPC_V2SUBSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4add", TILEGX_OPC_V4ADD, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4addsc", TILEGX_OPC_V4ADDSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4int_h", TILEGX_OPC_V4INT_H, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4int_l", TILEGX_OPC_V4INT_L, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4packsc", TILEGX_OPC_V4PACKSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shl", TILEGX_OPC_V4SHL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shlsc", TILEGX_OPC_V4SHLSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shrs", TILEGX_OPC_V4SHRS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shru", TILEGX_OPC_V4SHRU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4sub", TILEGX_OPC_V4SUB, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4subsc", TILEGX_OPC_V4SUBSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "wh64", TILEGX_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "xor", TILEGX_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "xori", TILEGX_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { NULL, TILEGX_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
- }
-};
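/*
 * [Editor's note: illustration only, not part of the patch.]  Each row
 * of the table that ends here follows the struct tilegx_opcode layout
 * from the deleted tile-desc headers (the field names below are
 * recalled from that header and should be treated as assumptions):
 * mnemonic string, enum value, a bit mask of compatible pipelines
 * (X0 = 0x1, X1 = 0x2, Y0 = 0x4, Y1 = 0x8, Y2 = 0x10), the operand
 * count, an implicitly written register, a can-bundle flag, and one
 * tuple of operand-table indices per pipeline.  For example,
 *
 *   { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
 *     { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
 *   },
 *
 * says "mz" can issue in X0/X1/Y0/Y1 (0xf) but not Y2 and takes three
 * operands, each pipeline encoding them through its own operand
 * indices, while "st" (0x12) is X1- and Y2-only, so only those two
 * tuples are populated.
 */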
-#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
-#define CHILD(array_index) (TILEGX_OPC_NONE + (array_index))
-
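/*
 * [Editor's note: illustration only, not part of the patch.]  The
 * decode_*_fsm arrays below are decision trees flattened into
 * unsigned short words.  Each node is one BITFIELD(start, size)
 * header -- the start bit in the low six bits, the (1 << size) - 1
 * field mask above them -- followed by 1 << size entries, each either
 * a final TILEGX_OPC_* value or a CHILD(i) link to the node beginning
 * at word i.  Because TILEGX_OPC_NONE is the last (largest) mnemonic
 * in the enum and indexes the NULL terminator row above, any entry
 * greater than it must be a CHILD() link.  A minimal walker, sketched
 * under those assumptions (the function name is hypothetical):
 */
static unsigned short decode_fsm(const unsigned short *fsm,
                                 unsigned long long bits)
{
  unsigned int index = 0;

  for (;;) {
    unsigned short spec = fsm[index];
    /* BITFIELD() packs the start bit in spec & 63, the mask in
       spec >> 6. */
    unsigned int field =
      (unsigned int)(bits >> (spec & 63)) & (spec >> 6);
    unsigned short next = fsm[index + 1 + field];

    if (next <= TILEGX_OPC_NONE)
      return next;                    /* a final opcode (or NONE) */
    index = next - TILEGX_OPC_NONE;   /* follow the CHILD() link */
  }
}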
-static const unsigned short decode_X0_fsm[936] =
-{
- BITFIELD(22, 9) /* index 0 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BFEXTS,
- TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTU,
- TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFINS,
- TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_MM,
- TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(528), CHILD(578),
- CHILD(583), CHILD(588), CHILD(593), CHILD(598), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, CHILD(603), CHILD(620), CHILD(637), CHILD(654), CHILD(671),
- CHILD(703), CHILD(797), CHILD(814), CHILD(831), CHILD(848), CHILD(865),
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, CHILD(889), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- BITFIELD(6, 2) /* index 513 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
- BITFIELD(8, 2) /* index 518 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
- BITFIELD(10, 2) /* index 523 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
- BITFIELD(20, 2) /* index 528 */,
- TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
- BITFIELD(6, 2) /* index 533 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
- BITFIELD(8, 2) /* index 538 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
- BITFIELD(10, 2) /* index 543 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(0, 2) /* index 548 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
- BITFIELD(2, 2) /* index 553 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
- BITFIELD(4, 2) /* index 558 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
- BITFIELD(6, 2) /* index 563 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
- BITFIELD(8, 2) /* index 568 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
- BITFIELD(10, 2) /* index 573 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(20, 2) /* index 578 */,
- TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, TILEGX_OPC_ORI,
- BITFIELD(20, 2) /* index 583 */,
- TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI,
- TILEGX_OPC_V1CMPLTUI,
- BITFIELD(20, 2) /* index 588 */,
- TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI,
- TILEGX_OPC_V2CMPEQI,
- BITFIELD(20, 2) /* index 593 */,
- TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI,
- TILEGX_OPC_V2MINSI,
- BITFIELD(20, 2) /* index 598 */,
- TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(18, 4) /* index 603 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
- TILEGX_OPC_AND, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_CMPEQ,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- TILEGX_OPC_CMPNE, TILEGX_OPC_CMULAF, TILEGX_OPC_CMULA, TILEGX_OPC_CMULFR,
- BITFIELD(18, 4) /* index 620 */,
- TILEGX_OPC_CMULF, TILEGX_OPC_CMULHR, TILEGX_OPC_CMULH, TILEGX_OPC_CMUL,
- TILEGX_OPC_CRC32_32, TILEGX_OPC_CRC32_8, TILEGX_OPC_DBLALIGN2,
- TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, TILEGX_OPC_DBLALIGN,
- TILEGX_OPC_FDOUBLE_ADDSUB, TILEGX_OPC_FDOUBLE_ADD_FLAGS,
- TILEGX_OPC_FDOUBLE_MUL_FLAGS, TILEGX_OPC_FDOUBLE_PACK1,
- TILEGX_OPC_FDOUBLE_PACK2, TILEGX_OPC_FDOUBLE_SUB_FLAGS,
- BITFIELD(18, 4) /* index 637 */,
- TILEGX_OPC_FDOUBLE_UNPACK_MAX, TILEGX_OPC_FDOUBLE_UNPACK_MIN,
- TILEGX_OPC_FSINGLE_ADD1, TILEGX_OPC_FSINGLE_ADDSUB2,
- TILEGX_OPC_FSINGLE_MUL1, TILEGX_OPC_FSINGLE_MUL2, TILEGX_OPC_FSINGLE_PACK2,
- TILEGX_OPC_FSINGLE_SUB1, TILEGX_OPC_MNZ, TILEGX_OPC_MULAX,
- TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HS_HU, TILEGX_OPC_MULA_HS_LS,
- TILEGX_OPC_MULA_HS_LU, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_HU_LS,
- BITFIELD(18, 4) /* index 654 */,
- TILEGX_OPC_MULA_HU_LU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LS_LU,
- TILEGX_OPC_MULA_LU_LU, TILEGX_OPC_MULX, TILEGX_OPC_MUL_HS_HS,
- TILEGX_OPC_MUL_HS_HU, TILEGX_OPC_MUL_HS_LS, TILEGX_OPC_MUL_HS_LU,
- TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_HU_LS, TILEGX_OPC_MUL_HU_LU,
- TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LS_LU, TILEGX_OPC_MUL_LU_LU,
- TILEGX_OPC_MZ,
- BITFIELD(18, 4) /* index 671 */,
- TILEGX_OPC_NOR, CHILD(688), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
- TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_SHUFFLEBYTES,
- TILEGX_OPC_SUBXSC,
- BITFIELD(12, 2) /* index 688 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(693),
- BITFIELD(14, 2) /* index 693 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(698),
- BITFIELD(16, 2) /* index 698 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(18, 4) /* index 703 */,
- TILEGX_OPC_SUBX, TILEGX_OPC_SUB, CHILD(720), TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADIFFU, TILEGX_OPC_V1AVGU,
- TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1DDOTPUSA, TILEGX_OPC_V1DDOTPUS, TILEGX_OPC_V1DOTPA,
- BITFIELD(12, 4) /* index 720 */,
- TILEGX_OPC_NONE, CHILD(737), CHILD(742), CHILD(747), CHILD(752), CHILD(757),
- CHILD(762), CHILD(767), CHILD(772), CHILD(777), CHILD(782), CHILD(787),
- CHILD(792), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 737 */,
- TILEGX_OPC_CLZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 742 */,
- TILEGX_OPC_CTZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 747 */,
- TILEGX_OPC_FNOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 752 */,
- TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 757 */,
- TILEGX_OPC_NOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 762 */,
- TILEGX_OPC_PCNT, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 767 */,
- TILEGX_OPC_REVBITS, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 772 */,
- TILEGX_OPC_REVBYTES, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 777 */,
- TILEGX_OPC_TBLIDXB0, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 782 */,
- TILEGX_OPC_TBLIDXB1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 787 */,
- TILEGX_OPC_TBLIDXB2, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 792 */,
- TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(18, 4) /* index 797 */,
- TILEGX_OPC_V1DOTPUSA, TILEGX_OPC_V1DOTPUS, TILEGX_OPC_V1DOTP,
- TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1MAXU,
- TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MULTU, TILEGX_OPC_V1MULUS,
- TILEGX_OPC_V1MULU, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SADAU, TILEGX_OPC_V1SADU,
- TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS,
- BITFIELD(18, 4) /* index 814 */,
- TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC,
- TILEGX_OPC_V2ADD, TILEGX_OPC_V2ADIFFS, TILEGX_OPC_V2AVGS,
- TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU,
- TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, TILEGX_OPC_V2CMPNE,
- TILEGX_OPC_V2DOTPA, TILEGX_OPC_V2DOTP, TILEGX_OPC_V2INT_H,
- BITFIELD(18, 4) /* index 831 */,
- TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ,
- TILEGX_OPC_V2MULFSC, TILEGX_OPC_V2MULS, TILEGX_OPC_V2MULTS, TILEGX_OPC_V2MZ,
- TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
- TILEGX_OPC_V2SADAS, TILEGX_OPC_V2SADAU, TILEGX_OPC_V2SADS,
- TILEGX_OPC_V2SADU, TILEGX_OPC_V2SHLSC,
- BITFIELD(18, 4) /* index 848 */,
- TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC,
- TILEGX_OPC_V2SUB, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
- TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
- TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
- TILEGX_OPC_V4SUB,
- BITFIELD(18, 3) /* index 865 */,
- CHILD(874), CHILD(877), CHILD(880), CHILD(883), CHILD(886), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 874 */,
- TILEGX_OPC_XOR, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 877 */,
- TILEGX_OPC_V1DDOTPUA, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 880 */,
- TILEGX_OPC_V1DDOTPU, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 883 */,
- TILEGX_OPC_V1DOTPUA, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 886 */,
- TILEGX_OPC_V1DOTPU, TILEGX_OPC_NONE,
- BITFIELD(18, 4) /* index 889 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
- TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
- TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
- TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE,
- BITFIELD(0, 2) /* index 906 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(911),
- BITFIELD(2, 2) /* index 911 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(916),
- BITFIELD(4, 2) /* index 916 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(921),
- BITFIELD(6, 2) /* index 921 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(926),
- BITFIELD(8, 2) /* index 926 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(931),
- BITFIELD(10, 2) /* index 931 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- TILEGX_OPC_INFOL,
-};
-
-static const unsigned short decode_X1_fsm[1206] =
-{
- BITFIELD(53, 9) /* index 0 */,
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BEQZT,
- TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZ, TILEGX_OPC_BEQZ, TILEGX_OPC_BGEZT,
- TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZ, TILEGX_OPC_BGEZ, TILEGX_OPC_BGTZT,
- TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZ, TILEGX_OPC_BGTZ, TILEGX_OPC_BLBCT,
- TILEGX_OPC_BLBCT, TILEGX_OPC_BLBC, TILEGX_OPC_BLBC, TILEGX_OPC_BLBST,
- TILEGX_OPC_BLBST, TILEGX_OPC_BLBS, TILEGX_OPC_BLBS, TILEGX_OPC_BLEZT,
- TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZ, TILEGX_OPC_BLEZ, TILEGX_OPC_BLTZT,
- TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZ, TILEGX_OPC_BLTZ, TILEGX_OPC_BNEZT,
- TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZ, TILEGX_OPC_BNEZ, CHILD(528), CHILD(578),
- CHILD(598), CHILD(663), CHILD(683), CHILD(688), CHILD(693), CHILD(698),
- CHILD(703), CHILD(708), CHILD(713), CHILD(718), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- CHILD(723), CHILD(740), CHILD(772), CHILD(789), CHILD(1108), CHILD(1125),
- CHILD(1142), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1159), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176),
- BITFIELD(37, 2) /* index 513 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
- BITFIELD(39, 2) /* index 518 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
- BITFIELD(41, 2) /* index 523 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
- BITFIELD(51, 2) /* index 528 */,
- TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
- BITFIELD(37, 2) /* index 533 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
- BITFIELD(39, 2) /* index 538 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
- BITFIELD(41, 2) /* index 543 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(31, 2) /* index 548 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
- BITFIELD(33, 2) /* index 553 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
- BITFIELD(35, 2) /* index 558 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
- BITFIELD(37, 2) /* index 563 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
- BITFIELD(39, 2) /* index 568 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
- BITFIELD(41, 2) /* index 573 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(51, 2) /* index 578 */,
- TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, CHILD(583),
- BITFIELD(31, 2) /* index 583 */,
- TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(588),
- BITFIELD(33, 2) /* index 588 */,
- TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(593),
- BITFIELD(35, 2) /* index 593 */,
- TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD,
- TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
- BITFIELD(51, 2) /* index 598 */,
- CHILD(603), CHILD(618), CHILD(633), CHILD(648),
- BITFIELD(31, 2) /* index 603 */,
- TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(608),
- BITFIELD(33, 2) /* index 608 */,
- TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(613),
- BITFIELD(35, 2) /* index 613 */,
- TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD,
- TILEGX_OPC_PREFETCH_ADD_L1,
- BITFIELD(31, 2) /* index 618 */,
- TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(623),
- BITFIELD(33, 2) /* index 623 */,
- TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(628),
- BITFIELD(35, 2) /* index 628 */,
- TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD,
- TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
- BITFIELD(31, 2) /* index 633 */,
- TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(638),
- BITFIELD(33, 2) /* index 638 */,
- TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(643),
- BITFIELD(35, 2) /* index 643 */,
- TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD,
- TILEGX_OPC_PREFETCH_ADD_L2,
- BITFIELD(31, 2) /* index 648 */,
- TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(653),
- BITFIELD(33, 2) /* index 653 */,
- TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(658),
- BITFIELD(35, 2) /* index 658 */,
- TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
- TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
- BITFIELD(51, 2) /* index 663 */,
- CHILD(668), TILEGX_OPC_LDNT1S_ADD, TILEGX_OPC_LDNT1U_ADD,
- TILEGX_OPC_LDNT2S_ADD,
- BITFIELD(31, 2) /* index 668 */,
- TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(673),
- BITFIELD(33, 2) /* index 673 */,
- TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(678),
- BITFIELD(35, 2) /* index 678 */,
- TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD,
- TILEGX_OPC_PREFETCH_ADD_L3,
- BITFIELD(51, 2) /* index 683 */,
- TILEGX_OPC_LDNT2U_ADD, TILEGX_OPC_LDNT4S_ADD, TILEGX_OPC_LDNT4U_ADD,
- TILEGX_OPC_LDNT_ADD,
- BITFIELD(51, 2) /* index 688 */,
- TILEGX_OPC_LD_ADD, TILEGX_OPC_LDNA_ADD, TILEGX_OPC_MFSPR, TILEGX_OPC_MTSPR,
- BITFIELD(51, 2) /* index 693 */,
- TILEGX_OPC_ORI, TILEGX_OPC_ST1_ADD, TILEGX_OPC_ST2_ADD, TILEGX_OPC_ST4_ADD,
- BITFIELD(51, 2) /* index 698 */,
- TILEGX_OPC_STNT1_ADD, TILEGX_OPC_STNT2_ADD, TILEGX_OPC_STNT4_ADD,
- TILEGX_OPC_STNT_ADD,
- BITFIELD(51, 2) /* index 703 */,
- TILEGX_OPC_ST_ADD, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI,
- TILEGX_OPC_V1CMPLTSI,
- BITFIELD(51, 2) /* index 708 */,
- TILEGX_OPC_V1CMPLTUI, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI,
- TILEGX_OPC_V2ADDI,
- BITFIELD(51, 2) /* index 713 */,
- TILEGX_OPC_V2CMPEQI, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI,
- TILEGX_OPC_V2MAXSI,
- BITFIELD(51, 2) /* index 718 */,
- TILEGX_OPC_V2MINSI, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(49, 4) /* index 723 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
- TILEGX_OPC_AND, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPEXCH4, TILEGX_OPC_CMPEXCH,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- TILEGX_OPC_CMPNE, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4,
- TILEGX_OPC_DBLALIGN6,
- BITFIELD(49, 4) /* index 740 */,
- TILEGX_OPC_EXCH4, TILEGX_OPC_EXCH, TILEGX_OPC_FETCHADD4,
- TILEGX_OPC_FETCHADDGEZ4, TILEGX_OPC_FETCHADDGEZ, TILEGX_OPC_FETCHADD,
- TILEGX_OPC_FETCHAND4, TILEGX_OPC_FETCHAND, TILEGX_OPC_FETCHOR4,
- TILEGX_OPC_FETCHOR, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, TILEGX_OPC_NOR,
- CHILD(757), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
- BITFIELD(43, 2) /* index 757 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(762),
- BITFIELD(45, 2) /* index 762 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(767),
- BITFIELD(47, 2) /* index 767 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(49, 4) /* index 772 */,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
- TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_ST1,
- TILEGX_OPC_ST2, TILEGX_OPC_ST4, TILEGX_OPC_STNT1, TILEGX_OPC_STNT2,
- TILEGX_OPC_STNT4,
- BITFIELD(46, 7) /* index 789 */,
- TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
- TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
- TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST,
- TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_SUBXSC,
- TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC,
- TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBX,
- TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX,
- TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
- TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB,
- TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, CHILD(918), CHILD(927),
- CHILD(1006), CHILD(1090), CHILD(1099), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
- TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
- TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
- TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
- TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
- TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
- TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
- TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
- TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
- BITFIELD(43, 3) /* index 918 */,
- TILEGX_OPC_NONE, TILEGX_OPC_DRAIN, TILEGX_OPC_DTLBPR, TILEGX_OPC_FINV,
- TILEGX_OPC_FLUSHWB, TILEGX_OPC_FLUSH, TILEGX_OPC_FNOP, TILEGX_OPC_ICOH,
- BITFIELD(43, 3) /* index 927 */,
- CHILD(936), TILEGX_OPC_INV, TILEGX_OPC_IRET, TILEGX_OPC_JALRP,
- TILEGX_OPC_JALR, TILEGX_OPC_JRP, TILEGX_OPC_JR, CHILD(991),
- BITFIELD(31, 2) /* index 936 */,
- CHILD(941), CHILD(966), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(33, 2) /* index 941 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(946),
- BITFIELD(35, 2) /* index 946 */,
- TILEGX_OPC_ILL, CHILD(951), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(37, 2) /* index 951 */,
- TILEGX_OPC_ILL, CHILD(956), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(39, 2) /* index 956 */,
- TILEGX_OPC_ILL, CHILD(961), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(41, 2) /* index 961 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_BPT, TILEGX_OPC_ILL,
- BITFIELD(33, 2) /* index 966 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(971),
- BITFIELD(35, 2) /* index 971 */,
- TILEGX_OPC_ILL, CHILD(976), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(37, 2) /* index 976 */,
- TILEGX_OPC_ILL, CHILD(981), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(39, 2) /* index 981 */,
- TILEGX_OPC_ILL, CHILD(986), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(41, 2) /* index 986 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_RAISE, TILEGX_OPC_ILL,
- BITFIELD(31, 2) /* index 991 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(996),
- BITFIELD(33, 2) /* index 996 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1001),
- BITFIELD(35, 2) /* index 1001 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
- TILEGX_OPC_PREFETCH_L1_FAULT,
- BITFIELD(43, 3) /* index 1006 */,
- CHILD(1015), CHILD(1030), CHILD(1045), CHILD(1060), CHILD(1075),
- TILEGX_OPC_LDNA, TILEGX_OPC_LDNT1S, TILEGX_OPC_LDNT1U,
- BITFIELD(31, 2) /* index 1015 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1020),
- BITFIELD(33, 2) /* index 1020 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1025),
- BITFIELD(35, 2) /* index 1025 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
- BITFIELD(31, 2) /* index 1030 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1035),
- BITFIELD(33, 2) /* index 1035 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1040),
- BITFIELD(35, 2) /* index 1040 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
- TILEGX_OPC_PREFETCH_L2_FAULT,
- BITFIELD(31, 2) /* index 1045 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1050),
- BITFIELD(33, 2) /* index 1050 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1055),
- BITFIELD(35, 2) /* index 1055 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
- BITFIELD(31, 2) /* index 1060 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1065),
- BITFIELD(33, 2) /* index 1065 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1070),
- BITFIELD(35, 2) /* index 1070 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S,
- TILEGX_OPC_PREFETCH_L3_FAULT,
- BITFIELD(31, 2) /* index 1075 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1080),
- BITFIELD(33, 2) /* index 1080 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1085),
- BITFIELD(35, 2) /* index 1085 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
- BITFIELD(43, 3) /* index 1090 */,
- TILEGX_OPC_LDNT2S, TILEGX_OPC_LDNT2U, TILEGX_OPC_LDNT4S, TILEGX_OPC_LDNT4U,
- TILEGX_OPC_LDNT, TILEGX_OPC_LD, TILEGX_OPC_LNK, TILEGX_OPC_MF,
- BITFIELD(43, 3) /* index 1099 */,
- TILEGX_OPC_NAP, TILEGX_OPC_NOP, TILEGX_OPC_SWINT0, TILEGX_OPC_SWINT1,
- TILEGX_OPC_SWINT2, TILEGX_OPC_SWINT3, TILEGX_OPC_WH64, TILEGX_OPC_NONE,
- BITFIELD(49, 4) /* index 1108 */,
- TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MZ,
- TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC,
- TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2CMPEQ,
- TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS,
- TILEGX_OPC_V2CMPLTU,
- BITFIELD(49, 4) /* index 1125 */,
- TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2INT_H, TILEGX_OPC_V2INT_L,
- TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MZ,
- TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
- TILEGX_OPC_V2SHLSC, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU,
- TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB,
- BITFIELD(49, 4) /* index 1142 */,
- TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
- TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
- TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
- TILEGX_OPC_V4SUB, TILEGX_OPC_XOR, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(49, 4) /* index 1159 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
- TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
- TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
- TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE,
- BITFIELD(31, 2) /* index 1176 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1181),
- BITFIELD(33, 2) /* index 1181 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1186),
- BITFIELD(35, 2) /* index 1186 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1191),
- BITFIELD(37, 2) /* index 1191 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1196),
- BITFIELD(39, 2) /* index 1196 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1201),
- BITFIELD(41, 2) /* index 1201 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- TILEGX_OPC_INFOL,
-};
-
-static const unsigned short decode_Y0_fsm[178] =
-{
- BITFIELD(27, 4) /* index 0 */,
- CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
- TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(118), CHILD(123),
- CHILD(128), CHILD(133), CHILD(153), CHILD(158), CHILD(163), CHILD(168),
- CHILD(173),
- BITFIELD(6, 2) /* index 17 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
- BITFIELD(8, 2) /* index 22 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
- BITFIELD(10, 2) /* index 27 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(0, 2) /* index 32 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
- BITFIELD(2, 2) /* index 37 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
- BITFIELD(4, 2) /* index 42 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
- BITFIELD(6, 2) /* index 47 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
- BITFIELD(8, 2) /* index 52 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
- BITFIELD(10, 2) /* index 57 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(18, 2) /* index 62 */,
- TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
- BITFIELD(15, 5) /* index 67 */,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(100),
- CHILD(109), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(12, 3) /* index 100 */,
- TILEGX_OPC_NONE, TILEGX_OPC_CLZ, TILEGX_OPC_CTZ, TILEGX_OPC_FNOP,
- TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NOP, TILEGX_OPC_PCNT,
- TILEGX_OPC_REVBITS,
- BITFIELD(12, 3) /* index 109 */,
- TILEGX_OPC_REVBYTES, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_TBLIDXB1,
- TILEGX_OPC_TBLIDXB2, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE,
- BITFIELD(18, 2) /* index 118 */,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- BITFIELD(18, 2) /* index 123 */,
- TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, TILEGX_OPC_MULAX, TILEGX_OPC_MULX,
- BITFIELD(18, 2) /* index 128 */,
- TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
- BITFIELD(18, 2) /* index 133 */,
- TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(138), TILEGX_OPC_XOR,
- BITFIELD(12, 2) /* index 138 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(143),
- BITFIELD(14, 2) /* index 143 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(148),
- BITFIELD(16, 2) /* index 148 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(18, 2) /* index 153 */,
- TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
- BITFIELD(18, 2) /* index 158 */,
- TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
- TILEGX_OPC_SHL3ADDX,
- BITFIELD(18, 2) /* index 163 */,
- TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_LS_LS,
- TILEGX_OPC_MUL_LU_LU,
- BITFIELD(18, 2) /* index 168 */,
- TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_LS_LS,
- TILEGX_OPC_MULA_LU_LU,
- BITFIELD(18, 2) /* index 173 */,
- TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
-};
-
-static const unsigned short decode_Y1_fsm[167] =
-{
- BITFIELD(58, 4) /* index 0 */,
- TILEGX_OPC_NONE, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
- TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(117), CHILD(122),
- CHILD(127), CHILD(132), CHILD(152), CHILD(157), CHILD(162), TILEGX_OPC_NONE,
- BITFIELD(37, 2) /* index 17 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
- BITFIELD(39, 2) /* index 22 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
- BITFIELD(41, 2) /* index 27 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(31, 2) /* index 32 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
- BITFIELD(33, 2) /* index 37 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
- BITFIELD(35, 2) /* index 42 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
- BITFIELD(37, 2) /* index 47 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
- BITFIELD(39, 2) /* index 52 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
- BITFIELD(41, 2) /* index 57 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(49, 2) /* index 62 */,
- TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
- BITFIELD(47, 4) /* index 67 */,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(84),
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(43, 3) /* index 84 */,
- CHILD(93), CHILD(96), CHILD(99), CHILD(102), CHILD(105), CHILD(108),
- CHILD(111), CHILD(114),
- BITFIELD(46, 1) /* index 93 */,
- TILEGX_OPC_NONE, TILEGX_OPC_FNOP,
- BITFIELD(46, 1) /* index 96 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ILL,
- BITFIELD(46, 1) /* index 99 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JALRP,
- BITFIELD(46, 1) /* index 102 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JALR,
- BITFIELD(46, 1) /* index 105 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JRP,
- BITFIELD(46, 1) /* index 108 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JR,
- BITFIELD(46, 1) /* index 111 */,
- TILEGX_OPC_NONE, TILEGX_OPC_LNK,
- BITFIELD(46, 1) /* index 114 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NOP,
- BITFIELD(49, 2) /* index 117 */,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- BITFIELD(49, 2) /* index 122 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE,
- BITFIELD(49, 2) /* index 127 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
- BITFIELD(49, 2) /* index 132 */,
- TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(137), TILEGX_OPC_XOR,
- BITFIELD(43, 2) /* index 137 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(142),
- BITFIELD(45, 2) /* index 142 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(147),
- BITFIELD(47, 2) /* index 147 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(49, 2) /* index 152 */,
- TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
- BITFIELD(49, 2) /* index 157 */,
- TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
- TILEGX_OPC_SHL3ADDX,
- BITFIELD(49, 2) /* index 162 */,
- TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
-};
-
-static const unsigned short decode_Y2_fsm[118] =
-{
- BITFIELD(62, 2) /* index 0 */,
- TILEGX_OPC_NONE, CHILD(5), CHILD(66), CHILD(109),
- BITFIELD(55, 3) /* index 5 */,
- CHILD(14), CHILD(14), CHILD(14), CHILD(17), CHILD(40), CHILD(40), CHILD(40),
- CHILD(43),
- BITFIELD(26, 1) /* index 14 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1U,
- BITFIELD(26, 1) /* index 17 */,
- CHILD(20), CHILD(30),
- BITFIELD(51, 2) /* index 20 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(25),
- BITFIELD(53, 2) /* index 25 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
- TILEGX_OPC_PREFETCH_L1_FAULT,
- BITFIELD(51, 2) /* index 30 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(35),
- BITFIELD(53, 2) /* index 35 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
- BITFIELD(26, 1) /* index 40 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2U,
- BITFIELD(26, 1) /* index 43 */,
- CHILD(46), CHILD(56),
- BITFIELD(51, 2) /* index 46 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(51),
- BITFIELD(53, 2) /* index 51 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
- TILEGX_OPC_PREFETCH_L2_FAULT,
- BITFIELD(51, 2) /* index 56 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(61),
- BITFIELD(53, 2) /* index 61 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
- BITFIELD(56, 2) /* index 66 */,
- CHILD(71), CHILD(74), CHILD(90), CHILD(93),
- BITFIELD(26, 1) /* index 71 */,
- TILEGX_OPC_NONE, TILEGX_OPC_LD4S,
- BITFIELD(26, 1) /* index 74 */,
- TILEGX_OPC_NONE, CHILD(77),
- BITFIELD(51, 2) /* index 77 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(82),
- BITFIELD(53, 2) /* index 82 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(87),
- BITFIELD(55, 1) /* index 87 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT,
- BITFIELD(26, 1) /* index 90 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD,
- BITFIELD(26, 1) /* index 93 */,
- CHILD(96), TILEGX_OPC_LD,
- BITFIELD(51, 2) /* index 96 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(101),
- BITFIELD(53, 2) /* index 101 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(106),
- BITFIELD(55, 1) /* index 106 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
- BITFIELD(26, 1) /* index 109 */,
- CHILD(112), CHILD(115),
- BITFIELD(57, 1) /* index 112 */,
- TILEGX_OPC_ST1, TILEGX_OPC_ST4,
- BITFIELD(57, 1) /* index 115 */,
- TILEGX_OPC_ST2, TILEGX_OPC_ST,
-};
-
-#undef BITFIELD
-#undef CHILD
-const unsigned short * const
-tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS] =
-{
- decode_X0_fsm,
- decode_X1_fsm,
- decode_Y0_fsm,
- decode_Y1_fsm,
- decode_Y2_fsm
-};
-const struct tilegx_operand tilegx_operands[35] =
-{
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X0, get_Imm8_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X1, get_Imm8_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y0, get_Imm8_Y0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y1, get_Imm8_Y1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X0_HW0_LAST),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X0, get_Imm16_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X1_HW0_LAST),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X1, get_Imm16_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X0, get_SrcA_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X1, get_Dest_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y0, get_SrcA_Y0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y1, get_Dest_Y1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y1, get_SrcA_Y1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y2, get_SrcA_Y2
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X0, get_SrcB_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X1, get_SrcB_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y0, get_SrcB_Y0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y1, get_SrcB_Y1
- },
- {
- TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_BROFF_X1),
- 17, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_BrOff_X1, get_BrOff_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_MMSTART_X0),
- 6, 0, 0, 0, 0, 0,
- create_BFStart_X0, get_BFStart_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_MMEND_X0),
- 6, 0, 0, 0, 0, 0,
- create_BFEnd_X0, get_BFEnd_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_JUMPOFF_X1),
- 27, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_JumpOff_X1, get_JumpOff_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MF_IMM14_X1),
- 14, 0, 0, 0, 0, 0,
- create_MF_Imm14_X1, get_MF_Imm14_X1
- },
- {
- TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MT_IMM14_X1),
- 14, 0, 0, 0, 0, 0,
- create_MT_Imm14_X1, get_MT_Imm14_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X0),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_X0, get_ShAmt_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X1),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_X1, get_ShAmt_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y0),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_Y0, get_ShAmt_Y0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y1),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_Y1, get_ShAmt_Y1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_DEST_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Dest_Imm8_X1, get_Dest_Imm8_X1
- }
-};
-
-
-
-
-/* Given a set of bundle bits and the lookup FSM for a specific pipe,
- * returns which instruction the bundle contains in that pipe.
- */
-static const struct tilegx_opcode *
-find_opcode(tilegx_bundle_bits bits, const unsigned short *table)
-{
- int index = 0;
-
- while (1)
- {
- unsigned short bitspec = table[index];
- unsigned int bitfield =
- ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
-
- unsigned short next = table[index + 1 + bitfield];
- if (next <= TILEGX_OPC_NONE)
- return &tilegx_opcodes[next];
-
- index = next - TILEGX_OPC_NONE;
- }
-}
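-
-/*
- * Note on the FSM encoding consumed above (an illustrative
- * reconstruction; the original BITFIELD()/CHILD() macro bodies are
- * #undef'd earlier and only assumed here): each table entry packs a
- * bit offset in its low 6 bits and an extraction mask in its high
- * bits, and CHILD() biases the next table index past the opcode
- * numbers so the two can be told apart:
- *
- *   #define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
- *   #define CHILD(idx)            (TILEGX_OPC_NONE + (idx))
- *
- * E.g. BITFIELD(27, 4) selects bundle bits [27..30]: find_opcode()
- * computes ((bits >> 27) & 0xf) and uses it to index the next 16
- * entries, following CHILD() links until it reaches an opcode.
- */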
-
-
-int
-parse_insn_tilegx(tilegx_bundle_bits bits,
- unsigned long long pc,
- struct tilegx_decoded_instruction
- decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE])
-{
- int num_instructions = 0;
- int pipe;
-
- int min_pipe, max_pipe;
- if ((bits & TILEGX_BUNDLE_MODE_MASK) == 0)
- {
- min_pipe = TILEGX_PIPELINE_X0;
- max_pipe = TILEGX_PIPELINE_X1;
- }
- else
- {
- min_pipe = TILEGX_PIPELINE_Y0;
- max_pipe = TILEGX_PIPELINE_Y2;
- }
-
- /* For each pipe, find an instruction that fits. */
- for (pipe = min_pipe; pipe <= max_pipe; pipe++)
- {
- const struct tilegx_opcode *opc;
- struct tilegx_decoded_instruction *d;
- int i;
-
- d = &decoded[num_instructions++];
- opc = find_opcode (bits, tilegx_bundle_decoder_fsms[pipe]);
- d->opcode = opc;
-
- /* Decode each operand, sign extending, etc. as appropriate. */
- for (i = 0; i < opc->num_operands; i++)
- {
- const struct tilegx_operand *op =
- &tilegx_operands[opc->operands[pipe][i]];
- int raw_opval = op->extract (bits);
- long long opval;
-
- if (op->is_signed)
- {
- /* Sign-extend the operand. */
- int shift = (int)((sizeof(int) * 8) - op->num_bits);
- raw_opval = (raw_opval << shift) >> shift;
- }
-
- /* Adjust PC-relative scaled branch offsets. */
- if (op->type == TILEGX_OP_TYPE_ADDRESS)
- opval = (raw_opval * TILEGX_BUNDLE_SIZE_IN_BYTES) + pc;
- else
- opval = raw_opval;
-
- /* Record the final value. */
- d->operands[i] = op;
- d->operand_values[i] = opval;
- }
- }
-
- return num_instructions;
-}
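-
-/*
- * Illustrative usage (a sketch, not part of the original file):
- * decode one bundle and print the mnemonic chosen for each pipe.
- * This assumes the "name" field of struct tilegx_opcode from
- * <arch/opcode.h>.
- */
-#if 0
-static void example_decode(tilegx_bundle_bits bits, unsigned long long pc)
-{
-	struct tilegx_decoded_instruction
-		decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
-	int i, n = parse_insn_tilegx(bits, pc, decoded);
-
-	for (i = 0; i < n; i++)
-		pr_info("pipe %d: %s\n", i, decoded[i].opcode->name);
-}
-#endif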
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
deleted file mode 100644
index f95d65f3162b..000000000000
--- a/arch/tile/kernel/time.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Support the cycle counter clocksource and tile timer clock event device.
- */
-
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/hardirq.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/timekeeper_internal.h>
-#include <asm/irq_regs.h>
-#include <asm/traps.h>
-#include <asm/vdso.h>
-#include <hv/hypervisor.h>
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-
-/*
- * Define the cycle counter clock source.
- */
-
-/* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __ro_after_init;
-
-cycles_t get_clock_rate(void)
-{
- return cycles_per_sec;
-}
-
-#if CHIP_HAS_SPLIT_CYCLE()
-cycles_t get_cycles(void)
-{
- unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
- unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
- unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);
-
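-	/*
-	 * If the high word changed between the two reads of
-	 * SPR_CYCLE_HIGH, a carry from the low word occurred in
-	 * between; re-read until the pair is consistent.
-	 */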
- while (unlikely(high != high2)) {
- low = __insn_mfspr(SPR_CYCLE_LOW);
- high = high2;
- high2 = __insn_mfspr(SPR_CYCLE_HIGH);
- }
-
- return (((cycles_t)high) << 32) | low;
-}
-EXPORT_SYMBOL(get_cycles);
-#endif
-
-/*
- * We use a relatively small shift value so that sched_clock()
- * won't wrap around very often.
- */
-#define SCHED_CLOCK_SHIFT 10
-
-static unsigned long sched_clock_mult __ro_after_init;
-
-static cycles_t clocksource_get_cycles(struct clocksource *cs)
-{
- return get_cycles();
-}
-
-static struct clocksource cycle_counter_cs = {
- .name = "cycle counter",
- .rating = 300,
- .read = clocksource_get_cycles,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/*
- * Called very early from setup_arch() to set cycles_per_sec.
- * We initialize it early so we can use it to set up loops_per_jiffy.
- */
-void __init setup_clock(void)
-{
- cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
- sched_clock_mult =
- clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
-}
-
-void __init calibrate_delay(void)
-{
- loops_per_jiffy = get_clock_rate() / HZ;
- pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n",
- loops_per_jiffy / (500000 / HZ),
- (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
-}
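-
-/*
- * Worked example of the arithmetic above (illustrative, with assumed
- * numbers): at a 1 GHz clock with HZ=100, loops_per_jiffy = 1e9/100 =
- * 10,000,000, so the message reports 10000000 / (500000/100) = 2000.00
- * BogoMIPS, i.e. lpj * HZ / 500000.
- */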
-
-/* Called fairly late in init/main.c, but before we go smp. */
-void __init time_init(void)
-{
- /* Initialize and register the clock source. */
- clocksource_register_hz(&cycle_counter_cs, cycles_per_sec);
-
- /* Start up the tile-timer interrupt source on the boot cpu. */
- setup_tile_timer();
-}
-
-/*
- * Define the tile timer clock event device. The timer is driven by
- * the TILE_TIMER_CONTROL register, which consists of a 31-bit down
- * counter, plus bit 31, which signifies that the counter has wrapped
- * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be
- * raised as long as bit 31 is set.
- *
- * The TILE_MINSEC value represents the largest range of real-time
- * we can possibly cover with the timer, based on MAX_TICK combined
- * with the slowest reasonable clock rate we might run at.
- */
-
-#define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */
-#define TILE_MINSEC 5 /* timer covers no more than 5 seconds */
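-
-/*
- * Illustrative arithmetic (with assumed clock rates): MAX_TICK is
- * 2^31 - 1 ~= 2.15e9 ticks, so the timer spans MAX_TICK /
- * cycles_per_sec seconds -- about 2.1s at 1 GHz, and 5s only for
- * clocks down around 430 MHz.  TILE_MINSEC = 5 is therefore a safe
- * range to hand to clockevents_calc_mult_shift() below.
- */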
-
-static int tile_timer_set_next_event(unsigned long ticks,
- struct clock_event_device *evt)
-{
- BUG_ON(ticks > MAX_TICK);
- __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
- arch_local_irq_unmask_now(INT_TILE_TIMER);
- return 0;
-}
-
-/*
- * Whenever anyone tries to change modes, we just mask interrupts
- * and wait for the next event to get set.
- */
-static int tile_timer_shutdown(struct clock_event_device *evt)
-{
- arch_local_irq_mask_now(INT_TILE_TIMER);
- return 0;
-}
-
-/*
- * Set min_delta_ns to 1 microsecond, since it takes about
- * that long to fire the interrupt.
- */
-static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
- .name = "tile timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .min_delta_ns = 1000,
- .min_delta_ticks = 1,
- .max_delta_ticks = MAX_TICK,
- .rating = 100,
- .irq = -1,
- .set_next_event = tile_timer_set_next_event,
- .set_state_shutdown = tile_timer_shutdown,
- .set_state_oneshot = tile_timer_shutdown,
- .set_state_oneshot_stopped = tile_timer_shutdown,
- .tick_resume = tile_timer_shutdown,
-};
-
-void setup_tile_timer(void)
-{
- struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
-
- /* Fill in fields that are speed-specific. */
- clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
- evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt);
-
- /* Mark as being for this cpu only. */
- evt->cpumask = cpumask_of(smp_processor_id());
-
- /* Start out with timer not firing. */
- arch_local_irq_mask_now(INT_TILE_TIMER);
-
- /* Register tile timer. */
- clockevents_register_device(evt);
-}
-
-/* Called from the interrupt vector. */
-void do_timer_interrupt(struct pt_regs *regs, int fault_num)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
- struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
-
- /*
- * Mask the timer interrupt here, since we are a oneshot timer
- * and there are now by definition no events pending.
- */
- arch_local_irq_mask(INT_TILE_TIMER);
-
- /* Track time spent here in an interrupt context */
- irq_enter();
-
- /* Track interrupt count. */
- __this_cpu_inc(irq_stat.irq_timer_count);
-
- /* Call the generic timer handler */
- evt->event_handler(evt);
-
- /*
- * Track time spent against the current process again and
- * process any softirqs if they are waiting.
- */
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- * Note that with LOCKDEP, this is called during lockdep_init(), and
- * we will claim that sched_clock() is zero for a little while, until
- * we run setup_clock(), above.
- */
-unsigned long long sched_clock(void)
-{
- return mult_frac(get_cycles(),
- sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
-}
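-
-/*
- * Illustrative numbers (assumed clock rate): clocksource_hz2mult()
- * picks mult ~= (NSEC_PER_SEC << SCHED_CLOCK_SHIFT) / cycles_per_sec,
- * so at 1 GHz mult = (1e9 << 10) / 1e9 = 1024 and sched_clock()
- * returns cycles * 1024 >> 10 = cycles, i.e. one nanosecond per cycle.
- */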
-
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
-/*
- * Use the tile timer to convert nsecs to core clock cycles, relying
- * on it having the same frequency as SPR_CYCLE.
- */
-cycles_t ns2cycles(unsigned long nsecs)
-{
- /*
- * We do not have to disable preemption here as each core has the same
- * clock frequency.
- */
- struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
-
- /*
-	 * As in clocksource.h and x86's timer.h, we split the calculation
-	 * into 2 parts to avoid unnecessary overflow of the intermediate
- * value. This will not lead to any loss of precision.
- */
- u64 quot = (u64)nsecs >> dev->shift;
- u64 rem = (u64)nsecs & ((1ULL << dev->shift) - 1);
- return quot * dev->mult + ((rem * dev->mult) >> dev->shift);
-}
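-
-/*
- * Why the split in ns2cycles() is exact (illustrative algebra, not
- * from the original source): with nsecs = (quot << shift) + rem,
- *
- *   (nsecs * mult) >> shift
- *     = (((quot << shift) * mult) + (rem * mult)) >> shift
- *     = (quot * mult) + ((rem * mult) >> shift)
- *
- * because the first term is a multiple of 1 << shift, so the shift
- * distributes over the sum without losing low bits.
- */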
-
-void update_vsyscall_tz(void)
-{
- write_seqcount_begin(&vdso_data->tz_seq);
- vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
- vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- write_seqcount_end(&vdso_data->tz_seq);
-}
-
-void update_vsyscall(struct timekeeper *tk)
-{
- if (tk->tkr_mono.clock != &cycle_counter_cs)
- return;
-
- write_seqcount_begin(&vdso_data->tb_seq);
-
- vdso_data->cycle_last = tk->tkr_mono.cycle_last;
- vdso_data->mask = tk->tkr_mono.mask;
- vdso_data->mult = tk->tkr_mono.mult;
- vdso_data->shift = tk->tkr_mono.shift;
-
- vdso_data->wall_time_sec = tk->xtime_sec;
- vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec;
-
- vdso_data->monotonic_time_sec = tk->xtime_sec
- + tk->wall_to_monotonic.tv_sec;
- vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
- + ((u64)tk->wall_to_monotonic.tv_nsec
- << tk->tkr_mono.shift);
- while (vdso_data->monotonic_time_snsec >=
- (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
- vdso_data->monotonic_time_snsec -=
- ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
- vdso_data->monotonic_time_sec++;
- }
-
- vdso_data->wall_time_coarse_sec = tk->xtime_sec;
- vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
- tk->tkr_mono.shift);
-
- vdso_data->monotonic_time_coarse_sec =
- vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
- vdso_data->monotonic_time_coarse_nsec =
- vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
-
- while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
- vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
- vdso_data->monotonic_time_coarse_sec++;
- }
-
- write_seqcount_end(&vdso_data->tb_seq);
-}
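-
-/*
- * Illustrative reader side (assumed; the real consumer is the tile
- * vDSO, not this file): under tb_seq, a vdso gettime extends the
- * stored snapshot with the cycle delta since cycle_last:
- *
- *   ns = (wall_time_snsec +
- *         ((get_cycles() - cycle_last) & mask) * mult) >> shift;
- *
- * then normalizes ns against NSEC_PER_SEC into wall_time_sec.  This
- * is why the fields above are kept in "shifted nanosecond" form.
- */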
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
deleted file mode 100644
index f23b53515671..000000000000
--- a/arch/tile/kernel/tlb.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/hugetlb.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-#include <hv/hypervisor.h>
-
-/* From tlbflush.h */
-DEFINE_PER_CPU(int, current_asid);
-int min_asid, max_asid;
-
-/*
- * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
- * so that when we are unmapping an executable page, we also flush it.
- * Combined with flushing the L1I at context switch time, this means
- * we don't have to do any other icache flushes.
- */
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
- HV_Remote_ASID asids[NR_CPUS];
- int i = 0, cpu;
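-	/* Map each linear cpu number to its (x, y) coordinates in the mesh. */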
- for_each_cpu(cpu, mm_cpumask(mm)) {
- HV_Remote_ASID *asid = &asids[i++];
- asid->y = cpu / smp_topology.width;
- asid->x = cpu % smp_topology.width;
- asid->asid = per_cpu(current_asid, cpu);
- }
- flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
- 0, 0, 0, NULL, asids, i);
-}
-
-void flush_tlb_current_task(void)
-{
- flush_tlb_mm(current->mm);
-}
-
-void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long va)
-{
- unsigned long size = vma_kernel_pagesize(vma);
- int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
- flush_remote(0, cache, mm_cpumask(mm),
- va, size, size, mm_cpumask(mm), NULL, 0);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{
- flush_tlb_page_mm(vma, vma->vm_mm, va);
-}
-EXPORT_SYMBOL(flush_tlb_page);
-
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- unsigned long size = vma_kernel_pagesize(vma);
- struct mm_struct *mm = vma->vm_mm;
- int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
- flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
- mm_cpumask(mm), NULL, 0);
-}
-
-void flush_tlb_all(void)
-{
- int i;
- for (i = 0; ; ++i) {
- HV_VirtAddrRange r = hv_inquire_virtual(i);
- if (r.size == 0)
- break;
- flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
- r.start, r.size, PAGE_SIZE, cpu_online_mask,
- NULL, 0);
- flush_remote(0, 0, NULL,
- r.start, r.size, HPAGE_SIZE, cpu_online_mask,
- NULL, 0);
- }
-}
-
-/*
- * Callers need to flush the L1I themselves if necessary, e.g. for
- * kernel module unload.  Otherwise we assume callers are not using
- * executable pgprot_t's; using EVICT_L1I unconditionally would give
- * the dataplane cpus an unnecessary interrupt.
- */
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- flush_remote(0, 0, NULL,
- start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
-}
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
deleted file mode 100644
index 83a7186198d7..000000000000
--- a/arch/tile/kernel/traps.c
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/kdebug.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/uaccess.h>
-#include <linux/ptrace.h>
-#include <linux/hardirq.h>
-#include <linux/nmi.h>
-#include <asm/stack.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-#include <arch/opcode.h>
-
-void __init trap_init(void)
-{
- /* Nothing needed here since we link code at .intrpt */
-}
-
-int unaligned_fixup = 1;
-
-static int __init setup_unaligned_fixup(char *str)
-{
- /*
- * Say "=-1" to completely disable it. If you just do "=0", we
- * will still parse the instruction, then fire a SIGBUS with
- * the correct address from inside the single_step code.
- */
- if (kstrtoint(str, 0, &unaligned_fixup) != 0)
- return 0;
-
- pr_info("Fixups for unaligned data accesses are %s\n",
- unaligned_fixup >= 0 ?
- (unaligned_fixup ? "enabled" : "disabled") :
- "completely disabled");
- return 1;
-}
-__setup("unaligned_fixup=", setup_unaligned_fixup);
-
-#if CHIP_HAS_TILE_DMA()
-
-static int dma_disabled;
-
-static int __init nodma(char *str)
-{
- pr_info("User-space DMA is disabled\n");
- dma_disabled = 1;
- return 1;
-}
-__setup("nodma", nodma);
-
-/* How to decode SPR_GPV_REASON */
-#define IRET_ERROR (1U << 31)
-#define MT_ERROR (1U << 30)
-#define MF_ERROR (1U << 29)
-#define SPR_INDEX ((1U << 15) - 1)
-#define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */
-
-/*
- * See if this GPV is just to notify the kernel of SPR use and we can
- * retry the user instruction after adjusting some MPLs suitably.
- */
-static int retry_gpv(unsigned int gpv_reason)
-{
- int mpl;
-
- if (gpv_reason & IRET_ERROR)
- return 0;
-
- BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0);
- mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT;
- if (mpl == INT_DMA_NOTIFY && !dma_disabled) {
- /* User is turning on DMA. Allow it and retry. */
- printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n",
- current->pid, current->comm);
- BUG_ON(current->thread.tile_dma_state.enabled);
- current->thread.tile_dma_state.enabled = 1;
- grant_dma_mpls();
- return 1;
- }
-
- return 0;
-}
-
-#endif /* CHIP_HAS_TILE_DMA() */
-
-extern tile_bundle_bits bpt_code;
-
-asm(".pushsection .rodata.bpt_code,\"a\";"
- ".align 8;"
- "bpt_code: bpt;"
- ".size bpt_code,.-bpt_code;"
- ".popsection");
-
-static int special_ill(tile_bundle_bits bundle, int *sigp, int *codep)
-{
- int sig, code, maxcode;
-
- if (bundle == bpt_code) {
- *sigp = SIGTRAP;
- *codep = TRAP_BRKPT;
- return 1;
- }
-
- /* If it's a "raise" bundle, then "ill" must be in pipe X1. */
-#ifdef __tilegx__
- if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
- return 0;
- if (get_Opcode_X1(bundle) != RRR_0_OPCODE_X1)
- return 0;
- if (get_RRROpcodeExtension_X1(bundle) != UNARY_RRR_0_OPCODE_X1)
- return 0;
- if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
- return 0;
-#else
- if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)
- return 0;
- if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1)
- return 0;
- if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1)
- return 0;
- if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1)
- return 0;
-#endif
-
- /* Check that the magic distinguishers are set to mean "raise". */
- if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37)
- return 0;
-
- /* There must be an "addli zero, zero, VAL" in X0. */
- if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
- return 0;
- if (get_Dest_X0(bundle) != TREG_ZERO)
- return 0;
- if (get_SrcA_X0(bundle) != TREG_ZERO)
- return 0;
-
- /*
- * Validate the proposed signal number and si_code value.
- * Note that we embed these in the static instruction itself
- * so that we perturb the register state as little as possible
- * at the time of the actual fault; it's unlikely you'd ever
- * need to dynamically choose which kind of fault to raise
- * from user space.
- */
- sig = get_Imm16_X0(bundle) & 0x3f;
- switch (sig) {
- case SIGILL:
- maxcode = NSIGILL;
- break;
- case SIGFPE:
- maxcode = NSIGFPE;
- break;
- case SIGSEGV:
- maxcode = NSIGSEGV;
- break;
- case SIGBUS:
- maxcode = NSIGBUS;
- break;
- case SIGTRAP:
- maxcode = NSIGTRAP;
- break;
- default:
- return 0;
- }
- code = (get_Imm16_X0(bundle) >> 6) & 0xf;
- if (code <= 0 || code > maxcode)
- return 0;
-
- /* Make it the requested signal. */
- *sigp = sig;
- *codep = code;
- return 1;
-}
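-
-/*
- * Illustrative encoding (assumed assembler syntax): a user-space
- * "raise" bundle matching the checks above pairs an ill with the
- * magic operands in X1 and an addli in X0 carrying the signal in
- * bits [5:0] and the si_code in bits [9:6]:
- *
- *   { addli zero, zero, (1 << 6) | 8 ; ill r29, r37 }
- *
- * would be decoded here as SIGFPE (8) with si_code FPE_INTDIV (1).
- */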
-
-static const char *const int_name[] = {
- [INT_MEM_ERROR] = "Memory error",
- [INT_ILL] = "Illegal instruction",
- [INT_GPV] = "General protection violation",
- [INT_UDN_ACCESS] = "UDN access",
- [INT_IDN_ACCESS] = "IDN access",
-#if CHIP_HAS_SN()
- [INT_SN_ACCESS] = "SN access",
-#endif
- [INT_SWINT_3] = "Software interrupt 3",
- [INT_SWINT_2] = "Software interrupt 2",
- [INT_SWINT_0] = "Software interrupt 0",
- [INT_UNALIGN_DATA] = "Unaligned data",
- [INT_DOUBLE_FAULT] = "Double fault",
-#ifdef __tilegx__
- [INT_ILL_TRANS] = "Illegal virtual address",
-#endif
-};
-
-static int do_bpt(struct pt_regs *regs)
-{
- unsigned long bundle, bcode, bpt;
-
- bundle = *(unsigned long *)instruction_pointer(regs);
-
- /*
-	 * bpt should be { bpt; nop }, which is 0x286a44ae51485000ULL.
-	 * We encode the unused least-significant bits for other purposes.
- */
- bpt = bundle & ~((1ULL << 12) - 1);
- if (bpt != TILE_BPT_BUNDLE)
- return 0;
-
- bcode = bundle & ((1ULL << 12) - 1);
- /*
-	 * Notify the kprobe handlers if the instruction is likely to
-	 * pertain to them.
- */
- switch (bcode) {
- /* breakpoint_insn */
- case 0:
- notify_die(DIE_BREAK, "debug", regs, bundle,
- INT_ILL, SIGTRAP);
- break;
- /* compiled_bpt */
- case DIE_COMPILED_BPT:
- notify_die(DIE_COMPILED_BPT, "debug", regs, bundle,
- INT_ILL, SIGTRAP);
- break;
- /* breakpoint2_insn */
- case DIE_SSTEPBP:
- notify_die(DIE_SSTEPBP, "single_step", regs, bundle,
- INT_ILL, SIGTRAP);
- break;
- default:
- return 0;
- }
-
- return 1;
-}
-
-void __kprobes do_trap(struct pt_regs *regs, int fault_num,
- unsigned long reason)
-{
- siginfo_t info;
- int signo, code;
- unsigned long address = 0;
- tile_bundle_bits instr;
- int is_kernel = !user_mode(regs);
-
- clear_siginfo(&info);
-
- /* Handle breakpoints, etc. */
- if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
- return;
-
- /* Re-enable interrupts, if they were previously enabled. */
- if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
- local_irq_enable();
-
- /*
- * If it hits in kernel mode and we can't fix it up, just exit the
- * current process and hope for the best.
- */
- if (is_kernel) {
- const char *name;
- char buf[100];
- if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
- return;
- if (fault_num >= 0 &&
- fault_num < ARRAY_SIZE(int_name) &&
- int_name[fault_num] != NULL)
- name = int_name[fault_num];
- else
- name = "Unknown interrupt";
- if (fault_num == INT_GPV)
- snprintf(buf, sizeof(buf), "; GPV_REASON %#lx", reason);
-#ifdef __tilegx__
- else if (fault_num == INT_ILL_TRANS)
- snprintf(buf, sizeof(buf), "; address %#lx", reason);
-#endif
- else
- buf[0] = '\0';
- pr_alert("Kernel took bad trap %d (%s) at PC %#lx%s\n",
- fault_num, name, regs->pc, buf);
- show_regs(regs);
- do_exit(SIGKILL); /* FIXME: implement i386 die() */
- }
-
- switch (fault_num) {
- case INT_MEM_ERROR:
- signo = SIGBUS;
- code = BUS_OBJERR;
- break;
- case INT_ILL:
- if (copy_from_user(&instr, (void __user *)regs->pc,
- sizeof(instr))) {
- pr_err("Unreadable instruction for INT_ILL: %#lx\n",
- regs->pc);
- do_exit(SIGKILL);
- }
- if (!special_ill(instr, &signo, &code)) {
- signo = SIGILL;
- code = ILL_ILLOPC;
- }
- address = regs->pc;
- break;
- case INT_GPV:
-#if CHIP_HAS_TILE_DMA()
- if (retry_gpv(reason))
- return;
-#endif
- /*FALLTHROUGH*/
- case INT_UDN_ACCESS:
- case INT_IDN_ACCESS:
-#if CHIP_HAS_SN()
- case INT_SN_ACCESS:
-#endif
- signo = SIGILL;
- code = ILL_PRVREG;
- address = regs->pc;
- break;
- case INT_SWINT_3:
- case INT_SWINT_2:
- case INT_SWINT_0:
- signo = SIGILL;
- code = ILL_ILLTRP;
- address = regs->pc;
- break;
- case INT_UNALIGN_DATA:
-#ifndef __tilegx__ /* Emulated support for single step debugging */
- if (unaligned_fixup >= 0) {
- struct single_step_state *state =
- current_thread_info()->step_state;
- if (!state ||
- (void __user *)(regs->pc) != state->buffer) {
- single_step_once(regs);
- return;
- }
- }
-#endif
- signo = SIGBUS;
- code = BUS_ADRALN;
- address = 0;
- break;
- case INT_DOUBLE_FAULT:
- /*
- * For double fault, "reason" is actually passed as
- * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
- * we can provide the original fault number rather than
- * the uninteresting "INT_DOUBLE_FAULT" so the user can
- * learn what actually struck while PL0 ICS was set.
- */
- fault_num = reason;
- signo = SIGILL;
- code = ILL_DBLFLT;
- address = regs->pc;
- break;
-#ifdef __tilegx__
- case INT_ILL_TRANS: {
- /* Avoid a hardware erratum with the return address stack. */
- fill_ra_stack();
-
- signo = SIGSEGV;
- address = reason;
- code = SEGV_MAPERR;
- break;
- }
-#endif
- default:
- panic("Unexpected do_trap interrupt number %d", fault_num);
- }
-
- info.si_signo = signo;
- info.si_code = code;
- info.si_addr = (void __user *)address;
- if (signo == SIGILL)
- info.si_trapno = fault_num;
- if (signo != SIGTRAP)
- trace_unhandled_signal("trap", regs, address, signo);
- force_sig_info(signo, &info, current);
-}
-
-void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
-{
- nmi_enter();
- switch (reason) {
-#ifdef arch_trigger_cpumask_backtrace
- case TILE_NMI_DUMP_STACK:
- nmi_cpu_backtrace(regs);
- break;
-#endif
- default:
- panic("Unexpected do_nmi type %ld", reason);
- }
- nmi_exit();
-}
-
-/* Deprecated function currently only used here. */
-extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
-
-void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
-{
- _dump_stack(dummy, pc, lr, sp, r52);
- pr_emerg("Double fault: exiting\n");
- machine_halt();
-}
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
deleted file mode 100644
index 77a0b6b6a2a1..000000000000
--- a/arch/tile/kernel/unaligned.c
+++ /dev/null
@@ -1,1603 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * A code-rewriter that handles unaligned exception.
- */
-
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/thread_info.h>
-#include <linux/uaccess.h>
-#include <linux/mman.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/extable.h>
-#include <linux/compat.h>
-#include <linux/prctl.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-#include <arch/abi.h>
-#include <arch/spr_def.h>
-#include <arch/opcode.h>
-
-
-/*
- * This file handles unaligned exceptions for tile-Gx.  Tilepro's
- * unaligned exceptions are handled in single_step.c instead.
- */
-
-int unaligned_printk;
-
-static int __init setup_unaligned_printk(char *str)
-{
- long val;
- if (kstrtol(str, 0, &val) != 0)
- return 0;
- unaligned_printk = val;
-	pr_info("Printk for each unaligned data access is %s\n",
- unaligned_printk ? "enabled" : "disabled");
- return 1;
-}
-__setup("unaligned_printk=", setup_unaligned_printk);
-
-unsigned int unaligned_fixup_count;
-
-#ifdef __tilegx__
-
-/*
- * Unaligned-data JIT fixup code fragment.  Reserved space is 128 bytes.
- * The first 64-bit word saves the fault PC address, the second word is
- * the faulting instruction bundle, followed by 14 JIT bundles.
- */
-
-struct unaligned_jit_fragment {
- unsigned long pc;
- tilegx_bundle_bits bundle;
- tilegx_bundle_bits insn[14];
-};
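-
-/*
- * Size check: 8 (pc) + 8 (bundle) + 14 * 8 (insn) = 128 bytes,
- * matching the reserved space noted above.
- */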
-
-/*
- * Check if a nop or fnop is at the bundle's X0 pipeline.
- */
-
-static bool is_bundle_x0_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_X0(bundle) ==
- NOP_UNARY_OPCODE_X0) &&
- (get_RRROpcodeExtension_X0(bundle) ==
- UNARY_RRR_0_OPCODE_X0) &&
- (get_Opcode_X0(bundle) ==
- RRR_0_OPCODE_X0)) ||
- ((get_UnaryOpcodeExtension_X0(bundle) ==
- FNOP_UNARY_OPCODE_X0) &&
- (get_RRROpcodeExtension_X0(bundle) ==
- UNARY_RRR_0_OPCODE_X0) &&
- (get_Opcode_X0(bundle) ==
- RRR_0_OPCODE_X0)));
-}
-
-/*
- * Check if a nop or fnop is at the bundle's X1 pipeline.
- */
-
-static bool is_bundle_x1_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_X1(bundle) ==
- NOP_UNARY_OPCODE_X1) &&
- (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) &&
- (get_Opcode_X1(bundle) ==
- RRR_0_OPCODE_X1)) ||
- ((get_UnaryOpcodeExtension_X1(bundle) ==
- FNOP_UNARY_OPCODE_X1) &&
- (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) &&
- (get_Opcode_X1(bundle) ==
- RRR_0_OPCODE_X1)));
-}
-
-/*
- * Check if a nop or fnop is at the bundle's Y0 pipeline.
- */
-
-static bool is_bundle_y0_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_Y0(bundle) ==
- NOP_UNARY_OPCODE_Y0) &&
- (get_RRROpcodeExtension_Y0(bundle) ==
- UNARY_RRR_1_OPCODE_Y0) &&
- (get_Opcode_Y0(bundle) ==
- RRR_1_OPCODE_Y0)) ||
- ((get_UnaryOpcodeExtension_Y0(bundle) ==
- FNOP_UNARY_OPCODE_Y0) &&
- (get_RRROpcodeExtension_Y0(bundle) ==
- UNARY_RRR_1_OPCODE_Y0) &&
- (get_Opcode_Y0(bundle) ==
- RRR_1_OPCODE_Y0)));
-}
-
-/*
- * Check if a nop or fnop is at the bundle's Y1 pipeline.
- */
-
-static bool is_bundle_y1_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_Y1(bundle) ==
- NOP_UNARY_OPCODE_Y1) &&
- (get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) &&
- (get_Opcode_Y1(bundle) ==
- RRR_1_OPCODE_Y1)) ||
- ((get_UnaryOpcodeExtension_Y1(bundle) ==
- FNOP_UNARY_OPCODE_Y1) &&
- (get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) &&
- (get_Opcode_Y1(bundle) ==
- RRR_1_OPCODE_Y1)));
-}
-
-/*
- * Test if a bundle's y0 and y1 pipelines are both nop or fnop.
- */
-
-static bool is_y0_y1_nop(tilegx_bundle_bits bundle)
-{
- return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle);
-}
-
-/*
- * Test if a bundle's x0 and x1 pipelines are both nop or fnop.
- */
-
-static bool is_x0_x1_nop(tilegx_bundle_bits bundle)
-{
- return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle);
-}
-
-/*
- * Find the destination and source registers of the faulting unaligned
- * access instruction at X1 or Y2.  Also allocate up to 3 scratch
- * registers, clob1, clob2 and clob3, which are guaranteed to differ
- * from any register used in the fault bundle.  r_alias returns whether
- * any instruction other than the unaligned load/store shares a
- * register with ra, rb or rd.
- */
-
-static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
- uint64_t *rb, uint64_t *clob1, uint64_t *clob2,
- uint64_t *clob3, bool *r_alias)
-{
- int i;
- uint64_t reg;
- uint64_t reg_map = 0, alias_reg_map = 0, map;
- bool alias = false;
-
- /*
-	 * Parse the fault bundle, find the potentially used registers and
-	 * mark the corresponding bits in reg_map and alias_reg_map.  These
-	 * 2 bitmaps are used to find the scratch registers and determine
-	 * whether there is a register alias.
- */
- if (bundle & TILEGX_BUNDLE_MODE_MASK) { /* Y Mode Bundle. */
-
- reg = get_SrcA_Y2(bundle);
- reg_map |= 1ULL << reg;
- *ra = reg;
- reg = get_SrcBDest_Y2(bundle);
- reg_map |= 1ULL << reg;
-
- if (rd) {
- /* Load. */
- *rd = reg;
- alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
- } else {
- /* Store. */
- *rb = reg;
- alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
- }
-
- if (!is_bundle_y1_nop(bundle)) {
- reg = get_SrcA_Y1(bundle);
- reg_map |= (1ULL << reg);
- map = (1ULL << reg);
-
- reg = get_SrcB_Y1(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- reg = get_Dest_Y1(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- if (map & alias_reg_map)
- alias = true;
- }
-
- if (!is_bundle_y0_nop(bundle)) {
- reg = get_SrcA_Y0(bundle);
- reg_map |= (1ULL << reg);
- map = (1ULL << reg);
-
- reg = get_SrcB_Y0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- reg = get_Dest_Y0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- if (map & alias_reg_map)
- alias = true;
- }
- } else { /* X Mode Bundle. */
-
- reg = get_SrcA_X1(bundle);
- reg_map |= (1ULL << reg);
- *ra = reg;
- if (rd) {
- /* Load. */
- reg = get_Dest_X1(bundle);
- reg_map |= (1ULL << reg);
- *rd = reg;
- alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
- } else {
- /* Store. */
- reg = get_SrcB_X1(bundle);
- reg_map |= (1ULL << reg);
- *rb = reg;
- alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
- }
-
- if (!is_bundle_x0_nop(bundle)) {
- reg = get_SrcA_X0(bundle);
- reg_map |= (1ULL << reg);
- map = (1ULL << reg);
-
- reg = get_SrcB_X0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- reg = get_Dest_X0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- if (map & alias_reg_map)
- alias = true;
- }
- }
-
- /*
-	 * "alias" indicates whether the unaligned access registers collide
-	 * with others in the same bundle.  We simply test the case where
-	 * all operands are registers (RRR) and ignore the cases with
-	 * immediates.  If a bundle has no register alias, we may do the
-	 * fixup in a simpler, faster manner, so if an immediate field
-	 * happens to collide with a register, we may end up falling back
-	 * to the generic handling.
- */
-
- *r_alias = alias;
-
- /* Flip bits on reg_map. */
- reg_map ^= -1ULL;
-
-	/* Scan the lower 54 (TREG_SP) bits of reg_map to find 3 set bits. */
- for (i = 0; i < TREG_SP; i++) {
- if (reg_map & (0x1ULL << i)) {
- if (*clob1 == -1) {
- *clob1 = i;
- } else if (*clob2 == -1) {
- *clob2 = i;
- } else if (*clob3 == -1) {
- *clob3 = i;
- return;
- }
- }
- }
-}
-
-/*
- * Sanity check for registers ra, rb, rd and clob1/2/3.  Return true if
- * any of them is unexpected.
- */
-
-static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb,
- uint64_t clob1, uint64_t clob2, uint64_t clob3)
-{
- bool unexpected = false;
- if ((ra >= 56) && (ra != TREG_ZERO))
- unexpected = true;
-
- if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56))
- unexpected = true;
-
- if (rd != -1) {
- if ((rd >= 56) && (rd != TREG_ZERO))
- unexpected = true;
- } else {
- if ((rb >= 56) && (rb != TREG_ZERO))
- unexpected = true;
- }
- return unexpected;
-}
-
-
-#define GX_INSN_X0_MASK ((1ULL << 31) - 1)
-#define GX_INSN_X1_MASK (((1ULL << 31) - 1) << 31)
-#define GX_INSN_Y0_MASK ((0xFULL << 27) | (0xFFFFFULL))
-#define GX_INSN_Y1_MASK (GX_INSN_Y0_MASK << 31)
-#define GX_INSN_Y2_MASK ((0x7FULL << 51) | (0x7FULL << 20))
-
-#ifdef __LITTLE_ENDIAN
-#define GX_INSN_BSWAP(_bundle_) (_bundle_)
-#else
-#define GX_INSN_BSWAP(_bundle_) swab64(_bundle_)
-#endif /* __LITTLE_ENDIAN */
-
-/*
- * __JIT_CODE(.) creates template bundles in the .rodata.unalign_data
- * section.  The corresponding static function jit_x#_###(.) generates a
- * partial or whole bundle based on the template and the given arguments.
- */
-
-#define __JIT_CODE(_X_) \
- asm (".pushsection .rodata.unalign_data, \"a\"\n" \
- _X_"\n" \
- ".popsection\n")
-
-__JIT_CODE("__unalign_jit_x1_mtspr: {mtspr 0, r0}");
-static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_mtspr;
- return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) |
- create_MT_Imm14_X1(spr) | create_SrcA_X1(reg);
-}
-
-__JIT_CODE("__unalign_jit_x1_mfspr: {mfspr r0, 0}");
-static tilegx_bundle_bits jit_x1_mfspr(int reg, int spr)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_mfspr;
- return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) |
- create_MF_Imm14_X1(spr) | create_Dest_X1(reg);
-}
-
-__JIT_CODE("__unalign_jit_x0_addi: {addi r0, r0, 0; iret}");
-static tilegx_bundle_bits jit_x0_addi(int rd, int ra, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_addi;
- return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_Imm8_X0(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x1_ldna: {ldna r0, r0}");
-static tilegx_bundle_bits jit_x1_ldna(int rd, int ra)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_ldna;
- return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) & GX_INSN_X1_MASK) |
- create_Dest_X1(rd) | create_SrcA_X1(ra);
-}
-
-__JIT_CODE("__unalign_jit_x0_dblalign: {dblalign r0, r0 ,r0}");
-static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_dblalign;
- return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_SrcB_X0(rb);
-}
-
-__JIT_CODE("__unalign_jit_x1_iret: {iret}");
-static tilegx_bundle_bits jit_x1_iret(void)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_iret;
- return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK;
-}
-
-__JIT_CODE("__unalign_jit_x01_fnop: {fnop;fnop}");
-static tilegx_bundle_bits jit_x0_fnop(void)
-{
- extern tilegx_bundle_bits __unalign_jit_x01_fnop;
- return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK;
-}
-
-static tilegx_bundle_bits jit_x1_fnop(void)
-{
- extern tilegx_bundle_bits __unalign_jit_x01_fnop;
- return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK;
-}
-
-__JIT_CODE("__unalign_jit_y2_dummy: {fnop; fnop; ld zero, sp}");
-static tilegx_bundle_bits jit_y2_dummy(void)
-{
- extern tilegx_bundle_bits __unalign_jit_y2_dummy;
- return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK;
-}
-
-static tilegx_bundle_bits jit_y1_fnop(void)
-{
- extern tilegx_bundle_bits __unalign_jit_y2_dummy;
- return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK;
-}
-
-__JIT_CODE("__unalign_jit_x1_st1_add: {st1_add r1, r0, 0}");
-static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_st1_add;
- return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) &
- (~create_SrcA_X1(-1)) &
- GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
- create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x1_st: {crc32_8 r1, r0, r0; st r0, r0}");
-static tilegx_bundle_bits jit_x1_st(int ra, int rb)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_st;
- return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) |
- create_SrcA_X1(ra) | create_SrcB_X1(rb);
-}
-
-__JIT_CODE("__unalign_jit_x1_st_add: {st_add r1, r0, 0}");
-static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_st_add;
- return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) &
- (~create_SrcA_X1(-1)) &
- GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
- create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x1_ld: {crc32_8 r1, r0, r0; ld r0, r0}");
-static tilegx_bundle_bits jit_x1_ld(int rd, int ra)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_ld;
- return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) |
- create_Dest_X1(rd) | create_SrcA_X1(ra);
-}
-
-__JIT_CODE("__unalign_jit_x1_ld_add: {ld_add r1, r0, 0}");
-static tilegx_bundle_bits jit_x1_ld_add(int rd, int ra, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_ld_add;
- return (GX_INSN_BSWAP(__unalign_jit_x1_ld_add) &
- (~create_Dest_X1(-1)) &
- GX_INSN_X1_MASK) | create_Dest_X1(rd) |
- create_SrcA_X1(ra) | create_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x0_bfexts: {bfexts r0, r0, 0, 0}");
-static tilegx_bundle_bits jit_x0_bfexts(int rd, int ra, int bfs, int bfe)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_bfexts;
- return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
-}
-
-__JIT_CODE("__unalign_jit_x0_bfextu: {bfextu r0, r0, 0, 0}");
-static tilegx_bundle_bits jit_x0_bfextu(int rd, int ra, int bfs, int bfe)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_bfextu;
- return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
-}
-
-__JIT_CODE("__unalign_jit_x1_addi: {bfextu r1, r1, 0, 0; addi r0, r0, 0}");
-static tilegx_bundle_bits jit_x1_addi(int rd, int ra, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_addi;
- return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) |
- create_Dest_X1(rd) | create_SrcA_X1(ra) |
- create_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x0_shrui: {shrui r0, r0, 0; iret}");
-static tilegx_bundle_bits jit_x0_shrui(int rd, int ra, int imm6)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_shrui;
- return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_ShAmt_X0(imm6);
-}
-
-__JIT_CODE("__unalign_jit_x0_rotli: {rotli r0, r0, 0; iret}");
-static tilegx_bundle_bits jit_x0_rotli(int rd, int ra, int imm6)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_rotli;
- return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_ShAmt_X0(imm6);
-}
-
-__JIT_CODE("__unalign_jit_x1_bnezt: {bnezt r0, __unalign_jit_x1_bnezt}");
-static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_bnezt;
- return (GX_INSN_BSWAP(__unalign_jit_x1_bnezt) &
- GX_INSN_X1_MASK) |
- create_SrcA_X1(ra) | create_BrOff_X1(broff);
-}
-
-#undef __JIT_CODE
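-
-/*
- * Illustrative composition (a sketch, not from the original source):
- * since each jit_*() helper above returns only its pipeline's bits, a
- * full X-mode bundle is built by OR-ing fragments together, e.g. a
- * bundle that performs just an aligned load of rd from ra:
- *
- *   tilegx_bundle_bits b = jit_x0_fnop() | jit_x1_ld(rd, ra);
- */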
-
-/*
- * This function generates the unaligned-fixup JIT.
- *
- * We first find the unaligned load/store instruction's destination and
- * source registers (ra, rb and rd) plus 3 scratch registers by calling
- * find_regs(...).  The 3 scratch clobbers must not alias any register
- * used in the fault bundle.  We then analyze the fault bundle to
- * determine whether it is a load or a store, its operand width, and any
- * branch or address increment.  Finally, the generated JIT is copied
- * into the JIT code area in user space.
- */
-
-static
-void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
- int align_ctl)
-{
- struct thread_info *info = current_thread_info();
- struct unaligned_jit_fragment frag;
- struct unaligned_jit_fragment *jit_code_area;
- tilegx_bundle_bits bundle_2 = 0;
-	/* If bundle_2_enable = false, bundle_2 is a fnop/nop operation. */
- bool bundle_2_enable = true;
- uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
- /*
-	 * Indicates whether the unaligned access
-	 * instruction's registers collide with
-	 * others in the same bundle.
- */
- bool alias = false;
- bool load_n_store = true;
- bool load_store_signed = false;
- unsigned int load_store_size = 8;
-	bool y1_br = false; /* True for a branch in the same bundle at Y1. */
-	int y1_br_reg = 0;
-	/* True for a link operation, i.e. jalr or lnk at Y1. */
-	bool y1_lr = false;
-	int y1_lr_reg = 0;
-	bool x1_add = false; /* True for a load/store ADD instruction at X1. */
- int x1_add_imm8 = 0;
- bool unexpected = false;
- int n = 0, k;
-
- jit_code_area =
- (struct unaligned_jit_fragment *)(info->unalign_jit_base);
-
- memset((void *)&frag, 0, sizeof(frag));
-
- /* 0: X mode, Otherwise: Y mode. */
- if (bundle & TILEGX_BUNDLE_MODE_MASK) {
- unsigned int mod, opcode;
-
- if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
- get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) {
-
- opcode = get_UnaryOpcodeExtension_Y1(bundle);
-
- /*
- * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1
- * pipeline.
- */
- switch (opcode) {
- case JALR_UNARY_OPCODE_Y1:
- case JALRP_UNARY_OPCODE_Y1:
- y1_lr = true;
- y1_lr_reg = 55; /* Link register. */
- /* FALLTHROUGH */
- case JR_UNARY_OPCODE_Y1:
- case JRP_UNARY_OPCODE_Y1:
- y1_br = true;
- y1_br_reg = get_SrcA_Y1(bundle);
- break;
- case LNK_UNARY_OPCODE_Y1:
- /* "lnk" at Y1 pipeline. */
- y1_lr = true;
- y1_lr_reg = get_Dest_Y1(bundle);
- break;
- }
- }
-
- opcode = get_Opcode_Y2(bundle);
- mod = get_Mode(bundle);
-
- /*
- * bundle_2 is the bundle after turning Y2 into a dummy
- * operation: ld zero, sp.
- */
- bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();
-
- /* Turn Y1 into an fnop if Y1 is a branch or lnk operation. */
- if (y1_br || y1_lr) {
- bundle_2 &= ~(GX_INSN_Y1_MASK);
- bundle_2 |= jit_y1_fnop();
- }
-
- if (is_y0_y1_nop(bundle_2))
- bundle_2_enable = false;
-
- if (mod == MODE_OPCODE_YC2) {
- /* Store. */
- load_n_store = false;
- load_store_size = 1 << opcode;
- load_store_signed = false;
- find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
- &clob3, &alias);
- if (load_store_size > 8)
- unexpected = true;
- } else {
- /* Load. */
- load_n_store = true;
- if (mod == MODE_OPCODE_YB2) {
- switch (opcode) {
- case LD_OPCODE_Y2:
- load_store_signed = false;
- load_store_size = 8;
- break;
- case LD4S_OPCODE_Y2:
- load_store_signed = true;
- load_store_size = 4;
- break;
- case LD4U_OPCODE_Y2:
- load_store_signed = false;
- load_store_size = 4;
- break;
- default:
- unexpected = true;
- }
- } else if (mod == MODE_OPCODE_YA2) {
- if (opcode == LD2S_OPCODE_Y2) {
- load_store_signed = true;
- load_store_size = 2;
- } else if (opcode == LD2U_OPCODE_Y2) {
- load_store_signed = false;
- load_store_size = 2;
- } else
- unexpected = true;
- } else
- unexpected = true;
- find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
- &clob3, &alias);
- }
- } else {
- unsigned int opcode;
-
- /* bundle_2 is the bundle after turning X1 into an "fnop". */
- bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();
-
- if (is_x0_x1_nop(bundle_2))
- bundle_2_enable = false;
-
- if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
- opcode = get_UnaryOpcodeExtension_X1(bundle);
-
- if (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) {
- load_n_store = true;
- find_regs(bundle, &rd, &ra, &rb, &clob1,
- &clob2, &clob3, &alias);
-
- switch (opcode) {
- case LD_UNARY_OPCODE_X1:
- load_store_signed = false;
- load_store_size = 8;
- break;
- case LD4S_UNARY_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD4U_UNARY_OPCODE_X1:
- load_store_size = 4;
- break;
-
- case LD2S_UNARY_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD2U_UNARY_OPCODE_X1:
- load_store_size = 2;
- break;
- default:
- unexpected = true;
- }
- } else {
- load_n_store = false;
- load_store_signed = false;
- find_regs(bundle, 0, &ra, &rb,
- &clob1, &clob2, &clob3,
- &alias);
-
- opcode = get_RRROpcodeExtension_X1(bundle);
- switch (opcode) {
- case ST_RRR_0_OPCODE_X1:
- load_store_size = 8;
- break;
- case ST4_RRR_0_OPCODE_X1:
- load_store_size = 4;
- break;
- case ST2_RRR_0_OPCODE_X1:
- load_store_size = 2;
- break;
- default:
- unexpected = true;
- }
- }
- } else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
- load_n_store = true;
- opcode = get_Imm8OpcodeExtension_X1(bundle);
- switch (opcode) {
- case LD_ADD_IMM8_OPCODE_X1:
- load_store_size = 8;
- break;
-
- case LD4S_ADD_IMM8_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD4U_ADD_IMM8_OPCODE_X1:
- load_store_size = 4;
- break;
-
- case LD2S_ADD_IMM8_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD2U_ADD_IMM8_OPCODE_X1:
- load_store_size = 2;
- break;
-
- case ST_ADD_IMM8_OPCODE_X1:
- load_n_store = false;
- load_store_size = 8;
- break;
- case ST4_ADD_IMM8_OPCODE_X1:
- load_n_store = false;
- load_store_size = 4;
- break;
- case ST2_ADD_IMM8_OPCODE_X1:
- load_n_store = false;
- load_store_size = 2;
- break;
- default:
- unexpected = true;
- }
-
- if (!unexpected) {
- x1_add = true;
- if (load_n_store)
- x1_add_imm8 = get_Imm8_X1(bundle);
- else
- x1_add_imm8 = get_Dest_Imm8_X1(bundle);
- }
-
- find_regs(bundle, load_n_store ? (&rd) : NULL,
- &ra, &rb, &clob1, &clob2, &clob3, &alias);
- } else
- unexpected = true;
- }
-
- /*
- * Sanity-check the register numbers extracted from the fault bundle.
- */
- if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true)
- unexpected = true;
-
- /* Warn if the address in register ra is in fact aligned. */
- if (!unexpected)
- WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));
-
-
- /*
- * The fault came from kernel space; here we only need to handle the
- * unaligned "get_user/put_user" macros defined in "uaccess.h".
- * Basically, we will handle a bundle like this:
- * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
- * (refer to "arch/tile/include/asm/uaccess.h" for details).
- * For either a load or a store, a byte-wise operation is performed by
- * calling get_user() or put_user(). If the macro returns a non-zero
- * value, that value is stored in rx; otherwise rx is set to zero.
- * Finally, make pc point to the next bundle and return.
- */
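-/*
- * Concretely, for a (hypothetical) faulting bundle such as
- * "{ ld r0, r1 ; movei r30, 0 }", the code below emulates the load
- * with eight get_user() byte reads from the address in r1, assembles
- * the result into r0, writes the final get_user() status into r30
- * (the "rx" register), and advances pc past the 8-byte bundle.
- */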
-
- if (EX1_PL(regs->ex1) != USER_PL) {
-
- unsigned long rx = 0;
- unsigned long x = 0, ret = 0;
-
- if (y1_br || y1_lr || x1_add ||
- (load_store_signed !=
- (load_n_store && load_store_size == 4))) {
- /*
- * A branch, link, load/store add, or unexpected
- * sign-extension cannot be fixed up here.
- */
- unexpected = true;
- } else if (!unexpected) {
- if (bundle & TILEGX_BUNDLE_MODE_MASK) {
- /*
- * Fault bundle is Y mode.
- * Check if the Y1 and Y0 is the form of
- * { movei rx, 0; nop/fnop }, if yes,
- * find the rx.
- */
-
- if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
- && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
- (get_Imm8_Y1(bundle) == 0) &&
- is_bundle_y0_nop(bundle)) {
- rx = get_Dest_Y1(bundle);
- } else if ((get_Opcode_Y0(bundle) ==
- ADDI_OPCODE_Y0) &&
- (get_SrcA_Y0(bundle) == TREG_ZERO) &&
- (get_Imm8_Y0(bundle) == 0) &&
- is_bundle_y1_nop(bundle)) {
- rx = get_Dest_Y0(bundle);
- } else {
- unexpected = true;
- }
- } else {
- /*
- * Fault bundle is X mode.
- * Check if the X0 is 'movei rx, 0',
- * if yes, find the rx.
- */
-
- if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
- && (get_Imm8OpcodeExtension_X0(bundle) ==
- ADDI_IMM8_OPCODE_X0) &&
- (get_SrcA_X0(bundle) == TREG_ZERO) &&
- (get_Imm8_X0(bundle) == 0)) {
- rx = get_Dest_X0(bundle);
- } else {
- unexpected = true;
- }
- }
-
- /* rx should be less than 56. */
- if (!unexpected && (rx >= 56))
- unexpected = true;
- }
-
- if (!search_exception_tables(regs->pc)) {
- /* No fixup in the exception tables for the pc. */
- unexpected = true;
- }
-
- if (unexpected) {
- /* Unexpected unalign kernel fault. */
- struct task_struct *tsk = validate_current();
-
- bust_spinlocks(1);
-
- show_regs(regs);
-
- if (unlikely(tsk->pid < 2)) {
- panic("Kernel unalign fault running %s!",
- tsk->pid ? "init" : "the idle task");
- }
-#ifdef SUPPORT_DIE
- die("Oops", regs);
-#endif
- bust_spinlocks(0);
-
- do_group_exit(SIGKILL);
-
- } else {
- unsigned long i, b = 0;
- unsigned char *ptr =
- (unsigned char *)regs->regs[ra];
- if (load_n_store) {
- /* handle get_user(x, ptr) */
- for (i = 0; i < load_store_size; i++) {
- ret = get_user(b, ptr++);
- if (!ret) {
- /* Success! update x. */
-#ifdef __LITTLE_ENDIAN
- x |= (b << (8 * i));
-#else
- x <<= 8;
- x |= b;
-#endif /* __LITTLE_ENDIAN */
- } else {
- x = 0;
- break;
- }
- }
-
- /* Sign-extend 4-byte loads. */
- if (load_store_size == 4)
- x = (long)(int)x;
-
- /* Set register rd. */
- regs->regs[rd] = x;
-
- /* Set register rx. */
- regs->regs[rx] = ret;
-
- /* Bump pc. */
- regs->pc += 8;
-
- } else {
- /* Handle put_user(x, ptr) */
- x = regs->regs[rb];
-#ifdef __LITTLE_ENDIAN
- b = x;
-#else
- /*
- * Swap x in order to store x from low
- * to high memory same as the
- * little-endian case.
- */
- switch (load_store_size) {
- case 8:
- b = swab64(x);
- break;
- case 4:
- b = swab32(x);
- break;
- case 2:
- b = swab16(x);
- break;
- }
-#endif /* __LITTLE_ENDIAN */
- for (i = 0; i < load_store_size; i++) {
- ret = put_user(b, ptr++);
- if (ret)
- break;
- /* Success! shift 1 byte. */
- b >>= 8;
- }
- /* Set register rx. */
- regs->regs[rx] = ret;
-
- /* Bump pc. */
- regs->pc += 8;
- }
- }
-
- unaligned_fixup_count++;
-
- if (unaligned_printk) {
- pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
- current->comm, current->pid, regs->regs[ra]);
- }
-
- /* Done! Return to the exception handler. */
- return;
- }
-
- if ((align_ctl == 0) || unexpected) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = (unsigned char __user *)0;
-
- if (unaligned_printk)
- pr_info("Unalign bundle: unexp @%llx, %llx\n",
- (unsigned long long)regs->pc,
- (unsigned long long)bundle);
-
- if (ra < 56) {
- unsigned long uaa = (unsigned long)regs->regs[ra];
- /* Set the bus address. */
- info.si_addr = (unsigned char __user *)uaa;
- }
-
- unaligned_fixup_count++;
-
- trace_unhandled_signal("unaligned fixup trap", regs,
- (unsigned long)info.si_addr, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
-#ifdef __LITTLE_ENDIAN
-#define UA_FIXUP_ADDR_DELTA 1
-#define UA_FIXUP_BFEXT_START(_B_) 0
-#define UA_FIXUP_BFEXT_END(_B_) (8 * (_B_) - 1)
-#else /* __BIG_ENDIAN */
-#define UA_FIXUP_ADDR_DELTA -1
-#define UA_FIXUP_BFEXT_START(_B_) (64 - 8 * (_B_))
-#define UA_FIXUP_BFEXT_END(_B_) 63
-#endif /* __LITTLE_ENDIAN */
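-/*
- * Worked out for a 4-byte access: little-endian walks the address
- * upward (delta +1) and extracts bits [0, 31] (8*4 - 1 = 31), while
- * big-endian walks downward (delta -1) and extracts bits [32, 63]
- * (64 - 8*4 = 32).
- */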
-
-
-
- if ((ra != rb) && (rd != TREG_SP) && !alias &&
- !y1_br && !y1_lr && !x1_add) {
- /*
- * Simple case: ra != rb, no register alias found, and no
- * branch or link. This is the majority of cases, and we
- * can do a little better here than with the generic
- * scheme below.
- */
- if (!load_n_store) {
- /*
- * Simple store: ra != rb, so no scratch register is
- * needed. Just store and rotate right, one byte at a time.
- */
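- /*
- * A sketch of what this emits for a 4-byte store on a
- * little-endian kernel: each bundle pairs
- * "rotli rb, rb, 56" (a right-rotate by 8) with
- * "st1_add ra, rb, 1"; the st1 sees rb before the
- * rotate, so bytes go out low-to-high while rb rotates
- * one byte per step. After 4 steps rb has rotated by
- * 32 bits, hence the trailing "rotli rb, rb, 32" that
- * restores it.
- */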
-#ifdef __BIG_ENDIAN
- frag.insn[n++] =
- jit_x0_addi(ra, ra, load_store_size - 1) |
- jit_x1_fnop();
-#endif /* __BIG_ENDIAN */
- for (k = 0; k < load_store_size; k++) {
- /* Store a byte. */
- frag.insn[n++] =
- jit_x0_rotli(rb, rb, 56) |
- jit_x1_st1_add(ra, rb,
- UA_FIXUP_ADDR_DELTA);
- }
-#ifdef __BIG_ENDIAN
- frag.insn[n] = jit_x1_addi(ra, ra, 1);
-#else
- frag.insn[n] = jit_x1_addi(ra, ra,
- -1 * load_store_size);
-#endif /* __LITTLE_ENDIAN */
-
- if (load_store_size == 8) {
- frag.insn[n] |= jit_x0_fnop();
- } else if (load_store_size == 4) {
- frag.insn[n] |= jit_x0_rotli(rb, rb, 32);
- } else { /* = 2 */
- frag.insn[n] |= jit_x0_rotli(rb, rb, 16);
- }
- n++;
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
- frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
- } else {
- if (rd == ra) {
- /* Use two clobber registers: clob1/2. */
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -16) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, 7) |
- jit_x1_st_add(TREG_SP, clob1, -8);
- frag.insn[n++] =
- jit_x0_addi(clob2, ra, 0) |
- jit_x1_st(TREG_SP, clob2);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(rd, ra);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(clob1, clob1);
- /*
- * Note: rd is guaranteed not to be sp, so sp is
- * still valid here; recover clob1/2 from the stack.
- */
- frag.insn[n++] =
- jit_x0_dblalign(rd, clob1, clob2) |
- jit_x1_ld_add(clob2, TREG_SP, 8);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob1, TREG_SP, 16);
- } else {
- /* Use one clobber register: clob1 only. */
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -16) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, 7) |
- jit_x1_st(TREG_SP, clob1);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(rd, ra);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(clob1, clob1);
- /*
- * Note: rd is guaranteed not to be sp, so sp is
- * still valid here; recover clob1 from the stack.
- */
- frag.insn[n++] =
- jit_x0_dblalign(rd, clob1, ra) |
- jit_x1_ld_add(clob1, TREG_SP, 16);
- }
-
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
- /*
- * For loads narrower than 8 bytes, extract the relevant
- * bytes and apply sign or zero extension.
- */
- if (load_store_size == 4) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- } else if (load_store_size == 2) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- }
-
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_iret();
- }
- } else if (!load_n_store) {
-
- /*
- * Generic memory store cases: use 3 clobber registers.
- *
- * Allocate space for saving clob2, clob1 and clob3 on the
- * user's stack. Register clob3 points to where clob2 is
- * saved, followed by clob1 and clob3 from high to low
- * memory.
- */
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -32) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob3, TREG_SP, 16) |
- jit_x1_st_add(TREG_SP, clob3, 8);
-#ifdef __LITTLE_ENDIAN
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, 0) |
- jit_x1_st_add(TREG_SP, clob1, 8);
-#else
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, load_store_size - 1) |
- jit_x1_st_add(TREG_SP, clob1, 8);
-#endif
- if (load_store_size == 8) {
- /*
- * We store one byte at a time, not for speed but for
- * compact code. After each store the data source
- * register rotates right by one byte, so it is
- * unchanged after 8 stores.
- */
- frag.insn[n++] =
- jit_x0_addi(clob2, TREG_ZERO, 7) |
- jit_x1_st_add(TREG_SP, clob2, 16);
- frag.insn[n++] =
- jit_x0_rotli(rb, rb, 56) |
- jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
- frag.insn[n++] =
- jit_x0_addi(clob2, clob2, -1) |
- jit_x1_bnezt(clob2, -1);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_addi(clob2, y1_br_reg, 0);
- } else if (load_store_size == 4) {
- frag.insn[n++] =
- jit_x0_addi(clob2, TREG_ZERO, 3) |
- jit_x1_st_add(TREG_SP, clob2, 16);
- frag.insn[n++] =
- jit_x0_rotli(rb, rb, 56) |
- jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
- frag.insn[n++] =
- jit_x0_addi(clob2, clob2, -1) |
- jit_x1_bnezt(clob2, -1);
- /*
- * Same as the 8-byte case, but rotate another 4
- * bytes to recover rb after the 4-byte store.
- */
- frag.insn[n++] = jit_x0_rotli(rb, rb, 32) |
- jit_x1_addi(clob2, y1_br_reg, 0);
- } else { /* =2 */
- frag.insn[n++] =
- jit_x0_addi(clob2, rb, 0) |
- jit_x1_st_add(TREG_SP, clob2, 16);
- for (k = 0; k < 2; k++) {
- frag.insn[n++] =
- jit_x0_shrui(rb, rb, 8) |
- jit_x1_st1_add(clob1, rb,
- UA_FIXUP_ADDR_DELTA);
- }
- frag.insn[n++] =
- jit_x0_addi(rb, clob2, 0) |
- jit_x1_addi(clob2, y1_br_reg, 0);
- }
-
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
-
- if (y1_lr) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mfspr(y1_lr_reg,
- SPR_EX_CONTEXT_0_0);
- }
- if (y1_br) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
- clob2);
- }
- if (x1_add) {
- frag.insn[n++] =
- jit_x0_addi(ra, ra, x1_add_imm8) |
- jit_x1_ld_add(clob2, clob3, -8);
- } else {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob2, clob3, -8);
- }
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob1, clob3, -8);
- frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3);
- frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
-
- } else {
- /*
- * Generic memory load cases.
- *
- * Allocate space for saving clob1, clob2 and clob3 on the
- * user's stack. Register clob3 points to where clob1 is
- * saved, followed by clob2 and clob3 from high to low
- * memory.
- */
-
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -32) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob3, TREG_SP, 16) |
- jit_x1_st_add(TREG_SP, clob3, 8);
- frag.insn[n++] =
- jit_x0_addi(clob2, ra, 0) |
- jit_x1_st_add(TREG_SP, clob2, 8);
-
- if (y1_br) {
- frag.insn[n++] =
- jit_x0_addi(clob1, y1_br_reg, 0) |
- jit_x1_st_add(TREG_SP, clob1, 16);
- } else {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_st_add(TREG_SP, clob1, 16);
- }
-
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
-
- if (y1_lr) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mfspr(y1_lr_reg,
- SPR_EX_CONTEXT_0_0);
- }
-
- if (y1_br) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
- clob1);
- }
-
- frag.insn[n++] =
- jit_x0_addi(clob1, clob2, 7) |
- jit_x1_ldna(rd, clob2);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(clob1, clob1);
- frag.insn[n++] =
- jit_x0_dblalign(rd, clob1, clob2) |
- jit_x1_ld_add(clob1, clob3, -8);
- if (x1_add) {
- frag.insn[n++] =
- jit_x0_addi(ra, ra, x1_add_imm8) |
- jit_x1_ld_add(clob2, clob3, -8);
- } else {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob2, clob3, -8);
- }
-
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld(clob3, clob3);
-
- if (load_store_size == 4) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- } else if (load_store_size == 2) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- }
-
- frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
- }
-
- /* Max JIT bundle count is 14. */
- WARN_ON(n > 14);
-
- if (!unexpected) {
- int status = 0;
- int idx = (regs->pc >> 3) &
- ((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1);
-
- frag.pc = regs->pc;
- frag.bundle = bundle;
-
- if (unaligned_printk) {
- pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
- current->comm, current->pid,
- (unsigned long)frag.pc,
- (unsigned long)frag.bundle,
- (int)alias, (int)rd, (int)ra,
- (int)rb, (int)bundle_2_enable,
- (int)y1_lr, (int)y1_br, (int)x1_add);
-
- for (k = 0; k < n; k += 2)
- pr_info("[%d] %016llx %016llx\n",
- k, (unsigned long long)frag.insn[k],
- (unsigned long long)frag.insn[k+1]);
- }
-
- /* Swap bundle byte order for big-endian systems. */
-#ifdef __BIG_ENDIAN
- frag.bundle = GX_INSN_BSWAP(frag.bundle);
- for (k = 0; k < n; k++)
- frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]);
-#endif /* __BIG_ENDIAN */
-
- status = copy_to_user((void __user *)&jit_code_area[idx],
- &frag, sizeof(frag));
- if (status) {
- /* Failed to copy the JIT into userland; send SIGSEGV. */
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)&jit_code_area[idx];
-
- pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
- current->pid, current->comm,
- (unsigned long long)&jit_code_area[idx]);
-
- trace_unhandled_signal("segfault in unalign fixup",
- regs,
- (unsigned long)info.si_addr,
- SIGSEGV);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
-
- /* Do a cheap, racy increment; it need not be accurate. */
- unaligned_fixup_count++;
- __flush_icache_range((unsigned long)&jit_code_area[idx],
- (unsigned long)&jit_code_area[idx] +
- sizeof(frag));
-
- /* Set up SPR_EX_CONTEXT_0_0/1 for returning to the user program. */
- __insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8);
- __insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0));
-
- /* Point pc at the start of the new JIT fragment. */
- regs->pc = (unsigned long)&jit_code_area[idx].insn[0];
- /* Set ICS in SPR_EX_CONTEXT_K_1. */
- regs->ex1 = PL_ICS_EX1(USER_PL, 1);
- }
-}
-
-
-/*
- * C function to generate the unaligned-data fixup JIT. Called from
- * the unaligned-data interrupt handler.
- *
- * First check whether unaligned fixup is disabled, the exception did
- * not come from user space, or the sp register points to an unaligned
- * address; if so, generate a SIGBUS. Then map a page into user space
- * as the JIT area if it is not mapped yet, and generate the JIT code
- * by calling jit_bundle_gen(). After that, return back to the
- * exception handler.
- *
- * The exception handler will "iret" to the newly generated JIT code
- * after restoring the caller-saved registers. In theory, the JIT code
- * will perform another "iret" to resume the user's program.
- */
-
-void do_unaligned(struct pt_regs *regs, int vecnum)
-{
- tilegx_bundle_bits __user *pc;
- tilegx_bundle_bits bundle;
- struct thread_info *info = current_thread_info();
- int align_ctl;
-
- /* Check the per-process unaligned-access control flags. */
- align_ctl = unaligned_fixup;
- switch (task_thread_info(current)->align_ctl) {
- case PR_UNALIGN_NOPRINT:
- align_ctl = 1;
- break;
- case PR_UNALIGN_SIGBUS:
- align_ctl = 0;
- break;
- }
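- /*
- * As used below, the resulting align_ctl means: < 0, always
- * send SIGBUS; == 0, send SIGBUS on an unaligned access
- * (PR_UNALIGN_SIGBUS); >= 1, fix the access up
- * (PR_UNALIGN_NOPRINT maps to 1).
- */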
-
- /* Enable interrupts in order to access userland. */
- local_irq_enable();
-
- /*
- * If the fault came from kernel space, there are two choices:
- * (a) unaligned_fixup < 1: first run the get/put_user fixup to
- * return -EFAULT. If there is no fixup, simply panic the kernel.
- * (b) unaligned_fixup >= 1: try to fix the unaligned access if it
- * was triggered by the get_user/put_user() macros. Panic the
- * kernel if it is not fixable.
- */
-
- if (EX1_PL(regs->ex1) != USER_PL) {
-
- if (align_ctl < 1) {
- unaligned_fixup_count++;
- /* If the exception came from the kernel, try to fix it up. */
- if (fixup_exception(regs)) {
- if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx\n",
- (int)unaligned_fixup,
- (unsigned long long)regs->ex1,
- (unsigned long long)regs->pc);
- } else {
- /* Not fixable. Go panic. */
- panic("Unalign exception in Kernel. pc=%lx",
- regs->pc);
- }
- } else {
- /*
- * Try to fix the exception. If we can't, panic the
- * kernel.
- */
- bundle = GX_INSN_BSWAP(
- *((tilegx_bundle_bits *)(regs->pc)));
- jit_bundle_gen(regs, bundle, align_ctl);
- }
- return;
- }
-
- /*
- * If the fault came from user space with ICS set, or the
- * stack is not aligned, trigger SIGBUS.
- */
- if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = (unsigned char __user *)0;
-
- if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx\n",
- (int)unaligned_fixup,
- (unsigned long long)regs->ex1,
- (unsigned long long)regs->pc);
-
- unaligned_fixup_count++;
-
- trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
-
- /* Read the bundle that caused the exception. */
- pc = (tilegx_bundle_bits __user *)(regs->pc);
- if (get_user(bundle, pc) != 0) {
- /* We should almost never get here, since pc is a valid user address. */
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)pc;
-
- pr_err("Couldn't read instruction at %p trying to step\n", pc);
- trace_unhandled_signal("segfault in unalign fixup", regs,
- (unsigned long)info.si_addr, SIGSEGV);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
- if (!info->unalign_jit_base) {
- void __user *user_page;
-
- /*
- * Allocate a page in userland.
- * For 64-bit processes we try to place the mapping far
- * from anything else that might be going on (specifically
- * 64 GB below the top of the user address space). If it
- * happens not to be possible to put it there, it's OK;
- * the kernel will choose another location and we'll
- * remember it for later.
- */
- if (is_compat_task())
- user_page = NULL;
- else
- user_page = (void __user *)(TASK_SIZE - (1UL << 36)) +
- (current->pid << PAGE_SHIFT);
-
- user_page = (void __user *) vm_mmap(NULL,
- (unsigned long)user_page,
- PAGE_SIZE,
- PROT_EXEC | PROT_READ |
- PROT_WRITE,
-#ifdef CONFIG_HOMECACHE
- MAP_CACHE_HOME_TASK |
-#endif
- MAP_PRIVATE |
- MAP_ANONYMOUS,
- 0);
-
- if (IS_ERR((void __force *)user_page)) {
- pr_err("Out of kernel pages trying do_mmap\n");
- return;
- }
-
- /* Save the address in the thread_info struct */
- info->unalign_jit_base = user_page;
- if (unaligned_printk)
- pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
- raw_smp_processor_id(), current->pid,
- (unsigned long long)user_page);
- }
-
- /* Generate unalign JIT */
- jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
-}
-
-#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
deleted file mode 100644
index 9f1e05e12255..000000000000
--- a/arch/tile/kernel/usb.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Register the Tile-Gx USB interfaces as platform devices.
- *
- * The actual USB driver is just some glue (in
- * drivers/usb/host/[eo]hci-tilegx.c) which makes the registers available
- * to the standard kernel EHCI and OHCI drivers.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/usb/tilegx.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-
-static u64 ehci_dmamask = DMA_BIT_MASK(32);
-
-#define USB_HOST_DEF(unit, type, dmamask) \
- static struct \
- tilegx_usb_platform_data tilegx_usb_platform_data_ ## type ## \
- hci ## unit = { \
- .dev_index = unit, \
- }; \
- \
- static struct platform_device tilegx_usb_ ## type ## hci ## unit = { \
- .name = "tilegx-" #type "hci", \
- .id = unit, \
- .dev = { \
- .dma_mask = dmamask, \
- .coherent_dma_mask = DMA_BIT_MASK(32), \
- .platform_data = \
- &tilegx_usb_platform_data_ ## type ## hci ## \
- unit, \
- }, \
- };
-
-USB_HOST_DEF(0, e, &ehci_dmamask)
-USB_HOST_DEF(0, o, NULL)
-USB_HOST_DEF(1, e, &ehci_dmamask)
-USB_HOST_DEF(1, o, NULL)
-
-#undef USB_HOST_DEF
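-/*
- * For reference, USB_HOST_DEF(0, e, &ehci_dmamask) above expands
- * (roughly) to:
- *
- *   static struct tilegx_usb_platform_data
- *       tilegx_usb_platform_data_ehci0 = {
- *       .dev_index = 0,
- *   };
- *
- *   static struct platform_device tilegx_usb_ehci0 = {
- *       .name = "tilegx-ehci",
- *       .id = 0,
- *       .dev = {
- *           .dma_mask = &ehci_dmamask,
- *           .coherent_dma_mask = DMA_BIT_MASK(32),
- *           .platform_data = &tilegx_usb_platform_data_ehci0,
- *       },
- *   };
- */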
-
-static struct platform_device *tilegx_usb_devices[] __initdata = {
- &tilegx_usb_ehci0,
- &tilegx_usb_ehci1,
- &tilegx_usb_ohci0,
- &tilegx_usb_ohci1,
-};
-
-/** Add our set of possible USB devices. */
-static int __init tilegx_usb_init(void)
-{
- platform_add_devices(tilegx_usb_devices,
- ARRAY_SIZE(tilegx_usb_devices));
-
- return 0;
-}
-arch_initcall(tilegx_usb_init);
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
deleted file mode 100644
index 5bc51d7dfdcb..000000000000
--- a/arch/tile/kernel/vdso.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-#include <linux/elf.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-
-#include <asm/vdso.h>
-#include <asm/mman.h>
-#include <asm/sections.h>
-
-#include <arch/sim.h>
-
-/* The alignment of the vDSO. */
-#define VDSO_ALIGNMENT PAGE_SIZE
-
-
-static unsigned int vdso_pages;
-static struct page **vdso_pagelist;
-
-#ifdef CONFIG_COMPAT
-static unsigned int vdso32_pages;
-static struct page **vdso32_pagelist;
-#endif
-static int vdso_ready;
-
-/*
- * The vdso data page.
- */
-static union {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
-
-struct vdso_data *vdso_data = &vdso_data_store.data;
-
-static unsigned int __read_mostly vdso_enabled = 1;
-
-static struct page **vdso_setup(void *vdso_kbase, unsigned int pages)
-{
- int i;
- struct page **pagelist;
-
- pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL);
- BUG_ON(pagelist == NULL);
- for (i = 0; i < pages - 1; i++) {
- struct page *pg = virt_to_page(vdso_kbase + i*PAGE_SIZE);
- ClearPageReserved(pg);
- pagelist[i] = pg;
- }
- pagelist[pages - 1] = virt_to_page(vdso_data);
- pagelist[pages] = NULL;
-
- return pagelist;
-}
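-/*
- * The resulting list is laid out as: pagelist[0 .. pages-2] hold the
- * vDSO code pages, pagelist[pages-1] is the vdso_data page, and
- * pagelist[pages] is a NULL terminator. Mapping the data page right
- * after the code is what get_datapage() in vdso/vgettimeofday.c
- * relies on.
- */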
-
-static int __init vdso_init(void)
-{
- int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT;
-
- /*
- * We can disable vDSO support generally, but we need to retain
- * one page to support the two-bundle (16-byte) rt_sigreturn path.
- */
- if (!vdso_enabled) {
- size_t offset = (unsigned long)&__vdso_rt_sigreturn;
- static struct page *sigret_page;
- sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- BUG_ON(sigret_page == NULL);
- vdso_pagelist = &sigret_page;
- vdso_pages = 1;
- BUG_ON(offset >= PAGE_SIZE);
- memcpy(page_address(sigret_page) + offset,
- vdso_start + offset, 16);
-#ifdef CONFIG_COMPAT
- vdso32_pages = vdso_pages;
- vdso32_pagelist = vdso_pagelist;
-#endif
- vdso_ready = 1;
- return 0;
- }
-
- vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- vdso_pages += data_pages;
- vdso_pagelist = vdso_setup(vdso_start, vdso_pages);
-
-#ifdef CONFIG_COMPAT
- vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
- vdso32_pages += data_pages;
- vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages);
-#endif
-
- smp_wmb();
- vdso_ready = 1;
-
- return 0;
-}
-arch_initcall(vdso_init);
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
- if (vma->vm_mm && vma->vm_start == VDSO_BASE)
- return "[vdso]";
-#ifndef __tilegx__
- if (vma->vm_start == MEM_USER_INTRPT)
- return "[intrpt]";
-#endif
- return NULL;
-}
-
-int setup_vdso_pages(void)
-{
- struct page **pagelist;
- unsigned long pages;
- struct mm_struct *mm = current->mm;
- unsigned long vdso_base = 0;
- int retval = 0;
-
- if (!vdso_ready)
- return 0;
-
- mm->context.vdso_base = 0;
-
- pagelist = vdso_pagelist;
- pages = vdso_pages;
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
- pagelist = vdso32_pagelist;
- pages = vdso32_pages;
- }
-#endif
-
- /*
- * The vDSO had a problem and was disabled; just don't "enable"
- * it for this process.
- */
- if (pages == 0)
- return 0;
-
- vdso_base = get_unmapped_area(NULL, vdso_base,
- (pages << PAGE_SHIFT) +
- ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
- 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- retval = vdso_base;
- return retval;
- }
-
- /* Add required alignment. */
- vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
-
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
- */
- mm->context.vdso_base = vdso_base;
-
- /*
- * Our vma flags don't include VM_WRITE, so by default the process
- * isn't allowed to write to those pages.
- * gdb can break that via the ptrace interface and thus trigger COW
- * on those pages, but it is then your responsibility never to do
- * that on the "data" page of the vDSO, or you'll stop getting
- * kernel updates and your nice userland gettimeofday will be
- * totally dead. It's fine to use ptrace for setting breakpoints in
- * the vDSO code pages, though.
- */
- retval = install_special_mapping(mm, vdso_base,
- pages << PAGE_SHIFT,
- VM_READ|VM_EXEC |
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
- pagelist);
- if (retval)
- mm->context.vdso_base = 0;
-
- return retval;
-}
-
-static __init int vdso_func(char *s)
-{
- return kstrtouint(s, 0, &vdso_enabled);
-}
-__setup("vdso=", vdso_func);
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
deleted file mode 100644
index b596a7396382..000000000000
--- a/arch/tile/kernel/vdso/Makefile
+++ /dev/null
@@ -1,117 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Symbols present in the vdso
-vdso-syms = rt_sigreturn gettimeofday
-
-# Files to link into the vdso
-obj-vdso = $(patsubst %, v%.o, $(vdso-syms))
-
-# Build rules
-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
-obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-
-# vdso32 is only for tilegx -m32 compat tasks.
-VDSO32-$(CONFIG_COMPAT) := y
-
-obj-y += vdso.o vdso-syms.o
-obj-$(VDSO32-y) += vdso32.o
-CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-
-# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
-CFLAGS_REMOVE_vdso.o = -pg
-CFLAGS_REMOVE_vdso32.o = -pg
-CFLAGS_REMOVE_vrt_sigreturn.o = -pg
-CFLAGS_REMOVE_vrt_sigreturn32.o = -pg
-CFLAGS_REMOVE_vgettimeofday.o = -pg
-CFLAGS_REMOVE_vgettimeofday32.o = -pg
-
-ifdef CONFIG_FEEDBACK_COLLECT
-# vDSO code runs in userspace, not collecting feedback data.
-CFLAGS_REMOVE_vdso.o = -ffeedback-generate
-CFLAGS_REMOVE_vdso32.o = -ffeedback-generate
-CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate
-CFLAGS_REMOVE_vrt_sigreturn32.o = -ffeedback-generate
-CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate
-CFLAGS_REMOVE_vgettimeofday32.o = -ffeedback-generate
-endif
-
-# Disable gcov profiling for VDSO code
-GCOV_PROFILE := n
-
-# Force dependency
-$(obj)/vdso.o: $(obj)/vdso.so
-
-# link rule for the .so file, .lds has to be first
-SYSCFLAGS_vdso.so.dbg = $(c_flags)
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
-
-# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
-
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
-SYSCFLAGS_vdso-dummy.o = -r
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
- $(call if_changed,vdsold)
-
-LDFLAGS_vdso-syms.o := -r -R
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
- $(call if_changed,ld)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-# actual build commands
-# The DSO images are built using a special linker script
-# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions.
-# Make sure only to export the intended __vdso_xxx symbol offsets.
-quiet_cmd_vdsold = VDSOLD $@
- cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
- -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
- $(CROSS_COMPILE)objcopy \
- $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso32.so: $(obj)/vdso32.so.dbg
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
-vdso32_install: vdso32.so
-
-
-KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-KBUILD_AFLAGS_32 += -m32 -s
-KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_32 += -m32 -fPIC -shared
-
-obj-vdso32 = $(patsubst %, v%32.o, $(vdso-syms))
-
-targets += $(obj-vdso32) vdso32.so vdso32.so.dbg
-obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
-
-$(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-$(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
-
-$(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c FORCE
- $(call if_changed_rule,cc_o_c)
-
-$(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S FORCE
- $(call if_changed,as_o_S)
-
-# Force dependency
-$(obj)/vdso32.o: $(obj)/vdso32.so
-
-SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
-$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32) FORCE
- $(call if_changed,vdsold)
diff --git a/arch/tile/kernel/vdso/vdso.S b/arch/tile/kernel/vdso/vdso.S
deleted file mode 100644
index 3467adb41630..000000000000
--- a/arch/tile/kernel/vdso/vdso.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .global vdso_start, vdso_end
- .align PAGE_SIZE
-vdso_start:
- .incbin "arch/tile/kernel/vdso/vdso.so"
- .align PAGE_SIZE
-vdso_end:
-
- .previous
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
deleted file mode 100644
index 731529f3f06f..000000000000
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#define VDSO_VERSION_STRING LINUX_2.6
-
-
-OUTPUT_ARCH(tile)
-
-/* The ELF entry point can be used to set the AT_SYSINFO value. */
-ENTRY(__vdso_rt_sigreturn);
-
-
-SECTIONS
-{
- . = SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
- .dynamic : { *(.dynamic) } :text :dynamic
-
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
-
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
-
- /*
- * This linker script is used both with -r and with -shared.
- * For the layouts to match, we need to skip more than enough
- * space for the dynamic symbol table et al. If this amount
- * is insufficient, ld -shared will barf. Just increase it here.
- */
- . = 0x1000;
- .text : { *(.text .text.*) } :text
-
- .data : {
- *(.got.plt) *(.got)
- *(.data .data.* .gnu.linkonce.d.*)
- *(.dynbss)
- *(.bss .bss.* .gnu.linkonce.b.*)
- }
-}
-
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
- text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
-}
-
-
-/*
- * This controls what userland symbols we export from the vDSO.
- */
-VERSION
-{
- VDSO_VERSION_STRING {
- global:
- __vdso_rt_sigreturn;
- __vdso_gettimeofday;
- gettimeofday;
- __vdso_clock_gettime;
- clock_gettime;
- local:*;
- };
-}
diff --git a/arch/tile/kernel/vdso/vdso32.S b/arch/tile/kernel/vdso/vdso32.S
deleted file mode 100644
index 1d1ac3257e11..000000000000
--- a/arch/tile/kernel/vdso/vdso32.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .global vdso32_start, vdso32_end
- .align PAGE_SIZE
-vdso32_start:
- .incbin "arch/tile/kernel/vdso/vdso32.so"
- .align PAGE_SIZE
-vdso32_end:
-
- .previous
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
deleted file mode 100644
index e63310c49742..000000000000
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
-#include <linux/time.h>
-#include <asm/timex.h>
-#include <asm/unistd.h>
-#include <asm/vdso.h>
-
-#if CHIP_HAS_SPLIT_CYCLE()
-static inline cycles_t get_cycles_inline(void)
-{
- unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
- unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
- unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);
-
- while (unlikely(high != high2)) {
- low = __insn_mfspr(SPR_CYCLE_LOW);
- high = high2;
- high2 = __insn_mfspr(SPR_CYCLE_HIGH);
- }
-
- return (((cycles_t)high) << 32) | low;
-}
-#define get_cycles get_cycles_inline
-#endif
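-/*
- * The high/low/high2 re-read above is the standard way to read a
- * 64-bit counter exposed as two 32-bit SPRs: if SPR_CYCLE_HIGH changed
- * while SPR_CYCLE_LOW was being read, the low word could belong to
- * either epoch, so we retry until two CYCLE_HIGH samples agree.
- */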
-
-struct syscall_return_value {
- long value;
- long error;
-};
-
-/*
- * Find out the vDSO data page address in the process address space.
- */
-inline unsigned long get_datapage(void)
-{
- unsigned long ret;
-
- /* The vDSO data page is located in the second vDSO page. */
- asm volatile ("lnk %0" : "=r"(ret));
- ret &= ~(PAGE_SIZE - 1);
- ret += PAGE_SIZE;
-
- return ret;
-}
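-/*
- * The "lnk" above writes the address of the following bundle into
- * ret, so rounding down to a page boundary yields the vDSO code page
- * this function is executing from; the data page sits one page later
- * (see the layout set up in kernel/vdso.c).
- */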
-
-static inline u64 vgetsns(struct vdso_data *vdso)
-{
- return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
-}
-
-static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
-{
- unsigned count;
- u64 ns;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->wall_time_sec;
- ns = vdso->wall_time_snsec;
- ns += vgetsns(vdso);
- ns >>= vdso->shift;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
-
- return 0;
-}
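-/*
- * In other words, with tb_seq guaranteeing a consistent snapshot, the
- * computation above is the usual clocksource mult/shift conversion:
- *
- *   ns   = (wall_time_snsec
- *           + ((get_cycles() - cycle_last) & mask) * mult) >> shift;
- *   sec  = wall_time_sec + ns / NSEC_PER_SEC;
- *   nsec = ns % NSEC_PER_SEC;
- *
- * with the seconds carry folded in by __iter_div_u64_rem().
- */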
-
-static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
-{
- unsigned count;
- u64 ns;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->monotonic_time_sec;
- ns = vdso->monotonic_time_snsec;
- ns += vgetsns(vdso);
- ns >>= vdso->shift;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
-
- return 0;
-}
-
-static inline int do_realtime_coarse(struct vdso_data *vdso,
- struct timespec *ts)
-{
- unsigned count;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->wall_time_coarse_sec;
- ts->tv_nsec = vdso->wall_time_coarse_nsec;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- return 0;
-}
-
-static inline int do_monotonic_coarse(struct vdso_data *vdso,
- struct timespec *ts)
-{
- unsigned count;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->monotonic_time_coarse_sec;
- ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- return 0;
-}
-
-struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
- struct timezone *tz)
-{
- struct syscall_return_value ret = { 0, 0 };
- unsigned count;
- struct vdso_data *vdso = (struct vdso_data *)get_datapage();
-
- /* The use of the timezone is obsolete, normally tz is NULL. */
- if (unlikely(tz != NULL)) {
- do {
- count = raw_read_seqcount_begin(&vdso->tz_seq);
- tz->tz_minuteswest = vdso->tz_minuteswest;
- tz->tz_dsttime = vdso->tz_dsttime;
- } while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
- }
-
- if (unlikely(tv == NULL))
- return ret;
-
- do_realtime(vdso, (struct timespec *)tv);
- tv->tv_usec /= 1000;
-
- return ret;
-}
-
-int gettimeofday(struct timeval *tv, struct timezone *tz)
- __attribute__((weak, alias("__vdso_gettimeofday")));
-
-static struct syscall_return_value vdso_fallback_gettime(long clock,
- struct timespec *ts)
-{
- struct syscall_return_value ret;
- __asm__ __volatile__ (
- "swint1"
- : "=R00" (ret.value), "=R01" (ret.error)
- : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
- : "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "memory");
- return ret;
-}
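-/*
- * Per the register constraints above, the fallback is a plain swint1
- * system call: r10 carries __NR_clock_gettime, r0/r1 carry the
- * arguments, and the kernel returns the result in r0 with the error
- * code in r1; all other caller-visible registers are clobbered.
- */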
-
-struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
- struct timespec *ts)
-{
- struct vdso_data *vdso = (struct vdso_data *)get_datapage();
- struct syscall_return_value ret = { 0, 0 };
-
- switch (clock) {
- case CLOCK_REALTIME:
- do_realtime(vdso, ts);
- return ret;
- case CLOCK_MONOTONIC:
- do_monotonic(vdso, ts);
- return ret;
- case CLOCK_REALTIME_COARSE:
- do_realtime_coarse(vdso, ts);
- return ret;
- case CLOCK_MONOTONIC_COARSE:
- do_monotonic_coarse(vdso, ts);
- return ret;
- default:
- return vdso_fallback_gettime(clock, ts);
- }
-}
-
-int clock_gettime(clockid_t clock, struct timespec *ts)
- __attribute__((weak, alias("__vdso_clock_gettime")));
diff --git a/arch/tile/kernel/vdso/vrt_sigreturn.S b/arch/tile/kernel/vdso/vrt_sigreturn.S
deleted file mode 100644
index 6326caf4a039..000000000000
--- a/arch/tile/kernel/vdso/vrt_sigreturn.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <arch/abi.h>
-#include <asm/unistd.h>
-
-/*
- * Note that libc has a copy of this function that it uses to compare
- * against the PC when a stack backtrace ends, so if this code is
- * changed, the libc implementation(s) should also be updated.
- */
-ENTRY(__vdso_rt_sigreturn)
- moveli TREG_SYSCALL_NR_NAME, __NR_rt_sigreturn
- swint1
- /* We don't use ENDPROC to avoid tagging this symbol as FUNC,
- * which confuses the perf tool.
- */
- END(__vdso_rt_sigreturn)
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
deleted file mode 100644
index 3558d981e336..000000000000
--- a/arch/tile/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-#include <asm/thread_info.h>
-#include <hv/hypervisor.h>
-
-/* Text loads starting from the supervisor interrupt vector address. */
-#define TEXT_OFFSET MEM_SV_START
-
-OUTPUT_ARCH(tile)
-ENTRY(_start)
-jiffies = jiffies_64;
-
-PHDRS
-{
- intrpt PT_LOAD ;
- text PT_LOAD ;
- data PT_LOAD ;
-}
-SECTIONS
-{
- /* Text is loaded with a different VA than data; start with text. */
- #undef LOAD_OFFSET
- #define LOAD_OFFSET TEXT_OFFSET
-
- /* Interrupt vectors */
- .intrpt (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */
- {
- _text = .;
- *(.intrpt)
- } :intrpt =0
-
- /* Hypervisor call vectors */
- . = ALIGN(0x10000);
- .hvglue : AT (ADDR(.hvglue) - LOAD_OFFSET) {
- *(.hvglue)
- } :NONE
-
- /* Now the real code */
- . = ALIGN(0x20000);
- _stext = .;
- .text : AT (ADDR(.text) - LOAD_OFFSET) {
- HEAD_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- __fix_text_end = .; /* tile-cpack won't rearrange before this */
- ALIGN_FUNCTION();
- *(.hottext*)
- TEXT_TEXT
- *(.text.*)
- *(.coldtext*)
- *(.fixup)
- *(.gnu.warning)
- } :text =0
- _etext = .;
-
- /* "Init" is divided into two areas with very different virtual addresses. */
- INIT_TEXT_SECTION(PAGE_SIZE)
-
- /*
- * Some things, like the __jump_table, may contain symbol references
- * to __exit text, so include such text in the final image if so.
- * In that case we also override the _einittext from INIT_TEXT_SECTION.
- */
-#ifdef CONFIG_JUMP_LABEL
- .exit.text : {
- EXIT_TEXT
- _einittext = .;
- }
-#endif
-
- /* Now we skip back to PAGE_OFFSET for the data. */
- . = (. - TEXT_OFFSET + PAGE_OFFSET);
- #undef LOAD_OFFSET
- #define LOAD_OFFSET PAGE_OFFSET
-
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
- INIT_DATA_SECTION(16) :data =0
- PERCPU_SECTION(L2_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
- _sdata = .; /* Start of data section */
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
- _edata = .;
-
- EXCEPTION_TABLE(L2_CACHE_BYTES)
- NOTES
-
-
- BSS_SECTION(8, PAGE_SIZE, 1)
- _end = . ;
-
- STABS_DEBUG
- DWARF_DEBUG
-
- DISCARDS
-}
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
deleted file mode 100644
index efce89a8473b..000000000000
--- a/arch/tile/kvm/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# KVM configuration
-#
-
-source "virt/kvm/Kconfig"
-
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- ---help---
- Say Y here to get to see options for using your Linux host to run
- other operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and
- disabled.
-
-if VIRTUALIZATION
-
-config KVM
- tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && MODULES
- select PREEMPT_NOTIFIERS
- select ANON_INODES
- select SRCU
- ---help---
- Support hosting paravirtualized guest machines.
-
- This module provides access to the hardware capabilities through
- a character device node named /dev/kvm.
-
- To compile this as a module, choose M here: the module
- will be called kvm.
-
- If unsure, say N.
-
-source drivers/vhost/Kconfig
-
-endif # VIRTUALIZATION
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
deleted file mode 100644
index 815a1fdeb2e4..000000000000
--- a/arch/tile/lib/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for TILE-specific library files..
-#
-
-lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
- memmove.o memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
- strchr_$(BITS).o strlen_$(BITS).o strnlen_$(BITS).o
-
-lib-$(CONFIG_TILEGX) += memcpy_user_64.o
-lib-$(CONFIG_TILEPRO) += atomic_32.o atomic_asm_32.o
-lib-$(CONFIG_SMP) += spinlock_$(BITS).o usercopy_$(BITS).o
-
-obj-$(CONFIG_MODULES) += exports.o
-
-# The finv_buffer_remote() and copy_{to,from}_user() routines can't
-# have -pg added, since they both rely on being leaf functions.
-CFLAGS_REMOVE_cacheflush.o = -pg
-CFLAGS_REMOVE_memcpy_user_64.o = -pg
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
deleted file mode 100644
index f8128800dbf5..000000000000
--- a/arch/tile/lib/atomic_32.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/cache.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/atomic.h>
-#include <arch/chip.h>
-
-/* This page is remapped on startup to be hash-for-home. */
-int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
-
-int *__atomic_hashed_lock(volatile void *v)
-{
- /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
- /*
- * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
- * Using mm works here because atomic_locks is page aligned.
- */
- unsigned long ptr = __insn_mm((unsigned long)v >> 1,
- (unsigned long)atomic_locks,
- 2, (ATOMIC_HASH_SHIFT + 2) - 1);
- return (int *)ptr;
-}
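-/*
- * Modulo the exact "mm" bit-merge semantics, this is equivalent to
- * something like (assuming ATOMIC_HASH_SIZE == 1 << ATOMIC_HASH_SHIFT):
- *
- *   return &atomic_locks[((unsigned long)v >> 3)
- *                        & (ATOMIC_HASH_SIZE - 1)];
- *
- * i.e. bits [3, 3 + ATOMIC_HASH_SHIFT) of the address select one of
- * the lock words in the page-aligned atomic_locks[] array.
- */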
-
-#ifdef CONFIG_SMP
-/* Return whether the passed pointer is a valid atomic lock pointer. */
-static int is_atomic_lock(int *p)
-{
- return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
-}
-
-void __atomic_fault_unlock(int *irqlock_word)
-{
- BUG_ON(!is_atomic_lock(irqlock_word));
- BUG_ON(*irqlock_word != 1);
- *irqlock_word = 0;
-}
-
-#endif /* CONFIG_SMP */
-
-static inline int *__atomic_setup(volatile void *v)
-{
- /* Issue a load to the target to bring it into cache. */
- *(volatile int *)v;
- return __atomic_hashed_lock(v);
-}
-
-int _atomic_xchg(int *v, int n)
-{
- return __atomic32_xchg(v, __atomic_setup(v), n).val;
-}
-EXPORT_SYMBOL(_atomic_xchg);
-
-int _atomic_xchg_add(int *v, int i)
-{
- return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
-}
-EXPORT_SYMBOL(_atomic_xchg_add);
-
-int _atomic_xchg_add_unless(int *v, int a, int u)
-{
- /*
- * Note: argument order is switched here since it is easier
- * to use the first argument consistently as the "old value"
- * in the assembly, as is done for _atomic_cmpxchg().
- */
- return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
-}
-EXPORT_SYMBOL(_atomic_xchg_add_unless);
-
-int _atomic_cmpxchg(int *v, int o, int n)
-{
- return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
-}
-EXPORT_SYMBOL(_atomic_cmpxchg);
-
-unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
-{
- return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
-}
-EXPORT_SYMBOL(_atomic_fetch_or);
-
-unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
-{
- return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
-}
-EXPORT_SYMBOL(_atomic_fetch_and);
-
-unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
-{
- return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
-}
-EXPORT_SYMBOL(_atomic_fetch_andn);
-
-unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
-{
- return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
-}
-EXPORT_SYMBOL(_atomic_fetch_xor);
-
-
-long long _atomic64_xchg(long long *v, long long n)
-{
- return __atomic64_xchg(v, __atomic_setup(v), n);
-}
-EXPORT_SYMBOL(_atomic64_xchg);
-
-long long _atomic64_xchg_add(long long *v, long long i)
-{
- return __atomic64_xchg_add(v, __atomic_setup(v), i);
-}
-EXPORT_SYMBOL(_atomic64_xchg_add);
-
-long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
-{
- /*
- * Note: argument order is switched here since it is easier
- * to use the first argument consistently as the "old value"
- * in the assembly, as is done for _atomic_cmpxchg().
- */
- return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
-}
-EXPORT_SYMBOL(_atomic64_xchg_add_unless);
-
-long long _atomic64_cmpxchg(long long *v, long long o, long long n)
-{
- return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
-}
-EXPORT_SYMBOL(_atomic64_cmpxchg);
-
-long long _atomic64_fetch_and(long long *v, long long n)
-{
- return __atomic64_fetch_and(v, __atomic_setup(v), n);
-}
-EXPORT_SYMBOL(_atomic64_fetch_and);
-
-long long _atomic64_fetch_or(long long *v, long long n)
-{
- return __atomic64_fetch_or(v, __atomic_setup(v), n);
-}
-EXPORT_SYMBOL(_atomic64_fetch_or);
-
-long long _atomic64_fetch_xor(long long *v, long long n)
-{
- return __atomic64_fetch_xor(v, __atomic_setup(v), n);
-}
-EXPORT_SYMBOL(_atomic64_fetch_xor);
-
-/*
- * If any of the atomic or futex routines hit a bad address (not in
- * the page tables at kernel PL), this routine is called. The futex
- * routines are never used on kernel addresses, and the normal atomics
- * and bitops are never used on user addresses. So a fault on a kernel
- * address must be fatal, but a fault on a user address is a futex
- * fault and we need to return -EFAULT. Note that this routine runs in
- * the context of the "_atomic_xxx()" routines called by the functions
- * in this file.
- */
-struct __get_user __atomic_bad_address(int __user *addr)
-{
- if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
- panic("Bad address used for kernel atomic op: %p\n", addr);
- return (struct __get_user) { .err = -EFAULT };
-}
-
-
-void __init __init_atomic_per_cpu(void)
-{
- /* Validate power-of-two and "bigger than cpus" assumption */
- BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
- BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
-
- /*
- * On TILEPro we prefer to use a single hash-for-home
- * page, since this means atomic operations are less
- * likely to encounter a TLB fault and thus should
- * in general perform faster. You may wish to disable
- * this in situations where few hash-for-home tiles
- * are configured.
- */
- BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
-
- /* The locks must all fit on one page. */
- BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
-
- /*
- * We use the page offset of the atomic value's address as
- * an index into atomic_locks, excluding the low 3 bits.
- * That should not produce more indices than ATOMIC_HASH_SIZE.
- */
- BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
-}
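
The hash described in the comment above is compact enough to state in a few lines of C: the lock index is the atomic word's page offset with the low 3 bits dropped. A minimal sketch, assuming the default 64KB page size; PAGE_SIZE, the table size, and the lock_slot_for() helper are illustrative, not the removed code's actual names:

    #include <stdint.h>

    /* Sketch of the address-to-lock hash described above: page offset,
     * low 3 bits dropped, masked to the (power-of-two) table size.
     * PAGE_SIZE and ATOMIC_HASH_SIZE values are illustrative assumptions.
     */
    #define PAGE_SIZE        65536UL
    #define ATOMIC_HASH_SIZE (PAGE_SIZE >> 3)

    static int atomic_locks[ATOMIC_HASH_SIZE];

    static int *lock_slot_for(const volatile void *v)
    {
            uintptr_t offset = (uintptr_t)v & (PAGE_SIZE - 1);

            return &atomic_locks[(offset >> 3) & (ATOMIC_HASH_SIZE - 1)];
    }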
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
deleted file mode 100644
index 94709ab41ed8..000000000000
--- a/arch/tile/lib/atomic_asm_32.S
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Support routines for atomic operations. Each function takes:
- *
- * r0: address to manipulate
- * r1: pointer to atomic lock guarding this operation (for ATOMIC_LOCK_REG)
- * r2: new value to write, or for cmpxchg/add_unless, value to compare against
- * r3: (cmpxchg/xchg_add_unless) new value to write or add;
- * (atomic64 ops) high word of value to write
- * r4/r5: (cmpxchg64/add_unless64) new value to write or add
- *
- * The 32-bit routines return a "struct __get_user" so that the futex code
- * has an opportunity to return -EFAULT to the user if needed.
- * The 64-bit routines just return a "long long" with the value,
- * since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide any.
- *
- * Note that the caller is advised to issue a suitable L1 or L2
- * prefetch on the address being manipulated to avoid extra stalls.
- * In addition, the hot path is on two icache lines, and we start with
- * a jump to the second line to make sure they are both in cache so
- * that we never stall waiting on icache fill while holding the lock.
- * (This doesn't work out with most 64-bit ops, since they consume
- * too many bundles, so may take an extra i-cache stall.)
- *
- * These routines set the INTERRUPT_CRITICAL_SECTION bit, just
- * like sys_cmpxchg(), so that NMIs like PERF_COUNT will not interrupt
- * the code, just page faults.
- *
- * If the load or store faults in a way that can be directly fixed in
- * the do_page_fault_ics() handler (e.g. a vmalloc reference) we fix it
- * directly, return to the instruction that faulted, and retry it.
- *
- * If the load or store faults in a way that potentially requires us
- * to release the atomic lock, then retry (e.g. a migrating PTE), we
- * reset the PC in do_page_fault_ics() to the "tns" instruction so
- * that on return we will reacquire the lock and restart the op. We
- * are somewhat overloading the exception_table_entry notion by doing
- * this, since those entries are not normally used for migrating PTEs.
- *
- * If the main page fault handler discovers a bad address, it will see
- * the PC pointing to the "tns" instruction (due to the earlier
- * exception_table_entry processing in do_page_fault_ics), and
- * re-reset the PC to the fault handler, atomic_bad_address(), which
- * effectively takes over from the atomic op and can either return a
- * bad "struct __get_user" (for user addresses) or can just panic (for
- * bad kernel addresses).
- *
- * Note that if the value we would store is the same as what we
- * loaded, we bypass the store. Other platforms with true atomics can
- * make the guarantee that a non-atomic __clear_bit(), for example,
- * can safely race with an atomic test_and_set_bit(); this example is
- * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do
- * that on Tile since the "atomic" op is really just a
- * read/modify/write, and can race with the non-atomic
- * read/modify/write. However, if we can short-circuit the write when
- * it is not needed, in the atomic case, we avoid the race.
- */
-
-#include <linux/linkage.h>
-#include <asm/atomic_32.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-
- .section .text.atomic,"ax"
-ENTRY(__start_atomic_asm_code)
-
- .macro atomic_op, name, bitwidth, body
- .align 64
-STD_ENTRY_SECTION(__atomic\name, .text.atomic)
- {
- movei r24, 1
- j 4f /* branch to second cache line */
- }
-1: {
- .ifc \bitwidth,16
- lh r22, r0
- .else
- lw r22, r0
- addi r28, r0, 4
- .endif
- }
- .ifc \bitwidth,64
- lw r23, r28
- .endif
- \body /* set r24, and r25 if 64-bit */
- {
- seq r26, r22, r24
- seq r27, r23, r25
- }
- .ifc \bitwidth,64
- bbnst r27, 2f
- .endif
- bbs r26, 3f /* skip write-back if it's the same value */
-2: {
- .ifc \bitwidth,16
- sh r0, r24
- .else
- sw r0, r24
- .endif
- }
- .ifc \bitwidth,64
- sw r28, r25
- .endif
- mf
-3: {
- move r0, r22
- .ifc \bitwidth,64
- move r1, r23
- .else
- move r1, zero
- .endif
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- mtspr INTERRUPT_CRITICAL_SECTION, zero
- jrp lr
-4: {
- move ATOMIC_LOCK_REG_NAME, r1
- mtspr INTERRUPT_CRITICAL_SECTION, r24
- }
-#ifndef CONFIG_SMP
- j 1b /* no atomic locks */
-#else
- {
- tns r21, ATOMIC_LOCK_REG_NAME
- moveli r23, 2048 /* maximum backoff time in cycles */
- }
- {
- bzt r21, 1b /* branch if lock acquired */
- moveli r25, 32 /* starting backoff time in cycles */
- }
-5: mtspr INTERRUPT_CRITICAL_SECTION, zero
- mfspr r26, CYCLE_LOW /* get start point for this backoff */
-6: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */
- sub r22, r22, r26
- slt r22, r22, r25
- bbst r22, 6b
- {
- mtspr INTERRUPT_CRITICAL_SECTION, r24
- shli r25, r25, 1 /* double the backoff; retry the tns */
- }
- {
- tns r21, ATOMIC_LOCK_REG_NAME
- slt r26, r23, r25 /* is the proposed backoff too big? */
- }
- {
- bzt r21, 1b /* branch if lock acquired */
- mvnz r25, r26, r23
- }
- j 5b
-#endif
- STD_ENDPROC(__atomic\name)
- .ifc \bitwidth,32
- .pushsection __ex_table,"a"
- .align 4
- .word 1b, __atomic\name
- .word 2b, __atomic\name
- .word __atomic\name, __atomic_bad_address
- .popsection
- .endif
- .endm
-
-
-/*
- * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions.
- */
-
-atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
-atomic_op 32_xchg, 32, "move r24, r2"
-atomic_op 32_xchg_add, 32, "add r24, r22, r2"
-atomic_op 32_xchg_add_unless, 32, \
- "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
-atomic_op 32_fetch_or, 32, "or r24, r22, r2"
-atomic_op 32_fetch_and, 32, "and r24, r22, r2"
-atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
-atomic_op 32_fetch_xor, 32, "xor r24, r22, r2"
-
-atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
- { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"
-atomic_op 64_xchg, 64, "{ move r24, r2; move r25, r3 }"
-atomic_op 64_xchg_add, 64, "{ add r24, r22, r2; add r25, r23, r3 }; \
- slt_u r26, r24, r22; add r25, r25, r26"
-atomic_op 64_xchg_add_unless, 64, \
- "{ sne r26, r22, r2; sne r27, r23, r3 }; \
- { bbns r26, 3f; add r24, r22, r4 }; \
- { bbns r27, 3f; add r25, r23, r5 }; \
- slt_u r26, r24, r22; add r25, r25, r26"
-atomic_op 64_fetch_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
-atomic_op 64_fetch_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
-atomic_op 64_fetch_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
-
- jrp lr /* happy backtracer */
-
-ENTRY(__end_atomic_asm_code)
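
The acquire path between labels 4: and 5: above is a test-and-set loop with exponential backoff, starting at 32 cycles and capped at 2048. A rough C rendering of just that protocol; atomic_flag_test_and_set() stands in for the "tns" instruction and clock() for the CYCLE_LOW register, so both are illustrative stand-ins rather than what the removed code used:

    #include <stdatomic.h>
    #include <time.h>

    /* Sketch of the tns/backoff loop above.  The real code also toggles
     * INTERRUPT_CRITICAL_SECTION off around the spin and back on before
     * each retry; that detail is omitted here.
     */
    static void acquire_atomic_lock(atomic_flag *lock)
    {
            unsigned long backoff = 32;            /* starting backoff */

            while (atomic_flag_test_and_set(lock)) {
                    clock_t start = clock();

                    while ((unsigned long)(clock() - start) < backoff)
                            ;                      /* spin out the backoff */
                    backoff *= 2;                  /* double it each retry */
                    if (backoff > 2048)
                            backoff = 2048;        /* clamp at the maximum */
            }
    }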
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
deleted file mode 100644
index c1ebc1065fc1..000000000000
--- a/arch/tile/lib/cacheflush.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/export.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <arch/icache.h>
-#include <arch/spr_def.h>
-
-
-void __flush_icache_range(unsigned long start, unsigned long end)
-{
- invalidate_icache((const void *)start, end - start, PAGE_SIZE);
-}
-
-
-/* Force a load instruction to issue. */
-static inline void force_load(char *p)
-{
- *(volatile char *)p;
-}
-
-/*
- * Flush and invalidate a VA range that is homed remotely on a single
- * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
- * until the memory controller holds the flushed values.
- */
-void __attribute__((optimize("omit-frame-pointer")))
-finv_buffer_remote(void *buffer, size_t size, int hfh)
-{
- char *p, *base;
- size_t step_size, load_count;
-
- /*
- * On TILEPro the striping granularity is a fixed 8KB; on
- * TILE-Gx it is configurable, and we rely on the fact that
- * the hypervisor always configures maximum striping, so that
- * bits 9 and 10 of the PA are part of the stripe function, so
- * every 512 bytes we hit a striping boundary.
- */
-#ifdef __tilegx__
- const unsigned long STRIPE_WIDTH = 512;
-#else
- const unsigned long STRIPE_WIDTH = 8192;
-#endif
-
-#ifdef __tilegx__
- /*
- * On TILE-Gx, we must disable the dstream prefetcher before doing
- * a cache flush; otherwise, we could end up with data in the cache
- * that we don't want there. Note that normally we'd do an mf
- * after the SPR write to disabling the prefetcher, but we do one
- * below, before any further loads, so there's no need to do it
- * here.
- */
- uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
- __insn_mtspr(SPR_DSTREAM_PF, 0);
-#endif
-
- /*
- * Flush and invalidate the buffer out of the local L1/L2
- * and request the home cache to flush and invalidate as well.
- */
- __finv_buffer(buffer, size);
-
- /*
- * Wait for the home cache to acknowledge that it has processed
- * all the flush-and-invalidate requests. This does not mean
- * that the flushed data has reached the memory controller yet,
- * but it does mean the home cache is processing the flushes.
- */
- __insn_mf();
-
- /*
- * Issue a load to the last cache line, which can't complete
- * until all the previously-issued flushes to the same memory
- * controller have also completed. If we weren't striping
- * memory, that one load would be sufficient, but since we may
- * be, we also need to back up to the last load issued to
- * another memory controller, which would be the point where
- * we crossed a "striping" boundary (the granularity of striping
- * across memory controllers). Keep backing up and doing this
- * until we are before the beginning of the buffer, or have
- * hit all the controllers.
- *
- * If we are flushing a hash-for-home buffer, it's even worse.
- * Each line may be homed on a different tile, and each tile
- * may have up to four lines that are on different
- * controllers. So as we walk backwards, we have to touch
- * enough cache lines to satisfy these constraints. In
- * practice this ends up being close enough to "load from
- * every cache line on a full memory stripe on each
- * controller" that we simply do that, to simplify the logic.
- *
- * On TILE-Gx the hash-for-home function is much more complex,
- * with the upshot being we can't readily guarantee we have
- * hit both entries in the 128-entry AMT that were hit by any
- * load in the entire range, so we just re-load them all.
- * With larger buffers, we may want to consider using a hypervisor
- * trap to issue loads directly to each hash-for-home tile for
- * each controller (doing it from Linux would trash the TLB).
- */
- if (hfh) {
- step_size = L2_CACHE_BYTES;
-#ifdef __tilegx__
- load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
-#else
- load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
- (1 << CHIP_LOG_NUM_MSHIMS());
-#endif
- } else {
- step_size = STRIPE_WIDTH;
- load_count = (1 << CHIP_LOG_NUM_MSHIMS());
- }
-
- /* Load the last byte of the buffer. */
- p = (char *)buffer + size - 1;
- force_load(p);
-
- /* Bump down to the end of the previous stripe or cache line. */
- p -= step_size;
- p = (char *)((unsigned long)p | (step_size - 1));
-
- /* Figure out how far back we need to go. */
- base = p - (step_size * (load_count - 2));
- if ((unsigned long)base < (unsigned long)buffer)
- base = buffer;
-
- /* Fire all the loads we need. */
- for (; p >= base; p -= step_size)
- force_load(p);
-
- /*
- * Repeat, but with finv's instead of loads, to get rid of the
- * data we just loaded into our own cache and the old home L3.
- * The finv's are guaranteed not to actually flush the data in
- * the buffer back to their home, since we just read it, so the
- * lines are clean in cache; we will only invalidate those lines.
- */
- p = (char *)buffer + size - 1;
- __insn_finv(p);
- p -= step_size;
- p = (char *)((unsigned long)p | (step_size - 1));
- for (; p >= base; p -= step_size)
- __insn_finv(p);
-
- /* Wait for these finv's (and thus the first finvs) to be done. */
- __insn_mf();
-
-#ifdef __tilegx__
- /* Reenable the prefetcher. */
- __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
-#endif
-}
-EXPORT_SYMBOL_GPL(finv_buffer_remote);
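
The tail of finv_buffer_remote() reduces to a backward walk that issues one load per memory-controller stripe. That walk in isolation, with illustrative constants standing in for the chip parameters (CHIP_LOG_NUM_MSHIMS() and friends):

    #include <stddef.h>

    /* The backward stride walk from finv_buffer_remote(), distilled.
     * STRIPE_WIDTH and NUM_CONTROLLERS are illustrative assumptions.
     */
    #define STRIPE_WIDTH    512UL
    #define NUM_CONTROLLERS 4UL

    static void force_load(const char *p)
    {
            *(volatile const char *)p;
    }

    static void touch_every_stripe(const char *buffer, size_t size)
    {
            const char *p = buffer + size - 1;
            const char *base;

            force_load(p);                  /* last byte first */

            /* Snap back to the end of the previous stripe. */
            p -= STRIPE_WIDTH;
            p = (const char *)((size_t)p | (STRIPE_WIDTH - 1));

            /* One load per stripe until every controller has been hit,
             * or we fall off the front of the buffer.
             */
            base = p - STRIPE_WIDTH * (NUM_CONTROLLERS - 2);
            if (base < buffer)
                    base = buffer;
            for (; p >= base; p -= STRIPE_WIDTH)
                    force_load(p);
    }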
diff --git a/arch/tile/lib/checksum.c b/arch/tile/lib/checksum.c
deleted file mode 100644
index c3ca3e64d9d9..000000000000
--- a/arch/tile/lib/checksum.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Support code for the main lib/checksum.c.
- */
-
-#include <net/checksum.h>
-#include <linux/module.h>
-
-__wsum do_csum(const unsigned char *buff, int len)
-{
- int odd, count;
- unsigned long result = 0;
-
- if (len <= 0)
- goto out;
- odd = 1 & (unsigned long) buff;
- if (odd) {
- result = (*buff << 8);
- len--;
- buff++;
- }
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(const unsigned short *)buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
-#ifdef __tilegx__
- if (4 & (unsigned long) buff) {
- unsigned int w = *(const unsigned int *)buff;
- result = __insn_v2sadau(result, w, 0);
- count--;
- len -= 4;
- buff += 4;
- }
- count >>= 1; /* nr of 64-bit words.. */
-#endif
-
- /*
- * This algorithm could wrap around for very
- * large buffers, but those should be impossible.
- */
- BUG_ON(count >= 65530);
-
- while (count) {
- unsigned long w = *(const unsigned long *)buff;
- count--;
- buff += sizeof(w);
-#ifdef __tilegx__
- result = __insn_v2sadau(result, w, 0);
-#else
- result = __insn_sadah_u(result, w, 0);
-#endif
- }
-#ifdef __tilegx__
- if (len & 4) {
- unsigned int w = *(const unsigned int *)buff;
- result = __insn_v2sadau(result, w, 0);
- buff += 4;
- }
-#endif
- }
- if (len & 2) {
- result += *(const unsigned short *) buff;
- buff += 2;
- }
- }
- if (len & 1)
- result += *buff;
- result = csum_long(result);
- if (odd)
- result = swab16(result);
-out:
- return result;
-}
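
do_csum() above accumulates with the Tile SIMD-add instructions and leaves wide folding to csum_long(); a plain-C rendition of the same ones'-complement sum may make the odd-byte handling and the final swab16() easier to follow. This sketch folds all the way to 16 bits for clarity, which do_csum() itself leaves to the caller:

    #include <stdint.h>
    #include <stddef.h>

    /* Portable rendition of the 16-bit ones'-complement sum above,
     * without the Tile SIMD instructions.  Little-endian byte order
     * assumed, matching the hardware the removed code ran on.
     */
    static uint16_t csum_portable(const unsigned char *buff, size_t len)
    {
            uint32_t sum = 0;
            int odd = (uintptr_t)buff & 1;
            size_t i = 0;

            if (odd && len)                   /* leading odd byte: high half */
                    sum += (uint32_t)buff[i++] << 8;
            for (; i + 1 < len; i += 2)       /* 16-bit words */
                    sum += (uint32_t)buff[i] | ((uint32_t)buff[i + 1] << 8);
            if (i < len)                      /* trailing byte: low half */
                    sum += buff[i];

            while (sum >> 16)                 /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            if (odd)                          /* undo the byte shift */
                    sum = ((sum & 0xff) << 8) | (sum >> 8);
            return (uint16_t)sum;
    }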
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c
deleted file mode 100644
index 75947edccb26..000000000000
--- a/arch/tile/lib/cpumask.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/cpumask.h>
-#include <linux/ctype.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/export.h>
-
-/*
- * Allow cropping out bits beyond the end of the array.
- * Move to "lib" directory if more clients want to use this routine.
- */
-int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
-{
- unsigned a, b;
-
- bitmap_zero(maskp, nmaskbits);
- do {
- if (!isdigit(*bp))
- return -EINVAL;
- a = simple_strtoul(bp, (char **)&bp, 10);
- b = a;
- if (*bp == '-') {
- bp++;
- if (!isdigit(*bp))
- return -EINVAL;
- b = simple_strtoul(bp, (char **)&bp, 10);
- }
- if (!(a <= b))
- return -EINVAL;
- if (b >= nmaskbits)
- b = nmaskbits-1;
- while (a <= b) {
- set_bit(a, maskp);
- a++;
- }
- if (*bp == ',')
- bp++;
- } while (*bp != '\0' && *bp != '\n');
- return 0;
-}
-EXPORT_SYMBOL(bitmap_parselist_crop);
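
What distinguishes this parser from the stock bitmap_parselist() is the cropping: ranges that run past nmaskbits are silently clipped rather than rejected. A hypothetical caller, to make that concrete:

    #include <linux/bitmap.h>

    /* Hypothetical caller of the (removed) cropping parser.  With the
     * stock bitmap_parselist(), "9-100" against an 8-bit mask would be
     * rejected as out of range; here it is simply cropped away.
     */
    static int crop_example(void)
    {
            DECLARE_BITMAP(mask, 8);
            int err = bitmap_parselist_crop("0-2,5,9-100", mask, 8);

            /* On success, bits 0, 1, 2 and 5 are set; 9-100 vanished. */
            return err;
    }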
diff --git a/arch/tile/lib/delay.c b/arch/tile/lib/delay.c
deleted file mode 100644
index cdacdd11d360..000000000000
--- a/arch/tile/lib/delay.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/thread_info.h>
-#include <asm/timex.h>
-
-void __udelay(unsigned long usecs)
-{
- if (usecs > ULONG_MAX / 1000) {
- WARN_ON_ONCE(usecs > ULONG_MAX / 1000);
- usecs = ULONG_MAX / 1000;
- }
- __ndelay(usecs * 1000);
-}
-EXPORT_SYMBOL(__udelay);
-
-void __ndelay(unsigned long nsecs)
-{
- cycles_t target = get_cycles();
- target += ns2cycles(nsecs);
- while (get_cycles() < target)
- cpu_relax();
-}
-EXPORT_SYMBOL(__ndelay);
-
-void __delay(unsigned long cycles)
-{
- cycles_t target = get_cycles() + cycles;
- while (get_cycles() < target)
- cpu_relax();
-}
-EXPORT_SYMBOL(__delay);
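
The ULONG_MAX / 1000 clamp in __udelay() above exists to keep the usecs * 1000 multiply from wrapping on a 32-bit unsigned long. The same saturating idiom in isolation:

    #include <limits.h>

    /* The saturation guard from __udelay(): clamp before multiplying so
     * "usecs * 1000" cannot wrap, at the cost of one (enormous)
     * saturated delay instead of a wrongly short one.
     */
    static unsigned long usecs_to_nsecs_saturating(unsigned long usecs)
    {
            if (usecs > ULONG_MAX / 1000)
                    usecs = ULONG_MAX / 1000;
            return usecs * 1000;
    }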
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
deleted file mode 100644
index ecce8e177e3f..000000000000
--- a/arch/tile/lib/exports.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Exports from assembler code and from libtile-cc.
- */
-
-#include <linux/module.h>
-
-/* arch/tile/lib/usercopy.S */
-#include <linux/uaccess.h>
-EXPORT_SYMBOL(clear_user_asm);
-EXPORT_SYMBOL(flush_user_asm);
-EXPORT_SYMBOL(finv_user_asm);
-
-/* arch/tile/kernel/entry.S */
-#include <linux/kernel.h>
-#include <asm/processor.h>
-EXPORT_SYMBOL(current_text_addr);
-
-/* arch/tile/kernel/head.S */
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* arch/tile/kernel/mcount_64.S */
-#include <asm/ftrace.h>
-EXPORT_SYMBOL(__mcount);
-#endif /* CONFIG_FUNCTION_TRACER */
-
-/* arch/tile/lib/, various memcpy files */
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(raw_copy_to_user);
-EXPORT_SYMBOL(raw_copy_from_user);
-#ifdef __tilegx__
-EXPORT_SYMBOL(raw_copy_in_user);
-#endif
-
-/* hypervisor glue */
-#include <hv/hypervisor.h>
-EXPORT_SYMBOL(hv_dev_open);
-EXPORT_SYMBOL(hv_dev_pread);
-EXPORT_SYMBOL(hv_dev_pwrite);
-EXPORT_SYMBOL(hv_dev_preada);
-EXPORT_SYMBOL(hv_dev_pwritea);
-EXPORT_SYMBOL(hv_dev_poll);
-EXPORT_SYMBOL(hv_dev_poll_cancel);
-EXPORT_SYMBOL(hv_dev_close);
-EXPORT_SYMBOL(hv_sysconf);
-EXPORT_SYMBOL(hv_confstr);
-EXPORT_SYMBOL(hv_get_rtc);
-EXPORT_SYMBOL(hv_set_rtc);
-
-/* libgcc.a */
-uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
-EXPORT_SYMBOL(__udivsi3);
-int32_t __divsi3(int32_t dividend, int32_t divisor);
-EXPORT_SYMBOL(__divsi3);
-uint64_t __udivdi3(uint64_t dividend, uint64_t divisor);
-EXPORT_SYMBOL(__udivdi3);
-int64_t __divdi3(int64_t dividend, int64_t divisor);
-EXPORT_SYMBOL(__divdi3);
-uint32_t __umodsi3(uint32_t dividend, uint32_t divisor);
-EXPORT_SYMBOL(__umodsi3);
-int32_t __modsi3(int32_t dividend, int32_t divisor);
-EXPORT_SYMBOL(__modsi3);
-uint64_t __umoddi3(uint64_t dividend, uint64_t divisor);
-EXPORT_SYMBOL(__umoddi3);
-int64_t __moddi3(int64_t dividend, int64_t divisor);
-EXPORT_SYMBOL(__moddi3);
-#ifdef __tilegx__
-typedef int TItype __attribute__((mode(TI)));
-TItype __multi3(TItype a, TItype b);
-EXPORT_SYMBOL(__multi3); /* required for gcc 7 and later */
-#else
-int64_t __muldi3(int64_t, int64_t);
-EXPORT_SYMBOL(__muldi3);
-uint64_t __lshrdi3(uint64_t, unsigned int);
-EXPORT_SYMBOL(__lshrdi3);
-uint64_t __ashrdi3(uint64_t, unsigned int);
-EXPORT_SYMBOL(__ashrdi3);
-uint64_t __ashldi3(uint64_t, unsigned int);
-EXPORT_SYMBOL(__ashldi3);
-int __ffsdi2(uint64_t);
-EXPORT_SYMBOL(__ffsdi2);
-#endif
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c
deleted file mode 100644
index cc3d9badf030..000000000000
--- a/arch/tile/lib/memchr_32.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-void *memchr(const void *s, int c, size_t n)
-{
- const uint32_t *last_word_ptr;
- const uint32_t *p;
- const char *last_byte_ptr;
- uintptr_t s_int;
- uint32_t goal, before_mask, v, bits;
- char *ret;
-
- if (__builtin_expect(n == 0, 0)) {
- /* Don't dereference any memory if the array is empty. */
- return NULL;
- }
-
- /* Get an aligned pointer. */
- s_int = (uintptr_t) s;
- p = (const uint32_t *)(s_int & -4);
-
- /* Create four copies of the byte for which we are looking. */
- goal = 0x01010101 * (uint8_t) c;
-
- /* Read the first word, but munge it so that bytes before the array
- * will not match goal.
- *
- * Note that this shift count expression works because we know
- * shift counts are taken mod 32.
- */
- before_mask = (1 << (s_int << 3)) - 1;
- v = (*p | before_mask) ^ (goal & before_mask);
-
- /* Compute the address of the last byte. */
- last_byte_ptr = (const char *)s + n - 1;
-
- /* Compute the address of the word containing the last byte. */
- last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
-
- while ((bits = __insn_seqb(v, goal)) == 0) {
- if (__builtin_expect(p == last_word_ptr, 0)) {
- /* We already read the last word in the array,
- * so give up.
- */
- return NULL;
- }
- v = *++p;
- }
-
- /* We found a match, but it might be in a byte past the end
- * of the array.
- */
- ret = ((char *)p) + (__insn_ctz(bits) >> 3);
- return (ret <= last_byte_ptr) ? ret : NULL;
-}
-EXPORT_SYMBOL(memchr);
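
The scan above leans on the seqb byte-compare instruction; on machines without it, the same word-at-a-time search is commonly built from the classic "haszero" bit trick. A portable sketch of that approach for 32-bit words, under illustrative names:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Portable stand-in for the __insn_seqb()-based scan: a byte of v
     * equal to the goal byte becomes a byte with its top bit set.
     */
    static uint32_t byte_matches(uint32_t v, uint32_t goal)
    {
            uint32_t x = v ^ goal;          /* matching bytes -> 0x00 */

            return (x - 0x01010101u) & ~x & 0x80808080u;
    }

    void *memchr_swar(const void *s, int c, size_t n)
    {
            const unsigned char *p = s;
            uint32_t goal = 0x01010101u * (unsigned char)c;

            /* Head: bytewise until p is 4-byte aligned. */
            for (; n && ((uintptr_t)p & 3); n--, p++)
                    if (*p == (unsigned char)c)
                            return (void *)p;

            /* Body: one aligned word at a time. */
            for (; n >= 4; n -= 4, p += 4) {
                    uint32_t v;

                    memcpy(&v, p, 4);
                    if (byte_matches(v, goal))
                            break;  /* match is somewhere in this word */
            }

            /* Tail: localize the match (or finish the buffer) bytewise. */
            for (; n; n--, p++)
                    if (*p == (unsigned char)c)
                            return (void *)p;
            return NULL;
    }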
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
deleted file mode 100644
index f8196b3a950e..000000000000
--- a/arch/tile/lib/memchr_64.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include "string-endian.h"
-
-void *memchr(const void *s, int c, size_t n)
-{
- const uint64_t *last_word_ptr;
- const uint64_t *p;
- const char *last_byte_ptr;
- uintptr_t s_int;
- uint64_t goal, before_mask, v, bits;
- char *ret;
-
- if (__builtin_expect(n == 0, 0)) {
- /* Don't dereference any memory if the array is empty. */
- return NULL;
- }
-
- /* Get an aligned pointer. */
- s_int = (uintptr_t) s;
- p = (const uint64_t *)(s_int & -8);
-
- /* Create eight copies of the byte for which we are looking. */
- goal = copy_byte(c);
-
- /* Read the first word, but munge it so that bytes before the array
- * will not match goal.
- */
- before_mask = MASK(s_int);
- v = (*p | before_mask) ^ (goal & before_mask);
-
- /* Compute the address of the last byte. */
- last_byte_ptr = (const char *)s + n - 1;
-
- /* Compute the address of the word containing the last byte. */
- last_word_ptr = (const uint64_t *)((uintptr_t) last_byte_ptr & -8);
-
- while ((bits = __insn_v1cmpeq(v, goal)) == 0) {
- if (__builtin_expect(p == last_word_ptr, 0)) {
- /* We already read the last word in the array,
- * so give up.
- */
- return NULL;
- }
- v = *++p;
- }
-
- /* We found a match, but it might be in a byte past the end
- * of the array.
- */
- ret = ((char *)p) + (CFZ(bits) >> 3);
- return (ret <= last_byte_ptr) ? ret : NULL;
-}
-EXPORT_SYMBOL(memchr);
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
deleted file mode 100644
index 270f1267cd18..000000000000
--- a/arch/tile/lib/memcpy_32.S
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <arch/chip.h>
-
-
-/*
- * This file shares the implementation of the userspace memcpy and
- * the kernel's memcpy, copy_to_user and copy_from_user.
- */
-
-#include <linux/linkage.h>
-
-#define IS_MEMCPY 0
-#define IS_COPY_FROM_USER 1
-#define IS_COPY_TO_USER -1
-
- .section .text.memcpy_common, "ax"
- .align 64
-
-/* Use this to preface each bundle that can cause an exception so
- * the kernel can clean up properly. The special cleanup code should
- * not use these, since it knows what it is doing.
- */
-#define EX \
- .pushsection __ex_table, "a"; \
- .align 4; \
- .word 9f, memcpy_common_fixup; \
- .popsection; \
- 9
-
-
-/* raw_copy_from_user takes the kernel target address in r0,
- * the user source in r1, and the bytes to copy in r2.
- * It returns the number of uncopiable bytes (hopefully zero) in r0.
- */
-ENTRY(raw_copy_from_user)
-.type raw_copy_from_user, @function
- FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
- .text.memcpy_common, \
- .Lend_memcpy_common - raw_copy_from_user)
- { movei r29, IS_COPY_FROM_USER; j memcpy_common }
- .size raw_copy_from_user, . - raw_copy_from_user
-
-/* raw_copy_to_user takes the user target address in r0,
- * the kernel source in r1, and the bytes to copy in r2.
- * It returns the number of uncopiable bytes (hopefully zero) in r0.
- */
-ENTRY(raw_copy_to_user)
-.type raw_copy_to_user, @function
- FEEDBACK_REENTER(raw_copy_from_user)
- { movei r29, IS_COPY_TO_USER; j memcpy_common }
- .size raw_copy_to_user, . - raw_copy_to_user
-
-ENTRY(memcpy)
-.type memcpy, @function
- FEEDBACK_REENTER(raw_copy_from_user)
- { movei r29, IS_MEMCPY }
- .size memcpy, . - memcpy
- /* Fall through */
-
- .type memcpy_common, @function
-memcpy_common:
- /* On entry, r29 holds one of the IS_* macro values from above. */
-
-
- /* r0 is the dest, r1 is the source, r2 is the size. */
-
- /* Save aside original dest so we can return it at the end. */
- { sw sp, lr; move r23, r0; or r4, r0, r1 }
-
- /* Check for an empty size. */
- { bz r2, .Ldone; andi r4, r4, 3 }
-
- /* Save aside original values in case of a fault. */
- { move r24, r1; move r25, r2 }
- move r27, lr
-
- /* Check for an unaligned source or dest. */
- { bnz r4, .Lcopy_unaligned_maybe_many; addli r4, r2, -256 }
-
-.Lcheck_aligned_copy_size:
- /* If we are copying < 256 bytes, branch to simple case. */
- { blzt r4, .Lcopy_8_check; slti_u r8, r2, 8 }
-
- /* Copying >= 256 bytes, so jump to complex prefetching loop. */
- { andi r6, r1, 63; j .Lcopy_many }
-
-/*
- *
- * Aligned 4 byte at a time copy loop
- *
- */
-
-.Lcopy_8_loop:
- /* Copy two words at a time to hide load latency. */
-EX: { lw r3, r1; addi r1, r1, 4; slti_u r8, r2, 16 }
-EX: { lw r4, r1; addi r1, r1, 4 }
-EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
-EX: { sw r0, r4; addi r0, r0, 4; addi r2, r2, -4 }
-.Lcopy_8_check:
- { bzt r8, .Lcopy_8_loop; slti_u r4, r2, 4 }
-
- /* Copy odd leftover word, if any. */
- { bnzt r4, .Lcheck_odd_stragglers }
-EX: { lw r3, r1; addi r1, r1, 4 }
-EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
-
-.Lcheck_odd_stragglers:
- { bnz r2, .Lcopy_unaligned_few }
-
-.Ldone:
- /* For memcpy return original dest address, else zero. */
- { mz r0, r29, r23; jrp lr }
-
-
-/*
- *
- * Prefetching multiple cache line copy handler (for large transfers).
- *
- */
-
- /* Copy words until r1 is cache-line-aligned. */
-.Lalign_loop:
-EX: { lw r3, r1; addi r1, r1, 4 }
- { andi r6, r1, 63 }
-EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
-.Lcopy_many:
- { bnzt r6, .Lalign_loop; addi r9, r0, 63 }
-
- { addi r3, r1, 60; andi r9, r9, -64 }
-
- /* No need to prefetch dst, we'll just do the wh64
- * right before we copy a line.
- */
-EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, .; move r27, lr }
-EX: { lw r6, r3; addi r3, r3, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
-EX: { lw r7, r3; addi r3, r3, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bz zero, .Lbig_loop2 }
-
- /* On entry to this loop:
- * - r0 points to the start of dst line 0
- * - r1 points to start of src line 0
- * - r2 >= (256 - 60), only the first time the loop trips.
- * - r3 contains r1 + 128 + 60 [pointer to end of source line 2]
- * This is our prefetch address. When we get near the end
- * rather than prefetching off the end this is changed to point
- * to some "safe" recently loaded address.
- * - r5 contains *(r1 + 60) [i.e. last word of source line 0]
- * - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1]
- * - r9 contains ((r0 + 63) & -64)
- * [start of next dst cache line.]
- */
-
-.Lbig_loop:
- { jal .Lcopy_line2; add r15, r1, r2 }
-
-.Lbig_loop2:
- /* Copy line 0, first stalling until r5 is ready. */
-EX: { move r12, r5; lw r16, r1 }
- { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
-EX: { lw r5, r3; addi r3, r3, 64 }
- { jal .Lcopy_line }
-
- /* Copy line 1, first stalling until r6 is ready. */
-EX: { move r12, r6; lw r16, r1 }
- { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
-EX: { lw r6, r3; addi r3, r3, 64 }
- { jal .Lcopy_line }
-
- /* Copy line 2, first stalling until r7 is ready. */
-EX: { move r12, r7; lw r16, r1 }
- { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
-EX: { lw r7, r3; addi r3, r3, 64 }
- /* Use up a caches-busy cycle by jumping back to the top of the
- * loop. Might as well get it out of the way now.
- */
- { j .Lbig_loop }
-
-
- /* On entry:
- * - r0 points to the destination line.
- * - r1 points to the source line.
- * - r3 is the next prefetch address.
- * - r9 holds the last address used for wh64.
- * - r12 = WORD_15
- * - r16 = WORD_0.
- * - r17 == r1 + 16.
- * - r27 holds saved lr to restore.
- *
- * On exit:
- * - r0 is incremented by 64.
- * - r1 is incremented by 64, unless that would point to a word
- * beyond the end of the source array, in which case it is redirected
- * to point to an arbitrary word already in the cache.
- * - r2 is decremented by 64.
- * - r3 is unchanged, unless it points to a word beyond the
- * end of the source array, in which case it is redirected
- * to point to an arbitrary word already in the cache.
- * Redirecting is OK since if we are that close to the end
- * of the array we will not come back to this subroutine
- * and use the contents of the prefetched address.
- * - r4 is nonzero iff r2 >= 64.
- * - r9 is incremented by 64, unless it points beyond the
- * end of the last full destination cache line, in which
- * case it is redirected to a "safe address" that can be
- * clobbered (sp - 64).
- * - lr contains the value in r27.
- */
-
-/* r26 unused */
-
-.Lcopy_line:
- /* TODO: when r3 goes past the end, we would like to redirect it
- * to prefetch the last partial cache line (if any) just once, for the
- * benefit of the final cleanup loop. But we don't want to
- * prefetch that line more than once, or subsequent prefetches
- * will go into the RTF. But then .Lbig_loop should unconditionally
- * branch to top of loop to execute final prefetch, and its
- * nop should become a conditional branch.
- */
-
- /* We need two non-memory cycles here to cover the resources
- * used by the loads initiated by the caller.
- */
- { add r15, r1, r2 }
-.Lcopy_line2:
- { slt_u r13, r3, r15; addi r17, r1, 16 }
-
- /* NOTE: this will stall for one cycle as L1 is busy. */
-
- /* Fill second L1D line. */
-EX: { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
-
- /* Prepare destination line for writing. */
-EX: { wh64 r9; addi r9, r9, 64 }
- /* Load seven words that are L1D hits to cover wh64 L2 usage. */
-
- /* Load the three remaining words from the last L1D line, which
- * we know has already filled the L1D.
- */
-EX: { lw r4, r1; addi r1, r1, 4; addi r20, r1, 16 } /* r4 = WORD_12 */
-EX: { lw r8, r1; addi r1, r1, 4; slt_u r13, r20, r15 }/* r8 = WORD_13 */
-EX: { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */
-
- /* Load the three remaining words from the first L1D line, first
- * stalling until it has filled by "looking at" r16.
- */
-EX: { lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
-EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
-EX: { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */
-
- /* Load second word from the second L1D line, first
- * stalling until it has filled by "looking at" r17.
- */
-EX: { lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */
-
- /* Store last word to the destination line, potentially dirtying it
- * for the first time, which keeps the L2 busy for two cycles.
- */
-EX: { sw r10, r12 } /* store(WORD_15) */
-
- /* Use two L1D hits to cover the sw L2 access above. */
-EX: { lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
-EX: { lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */
-
- /* Fill third L1D line. */
-EX: { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */
-
- /* Store first L1D line. */
-EX: { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
-EX: { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
-EX: { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
-EX: { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
- /* Store second L1D line. */
-EX: { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
-EX: { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
-EX: { sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */
-EX: { sw r0, r12; addi r0, r0, 4 } /* store(WORD_7) */
-
-EX: { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
-EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
-EX: { lw r15, r1; move r1, r20 } /* r15 = WORD_11 */
-
- /* Store third L1D line. */
-EX: { sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
-EX: { sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
-EX: { sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
-EX: { sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */
-
- /* Store rest of fourth L1D line. */
-EX: { sw r0, r4; addi r0, r0, 4 } /* store(WORD_12) */
- {
-EX: sw r0, r8 /* store(WORD_13) */
- addi r0, r0, 4
- /* Will r2 be > 64 after we subtract 64 below? */
- shri r4, r2, 7
- }
- {
-EX: sw r0, r11 /* store(WORD_14) */
- addi r0, r0, 8
- /* Record 64 bytes successfully copied. */
- addi r2, r2, -64
- }
-
- { jrp lr; move lr, r27 }
-
- /* Convey to the backtrace library that the stack frame is size
- * zero, and the real return address is on the stack rather than
- * in 'lr'.
- */
- { info 8 }
-
- .align 64
-.Lcopy_unaligned_maybe_many:
- /* Skip the setup overhead if we aren't copying many bytes. */
- { slti_u r8, r2, 20; sub r4, zero, r0 }
- { bnzt r8, .Lcopy_unaligned_few; andi r4, r4, 3 }
- { bz r4, .Ldest_is_word_aligned; add r18, r1, r2 }
-
-/*
- *
- * unaligned 4 byte at a time copy handler.
- *
- */
-
- /* Copy single bytes until r0 == 0 mod 4, so we can store words. */
-.Lalign_dest_loop:
-EX: { lb_u r3, r1; addi r1, r1, 1; addi r4, r4, -1 }
-EX: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
- { bnzt r4, .Lalign_dest_loop; andi r3, r1, 3 }
-
- /* If source and dest are now *both* aligned, do an aligned copy. */
- { bz r3, .Lcheck_aligned_copy_size; addli r4, r2, -256 }
-
-.Ldest_is_word_aligned:
-
-EX: { andi r8, r0, 63; lwadd_na r6, r1, 4}
- { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }
-
- /* This copies unaligned words until either there are fewer
- * than 4 bytes left to copy, or until the destination pointer
- * is cache-aligned, whichever comes first.
- *
- * On entry:
- * - r0 is the next store address.
- * - r1 points 4 bytes past the load address corresponding to r0.
- * - r2 >= 4
- * - r6 is the next aligned word loaded.
- */
-.Lcopy_unaligned_src_words:
-EX: { lwadd_na r7, r1, 4; slti_u r8, r2, 4 + 4 }
- /* stall */
- { dword_align r6, r7, r1; slti_u r9, r2, 64 + 4 }
-EX: { swadd r0, r6, 4; addi r2, r2, -4 }
- { bnz r8, .Lcleanup_unaligned_words; andi r8, r0, 63 }
- { bnzt r8, .Lcopy_unaligned_src_words; move r6, r7 }
-
- /* On entry:
- * - r0 is the next store address.
- * - r1 points 4 bytes past the load address corresponding to r0.
- * - r2 >= 4 (# of bytes left to store).
- * - r6 is the next aligned src word value.
- * - r9 = (r2 < 64U).
- * - r18 points one byte past the end of source memory.
- */
-.Ldest_is_L2_line_aligned:
-
- {
-		/* Less than a full cache line remains. */
- bnz r9, .Lcleanup_unaligned_words
- move r7, r6
- }
-
- /* r2 >= 64 */
-
- /* Kick off two prefetches, but don't go past the end. */
- { addi r3, r1, 63 - 4; addi r8, r1, 64 + 63 - 4 }
- { prefetch r3; move r3, r8; slt_u r8, r8, r18 }
- { mvz r3, r8, r1; addi r8, r3, 64 }
- { prefetch r3; move r3, r8; slt_u r8, r8, r18 }
- { mvz r3, r8, r1; movei r17, 0 }
-
-.Lcopy_unaligned_line:
- /* Prefetch another line. */
- { prefetch r3; addi r15, r1, 60; addi r3, r3, 64 }
- /* Fire off a load of the last word we are about to copy. */
-EX: { lw_na r15, r15; slt_u r8, r3, r18 }
-
-EX: { mvz r3, r8, r1; wh64 r0 }
-
- /* This loop runs twice.
- *
- * On entry:
- * - r17 is even before the first iteration, and odd before
- * the second. It is incremented inside the loop. Encountering
- * an even value at the end of the loop makes it stop.
- */
-.Lcopy_half_an_unaligned_line:
-EX: {
- /* Stall until the last byte is ready. In the steady state this
- * guarantees all words to load below will be in the L2 cache, which
- * avoids shunting the loads to the RTF.
- */
- move zero, r15
- lwadd_na r7, r1, 16
- }
-EX: { lwadd_na r11, r1, 12 }
-EX: { lwadd_na r14, r1, -24 }
-EX: { lwadd_na r8, r1, 4 }
-EX: { lwadd_na r9, r1, 4 }
-EX: {
- lwadd_na r10, r1, 8
- /* r16 = (r2 < 64), after we subtract 32 from r2 below. */
- slti_u r16, r2, 64 + 32
- }
-EX: { lwadd_na r12, r1, 4; addi r17, r17, 1 }
-EX: { lwadd_na r13, r1, 8; dword_align r6, r7, r1 }
-EX: { swadd r0, r6, 4; dword_align r7, r8, r1 }
-EX: { swadd r0, r7, 4; dword_align r8, r9, r1 }
-EX: { swadd r0, r8, 4; dword_align r9, r10, r1 }
-EX: { swadd r0, r9, 4; dword_align r10, r11, r1 }
-EX: { swadd r0, r10, 4; dword_align r11, r12, r1 }
-EX: { swadd r0, r11, 4; dword_align r12, r13, r1 }
-EX: { swadd r0, r12, 4; dword_align r13, r14, r1 }
-EX: { swadd r0, r13, 4; addi r2, r2, -32 }
- { move r6, r14; bbst r17, .Lcopy_half_an_unaligned_line }
-
- { bzt r16, .Lcopy_unaligned_line; move r7, r6 }
-
- /* On entry:
- * - r0 is the next store address.
- * - r1 points 4 bytes past the load address corresponding to r0.
- * - r2 >= 0 (# of bytes left to store).
- * - r7 is the next aligned src word value.
- */
-.Lcleanup_unaligned_words:
- /* Handle any trailing bytes. */
- { bz r2, .Lcopy_unaligned_done; slti_u r8, r2, 4 }
- { bzt r8, .Lcopy_unaligned_src_words; move r6, r7 }
-
- /* Move r1 back to the point where it corresponds to r0. */
- { addi r1, r1, -4 }
-
- /* Fall through */
-
-/*
- *
- * 1 byte at a time copy handler.
- *
- */
-
-.Lcopy_unaligned_few:
-EX: { lb_u r3, r1; addi r1, r1, 1 }
-EX: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
- { bnzt r2, .Lcopy_unaligned_few }
-
-.Lcopy_unaligned_done:
-
- /* For memcpy return original dest address, else zero. */
- { mz r0, r29, r23; jrp lr }
-
-.Lend_memcpy_common:
- .size memcpy_common, .Lend_memcpy_common - memcpy_common
-
- .section .fixup,"ax"
-memcpy_common_fixup:
- .type memcpy_common_fixup, @function
-
- /* Skip any bytes we already successfully copied.
- * r2 (num remaining) is correct, but r0 (dst) and r1 (src)
- * may not be quite right because of unrolling and prefetching.
- * So we need to recompute their values as the address just
- * after the last byte we are sure was successfully loaded and
- * then stored.
- */
-
- /* Determine how many bytes we successfully copied. */
- { sub r3, r25, r2 }
-
- /* Add this to the original r0 and r1 to get their new values. */
- { add r0, r23, r3; add r1, r24, r3 }
-
- { bzt r29, memcpy_fixup_loop }
- { blzt r29, copy_to_user_fixup_loop }
-
-copy_from_user_fixup_loop:
- /* Try copying the rest one byte at a time, expecting a load fault. */
-.Lcfu: { lb_u r3, r1; addi r1, r1, 1 }
- { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
- { bnzt r2, copy_from_user_fixup_loop }
-
-.Lcopy_from_user_fixup_zero_remainder:
- move lr, r27
- { move r0, r2; jrp lr }
-
-copy_to_user_fixup_loop:
- /* Try copying the rest one byte at a time, expecting a store fault. */
- { lb_u r3, r1; addi r1, r1, 1 }
-.Lctu: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
- { bnzt r2, copy_to_user_fixup_loop }
-.Lcopy_to_user_fixup_done:
- move lr, r27
- { move r0, r2; jrp lr }
-
-memcpy_fixup_loop:
- /* Try copying the rest one byte at a time. We expect a disastrous
- * fault to happen since we are in fixup code, but let it happen.
- */
- { lb_u r3, r1; addi r1, r1, 1 }
- { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
- { bnzt r2, memcpy_fixup_loop }
- /* This should be unreachable, we should have faulted again.
- * But be paranoid and handle it in case some interrupt changed
- * the TLB or something.
- */
- move lr, r27
- { move r0, r23; jrp lr }
-
- .size memcpy_common_fixup, . - memcpy_common_fixup
-
- .section __ex_table,"a"
- .align 4
- .word .Lcfu, .Lcopy_from_user_fixup_zero_remainder
- .word .Lctu, .Lcopy_to_user_fixup_done
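
The fixup handler above recovers by counting how many bytes are known to have been both loaded and stored, then rebasing dst and src from the values saved on entry. A C outline of that recovery arithmetic; the struct and names below merely mirror the registers named in the comments (r23/r24/r25/r2) and are not real kernel APIs:

    /* C outline of what memcpy_common_fixup computes above. */
    struct fixup_state {
            char *orig_dst;          /* r23 */
            const char *orig_src;    /* r24 */
            unsigned long orig_len;  /* r25 */
            unsigned long remaining; /* r2  */
    };

    static void recompute_pointers(struct fixup_state *st,
                                   char **dst, const char **src)
    {
            unsigned long done = st->orig_len - st->remaining;

            /* Resume just past the last byte known copied; unrolling
             * and prefetching made the live r0/r1 unreliable.
             */
            *dst = st->orig_dst + done;
            *src = st->orig_src + done;
    }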
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
deleted file mode 100644
index 4815354b8cd2..000000000000
--- a/arch/tile/lib/memcpy_64.c
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
-
-/* Must be 8 bytes in size. */
-#define op_t uint64_t
-
-/* Threshold value for when to enter the unrolled loops. */
-#define OP_T_THRES 16
-
-#if CHIP_L2_LINE_SIZE() != 64
-#error "Assumes 64 byte line size"
-#endif
-
-/* How many cache lines ahead should we prefetch? */
-#define PREFETCH_LINES_AHEAD 4
-
-/*
- * Provide "base versions" of load and store for the normal code path.
- * The kernel provides other versions for userspace copies.
- */
-#define ST(p, v) (*(p) = (v))
-#define LD(p) (*(p))
-
-#ifndef USERCOPY_FUNC
-#define ST1 ST
-#define ST2 ST
-#define ST4 ST
-#define ST8 ST
-#define LD1 LD
-#define LD2 LD
-#define LD4 LD
-#define LD8 LD
-#define RETVAL dstv
-void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
-#else
-/*
- * Special kernel version will provide implementation of the LDn/STn
- * macros to return a count of uncopied bytes due to mm fault.
- */
-#define RETVAL 0
-int __attribute__((optimize("omit-frame-pointer")))
-USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
-#endif
-{
- char *__restrict dst1 = (char *)dstv;
- const char *__restrict src1 = (const char *)srcv;
- const char *__restrict src1_end;
- const char *__restrict prefetch;
- op_t *__restrict dst8; /* 8-byte pointer to destination memory. */
- op_t final; /* Final bytes to write to trailing word, if any */
- long i;
-
- if (n < 16) {
- for (; n; n--)
- ST1(dst1++, LD1(src1++));
- return RETVAL;
- }
-
- /*
- * Locate the end of source memory we will copy. Don't
- * prefetch past this.
- */
- src1_end = src1 + n - 1;
-
- /* Prefetch ahead a few cache lines, but not past the end. */
- prefetch = src1;
- for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
- __insn_prefetch(prefetch);
- prefetch += CHIP_L2_LINE_SIZE();
- prefetch = (prefetch < src1_end) ? prefetch : src1;
- }
-
- /* Copy bytes until dst is word-aligned. */
- for (; (uintptr_t)dst1 & (sizeof(op_t) - 1); n--)
- ST1(dst1++, LD1(src1++));
-
- /* 8-byte pointer to destination memory. */
- dst8 = (op_t *)dst1;
-
- if (__builtin_expect((uintptr_t)src1 & (sizeof(op_t) - 1), 0)) {
- /* Unaligned copy. */
-
- op_t tmp0 = 0, tmp1 = 0, tmp2, tmp3;
- const op_t *src8 = (const op_t *) ((uintptr_t)src1 &
- -sizeof(op_t));
- const void *srci = (void *)src1;
- int m;
-
- m = (CHIP_L2_LINE_SIZE() << 2) -
- (((uintptr_t)dst8) & ((CHIP_L2_LINE_SIZE() << 2) - 1));
- m = (n < m) ? n : m;
- m /= sizeof(op_t);
-
- /* Copy until 'dst' is cache-line-aligned. */
- n -= (sizeof(op_t) * m);
-
- switch (m % 4) {
- case 0:
- if (__builtin_expect(!m, 0))
- goto _M0;
- tmp1 = LD8(src8++);
- tmp2 = LD8(src8++);
- goto _8B3;
- case 2:
- m += 2;
- tmp3 = LD8(src8++);
- tmp0 = LD8(src8++);
- goto _8B1;
- case 3:
- m += 1;
- tmp2 = LD8(src8++);
- tmp3 = LD8(src8++);
- goto _8B2;
- case 1:
- m--;
- tmp0 = LD8(src8++);
- tmp1 = LD8(src8++);
- if (__builtin_expect(!m, 0))
- goto _8B0;
- }
-
- do {
- tmp2 = LD8(src8++);
- tmp0 = __insn_dblalign(tmp0, tmp1, srci);
- ST8(dst8++, tmp0);
-_8B3:
- tmp3 = LD8(src8++);
- tmp1 = __insn_dblalign(tmp1, tmp2, srci);
- ST8(dst8++, tmp1);
-_8B2:
- tmp0 = LD8(src8++);
- tmp2 = __insn_dblalign(tmp2, tmp3, srci);
- ST8(dst8++, tmp2);
-_8B1:
- tmp1 = LD8(src8++);
- tmp3 = __insn_dblalign(tmp3, tmp0, srci);
- ST8(dst8++, tmp3);
- m -= 4;
- } while (m);
-
-_8B0:
- tmp0 = __insn_dblalign(tmp0, tmp1, srci);
- ST8(dst8++, tmp0);
- src8--;
-
-_M0:
- if (__builtin_expect(n >= CHIP_L2_LINE_SIZE(), 0)) {
- op_t tmp4, tmp5, tmp6, tmp7, tmp8;
-
- prefetch = ((const char *)src8) +
- CHIP_L2_LINE_SIZE() * PREFETCH_LINES_AHEAD;
-
- for (tmp0 = LD8(src8++); n >= CHIP_L2_LINE_SIZE();
- n -= CHIP_L2_LINE_SIZE()) {
- /* Prefetch and advance to next line to
- prefetch, but don't go past the end. */
- __insn_prefetch(prefetch);
-
- /* Make sure prefetch got scheduled
- earlier. */
- __asm__ ("" : : : "memory");
-
- prefetch += CHIP_L2_LINE_SIZE();
- prefetch = (prefetch < src1_end) ? prefetch :
- (const char *) src8;
-
- tmp1 = LD8(src8++);
- tmp2 = LD8(src8++);
- tmp3 = LD8(src8++);
- tmp4 = LD8(src8++);
- tmp5 = LD8(src8++);
- tmp6 = LD8(src8++);
- tmp7 = LD8(src8++);
- tmp8 = LD8(src8++);
-
- tmp0 = __insn_dblalign(tmp0, tmp1, srci);
- tmp1 = __insn_dblalign(tmp1, tmp2, srci);
- tmp2 = __insn_dblalign(tmp2, tmp3, srci);
- tmp3 = __insn_dblalign(tmp3, tmp4, srci);
- tmp4 = __insn_dblalign(tmp4, tmp5, srci);
- tmp5 = __insn_dblalign(tmp5, tmp6, srci);
- tmp6 = __insn_dblalign(tmp6, tmp7, srci);
- tmp7 = __insn_dblalign(tmp7, tmp8, srci);
-
- __insn_wh64(dst8);
-
- ST8(dst8++, tmp0);
- ST8(dst8++, tmp1);
- ST8(dst8++, tmp2);
- ST8(dst8++, tmp3);
- ST8(dst8++, tmp4);
- ST8(dst8++, tmp5);
- ST8(dst8++, tmp6);
- ST8(dst8++, tmp7);
-
- tmp0 = tmp8;
- }
- src8--;
- }
-
-	/* Copy the remaining 8-byte chunks. */
- if (n >= sizeof(op_t)) {
- tmp0 = LD8(src8++);
- for (; n >= sizeof(op_t); n -= sizeof(op_t)) {
- tmp1 = LD8(src8++);
- tmp0 = __insn_dblalign(tmp0, tmp1, srci);
- ST8(dst8++, tmp0);
- tmp0 = tmp1;
- }
- src8--;
- }
-
- if (n == 0)
- return RETVAL;
-
- tmp0 = LD8(src8++);
- tmp1 = ((const char *)src8 <= src1_end)
- ? LD8((op_t *)src8) : 0;
- final = __insn_dblalign(tmp0, tmp1, srci);
-
- } else {
- /* Aligned copy. */
-
- const op_t *__restrict src8 = (const op_t *)src1;
-
- /* src8 and dst8 are both word-aligned. */
- if (n >= CHIP_L2_LINE_SIZE()) {
- /* Copy until 'dst' is cache-line-aligned. */
- for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
- n -= sizeof(op_t))
- ST8(dst8++, LD8(src8++));
-
- for (; n >= CHIP_L2_LINE_SIZE(); ) {
- op_t tmp0, tmp1, tmp2, tmp3;
- op_t tmp4, tmp5, tmp6, tmp7;
-
- /*
- * Prefetch and advance to next line
- * to prefetch, but don't go past the
- * end.
- */
- __insn_prefetch(prefetch);
-
- /* Make sure prefetch got scheduled
- earlier. */
- __asm__ ("" : : : "memory");
-
- prefetch += CHIP_L2_LINE_SIZE();
- prefetch = (prefetch < src1_end) ? prefetch :
- (const char *)src8;
-
- /*
- * Do all the loads before wh64. This
- * is necessary if [src8, src8+7] and
- * [dst8, dst8+7] share the same cache
- * line and dst8 <= src8, as can be
- * the case when called from memmove,
- * or with code tested on x86 whose
- * memcpy always works with forward
- * copies.
- */
- tmp0 = LD8(src8++);
- tmp1 = LD8(src8++);
- tmp2 = LD8(src8++);
- tmp3 = LD8(src8++);
- tmp4 = LD8(src8++);
- tmp5 = LD8(src8++);
- tmp6 = LD8(src8++);
- tmp7 = LD8(src8++);
-
- /* wh64 and wait for tmp7 load completion. */
- __asm__ ("move %0, %0; wh64 %1\n"
- : : "r"(tmp7), "r"(dst8));
-
- ST8(dst8++, tmp0);
- ST8(dst8++, tmp1);
- ST8(dst8++, tmp2);
- ST8(dst8++, tmp3);
- ST8(dst8++, tmp4);
- ST8(dst8++, tmp5);
- ST8(dst8++, tmp6);
- ST8(dst8++, tmp7);
-
- n -= CHIP_L2_LINE_SIZE();
- }
-#if CHIP_L2_LINE_SIZE() != 64
-# error "Fix code that assumes particular L2 cache line size."
-#endif
- }
-
- for (; n >= sizeof(op_t); n -= sizeof(op_t))
- ST8(dst8++, LD8(src8++));
-
- if (__builtin_expect(n == 0, 1))
- return RETVAL;
-
- final = LD8(src8);
- }
-
- /* n != 0 if we get here. Write out any trailing bytes. */
- dst1 = (char *)dst8;
-#ifndef __BIG_ENDIAN__
- if (n & 4) {
- ST4((uint32_t *)dst1, final);
- dst1 += 4;
- final >>= 32;
- n &= 3;
- }
- if (n & 2) {
- ST2((uint16_t *)dst1, final);
- dst1 += 2;
- final >>= 16;
- n &= 1;
- }
- if (n)
- ST1((uint8_t *)dst1, final);
-#else
- if (n & 4) {
- ST4((uint32_t *)dst1, final >> 32);
- dst1 += 4;
- }
- else
- {
- final >>= 32;
- }
- if (n & 2) {
- ST2((uint16_t *)dst1, final >> 16);
- dst1 += 2;
- }
- else
- {
- final >>= 16;
- }
- if (n & 1)
- ST1((uint8_t *)dst1, final >> 8);
-#endif
-
- return RETVAL;
-}
-
-#ifdef USERCOPY_FUNC
-#undef ST1
-#undef ST2
-#undef ST4
-#undef ST8
-#undef LD1
-#undef LD2
-#undef LD4
-#undef LD8
-#undef USERCOPY_FUNC
-#endif
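
The unaligned path above funnels pairs of aligned 8-byte loads through __insn_dblalign, which merges them into the unaligned word the caller actually wants; the instruction derives the merge offset from the source pointer's low bits. A portable little-endian equivalent (the big-endian merge would mirror the shifts), with the shift passed explicitly for illustration:

    #include <stdint.h>

    /* Portable sketch of the __insn_dblalign() idiom: merge two aligned
     * 8-byte loads into the unaligned word starting "shift" bytes into
     * the first one.  Little-endian, as in the #ifndef __BIG_ENDIAN__
     * path above.
     */
    static uint64_t dblalign_sketch(uint64_t lo, uint64_t hi, unsigned shift)
    {
            unsigned bits = shift * 8;

            if (bits == 0)
                    return lo;      /* shifting a u64 by 64 is undefined */
            return (lo >> bits) | (hi << (64 - bits));
    }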
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
deleted file mode 100644
index a3fea9fd973e..000000000000
--- a/arch/tile/lib/memcpy_user_64.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Do memcpy(), but trap and return "n" when a load or store faults.
- *
- * Note: this idiom only works when memcpy() compiles to a leaf function.
- * Here a leaf function not only means it does not have calls, but also
- * requires no stack operations (sp, stack frame pointer) and no
- * use of callee-saved registers, else "jrp lr" will be incorrect since
- * unwinding of the stack frame is bypassed. Since memcpy() is not
- * complex, these conditions are satisfied here, but we need to be
- * careful when modifying this file. This is not a clean solution but
- * is the best one so far.
- *
- * Also note that we are capturing "n" from the containing scope here.
- */
-
-#define _ST(p, inst, v) \
- ({ \
- asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext,\"ax\";" \
- "2: { move r0, %2; jrp lr };" \
- ".section __ex_table,\"a\";" \
- ".align 8;" \
- ".quad 1b, 2b;" \
- ".popsection" \
- : "=m" (*(p)) : "r" (v), "r" (n)); \
- })
-
-#define _LD(p, inst) \
- ({ \
- unsigned long __v; \
- asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext,\"ax\";" \
- "2: { move r0, %2; jrp lr };" \
- ".section __ex_table,\"a\";" \
- ".align 8;" \
- ".quad 1b, 2b;" \
- ".popsection" \
- : "=r" (__v) : "m" (*(p)), "r" (n)); \
- __v; \
- })
-
-#define USERCOPY_FUNC raw_copy_to_user
-#define ST1(p, v) _ST((p), st1, (v))
-#define ST2(p, v) _ST((p), st2, (v))
-#define ST4(p, v) _ST((p), st4, (v))
-#define ST8(p, v) _ST((p), st, (v))
-#define LD1 LD
-#define LD2 LD
-#define LD4 LD
-#define LD8 LD
-#include "memcpy_64.c"
-
-#define USERCOPY_FUNC raw_copy_from_user
-#define ST1 ST
-#define ST2 ST
-#define ST4 ST
-#define ST8 ST
-#define LD1(p) _LD((p), ld1u)
-#define LD2(p) _LD((p), ld2u)
-#define LD4(p) _LD((p), ld4u)
-#define LD8(p) _LD((p), ld)
-#include "memcpy_64.c"
-
-#define USERCOPY_FUNC raw_copy_in_user
-#define ST1(p, v) _ST((p), st1, (v))
-#define ST2(p, v) _ST((p), st2, (v))
-#define ST4(p, v) _ST((p), st4, (v))
-#define ST8(p, v) _ST((p), st, (v))
-#define LD1(p) _LD((p), ld1u)
-#define LD2(p) _LD((p), ld2u)
-#define LD4(p) _LD((p), ld4u)
-#define LD8(p) _LD((p), ld)
-#include "memcpy_64.c"
diff --git a/arch/tile/lib/memmove.c b/arch/tile/lib/memmove.c
deleted file mode 100644
index fd615ae6ade7..000000000000
--- a/arch/tile/lib/memmove.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-void *memmove(void *dest, const void *src, size_t n)
-{
- if ((const char *)src >= (char *)dest + n
- || (char *)dest >= (const char *)src + n) {
- /* We found no overlap, so let memcpy do all the heavy
- * lifting (prefetching, etc.)
- */
- return memcpy(dest, src, n);
- }
-
- if (n != 0) {
- const uint8_t *in;
- uint8_t x;
- uint8_t *out;
- int stride;
-
- if (src < dest) {
- /* copy backwards */
- in = (const uint8_t *)src + n - 1;
- out = (uint8_t *)dest + n - 1;
- stride = -1;
- } else {
- /* copy forwards */
- in = (const uint8_t *)src;
- out = (uint8_t *)dest;
- stride = 1;
- }
-
- /* Manually software-pipeline this loop. */
- x = *in;
- in += stride;
-
- while (--n != 0) {
- *out = x;
- out += stride;
- x = *in;
- in += stride;
- }
-
- *out = x;
- }
-
- return dest;
-}
-EXPORT_SYMBOL(memmove);
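
The function relies on two ideas: a provable-non-overlap test that lets
memcpy() take the fast path, and a direction choice that makes
overlapped copies safe. A portable sketch of the same policy, with
integer comparisons standing in for the pointer arithmetic above:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static void *memmove_sketch(void *dest, const void *src, size_t n)
    {
            uintptr_t d = (uintptr_t)dest, s = (uintptr_t)src;
            uint8_t *out = dest;
            const uint8_t *in = src;

            if (s >= d + n || d >= s + n)
                    return memcpy(dest, src, n);    /* no overlap */

            if (d < s) {                            /* copy forwards */
                    size_t i;

                    for (i = 0; i < n; i++)
                            out[i] = in[i];
            } else {                                /* copy backwards */
                    while (n-- > 0)
                            out[n] = in[n];
            }
            return dest;
    }
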
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
deleted file mode 100644
index 2042bfe6595f..000000000000
--- a/arch/tile/lib/memset_32.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <arch/chip.h>
-
-void *memset(void *s, int c, size_t n)
-{
- uint32_t *out32;
- int n32;
- uint32_t v16, v32;
- uint8_t *out8 = s;
- int to_align32;
-
- /* Experimentation shows that a trivial tight loop is a win up until
- * around a size of 20, where writing a word at a time starts to win.
- */
-#define BYTE_CUTOFF 20
-
-#if BYTE_CUTOFF < 3
- /* This must be at least this big, or some code later
- * on doesn't work.
- */
-#error "BYTE_CUTOFF is too small"
-#endif
-
- if (n < BYTE_CUTOFF) {
- /* Strangely, this turns out to be the tightest way to
- * write this loop.
- */
- if (n != 0) {
- do {
- /* Strangely, combining these into one line
- * performs worse.
- */
- *out8 = c;
- out8++;
- } while (--n != 0);
- }
-
- return s;
- }
-
- /* Align 'out8'. We know n >= 3 so this won't write past the end. */
- while (((uintptr_t) out8 & 3) != 0) {
- *out8++ = c;
- --n;
- }
-
- /* Align 'n'. */
- while (n & 3)
- out8[--n] = c;
-
- out32 = (uint32_t *) out8;
- n32 = n >> 2;
-
- /* Tile input byte out to 32 bits. */
- v16 = __insn_intlb(c, c);
- v32 = __insn_intlh(v16, v16);
-
- /* This must be at least 8 or the following loop doesn't work. */
-#define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4)
-
- /* Determine how many words we need to emit before the 'out32'
- * pointer becomes aligned modulo the cache line size.
- */
- to_align32 =
- (-((uintptr_t)out32 >> 2)) & (CACHE_LINE_SIZE_IN_WORDS - 1);
-
- /* Only bother aligning and using wh64 if there is at least
- * one full cache line to process. This check also prevents
- * overrunning the end of the buffer with alignment words.
- */
- if (to_align32 <= n32 - CACHE_LINE_SIZE_IN_WORDS) {
- int lines_left;
-
- /* Align out32 mod the cache line size so we can use wh64. */
- n32 -= to_align32;
- for (; to_align32 != 0; to_align32--) {
- *out32 = v32;
- out32++;
- }
-
- /* Use unsigned divide to turn this into a right shift. */
- lines_left = (unsigned)n32 / CACHE_LINE_SIZE_IN_WORDS;
-
- do {
- /* Only wh64 a few lines at a time, so we don't
- * exceed the maximum number of victim lines.
- */
- int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
- ? lines_left
- : CHIP_MAX_OUTSTANDING_VICTIMS());
- uint32_t *wh = out32;
- int i = x;
- int j;
-
- lines_left -= x;
-
- do {
- __insn_wh64(wh);
- wh += CACHE_LINE_SIZE_IN_WORDS;
- } while (--i);
-
- for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4);
- j != 0; j--) {
- *out32++ = v32;
- *out32++ = v32;
- *out32++ = v32;
- *out32++ = v32;
- }
- } while (lines_left != 0);
-
- /* We processed all full lines above, so only this many
- * words remain to be processed.
- */
- n32 &= CACHE_LINE_SIZE_IN_WORDS - 1;
- }
-
- /* Now handle any leftover values. */
- if (n32 != 0) {
- do {
- *out32 = v32;
- out32++;
- } while (--n32 != 0);
- }
-
- return s;
-}
-EXPORT_SYMBOL(memset);
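
__insn_intlb/__insn_intlh interleave the byte into a halfword and the
halfword into a word; portably, splatting one byte across 32 bits is
just a multiply by 0x01010101. A quick standalone check (standard C,
no tile intrinsics):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t c = 0xab;
            uint32_t v16 = c | (uint32_t)c << 8;         /* like intlb */
            uint32_t v32 = v16 | v16 << 16;              /* like intlh */

            printf("%08x %08x\n", v32, 0x01010101u * c); /* both abababab */
            return 0;
    }
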
diff --git a/arch/tile/lib/memset_64.c b/arch/tile/lib/memset_64.c
deleted file mode 100644
index 03ef69cd73de..000000000000
--- a/arch/tile/lib/memset_64.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <arch/chip.h>
-#include "string-endian.h"
-
-void *memset(void *s, int c, size_t n)
-{
- uint64_t *out64;
- int n64, to_align64;
- uint64_t v64;
- uint8_t *out8 = s;
-
- /* Experimentation shows that a trivial tight loop is a win up until
- * around a size of 20, where writing a word at a time starts to win.
- */
-#define BYTE_CUTOFF 20
-
-#if BYTE_CUTOFF < 7
- /* This must be at least this big, or some code later
- * on doesn't work.
- */
-#error "BYTE_CUTOFF is too small"
-#endif
-
- if (n < BYTE_CUTOFF) {
- /* Strangely, this turns out to be the tightest way to
- * write this loop.
- */
- if (n != 0) {
- do {
- /* Strangely, combining these into one line
- * performs worse.
- */
- *out8 = c;
- out8++;
- } while (--n != 0);
- }
-
- return s;
- }
-
- /* Align 'out8'. We know n >= 7 so this won't write past the end. */
- while (((uintptr_t) out8 & 7) != 0) {
- *out8++ = c;
- --n;
- }
-
- /* Align 'n'. */
- while (n & 7)
- out8[--n] = c;
-
- out64 = (uint64_t *) out8;
- n64 = n >> 3;
-
- /* Tile input byte out to 64 bits. */
- v64 = copy_byte(c);
-
- /* This must be at least 8 or the following loop doesn't work. */
-#define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8)
-
- /* Determine how many doublewords we need to emit before the 'out64'
- * pointer becomes aligned modulo the cache line size.
- */
- to_align64 = (-((uintptr_t)out64 >> 3)) &
- (CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1);
-
- /* Only bother aligning and using wh64 if there is at least
- * one full cache line to process. This check also prevents
- * overrunning the end of the buffer with alignment words.
- */
- if (to_align64 <= n64 - CACHE_LINE_SIZE_IN_DOUBLEWORDS) {
- int lines_left;
-
- /* Align out64 mod the cache line size so we can use wh64. */
- n64 -= to_align64;
- for (; to_align64 != 0; to_align64--) {
- *out64 = v64;
- out64++;
- }
-
- /* Use unsigned divide to turn this into a right shift. */
- lines_left = (unsigned)n64 / CACHE_LINE_SIZE_IN_DOUBLEWORDS;
-
- do {
- /* Only wh64 a few lines at a time, so we don't
- * exceed the maximum number of victim lines.
- */
- int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
- ? lines_left
- : CHIP_MAX_OUTSTANDING_VICTIMS());
- uint64_t *wh = out64;
- int i = x;
- int j;
-
- lines_left -= x;
-
- do {
- __insn_wh64(wh);
- wh += CACHE_LINE_SIZE_IN_DOUBLEWORDS;
- } while (--i);
-
- for (j = x * (CACHE_LINE_SIZE_IN_DOUBLEWORDS / 4);
- j != 0; j--) {
- *out64++ = v64;
- *out64++ = v64;
- *out64++ = v64;
- *out64++ = v64;
- }
- } while (lines_left != 0);
-
- /* We processed all full lines above, so only this many
- * words remain to be processed.
- */
- n64 &= CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1;
- }
-
- /* Now handle any leftover values. */
- if (n64 != 0) {
- do {
- *out64 = v64;
- out64++;
- } while (--n64 != 0);
- }
-
- return s;
-}
-EXPORT_SYMBOL(memset);
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
deleted file mode 100644
index db9333f2447c..000000000000
--- a/arch/tile/lib/spinlock_32.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <asm/processor.h>
-#include <arch/spr_def.h>
-
-#include "spinlock_common.h"
-
-void arch_spin_lock(arch_spinlock_t *lock)
-{
- int my_ticket;
- int iterations = 0;
- int delta;
-
- while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
- delay_backoff(iterations++);
-
- /* Increment the next ticket number, implicitly releasing tns lock. */
- lock->next_ticket = my_ticket + TICKET_QUANTUM;
-
- /* Wait until it's our turn. */
- while ((delta = my_ticket - lock->current_ticket) != 0)
- relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
-}
-EXPORT_SYMBOL(arch_spin_lock);
-
-int arch_spin_trylock(arch_spinlock_t *lock)
-{
- /*
- * Grab a ticket; no need to retry if it's busy, we'll just
- * treat that the same as "locked", since someone else
- * will lock it momentarily anyway.
- */
- int my_ticket = __insn_tns((void *)&lock->next_ticket);
-
- if (my_ticket == lock->current_ticket) {
- /* Not currently locked, so lock it by keeping this ticket. */
- lock->next_ticket = my_ticket + TICKET_QUANTUM;
- /* Success! */
- return 1;
- }
-
- if (!(my_ticket & 1)) {
- /* Release next_ticket. */
- lock->next_ticket = my_ticket;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(arch_spin_trylock);
-
-/*
- * The low byte is always reserved to be the marker for a "tns" operation
- * since the low bit is set to "1" by a tns. The next seven bits are
- * zeroes. The next byte holds the "next" writer value, i.e. the ticket
- * available for the next task that wants to write. The third byte holds
- * the current writer value, i.e. the writer who holds the current ticket.
- * If current == next == 0, there are no interested writers.
- */
-#define WR_NEXT_SHIFT _WR_NEXT_SHIFT
-#define WR_CURR_SHIFT _WR_CURR_SHIFT
-#define WR_WIDTH _WR_WIDTH
-#define WR_MASK ((1 << WR_WIDTH) - 1)
-
-/*
- * The last eight bits hold the active reader count. This has to be
- * zero before a writer can start to write.
- */
-#define RD_COUNT_SHIFT _RD_COUNT_SHIFT
-#define RD_COUNT_WIDTH _RD_COUNT_WIDTH
-#define RD_COUNT_MASK ((1 << RD_COUNT_WIDTH) - 1)
-
-
-/*
- * We can get the read lock if everything but the reader bits (which
- * are in the high part of the word) is zero, i.e. no active or
- * waiting writers, no tns.
- *
- * We guard the tns/store-back with an interrupt critical section to
- * preserve the semantic that the same read lock can be acquired in an
- * interrupt context.
- */
-int arch_read_trylock(arch_rwlock_t *rwlock)
-{
- u32 val;
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
- val = __insn_tns((int *)&rwlock->lock);
- if (likely((val << _RD_COUNT_WIDTH) == 0)) {
- val += 1 << RD_COUNT_SHIFT;
- rwlock->lock = val;
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
- BUG_ON(val == 0); /* we don't expect wraparound */
- return 1;
- }
- if ((val & 1) == 0)
- rwlock->lock = val;
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
- return 0;
-}
-EXPORT_SYMBOL(arch_read_trylock);
-
-/*
- * Spin doing arch_read_trylock() until we acquire the lock.
- * ISSUE: This approach can permanently starve readers. A reader who sees
- * a writer could instead take a ticket lock (just like a writer would),
- * and atomically enter read mode (with 1 reader) when it gets the ticket.
- * This way both readers and writers would always make forward progress
- * in a finite time.
- */
-void arch_read_lock(arch_rwlock_t *rwlock)
-{
- u32 iterations = 0;
- while (unlikely(!arch_read_trylock(rwlock)))
- delay_backoff(iterations++);
-}
-EXPORT_SYMBOL(arch_read_lock);
-
-void arch_read_unlock(arch_rwlock_t *rwlock)
-{
- u32 val, iterations = 0;
-
- mb(); /* guarantee anything modified under the lock is visible */
- for (;;) {
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
- val = __insn_tns((int *)&rwlock->lock);
- if (likely((val & 1) == 0)) {
- rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
- break;
- }
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
- delay_backoff(iterations++);
- }
-}
-EXPORT_SYMBOL(arch_read_unlock);
-
-/*
- * We don't need an interrupt critical section here (unlike for
- * arch_read_lock) since we should never use a bare write lock where
- * it could be interrupted by code that could try to re-acquire it.
- */
-void arch_write_lock(arch_rwlock_t *rwlock)
-{
- /*
- * The trailing underscore on this variable (and curr_ below)
- * reminds us that the high bits are garbage; we mask them out
- * when we compare them.
- */
- u32 my_ticket_;
- u32 iterations = 0;
- u32 val = __insn_tns((int *)&rwlock->lock);
-
- if (likely(val == 0)) {
- rwlock->lock = 1 << _WR_NEXT_SHIFT;
- return;
- }
-
- /*
- * Wait until there are no readers, then bump up the next
- * field and capture the ticket value.
- */
- for (;;) {
- if (!(val & 1)) {
- if ((val >> RD_COUNT_SHIFT) == 0)
- break;
- rwlock->lock = val;
- }
- delay_backoff(iterations++);
- val = __insn_tns((int *)&rwlock->lock);
- }
-
- /* Take out the next ticket and extract my ticket value. */
- rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
- my_ticket_ = val >> WR_NEXT_SHIFT;
-
- /* Wait until the "current" field matches our ticket. */
- for (;;) {
- u32 curr_ = val >> WR_CURR_SHIFT;
- u32 delta = ((my_ticket_ - curr_) & WR_MASK);
- if (likely(delta == 0))
- break;
-
- /* Delay based on how many lock-holders are still out there. */
- relax((256 / CYCLES_PER_RELAX_LOOP) * delta);
-
- /*
- * Get a non-tns value to check; we don't need to tns
- * it ourselves. Since we're not tns'ing, we retry
- * more rapidly to get a valid value.
- */
- while ((val = rwlock->lock) & 1)
- relax(4);
- }
-}
-EXPORT_SYMBOL(arch_write_lock);
-
-int arch_write_trylock(arch_rwlock_t *rwlock)
-{
- u32 val = __insn_tns((int *)&rwlock->lock);
-
- /*
- * If a tns is in progress, or there's a waiting or active locker,
- * or active readers, we can't take the lock, so give up.
- */
- if (unlikely(val != 0)) {
- if (!(val & 1))
- rwlock->lock = val;
- return 0;
- }
-
- /* Set the "next" field to mark it locked. */
- rwlock->lock = 1 << _WR_NEXT_SHIFT;
- return 1;
-}
-EXPORT_SYMBOL(arch_write_trylock);
-
-void arch_write_unlock(arch_rwlock_t *rwlock)
-{
- u32 val, eq, mask;
-
- mb(); /* guarantee anything modified under the lock is visible */
- val = __insn_tns((int *)&rwlock->lock);
- if (likely(val == (1 << _WR_NEXT_SHIFT))) {
- rwlock->lock = 0;
- return;
- }
- while (unlikely(val & 1)) {
- /* Limited backoff since we are the highest-priority task. */
- relax(4);
- val = __insn_tns((int *)&rwlock->lock);
- }
- mask = 1 << WR_CURR_SHIFT;
- val = __insn_addb(val, mask);
- eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
- val = __insn_mz(eq & mask, val);
- rwlock->lock = val;
-}
-EXPORT_SYMBOL(arch_write_unlock);
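
arch_spin_lock() above is a ticket lock built on tns (test-and-set),
since TILEPro has no fetch-and-add: tns grabs exclusive access to
next_ticket (an odd value means someone is mid-update), the store of
my_ticket + TICKET_QUANTUM releases it, and the holder then spins until
current_ticket catches up. A compact C11 sketch of the same scheme,
using a real fetch-and-add where tile has to emulate one:

    #include <stdatomic.h>
    #include <stdio.h>

    struct ticket_lock {
            atomic_uint next;       /* next ticket to hand out */
            atomic_uint current;    /* ticket currently being served */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
            unsigned me = atomic_fetch_add(&l->next, 1);

            while (atomic_load(&l->current) != me)
                    ;       /* a real lock would back off here */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
            atomic_fetch_add(&l->current, 1);
    }

    int main(void)
    {
            struct ticket_lock l = { 0, 0 };

            ticket_lock(&l);
            puts("held");
            ticket_unlock(&l);
            return 0;
    }
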
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
deleted file mode 100644
index de414c22892f..000000000000
--- a/arch/tile/lib/spinlock_64.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <asm/processor.h>
-
-#include "spinlock_common.h"
-
-/*
- * Read the spinlock value without allocating in our cache and without
- * causing an invalidation to another cpu with a copy of the cacheline.
- * This is important when we are spinning waiting for the lock.
- */
-static inline u32 arch_spin_read_noalloc(void *lock)
-{
- return atomic_cmpxchg((atomic_t *)lock, -1, -1);
-}
-
-/*
- * Wait until the high bits (current) match my ticket.
- * If we notice the overflow bit set on entry, we clear it.
- */
-void arch_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket)
-{
- if (unlikely(my_ticket & __ARCH_SPIN_NEXT_OVERFLOW)) {
- __insn_fetchand4(&lock->lock, ~__ARCH_SPIN_NEXT_OVERFLOW);
- my_ticket &= ~__ARCH_SPIN_NEXT_OVERFLOW;
- }
-
- for (;;) {
- u32 val = arch_spin_read_noalloc(lock);
- u32 delta = my_ticket - arch_spin_current(val);
- if (delta == 0)
- return;
- relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
- }
-}
-EXPORT_SYMBOL(arch_spin_lock_slow);
-
-/*
- * Check the lock to see if it is plausible, and try to get it with cmpxchg().
- */
-int arch_spin_trylock(arch_spinlock_t *lock)
-{
- u32 val = arch_spin_read_noalloc(lock);
- if (unlikely(arch_spin_current(val) != arch_spin_next(val)))
- return 0;
- return cmpxchg(&lock->lock, val, (val + 1) & ~__ARCH_SPIN_NEXT_OVERFLOW)
- == val;
-}
-EXPORT_SYMBOL(arch_spin_trylock);
-
-
-/*
- * If the read lock fails due to a writer, we retry periodically
- * until the value is positive and we write our incremented reader count.
- */
-void __read_lock_failed(arch_rwlock_t *rw)
-{
- u32 val;
- int iterations = 0;
- do {
- delay_backoff(iterations++);
- val = __insn_fetchaddgez4(&rw->lock, 1);
- } while (unlikely(arch_write_val_locked(val)));
-}
-EXPORT_SYMBOL(__read_lock_failed);
-
-/*
- * If we failed because there were readers, clear the "writer" bit
- * so we don't block additional readers. Otherwise, there was another
- * writer anyway, so our "fetchor" made no difference. Then wait,
- * issuing periodic fetchor instructions, till we get the lock.
- */
-void __write_lock_failed(arch_rwlock_t *rw, u32 val)
-{
- int iterations = 0;
- do {
- if (!arch_write_val_locked(val))
- val = __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
- delay_backoff(iterations++);
- val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
- } while (val != 0);
-}
-EXPORT_SYMBOL(__write_lock_failed);
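
arch_spin_read_noalloc() above reads the lock word with a
compare-exchange whose old and new values are both -1, so the current
value is observed either way; on tile this goes through the atomic unit
without allocating the line in the local cache, a property that does
not carry over to most other architectures, where cmpxchg takes the
line exclusively. The access pattern itself, rendered in C11:

    #include <stdatomic.h>

    static unsigned read_via_cmpxchg(atomic_uint *lock)
    {
            unsigned expected = (unsigned)-1;

            /* If *lock == -1 this "swaps" -1 for -1 (a no-op);
             * otherwise 'expected' is overwritten with the current
             * value. Either way we learn the value. */
            atomic_compare_exchange_strong(lock, &expected, (unsigned)-1);
            return expected;
    }
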
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h
deleted file mode 100644
index 6ac37509faca..000000000000
--- a/arch/tile/lib/spinlock_common.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- * This file is included into spinlock_32.c or _64.c.
- */
-
-/*
- * The mfspr in relax() is 5 or 6 cycles plus 2 for loop
- * overhead.
- */
-#ifdef __tilegx__
-#define CYCLES_PER_RELAX_LOOP 7
-#else
-#define CYCLES_PER_RELAX_LOOP 8
-#endif
-
-/*
- * Idle the core for CYCLES_PER_RELAX_LOOP * iterations cycles.
- */
-static inline void
-relax(int iterations)
-{
- for (/*above*/; iterations > 0; iterations--)
- __insn_mfspr(SPR_PASS);
- barrier();
-}
-
-/* Perform bounded exponential backoff. */
-static void delay_backoff(int iterations)
-{
- u32 exponent, loops;
-
- /*
- * 2^exponent is how many times we go around the loop,
- * which takes 8 cycles. We want to start with a 16- to 31-cycle
- * loop, so we need to go around at least 2 = 2^1 times; thus we
- * bias the original value up by 1.
- */
- exponent = iterations + 1;
-
- /*
- * Don't allow exponent to exceed 8, so we have 256 loops,
- * or 2,048 (to 4,088) cycles, as our maximum.
- */
- if (exponent > 8)
- exponent = 8;
-
- loops = 1 << exponent;
-
- /* Add a randomness factor so two cpus never get in lock step. */
- loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
- (loops - 1);
-
- relax(loops);
-}
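
delay_backoff() doubles the wait each call, caps it, and adds a per-cpu
random slice so contending cpus fall out of lock step. A hedged
portable sketch, with rand() and a volatile spin standing in for the
crc32-of-stack-pointer hash and the mfspr timing loop:

    #include <stdint.h>
    #include <stdlib.h>

    static void relax_sketch(uint32_t loops)
    {
            volatile uint32_t spin = loops;

            while (spin--)
                    ;       /* burn roughly constant time per iteration */
    }

    static void delay_backoff_sketch(int iterations)
    {
            uint32_t exponent = iterations + 1;
            uint32_t loops;

            if (exponent > 8)
                    exponent = 8;
            loops = 1u << exponent;

            /* Randomness keeps two cpus from backing off in sync. */
            loops += (uint32_t)rand() & (loops - 1);
            relax_sketch(loops);
    }
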
diff --git a/arch/tile/lib/strchr_32.c b/arch/tile/lib/strchr_32.c
deleted file mode 100644
index 841fe6963019..000000000000
--- a/arch/tile/lib/strchr_32.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-char *strchr(const char *s, int c)
-{
- int z, g;
-
- /* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint32_t *p = (const uint32_t *)(s_int & -4);
-
- /* Create four copies of the byte for which we are looking. */
- const uint32_t goal = 0x01010101 * (uint8_t) c;
-
- /* Read the first aligned word, but force bytes before the string to
- * match neither zero nor goal (we make sure the high bit of each
- * byte is 1, and the low 7 bits are all the opposite of the goal
- * byte).
- *
- * Note that this shift count expression works because we know shift
- * counts are taken mod 32.
- */
- const uint32_t before_mask = (1 << (s_int << 3)) - 1;
- uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib(before_mask, 1));
-
- uint32_t zero_matches, goal_matches;
- while (1) {
- /* Look for a terminating '\0'. */
- zero_matches = __insn_seqb(v, 0);
-
- /* Look for the goal byte. */
- goal_matches = __insn_seqb(v, goal);
-
- if (__builtin_expect(zero_matches | goal_matches, 0))
- break;
-
- v = *++p;
- }
-
- z = __insn_ctz(zero_matches);
- g = __insn_ctz(goal_matches);
-
- /* If we found c before '\0' we got a match. Note that if c == '\0'
- * then g == z, and we correctly return the address of the '\0'
- * rather than NULL.
- */
- return (g <= z) ? ((char *)p) + (g >> 3) : NULL;
-}
-EXPORT_SYMBOL(strchr);
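
__insn_seqb(v, x) produces a byte-wise equality mask in one
instruction. Without it, the standard SWAR substitute flags zero bytes
with their high bits; matching a specific byte c is then the same test
applied to v XOR (c splatted across the word). A sketch of that
primitive (the flag layout differs from seqb, but the zero/nonzero
answer is what the loop above branches on):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* High bit of each zero byte gets set; nonzero iff v has a '\0'. */
    static uint32_t has_zero_byte(uint32_t v)
    {
            return (v - 0x01010101u) & ~v & 0x80808080u;
    }

    static uint32_t has_byte(uint32_t v, uint8_t c)
    {
            return has_zero_byte(v ^ (0x01010101u * c));
    }

    int main(void)
    {
            uint32_t w;

            memcpy(&w, "abcd", 4);
            printf("%08x %08x\n", has_zero_byte(w), has_byte(w, 'c'));
            return 0;
    }
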
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
deleted file mode 100644
index fe6e31c06f8d..000000000000
--- a/arch/tile/lib/strchr_64.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include "string-endian.h"
-
-char *strchr(const char *s, int c)
-{
- int z, g;
-
- /* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint64_t *p = (const uint64_t *)(s_int & -8);
-
- /* Create eight copies of the byte for which we are looking. */
- const uint64_t goal = copy_byte(c);
-
- /* Read the first aligned word, but force bytes before the string to
- * match neither zero nor goal (we make sure the high bit of each
- * byte is 1, and the low 7 bits are all the opposite of the goal
- * byte).
- */
- const uint64_t before_mask = MASK(s_int);
- uint64_t v = (*p | before_mask) ^ (goal & __insn_v1shrui(before_mask, 1));
-
- uint64_t zero_matches, goal_matches;
- while (1) {
- /* Look for a terminating '\0'. */
- zero_matches = __insn_v1cmpeqi(v, 0);
-
- /* Look for the goal byte. */
- goal_matches = __insn_v1cmpeq(v, goal);
-
- if (__builtin_expect((zero_matches | goal_matches) != 0, 0))
- break;
-
- v = *++p;
- }
-
- z = CFZ(zero_matches);
- g = CFZ(goal_matches);
-
- /* If we found c before '\0' we got a match. Note that if c == '\0'
- * then g == z, and we correctly return the address of the '\0'
- * rather than NULL.
- */
- return (g <= z) ? ((char *)p) + (g >> 3) : NULL;
-}
-EXPORT_SYMBOL(strchr);
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
deleted file mode 100644
index 2e49cbfe9371..000000000000
--- a/arch/tile/lib/string-endian.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Provide a mask based on the pointer alignment that
- * sets up non-zero bytes before the beginning of the string.
- * The MASK expression works because shift counts are taken mod 64.
- * Also, specify how to count "first" and "last" bits
- * when the bits have been read as a word.
- */
-
-#include <asm/byteorder.h>
-
-#ifdef __LITTLE_ENDIAN
-#define MASK(x) (__insn_shl(1ULL, (x << 3)) - 1)
-#define NULMASK(x) ((2ULL << x) - 1)
-#define CFZ(x) __insn_ctz(x)
-#define REVCZ(x) __insn_clz(x)
-#else
-#define MASK(x) (__insn_shl(-2LL, ((-x << 3) - 1)))
-#define NULMASK(x) (-2LL << (63 - x))
-#define CFZ(x) __insn_clz(x)
-#define REVCZ(x) __insn_ctz(x)
-#endif
-
-/*
- * Create eight copies of the byte in a uint64_t. Byte Shuffle uses
- * the bytes of srcB as the index into the dest vector to select a
- * byte. With all indices of zero, the first byte is copied into all
- * the other bytes.
- */
-static inline uint64_t copy_byte(uint8_t byte)
-{
- return __insn_shufflebytes(byte, 0, 0);
-}
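
MASK() computes, from the string pointer itself, a word whose bytes
below the string start are all ones; OR-ing it into the first aligned
word guarantees those bytes can never look like '\0' (or, after the
shift-right-by-one in strchr, like the goal byte). The little-endian
case in plain C, with the mod-64 shift made explicit:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mask_le(uintptr_t s_int)
    {
            /* k = s_int % 8 bytes precede the string in its word;
             * set all bits of those k low-order bytes. */
            return (1ull << ((s_int << 3) & 63)) - 1;
    }

    int main(void)
    {
            /* pointer 3 bytes into its word: low 3 bytes masked */
            printf("%016llx\n", (unsigned long long)mask_le(3));
            return 0;
    }
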
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c
deleted file mode 100644
index f26f88e11e4a..000000000000
--- a/arch/tile/lib/strlen_32.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-size_t strlen(const char *s)
-{
- /* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint32_t *p = (const uint32_t *)(s_int & -4);
-
- /* Read the first word, but force bytes before the string to be nonzero.
- * This expression works because we know shift counts are taken mod 32.
- */
- uint32_t v = *p | ((1 << (s_int << 3)) - 1);
-
- uint32_t bits;
- while ((bits = __insn_seqb(v, 0)) == 0)
- v = *++p;
-
- return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
-}
-EXPORT_SYMBOL(strlen);
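
Putting the pieces together, the scan reads one aligned word per
iteration, so it never crosses into a page the string does not reach,
and it locates the terminator with a count-trailing-zeros. A portable
little-endian sketch of the whole loop, with the SWAR test standing in
for seqb and a byte walk standing in for ctz; reading the full first
word is safe for the same alignment reason as in the code above:

    #include <stdint.h>
    #include <stdio.h>

    static size_t strlen_sketch(const char *s)
    {
            uintptr_t s_int = (uintptr_t)s;
            const uint32_t *p = (const uint32_t *)(s_int & ~(uintptr_t)3);
            /* Force bytes before the string to be nonzero. */
            uint32_t v = *p | ((1u << ((s_int << 3) & 31)) - 1);

            for (;;) {
                    uint32_t z = (v - 0x01010101u) & ~v & 0x80808080u;

                    if (z) {
                            /* Lowest flagged byte is the first '\0'. */
                            size_t byte = 0;

                            while (!(z & 0x80)) {
                                    z >>= 8;
                                    byte++;
                            }
                            return (const char *)p + byte - s;
                    }
                    v = *++p;
            }
    }

    int main(void)
    {
            printf("%zu\n", strlen_sketch("hello"));    /* 5 */
            return 0;
    }
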
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
deleted file mode 100644
index 9583fc3361fa..000000000000
--- a/arch/tile/lib/strlen_64.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include "string-endian.h"
-
-size_t strlen(const char *s)
-{
- /* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint64_t *p = (const uint64_t *)(s_int & -8);
-
- /* Read and MASK the first word. */
- uint64_t v = *p | MASK(s_int);
-
- uint64_t bits;
- while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
- v = *++p;
-
- return ((const char *)p) + (CFZ(bits) >> 3) - s;
-}
-EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/strnlen_32.c b/arch/tile/lib/strnlen_32.c
deleted file mode 100644
index 1434141d9e01..000000000000
--- a/arch/tile/lib/strnlen_32.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-size_t strnlen(const char *s, size_t count)
-{
- /* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint32_t *p = (const uint32_t *)(s_int & -4);
- size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
- size_t len;
- uint32_t v, bits;
-
- /* Avoid page fault risk by not reading any bytes when count is 0. */
- if (count == 0)
- return 0;
-
- /* Read first word, but force bytes before the string to be nonzero. */
- v = *p | ((1 << ((s_int << 3) & 31)) - 1);
-
- while ((bits = __insn_seqb(v, 0)) == 0) {
- if (bytes_read >= count) {
- /* We read COUNT bytes and didn't find the terminator. */
- return count;
- }
- v = *++p;
- bytes_read += sizeof(v);
- }
-
- len = ((const char *) p) + (__insn_ctz(bits) >> 3) - s;
- return (len < count ? len : count);
-}
-EXPORT_SYMBOL(strnlen);
diff --git a/arch/tile/lib/strnlen_64.c b/arch/tile/lib/strnlen_64.c
deleted file mode 100644
index 2e8de6a5136f..000000000000
--- a/arch/tile/lib/strnlen_64.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include "string-endian.h"
-
-size_t strnlen(const char *s, size_t count)
-{
- /* Get an aligned pointer. */
- const uintptr_t s_int = (uintptr_t) s;
- const uint64_t *p = (const uint64_t *)(s_int & -8);
- size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
- size_t len;
- uint64_t v, bits;
-
- /* Avoid page fault risk by not reading any bytes when count is 0. */
- if (count == 0)
- return 0;
-
- /* Read and MASK the first word. */
- v = *p | MASK(s_int);
-
- while ((bits = __insn_v1cmpeqi(v, 0)) == 0) {
- if (bytes_read >= count) {
- /* We read COUNT bytes and didn't find the terminator. */
- return count;
- }
- v = *++p;
- bytes_read += sizeof(v);
- }
-
- len = ((const char *) p) + (CFZ(bits) >> 3) - s;
- return (len < count ? len : count);
-}
-EXPORT_SYMBOL(strnlen);
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c
deleted file mode 100644
index 030abe3ee4f1..000000000000
--- a/arch/tile/lib/uaccess.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-
-int __range_ok(unsigned long addr, unsigned long size)
-{
- unsigned long limit = current_thread_info()->addr_limit.seg;
- return !((addr < limit && size <= limit - addr) ||
- is_arch_mappable_range(addr, size));
-}
-EXPORT_SYMBOL(__range_ok);
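
The range test is phrased to dodge integer overflow: the naive
addr + size <= limit can wrap and wrongly accept a huge range, while
addr < limit && size <= limit - addr cannot. A small demonstration of
the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t limit = 0x1000000000ull;
            uint64_t addr  = 0xffffffffffffff00ull;
            uint64_t size  = 0x200;

            /* addr + size wraps to 0x100, so the naive test passes. */
            printf("naive: %d\n", addr + size <= limit);
            /* The subtraction form rejects it. */
            printf("safe : %d\n", addr < limit && size <= limit - addr);
            return 0;
    }
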
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
deleted file mode 100644
index db93ad5fae25..000000000000
--- a/arch/tile/lib/usercopy_32.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/cache.h>
-#include <arch/chip.h>
-
-/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
-
-/*
- * clear_user_asm takes the user target address in r0 and the
- * number of bytes to zero in r1.
- * It returns the number of uncopiable bytes (hopefully zero) in r0.
- * Note that we don't use a separate .fixup section here since we fall
- * through into the "fixup" code as the last straight-line bundle anyway.
- */
-STD_ENTRY(clear_user_asm)
- { bz r1, 2f; or r2, r0, r1 }
- andi r2, r2, 3
- bzt r2, .Lclear_aligned_user_asm
-1: { sb r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
- bnzt r1, 1b
-2: { move r0, r1; jrp lr }
- .pushsection __ex_table,"a"
- .align 4
- .word 1b, 2b
- .popsection
-
-.Lclear_aligned_user_asm:
-1: { sw r0, zero; addi r0, r0, 4; addi r1, r1, -4 }
- bnzt r1, 1b
-2: { move r0, r1; jrp lr }
- STD_ENDPROC(clear_user_asm)
- .pushsection __ex_table,"a"
- .align 4
- .word 1b, 2b
- .popsection
-
-/*
- * flush_user_asm takes the user target address in r0 and the
- * number of bytes to flush in r1.
- * It returns the number of unflushable bytes (hopefully zero) in r0.
- */
-STD_ENTRY(flush_user_asm)
- bz r1, 2f
- { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
- { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
- { and r0, r0, r2; and r1, r1, r2 }
- { sub r1, r1, r0 }
-1: { flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
- { addi r0, r0, CHIP_FLUSH_STRIDE(); bnzt r1, 1b }
-2: { move r0, r1; jrp lr }
- STD_ENDPROC(flush_user_asm)
- .pushsection __ex_table,"a"
- .align 4
- .word 1b, 2b
- .popsection
-
-/*
- * finv_user_asm takes the user target address in r0 and the
- * number of bytes to flush-invalidate in r1.
- * It returns the number of not finv'able bytes (hopefully zero) in r0.
- */
-STD_ENTRY(finv_user_asm)
- bz r1, 2f
- { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
- { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
- { and r0, r0, r2; and r1, r1, r2 }
- { sub r1, r1, r0 }
-1: { finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
- { addi r0, r0, CHIP_FINV_STRIDE(); bnzt r1, 1b }
-2: { move r0, r1; jrp lr }
- STD_ENDPROC(finv_user_asm)
- .pushsection __ex_table,"a"
- .align 4
- .word 1b, 2b
- .popsection
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
deleted file mode 100644
index 9322dc551e91..000000000000
--- a/arch/tile/lib/usercopy_64.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/cache.h>
-#include <arch/chip.h>
-
-/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
-
-/*
- * clear_user_asm takes the user target address in r0 and the
- * number of bytes to zero in r1.
- * It returns the number of uncopiable bytes (hopefully zero) in r0.
- * Note that we don't use a separate .fixup section here since we fall
- * through into the "fixup" code as the last straight-line bundle anyway.
- */
-STD_ENTRY(clear_user_asm)
- { beqz r1, 2f; or r2, r0, r1 }
- andi r2, r2, 7
- beqzt r2, .Lclear_aligned_user_asm
-1: { st1 r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
- bnezt r1, 1b
-2: { move r0, r1; jrp lr }
- .pushsection __ex_table,"a"
- .align 8
- .quad 1b, 2b
- .popsection
-
-.Lclear_aligned_user_asm:
-1: { st r0, zero; addi r0, r0, 8; addi r1, r1, -8 }
- bnezt r1, 1b
-2: { move r0, r1; jrp lr }
- STD_ENDPROC(clear_user_asm)
- .pushsection __ex_table,"a"
- .align 8
- .quad 1b, 2b
- .popsection
-
-/*
- * flush_user_asm takes the user target address in r0 and the
- * number of bytes to flush in r1.
- * It returns the number of unflushable bytes (hopefully zero) in r0.
- */
-STD_ENTRY(flush_user_asm)
- beqz r1, 2f
- { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
- { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
- { and r0, r0, r2; and r1, r1, r2 }
- { sub r1, r1, r0 }
-1: { flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
- { addi r0, r0, CHIP_FLUSH_STRIDE(); bnezt r1, 1b }
-2: { move r0, r1; jrp lr }
- STD_ENDPROC(flush_user_asm)
- .pushsection __ex_table,"a"
- .align 8
- .quad 1b, 2b
- .popsection
-
-/*
- * finv_user_asm takes the user target address in r0 and the
- * number of bytes to flush-invalidate in r1.
- * It returns the number of not finv'able bytes (hopefully zero) in r0.
- */
-STD_ENTRY(finv_user_asm)
- beqz r1, 2f
- { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
- { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
- { and r0, r0, r2; and r1, r1, r2 }
- { sub r1, r1, r0 }
-1: { finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
- { addi r0, r0, CHIP_FINV_STRIDE(); bnezt r1, 1b }
-2: { move r0, r1; jrp lr }
- STD_ENDPROC(finv_user_asm)
- .pushsection __ex_table,"a"
- .align 8
- .quad 1b, 2b
- .popsection
diff --git a/arch/tile/mm/Makefile b/arch/tile/mm/Makefile
deleted file mode 100644
index e252aeddc17d..000000000000
--- a/arch/tile/mm/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the linux tile-specific parts of the memory manager.
-#
-
-obj-y := init.o pgtable.o fault.o extable.o elf.o \
- mmap.o homecache.o migrate_$(BITS).o
-
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
deleted file mode 100644
index 889901824400..000000000000
--- a/arch/tile/mm/elf.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/elf.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/sections.h>
-#include <asm/vdso.h>
-#include <arch/sim.h>
-
-/* Notify a running simulator, if any, that an exec just occurred. */
-static void sim_notify_exec(const char *binary_name)
-{
- unsigned char c;
- do {
- c = *binary_name++;
- __insn_mtspr(SPR_SIM_CONTROL,
- (SIM_CONTROL_OS_EXEC
- | (c << _SIM_CONTROL_OPERATOR_BITS)));
-
- } while (c);
-}
-
-static int notify_exec(struct mm_struct *mm)
-{
- int ret = 0;
- char *buf, *path;
- struct vm_area_struct *vma;
- struct file *exe_file;
-
- if (!sim_is_simulator())
- return 1;
-
- buf = (char *) __get_free_page(GFP_KERNEL);
- if (buf == NULL)
- return 0;
-
- exe_file = get_mm_exe_file(mm);
- if (exe_file == NULL)
- goto done_free;
-
- path = file_path(exe_file, buf, PAGE_SIZE);
- if (IS_ERR(path))
- goto done_put;
-
- down_read(&mm->mmap_sem);
- for (vma = current->mm->mmap; ; vma = vma->vm_next) {
- if (vma == NULL) {
- up_read(&mm->mmap_sem);
- goto done_put;
- }
- if (vma->vm_file == exe_file)
- break;
- }
-
- /*
- * Notify simulator of an ET_DYN object so we know the load address.
- * The somewhat cryptic overuse of SIM_CONTROL_DLOPEN allows us
- * to be backward-compatible with older simulator releases.
- */
- if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) {
- char buf[64];
- int i;
-
- snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start);
- for (i = 0; ; ++i) {
- char c = buf[i];
- __insn_mtspr(SPR_SIM_CONTROL,
- (SIM_CONTROL_DLOPEN
- | (c << _SIM_CONTROL_OPERATOR_BITS)));
- if (c == '\0') {
- ret = 1; /* success */
- break;
- }
- }
- }
- up_read(&mm->mmap_sem);
-
- sim_notify_exec(path);
-done_put:
- fput(exe_file);
-done_free:
- free_page((unsigned long)buf);
- return ret;
-}
-
-/* Notify a running simulator, if any, that we loaded an interpreter. */
-static void sim_notify_interp(unsigned long load_addr)
-{
- size_t i;
- for (i = 0; i < sizeof(load_addr); i++) {
- unsigned char c = load_addr >> (i * 8);
- __insn_mtspr(SPR_SIM_CONTROL,
- (SIM_CONTROL_OS_INTERP
- | (c << _SIM_CONTROL_OPERATOR_BITS)));
- }
-}
-
-
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack)
-{
- struct mm_struct *mm = current->mm;
- int retval = 0;
-
- /*
- * Notify the simulator that an exec just occurred.
- * If we can't find the filename of the mapping, just use
- * whatever was passed as the linux_binprm filename.
- */
- if (!notify_exec(mm))
- sim_notify_exec(bprm->filename);
-
- down_write(&mm->mmap_sem);
-
- retval = setup_vdso_pages();
-
-#ifndef __tilegx__
- /*
- * Set up a user-interrupt mapping here; the user can't
- * create one themselves since it is above TASK_SIZE.
- * We make it unwritable by default, so the model for adding
- * interrupt vectors always involves an mprotect.
- */
- if (!retval) {
- unsigned long addr = MEM_USER_INTRPT;
- addr = mmap_region(NULL, addr, INTRPT_SIZE,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0, NULL);
- if (addr > (unsigned long) -PAGE_SIZE)
- retval = (int) addr;
- }
-#endif
-
- up_write(&mm->mmap_sem);
-
- return retval;
-}
-
-
-void elf_plat_init(struct pt_regs *regs, unsigned long load_addr)
-{
- /* Zero all registers. */
- memset(regs, 0, sizeof(*regs));
-
- /* Report the interpreter's load address. */
- sim_notify_interp(load_addr);
-}
diff --git a/arch/tile/mm/extable.c b/arch/tile/mm/extable.c
deleted file mode 100644
index aeaf20c7aaa4..000000000000
--- a/arch/tile/mm/extable.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/extable.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return 1;
- }
-
- return 0;
-}
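
search_exception_tables() (generic kernel code, not shown here)
resolves regs->pc against a sorted table of (faulting instruction,
fixup) pairs, the same table the __ex_table sections in the usercopy
assembly above feed into. A hypothetical sketch of that lookup:

    #include <stdint.h>
    #include <stddef.h>

    struct extable_entry {
            uintptr_t insn;     /* address of the may-fault instruction */
            uintptr_t fixup;    /* where to resume if it faults */
    };

    /* Binary search; the table must be sorted by insn address. */
    static const struct extable_entry *
    search_extable_sketch(const struct extable_entry *tab, size_t n,
                          uintptr_t pc)
    {
            size_t lo = 0, hi = n;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;

                    if (tab[mid].insn == pc)
                            return &tab[mid];
                    if (tab[mid].insn < pc)
                            lo = mid + 1;
                    else
                            hi = mid;
            }
            return NULL;
    }
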
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
deleted file mode 100644
index f58fa06a2214..000000000000
--- a/arch/tile/mm/fault.c
+++ /dev/null
@@ -1,924 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * From i386 code copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-#include <linux/highmem.h>
-#include <linux/extable.h>
-#include <linux/kprobes.h>
-#include <linux/hugetlb.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/kdebug.h>
-
-#include <asm/pgalloc.h>
-#include <asm/sections.h>
-#include <asm/traps.h>
-#include <asm/syscalls.h>
-
-#include <arch/interrupts.h>
-
-static noinline void force_sig_info_fault(const char *type, int si_signo,
- int si_code, unsigned long address,
- int fault_num,
- struct task_struct *tsk,
- struct pt_regs *regs)
-{
- siginfo_t info;
-
- if (unlikely(tsk->pid < 2)) {
- panic("Signal %d (code %d) at %#lx sent to %s!",
- si_signo, si_code & 0xffff, address,
- is_idle_task(tsk) ? "the idle task" : "init");
- }
-
- info.si_signo = si_signo;
- info.si_errno = 0;
- info.si_code = si_code;
- info.si_addr = (void __user *)address;
- info.si_trapno = fault_num;
- trace_unhandled_signal(type, regs, address, si_signo);
- force_sig_info(si_signo, &info, tsk);
-}
-
-#ifndef __tilegx__
-/*
- * Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address.
- */
-SYSCALL_DEFINE1(cmpxchg_badaddr, unsigned long, address)
-{
- struct pt_regs *regs = current_pt_regs();
-
- if (address >= PAGE_OFFSET)
- force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
- address, INT_DTLB_MISS, current, regs);
- else
- force_sig_info_fault("atomic alignment fault", SIGBUS,
- BUS_ADRALN, address,
- INT_UNALIGN_DATA, current, regs);
-
- /*
- * Adjust pc to point at the actual instruction, which is unusual
- * for syscalls normally, but is appropriate when we are claiming
- * that a syscall swint1 caused a page fault or bus error.
- */
- regs->pc -= 8;
-
- /*
- * Mark this as a caller-save interrupt, like a normal page fault,
- * so that when we go through the signal handler path we will
- * properly restore r0, r1, and r2 for the signal handler arguments.
- */
- regs->flags |= PT_FLAGS_CALLER_SAVES;
-
- return 0;
-}
-#endif
-
-static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
-{
- unsigned index = pgd_index(address);
- pgd_t *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
-
- pgd += index;
- pgd_k = init_mm.pgd + index;
-
- if (!pgd_present(*pgd_k))
- return NULL;
-
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
- return NULL;
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- return NULL;
- if (!pmd_present(*pmd))
- set_pmd(pmd, *pmd_k);
- else
- BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
- return pmd_k;
-}
-
-/*
- * Handle a fault on the vmalloc area.
- */
-static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
-{
- pmd_t *pmd_k;
- pte_t *pte_k;
-
- /* Make sure we are in vmalloc area */
- if (!(address >= VMALLOC_START && address < VMALLOC_END))
- return -1;
-
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- */
- pmd_k = vmalloc_sync_one(pgd, address);
- if (!pmd_k)
- return -1;
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- return -1;
- return 0;
-}
-
-/* Wait until this PTE has completed migration. */
-static void wait_for_migration(pte_t *pte)
-{
- if (pte_migrating(*pte)) {
- /*
- * Wait until the migrater fixes up this pte.
- * We scale the loop count by the clock rate so we'll wait for
- * a few seconds here.
- */
- int retries = 0;
- int bound = get_clock_rate();
- while (pte_migrating(*pte)) {
- barrier();
- if (++retries > bound)
- panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
- pte->val, pte_pfn(*pte));
- }
- }
-}
-
-/*
- * It's not generally safe to use "current" to get the page table pointer,
- * since we might be running an oprofile interrupt in the middle of a
- * task switch.
- */
-static pgd_t *get_current_pgd(void)
-{
- HV_Context ctx = hv_inquire_context();
- unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
- struct page *pgd_page = pfn_to_page(pgd_pfn);
- BUG_ON(PageHighMem(pgd_page));
- return (pgd_t *) __va(ctx.page_table);
-}
-
-/*
- * We can receive a page fault from a migrating PTE at any time.
- * Handle it by just waiting until the fault resolves.
- *
- * It's also possible to get a migrating kernel PTE that resolves
- * itself during the downcall from hypervisor to Linux. We just check
- * here to see if the PTE seems valid, and if so we retry it.
- *
- * NOTE! We MUST NOT take any locks for this case. We may be in an
- * interrupt or a critical region, and must do as little as possible.
- * Similarly, we can't use atomic ops here, since we may be handling a
- * fault caused by an atomic op access.
- *
- * If we find a migrating PTE while we're in an NMI context, and we're
- * at a PC that has a registered exception handler, we don't wait,
- * since this thread may (e.g.) have been interrupted while migrating
- * its own stack, which would then cause us to self-deadlock.
- */
-static int handle_migrating_pte(pgd_t *pgd, int fault_num,
- unsigned long address, unsigned long pc,
- int is_kernel_mode, int write)
-{
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t pteval;
-
- if (pgd_addr_invalid(address))
- return 0;
-
- pgd += pgd_index(address);
- pud = pud_offset(pgd, address);
- if (!pud || !pud_present(*pud))
- return 0;
- pmd = pmd_offset(pud, address);
- if (!pmd || !pmd_present(*pmd))
- return 0;
- pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
- pte_offset_kernel(pmd, address);
- pteval = *pte;
- if (pte_migrating(pteval)) {
- if (in_nmi() && search_exception_tables(pc))
- return 0;
- wait_for_migration(pte);
- return 1;
- }
-
- if (!is_kernel_mode || !pte_present(pteval))
- return 0;
- if (fault_num == INT_ITLB_MISS) {
- if (pte_exec(pteval))
- return 1;
- } else if (write) {
- if (pte_write(pteval))
- return 1;
- } else {
- if (pte_read(pteval))
- return 1;
- }
-
- return 0;
-}
-
-/*
- * This routine is responsible for faulting in user pages.
- * It passes the work off to one of the appropriate routines.
- * It returns true if the fault was successfully handled.
- */
-static int handle_page_fault(struct pt_regs *regs,
- int fault_num,
- int is_page_fault,
- unsigned long address,
- int write)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long stack_offset;
- int fault;
- int si_code;
- int is_kernel_mode;
- pgd_t *pgd;
- unsigned int flags;
-
- /* on TILE, protection faults are always writes */
- if (!is_page_fault)
- write = 1;
-
- flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
-
- is_kernel_mode = !user_mode(regs);
-
- tsk = validate_current();
-
- /*
- * Check to see if we might be overwriting the stack, and bail
- * out if so. The page fault code is a relatively likely
- * place to get trapped in an infinite regress, and once we
- * overwrite the whole stack, it becomes very hard to recover.
- */
- stack_offset = stack_pointer & (THREAD_SIZE-1);
- if (stack_offset < THREAD_SIZE / 8) {
- pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
- show_regs(regs);
- pr_alert("Killing current process %d/%s\n",
- tsk->pid, tsk->comm);
- do_group_exit(SIGKILL);
- }
-
- /*
- * Early on, we need to check for migrating PTE entries;
- * see homecache.c. If we find a migrating PTE, we wait until
- * the backing page claims to be done migrating, then we proceed.
- * For kernel PTEs, we rewrite the PTE and return and retry.
- * Otherwise, we treat the fault like a normal "no PTE" fault,
- * rather than trying to patch up the existing PTE.
- */
- pgd = get_current_pgd();
- if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
- is_kernel_mode, write))
- return 1;
-
- si_code = SEGV_MAPERR;
-
- /*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
- * NOTE! We MUST NOT take any locks for this case. We may
- * be in an interrupt or a critical region, and should
- * only copy the information from the master page table,
- * nothing more.
- *
- * This verifies that the fault happens in kernel space
- * and that the fault was not a protection fault.
- */
- if (unlikely(address >= TASK_SIZE &&
- !is_arch_mappable_range(address, 0))) {
- if (is_kernel_mode && is_page_fault &&
- vmalloc_fault(pgd, address) >= 0)
- return 1;
- /*
- * Don't take the mm semaphore here. If we fixup a prefetch
- * fault we could otherwise deadlock.
- */
- mm = NULL; /* happy compiler */
- vma = NULL;
- goto bad_area_nosemaphore;
- }
-
- /*
- * If we're trying to touch user-space addresses, we must
- * be either at PL0, or else with interrupts enabled in the
- * kernel, so either way we can re-enable interrupts here
- * unless we are doing atomic access to user space with
- * interrupts disabled.
- */
- if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
- local_irq_enable();
-
- mm = tsk->mm;
-
- /*
- * If we're in an interrupt, have no user context or are running in an
- * region with pagefaults disabled then we must not take the fault.
- */
- if (pagefault_disabled() || !mm) {
- vma = NULL; /* happy compiler */
- goto bad_area_nosemaphore;
- }
-
- if (!is_kernel_mode)
- flags |= FAULT_FLAG_USER;
-
- /*
- * When running in the kernel we expect faults to occur only to
- * addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
- * we will deadlock attempting to validate the fault against the
- * address space. Luckily the kernel only validly references user
- * space from well defined areas of code, which are listed in the
- * exceptions table.
- *
- * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibility of a deadlock.
- * Attempt to lock the address space, if we cannot we then validate the
- * source. If this is invalid we can skip the address space check,
- * thus avoiding the deadlock.
- */
- if (!down_read_trylock(&mm->mmap_sem)) {
- if (is_kernel_mode &&
- !search_exception_tables(regs->pc)) {
- vma = NULL; /* happy compiler */
- goto bad_area_nosemaphore;
- }
-
-retry:
- down_read(&mm->mmap_sem);
- }
-
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (regs->sp < PAGE_OFFSET) {
- /*
- * accessing the stack below sp is always a bug.
- */
- if (address < regs->sp)
- goto bad_area;
- }
- if (expand_stack(vma, address))
- goto bad_area;
-
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- si_code = SEGV_ACCERR;
- if (fault_num == INT_ITLB_MISS) {
- if (!(vma->vm_flags & VM_EXEC))
- goto bad_area;
- } else if (write) {
-#ifdef TEST_VERIFY_AREA
- if (!is_page_fault && regs->cs == KERNEL_CS)
- pr_err("WP fault at " REGFMT "\n", regs->eip);
-#endif
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- flags |= FAULT_FLAG_WRITE;
- } else {
- if (!is_page_fault || !(vma->vm_flags & VM_READ))
- goto bad_area;
- }
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(vma, address, flags);
-
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
- return 0;
-
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGSEGV)
- goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
- }
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
-
- /*
- * No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
- goto retry;
- }
- }
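
The retry bookkeeping reduced to flag arithmetic: after one VM_FAULT_RETRY the ALLOW_RETRY bit is dropped and TRIED recorded, so a second retry is impossible. Flag values below are illustrative, not the kernel's:

#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x01
#define FLAG_TRIED       0x02

int main(void)
{
        unsigned int flags = FLAG_ALLOW_RETRY;

        /* First pass came back with VM_FAULT_RETRY: */
        if (flags & FLAG_ALLOW_RETRY) {
                flags &= ~FLAG_ALLOW_RETRY;
                flags |= FLAG_TRIED;
        }
        printf("flags now %#x\n", flags);   /* 0x2: retry once, then no more */
        return 0;
}
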
-
-#if CHIP_HAS_TILE_DMA()
- /* If this was a DMA TLB fault, restart the DMA engine. */
- switch (fault_num) {
- case INT_DMATLB_MISS:
- case INT_DMATLB_MISS_DWNCL:
- case INT_DMATLB_ACCESS:
- case INT_DMATLB_ACCESS_DWNCL:
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
- break;
- }
-#endif
-
- up_read(&mm->mmap_sem);
- return 1;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (!is_kernel_mode) {
- /*
- * It's possible to have interrupts off here.
- */
- local_irq_enable();
-
- force_sig_info_fault("segfault", SIGSEGV, si_code, address,
- fault_num, tsk, regs);
- return 0;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs))
- return 0;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-
- bust_spinlocks(1);
-
- /* FIXME: no lookup_address() yet */
-#ifdef SUPPORT_LOOKUP_ADDRESS
- if (fault_num == INT_ITLB_MISS) {
- pte_t *pte = lookup_address(address);
-
- if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
- pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
- current->uid);
- }
-#endif
- if (address < PAGE_SIZE)
- pr_alert("Unable to handle kernel NULL pointer dereference\n");
- else
- pr_alert("Unable to handle kernel paging request\n");
- pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
- address, regs->pc);
-
- show_regs(regs);
-
- if (unlikely(tsk->pid < 2)) {
- panic("Kernel page fault running %s!",
- is_idle_task(tsk) ? "the idle task" : "init");
- }
-
- /*
- * More FIXME: we should probably copy the i386 here and
- * implement a generic die() routine. Not today.
- */
-#ifdef SUPPORT_DIE
- die("Oops", regs);
-#endif
-	bust_spinlocks(0);	/* re-enable console output after the oops dump */
-
- do_group_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (is_kernel_mode)
- goto no_context;
- pagefault_out_of_memory();
- return 0;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /* Kernel mode? Handle exceptions or die */
- if (is_kernel_mode)
- goto no_context;
-
- force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
- fault_num, tsk, regs);
- return 0;
-}
-
-#ifndef __tilegx__
-
-/* We must release ICS before panicking or we won't get anywhere. */
-#define ics_panic(fmt, ...) \
-do { \
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
- panic(fmt, ##__VA_ARGS__); \
-} while (0)
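
The do { ... } while (0) wrapper is the standard trick for making a multi-statement macro act as a single statement, so it stays safe under an unbraced if/else; a generic illustration (two_steps() is a made-up macro):

#include <stdio.h>

/* Without the wrapper, the two statements would break apart under an
 * unbraced if/else; with it, the macro expands to one statement. */
#define two_steps(a, b) do { (a); (b); } while (0)

int main(void)
{
        int hot = 1;

        if (hot)
                two_steps(puts("release resources"), puts("then panic"));
        else
                puts("carry on");
        return 0;
}
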
-
-/*
- * When we take an ITLB or DTLB fault or access violation in the
- * supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_K_x registers,
- * since we may not yet have squirreled the SPR contents away
- * and thus cannot safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
- *
- * Note that this routine is called before homecache_tlb_defer_enter(),
- * which means that we can properly unlock any atomics that might
- * be used there (good), but also means we must be very sensitive
- * to not touch any data structures that might be located in memory
- * that could migrate, as we could be entering the kernel on a dataplane
- * cpu that has been deferring kernel TLB updates. This means, for
- * example, that we can't migrate init_mm or its pgd.
- */
-struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
- unsigned long address,
- unsigned long info)
-{
- unsigned long pc = info & ~1;
- int write = info & 1;
- pgd_t *pgd = get_current_pgd();
-
- /* Retval is 1 at first since we will handle the fault fully. */
- struct intvec_state state = {
- do_page_fault, fault_num, address, write, 1
- };
-
- /* Validate that we are plausibly in the right routine. */
- if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
- (fault_num != INT_DTLB_MISS &&
- fault_num != INT_DTLB_ACCESS)) {
- unsigned long old_pc = regs->pc;
- regs->pc = pc;
- ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
- old_pc, fault_num, write, address);
- }
-
- /* We might be faulting on a vmalloc page, so check that first. */
- if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
- return state;
-
- /*
- * If we faulted with ICS set in sys_cmpxchg, we are providing
- * a user syscall service that should generate a signal on
- * fault. We didn't set up a kernel stack on initial entry to
- * sys_cmpxchg, but instead had one set up by the fault, which
- * (because sys_cmpxchg never releases ICS) came to us via the
- * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
- * still referencing the original user code. We release the
- * atomic lock and rewrite pt_regs so that it appears that we
- * came from user-space directly, and after we finish the
- * fault we'll go back to user space and re-issue the swint.
- * This way the backtrace information is correct if we need to
- * emit a stack dump at any point while handling this.
- *
- * Must match register use in sys_cmpxchg().
- */
- if (pc >= (unsigned long) sys_cmpxchg &&
- pc < (unsigned long) __sys_cmpxchg_end) {
-#ifdef CONFIG_SMP
- /* Don't unlock before we could have locked. */
- if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
- int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
- __atomic_fault_unlock(lock_ptr);
- }
-#endif
- regs->sp = regs->regs[27];
- }
-
- /*
- * We can also fault in the atomic assembly, in which
- * case we use the exception table to do the first-level fixup.
- * We may re-fixup again in the real fault handler if it
- * turns out the faulting address is just bad, and not,
- * for example, migrating.
- */
- else if (pc >= (unsigned long) __start_atomic_asm_code &&
- pc < (unsigned long) __end_atomic_asm_code) {
- const struct exception_table_entry *fixup;
-#ifdef CONFIG_SMP
- /* Unlock the atomic lock. */
- int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
- __atomic_fault_unlock(lock_ptr);
-#endif
- fixup = search_exception_tables(pc);
- if (!fixup)
- ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
- pc, fault_num);
- regs->pc = fixup->fixup;
- regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
- }
-
- /*
- * Now that we have released the atomic lock (if necessary),
- * it's safe to spin if the PTE that caused the fault was migrating.
- */
- if (fault_num == INT_DTLB_ACCESS)
- write = 1;
- if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
- return state;
-
- /* Return zero so that we continue on with normal fault handling. */
- state.retval = 0;
- return state;
-}
-
-#endif /* !__tilegx__ */
-
-/*
- * This routine handles page faults. It determines the address, and the
- * problem, and then passes it to handle_page_fault() for normal DTLB and
- * ITLB issues, and for DMA or SN processor faults when we are in user
- * space. For the latter, if we're in kernel mode, we just save the
- * interrupt away appropriately and return immediately. We can't do
- * page faults for user code while in kernel mode.
- */
-static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
- unsigned long address, unsigned long write)
-{
- int is_page_fault;
-
-#ifdef CONFIG_KPROBES
- /*
- * This is to notify the fault handler of the kprobes. The
- * exception code is redundant as it is also carried in REGS,
- * but we pass it anyhow.
- */
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
- regs->faultnum, SIGSEGV) == NOTIFY_STOP)
- return;
-#endif
-
-#ifdef __tilegx__
- /*
- * We don't need early do_page_fault_ics() support, since unlike
- * Pro we don't need to worry about unlocking the atomic locks.
- * There is only one current case in GX where we touch any memory
- * under ICS other than our own kernel stack, and we handle that
- * here. (If we crash due to trying to touch our own stack,
- * we're in too much trouble for C code to help out anyway.)
- */
- if (write & ~1) {
- unsigned long pc = write & ~1;
- if (pc >= (unsigned long) __start_unalign_asm_code &&
- pc < (unsigned long) __end_unalign_asm_code) {
- struct thread_info *ti = current_thread_info();
- /*
- * Our EX_CONTEXT is still what it was from the
- * initial unalign exception, but now we've faulted
- * on the JIT page. We would like to complete the
- * page fault however is appropriate, and then retry
- * the instruction that caused the unalign exception.
- * Our state has been "corrupted" by setting the low
- * bit in "sp", and stashing r0..r3 in the
- * thread_info area, so we revert all of that, then
- * continue as if this were a normal page fault.
- */
- regs->sp &= ~1UL;
- regs->regs[0] = ti->unalign_jit_tmp[0];
- regs->regs[1] = ti->unalign_jit_tmp[1];
- regs->regs[2] = ti->unalign_jit_tmp[2];
- regs->regs[3] = ti->unalign_jit_tmp[3];
- write &= 1;
- } else {
- pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
- current->comm, current->pid, pc, address);
- show_regs(regs);
- do_group_exit(SIGKILL);
- }
- }
-#else
- /* This case should have been handled by do_page_fault_ics(). */
- BUG_ON(write & ~1);
-#endif
-
-#if CHIP_HAS_TILE_DMA()
- /*
- * If it's a DMA fault, suspend the transfer while we're
- * handling the miss; we'll restart after it's handled. If we
- * don't suspend, it's possible that this process could swap
- * out and back in, and restart the engine since the DMA is
- * still 'running'.
- */
- if (fault_num == INT_DMATLB_MISS ||
- fault_num == INT_DMATLB_ACCESS ||
- fault_num == INT_DMATLB_MISS_DWNCL ||
- fault_num == INT_DMATLB_ACCESS_DWNCL) {
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
- while (__insn_mfspr(SPR_DMA_USER_STATUS) &
- SPR_DMA_STATUS__BUSY_MASK)
- ;
- }
-#endif
-
- /* Validate fault num and decide if this is a first-time page fault. */
- switch (fault_num) {
- case INT_ITLB_MISS:
- case INT_DTLB_MISS:
-#if CHIP_HAS_TILE_DMA()
- case INT_DMATLB_MISS:
- case INT_DMATLB_MISS_DWNCL:
-#endif
- is_page_fault = 1;
- break;
-
- case INT_DTLB_ACCESS:
-#if CHIP_HAS_TILE_DMA()
- case INT_DMATLB_ACCESS:
- case INT_DMATLB_ACCESS_DWNCL:
-#endif
- is_page_fault = 0;
- break;
-
- default:
- panic("Bad fault number %d in do_page_fault", fault_num);
- }
-
-#if CHIP_HAS_TILE_DMA()
- if (!user_mode(regs)) {
- struct async_tlb *async;
- switch (fault_num) {
-#if CHIP_HAS_TILE_DMA()
- case INT_DMATLB_MISS:
- case INT_DMATLB_ACCESS:
- case INT_DMATLB_MISS_DWNCL:
- case INT_DMATLB_ACCESS_DWNCL:
- async = &current->thread.dma_async_tlb;
- break;
-#endif
- default:
- async = NULL;
- }
- if (async) {
-
- /*
- * No vmalloc check required, so we can allow
- * interrupts immediately at this point.
- */
- local_irq_enable();
-
- set_thread_flag(TIF_ASYNC_TLB);
- if (async->fault_num != 0) {
- panic("Second async fault %d; old fault was %d (%#lx/%ld)",
- fault_num, async->fault_num,
- address, write);
- }
- BUG_ON(fault_num == 0);
- async->fault_num = fault_num;
- async->is_fault = is_page_fault;
- async->is_write = write;
- async->address = address;
- return;
- }
- }
-#endif
-
- handle_page_fault(regs, fault_num, is_page_fault, address, write);
-}
-
-void do_page_fault(struct pt_regs *regs, int fault_num,
- unsigned long address, unsigned long write)
-{
- __do_page_fault(regs, fault_num, address, write);
-}
-
-#if CHIP_HAS_TILE_DMA()
-/*
- * This routine effectively re-issues asynchronous page faults
- * when we are returning to user space.
- */
-void do_async_page_fault(struct pt_regs *regs)
-{
- struct async_tlb *async = &current->thread.dma_async_tlb;
-
- /*
- * Clear thread flag early. If we re-interrupt while processing
- * code here, we will reset it and recall this routine before
- * returning to user space.
- */
- clear_thread_flag(TIF_ASYNC_TLB);
-
- if (async->fault_num) {
- /*
- * Clear async->fault_num before calling the page-fault
- * handler so that if we re-interrupt before returning
- * from the function we have somewhere to put the
- * information from the new interrupt.
- */
- int fault_num = async->fault_num;
- async->fault_num = 0;
- handle_page_fault(regs, fault_num, async->is_fault,
- async->address, async->is_write);
- }
-}
-#endif /* CHIP_HAS_TILE_DMA() */
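
The consume-then-call pattern that do_async_page_fault() relies on, in miniature: snapshot the pending slot and clear it before acting, so a nested arrival always finds an empty slot to record into. This is a stand-alone sketch, not kernel code:

#include <stdio.h>

static volatile int pending_fault;      /* 0 means "none pending" */

static void handle(int fault) { printf("handling fault %d\n", fault); }

static void replay_pending(void)
{
        int fault = pending_fault;
        pending_fault = 0;      /* clear first: nested faults need the slot */
        if (fault)
                handle(fault);
}

int main(void)
{
        pending_fault = 17;
        replay_pending();       /* prints: handling fault 17 */
        return 0;
}
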
-
-
-void vmalloc_sync_all(void)
-{
-#ifdef __tilegx__
- /* Currently all L1 kernel pmd's are static and shared. */
- BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
- pgd_index(VMALLOC_START));
-#else
- /*
- * Note that races in the updates of insync and start aren't
- * problematic: insync can only get set bits added, and updates to
- * start are only improving performance (without affecting correctness
- * if undone).
- */
- static DECLARE_BITMAP(insync, PTRS_PER_PGD);
- static unsigned long start = PAGE_OFFSET;
- unsigned long address;
-
- BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
- for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
- if (!test_bit(pgd_index(address), insync)) {
- unsigned long flags;
- struct list_head *pos;
-
- spin_lock_irqsave(&pgd_lock, flags);
- list_for_each(pos, &pgd_list)
- if (!vmalloc_sync_one(list_to_pgd(pos),
- address)) {
- /* Must be at first entry in list. */
- BUG_ON(pos != pgd_list.next);
- break;
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
- if (pos != pgd_list.next)
- set_bit(pgd_index(address), insync);
- }
- if (address == start && test_bit(pgd_index(address), insync))
- start = address + PGDIR_SIZE;
- }
-#endif
-}
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
deleted file mode 100644
index eca28551b22d..000000000000
--- a/arch/tile/mm/highmem.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <asm/homecache.h>
-
-#define kmap_get_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
- (vaddr)), (vaddr))
-
-
-void *kmap(struct page *page)
-{
- void *kva;
- unsigned long flags;
- pte_t *ptep;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- kva = kmap_high(page);
-
- /*
- * Rewrite the PTE under the lock. This ensures that the page
- * is not currently migrating.
- */
- ptep = kmap_get_pte((unsigned long)kva);
- flags = homecache_kpte_lock();
- set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
- homecache_kpte_unlock(flags);
-
- return kva;
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- if (in_interrupt())
- BUG();
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * Describe a single atomic mapping of a page on a given cpu at a
- * given address, and allow it to be linked into a list.
- */
-struct atomic_mapped_page {
- struct list_head list;
- struct page *page;
- int cpu;
- unsigned long va;
-};
-
-static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock);
-static struct list_head amp_list = LIST_HEAD_INIT(amp_list);
-
-/*
- * Combining this structure with a per-cpu declaration lets us give
- * each cpu an atomic_mapped_page structure per type.
- */
-struct kmap_amps {
- struct atomic_mapped_page per_type[KM_TYPE_NR];
-};
-static DEFINE_PER_CPU(struct kmap_amps, amps);
-
-/*
- * Add a page and va, on this cpu, to the list of kmap_atomic pages,
- * and write the new pte to memory. Writing the new PTE under the
- * lock guarantees that it is either on the list before migration starts
- * (if we won the race), or set_pte() sets the migrating bit in the PTE
- * (if we lost the race). And doing it under the lock guarantees
- * that when kmap_atomic_fix_one_pte() comes along, it finds a valid
- * PTE in memory, iff the mapping is still on the amp_list.
- *
- * Finally, doing it under the lock lets us safely examine the page
- * to see if it is immutable or not, for the generic kmap_atomic() case.
- * If we examine it earlier we are exposed to a race where it looks
- * writable earlier, but becomes immutable before we write the PTE.
- */
-static void kmap_atomic_register(struct page *page, int type,
- unsigned long va, pte_t *ptep, pte_t pteval)
-{
- unsigned long flags;
- struct atomic_mapped_page *amp;
-
- flags = homecache_kpte_lock();
- spin_lock(&amp_lock);
-
- /* With interrupts disabled, now fill in the per-cpu info. */
- amp = this_cpu_ptr(&amps.per_type[type]);
- amp->page = page;
- amp->cpu = smp_processor_id();
- amp->va = va;
-
- /* For generic kmap_atomic(), choose the PTE writability now. */
- if (!pte_read(pteval))
- pteval = mk_pte(page, page_to_kpgprot(page));
-
- list_add(&amp->list, &amp_list);
- set_pte(ptep, pteval);
-
- spin_unlock(&amp_lock);
- homecache_kpte_unlock(flags);
-}
-
-/*
- * Remove a page and va, on this cpu, from the list of kmap_atomic pages.
- * Linear-time search, but we count on the lists being short.
- * We don't need to adjust the PTE under the lock (as opposed to the
- * kmap_atomic_register() case), since we're just unconditionally
- * zeroing the PTE after it's off the list.
- */
-static void kmap_atomic_unregister(struct page *page, unsigned long va)
-{
- unsigned long flags;
- struct atomic_mapped_page *amp;
- int cpu = smp_processor_id();
- spin_lock_irqsave(&amp_lock, flags);
- list_for_each_entry(amp, &amp_list, list) {
- if (amp->page == page && amp->cpu == cpu && amp->va == va)
- break;
- }
- BUG_ON(&amp->list == &amp_list);
- list_del(&amp->list);
- spin_unlock_irqrestore(&amp_lock, flags);
-}
-
-/* Helper routine for kmap_atomic_fix_kpte(), below. */
-static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
- int finished)
-{
- pte_t *ptep = kmap_get_pte(amp->va);
- if (!finished) {
- set_pte(ptep, pte_mkmigrate(*ptep));
- flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
- cpumask_of(amp->cpu), NULL, 0);
- } else {
- /*
- * Rewrite a default kernel PTE for this page.
- * We rely on the fact that set_pte() writes the
- * present+migrating bits last.
- */
- pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
- set_pte(ptep, pte);
- }
-}
-
-/*
- * This routine is a helper function for homecache_fix_kpte(); see
- * its comments for more information on the "finished" argument here.
- *
- * Note that we hold the lock while doing the remote flushes, which
- * will stall any unrelated cpus trying to do kmap_atomic operations.
- * We could just update the PTEs under the lock, and save away copies
- * of the structs (or just the va+cpu), then flush them after we
- * release the lock, but it seems easier just to do it all under the lock.
- */
-void kmap_atomic_fix_kpte(struct page *page, int finished)
-{
- struct atomic_mapped_page *amp;
- unsigned long flags;
- spin_lock_irqsave(&amp_lock, flags);
- list_for_each_entry(amp, &amp_list, list) {
- if (amp->page == page)
- kmap_atomic_fix_one_kpte(amp, finished);
- }
- spin_unlock_irqrestore(&amp_lock, flags);
-}
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
- * because the kmap code must perform a global TLB invalidation when
- * the kmap pool wraps.
- *
- * Note that they may be slower than on x86 (etc.) because unlike on
- * those platforms, we do have to take a global lock to map and unmap
- * pages on Tile (see above).
- *
- * When holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
- pte_t *pte;
-
- preempt_disable();
- pagefault_disable();
-
- /* Avoid icache flushes by disallowing atomic executable mappings. */
- BUG_ON(pte_exec(prot));
-
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- pte = kmap_get_pte(vaddr);
- BUG_ON(!pte_none(*pte));
-
- /* Register that this page is mapped atomically on this cpu. */
- kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void *kmap_atomic(struct page *page)
-{
- /* PAGE_NONE is a magic value that tells us to check immutability. */
- return kmap_atomic_prot(page, PAGE_NONE);
-}
-EXPORT_SYMBOL(kmap_atomic);
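
A user-space model of the per-cpu fixmap slot selection in kmap_atomic_prot() above; KM_TYPE_NR, PAGE_SHIFT and the fixmap base are illustrative stand-ins for the kernel constants:

#include <stdio.h>

#define KM_TYPE_NR  8               /* assumed slots per cpu */
#define PAGE_SHIFT  12              /* assumed 4 KiB pages */
#define FIXADDR_TOP 0xfffff000UL    /* pretend top of the fixmap area */

static unsigned long fix_to_virt(unsigned int idx)
{
        return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
        int cpu = 3, type = 2;      /* type: atomic-kmap nesting depth */
        unsigned int idx = type + KM_TYPE_NR * cpu;

        printf("slot %u -> va %#lx\n", idx, fix_to_virt(idx));
        return 0;
}
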
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
- pte_t *pte = kmap_get_pte(vaddr);
- pte_t pteval = *pte;
- int idx, type;
-
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR*smp_processor_id();
-
- /*
- * Force other mappings to Oops if they try to access this pte
- * without first remapping it. Keeping stale mappings around
- * is a bad idea.
- */
- BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
- kmap_atomic_unregister(pte_page(pteval), vaddr);
- kpte_clear_flush(pte, vaddr);
- kmap_atomic_idx_pop();
- } else {
- /* Must be a lowmem page */
- BUG_ON(vaddr < PAGE_OFFSET);
- BUG_ON(vaddr >= (unsigned long)high_memory);
- }
-
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
-
-/*
- * This API is supposed to allow us to map memory without a "struct page".
- * Currently we don't support this, though this may change in the future.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- return kmap_atomic(pfn_to_page(pfn));
-}
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
- return kmap_atomic_prot(pfn_to_page(pfn), prot);
-}
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
deleted file mode 100644
index 4432f31e8479..000000000000
--- a/arch/tile/mm/homecache.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This code maintains the "home" for each page in the system.
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/bootmem.h>
-#include <linux/rmap.h>
-#include <linux/pagemap.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/sysctl.h>
-#include <linux/pagevec.h>
-#include <linux/ptrace.h>
-#include <linux/timex.h>
-#include <linux/cache.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/hugetlb.h>
-
-#include <asm/page.h>
-#include <asm/sections.h>
-#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
-#include <asm/homecache.h>
-
-#include <arch/sim.h>
-
-#include "migrate.h"
-
-
-/*
- * The noallocl2 option suppresses all use of the L2 cache to cache
- * locally from a remote home.
- */
-static int __ro_after_init noallocl2;
-static int __init set_noallocl2(char *str)
-{
- noallocl2 = 1;
- return 0;
-}
-early_param("noallocl2", set_noallocl2);
-
-
-/*
- * Update the irq_stat for cpus that we are going to interrupt
- * with TLB or cache flushes. Also handle removing dataplane cpus
- * from the TLB flush set, and setting dataplane_tlb_state instead.
- */
-static void hv_flush_update(const struct cpumask *cache_cpumask,
- struct cpumask *tlb_cpumask,
- unsigned long tlb_va, unsigned long tlb_length,
- HV_Remote_ASID *asids, int asidcount)
-{
- struct cpumask mask;
- int i, cpu;
-
- cpumask_clear(&mask);
- if (cache_cpumask)
- cpumask_or(&mask, &mask, cache_cpumask);
- if (tlb_cpumask && tlb_length) {
- cpumask_or(&mask, &mask, tlb_cpumask);
- }
-
- for (i = 0; i < asidcount; ++i)
- cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);
-
- /*
- * Don't bother to update atomically; losing a count
- * here is not that critical.
- */
- for_each_cpu(cpu, &mask)
- ++per_cpu(irq_stat, cpu).irq_hv_flush_count;
-}
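
The tile-grid linearization used for asids[] above, in isolation: a tile at mesh coordinates (x, y) maps to linear cpu number y * width + x. The width is an assumed example value:

#include <stdio.h>

int main(void)
{
        int smp_width = 8;      /* assumed mesh width */
        int x = 3, y = 2;

        printf("cpu = %d\n", y * smp_width + x);    /* prints cpu = 19 */
        return 0;
}
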
-
-/*
- * This wrapper function around hv_flush_remote() does several things:
- *
- * - Provides a return value error-checking panic path, since
- * there's never any good reason for hv_flush_remote() to fail.
- * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
- * is the type that Linux wants to pass around anyway.
- * - Canonicalizes zero lengths to NULL cpumasks.
- * - Handles deferring TLB flushes for dataplane tiles.
- * - Tracks remote interrupts in the per-cpu irq_cpustat_t.
- *
- * Note that we have to wait until the cache flush completes before
- * updating the per-cpu last_cache_flush word, since otherwise another
- * concurrent flush can race, conclude the flush has already
- * completed, and start to use the page while it's still dirty
- * remotely (running concurrently with the actual evict, presumably).
- */
-void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
- const struct cpumask *cache_cpumask_orig,
- HV_VirtAddr tlb_va, unsigned long tlb_length,
- unsigned long tlb_pgsize,
- const struct cpumask *tlb_cpumask_orig,
- HV_Remote_ASID *asids, int asidcount)
-{
- int rc;
- struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
- struct cpumask *cache_cpumask, *tlb_cpumask;
- HV_PhysAddr cache_pa;
-
- mb(); /* provided just to simplify "magic hypervisor" mode */
-
- /*
- * Canonicalize and copy the cpumasks.
- */
- if (cache_cpumask_orig && cache_control) {
- cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
- cache_cpumask = &cache_cpumask_copy;
- } else {
- cpumask_clear(&cache_cpumask_copy);
- cache_cpumask = NULL;
- }
- if (cache_cpumask == NULL)
- cache_control = 0;
- if (tlb_cpumask_orig && tlb_length) {
- cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
- tlb_cpumask = &tlb_cpumask_copy;
- } else {
- cpumask_clear(&tlb_cpumask_copy);
- tlb_cpumask = NULL;
- }
-
- hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
- asids, asidcount);
- cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
- rc = hv_flush_remote(cache_pa, cache_control,
- cpumask_bits(cache_cpumask),
- tlb_va, tlb_length, tlb_pgsize,
- cpumask_bits(tlb_cpumask),
- asids, asidcount);
- if (rc == 0)
- return;
-
- pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
- cache_pa, cache_control, cache_cpumask,
- cpumask_pr_args(&cache_cpumask_copy),
- (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
- cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
- panic("Unsafe to continue.");
-}
-
-static void homecache_finv_page_va(void* va, int home)
-{
- int cpu = get_cpu();
- if (home == cpu) {
- finv_buffer_local(va, PAGE_SIZE);
- } else if (home == PAGE_HOME_HASH) {
- finv_buffer_remote(va, PAGE_SIZE, 1);
- } else {
- BUG_ON(home < 0 || home >= NR_CPUS);
- finv_buffer_remote(va, PAGE_SIZE, 0);
- }
- put_cpu();
-}
-
-void homecache_finv_map_page(struct page *page, int home)
-{
- unsigned long flags;
- unsigned long va;
- pte_t *ptep;
- pte_t pte;
-
- if (home == PAGE_HOME_UNCACHED)
- return;
- local_irq_save(flags);
-#ifdef CONFIG_HIGHMEM
- va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
- (KM_TYPE_NR * smp_processor_id()));
-#else
- va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
-#endif
- ptep = virt_to_kpte(va);
- pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
- __set_pte(ptep, pte_set_home(pte, home));
- homecache_finv_page_va((void *)va, home);
- __pte_clear(ptep);
- hv_flush_page(va, PAGE_SIZE);
-#ifdef CONFIG_HIGHMEM
- kmap_atomic_idx_pop();
-#endif
- local_irq_restore(flags);
-}
-
-static void homecache_finv_page_home(struct page *page, int home)
-{
- if (!PageHighMem(page) && home == page_home(page))
- homecache_finv_page_va(page_address(page), home);
- else
- homecache_finv_map_page(page, home);
-}
-
-static inline bool incoherent_home(int home)
-{
- return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
-}
-
-static void homecache_finv_page_internal(struct page *page, int force_map)
-{
- int home = page_home(page);
- if (home == PAGE_HOME_UNCACHED)
- return;
- if (incoherent_home(home)) {
- int cpu;
- for_each_cpu(cpu, &cpu_cacheable_map)
- homecache_finv_map_page(page, cpu);
- } else if (force_map) {
- /* Force if, e.g., the normal mapping is migrating. */
- homecache_finv_map_page(page, home);
- } else {
- homecache_finv_page_home(page, home);
- }
- sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
-}
-
-void homecache_finv_page(struct page *page)
-{
- homecache_finv_page_internal(page, 0);
-}
-
-void homecache_evict(const struct cpumask *mask)
-{
- flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
-}
-
-/* Report the home corresponding to a given PTE. */
-static int pte_to_home(pte_t pte)
-{
- if (hv_pte_get_nc(pte))
- return PAGE_HOME_IMMUTABLE;
- switch (hv_pte_get_mode(pte)) {
- case HV_PTE_MODE_CACHE_TILE_L3:
- return get_remote_cache_cpu(pte);
- case HV_PTE_MODE_CACHE_NO_L3:
- return PAGE_HOME_INCOHERENT;
- case HV_PTE_MODE_UNCACHED:
- return PAGE_HOME_UNCACHED;
- case HV_PTE_MODE_CACHE_HASH_L3:
- return PAGE_HOME_HASH;
- }
- panic("Bad PTE %#llx\n", pte.val);
-}
-
-/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
-pte_t pte_set_home(pte_t pte, int home)
-{
-#if CHIP_HAS_MMIO()
- /* Check for MMIO mappings and pass them through. */
- if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
- return pte;
-#endif
-
-
- /*
- * Only immutable pages get NC mappings. If we have a
- * non-coherent PTE, but the underlying page is not
- * immutable, it's likely the result of a forced
- * caching setting running up against ptrace setting
- * the page to be writable underneath. In this case,
- * just keep the PTE coherent.
- */
- if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
- pte = hv_pte_clear_nc(pte);
- pr_err("non-immutable page incoherently referenced: %#llx\n",
- pte.val);
- }
-
- switch (home) {
-
- case PAGE_HOME_UNCACHED:
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
- break;
-
- case PAGE_HOME_INCOHERENT:
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
- break;
-
- case PAGE_HOME_IMMUTABLE:
- /*
- * We could home this page anywhere, since it's immutable,
- * but by default just home it to follow "hash_default".
- */
- BUG_ON(hv_pte_get_writable(pte));
- if (pte_get_forcecache(pte)) {
- /* Upgrade "force any cpu" to "No L3" for immutable. */
- if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
- && pte_get_anyhome(pte)) {
- pte = hv_pte_set_mode(pte,
- HV_PTE_MODE_CACHE_NO_L3);
- }
- } else
- if (hash_default)
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
- else
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
- pte = hv_pte_set_nc(pte);
- break;
-
- case PAGE_HOME_HASH:
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
- break;
-
- default:
- BUG_ON(home < 0 || home >= NR_CPUS ||
- !cpu_is_valid_lotar(home));
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
- pte = set_remote_cache_cpu(pte, home);
- break;
- }
-
- if (noallocl2)
- pte = hv_pte_set_no_alloc_l2(pte);
-
- /* Simplify "no local and no l3" to "uncached" */
- if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
- hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
- }
-
- /* Checking this case here gives a better panic than from the hv. */
- BUG_ON(hv_pte_get_mode(pte) == 0);
-
- return pte;
-}
-EXPORT_SYMBOL(pte_set_home);
-
-/*
- * The routines in this section are the "static" versions of the normal
- * dynamic homecaching routines; they just set the home cache
- * of a kernel page once, and require a full-chip cache/TLB flush,
- * so they're not suitable for anything but infrequent use.
- */
-
-int page_home(struct page *page)
-{
- if (PageHighMem(page)) {
- return PAGE_HOME_HASH;
- } else {
- unsigned long kva = (unsigned long)page_address(page);
- return pte_to_home(*virt_to_kpte(kva));
- }
-}
-EXPORT_SYMBOL(page_home);
-
-void homecache_change_page_home(struct page *page, int order, int home)
-{
- int i, pages = (1 << order);
- unsigned long kva;
-
- BUG_ON(PageHighMem(page));
- BUG_ON(page_count(page) > 1);
- BUG_ON(page_mapcount(page) != 0);
- kva = (unsigned long) page_address(page);
- flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
- kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
- NULL, 0);
-
- for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
- pte_t *ptep = virt_to_kpte(kva);
- pte_t pteval = *ptep;
- BUG_ON(!pte_present(pteval) || pte_huge(pteval));
- __set_pte(ptep, pte_set_home(pteval, home));
- }
-}
-EXPORT_SYMBOL(homecache_change_page_home);
-
-struct page *homecache_alloc_pages(gfp_t gfp_mask,
- unsigned int order, int home)
-{
- struct page *page;
- BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */
- page = alloc_pages(gfp_mask, order);
- if (page)
- homecache_change_page_home(page, order, home);
- return page;
-}
-EXPORT_SYMBOL(homecache_alloc_pages);
-
-struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order, int home)
-{
- struct page *page;
- BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */
- page = alloc_pages_node(nid, gfp_mask, order);
- if (page)
- homecache_change_page_home(page, order, home);
- return page;
-}
-
-void __homecache_free_pages(struct page *page, unsigned int order)
-{
- if (put_page_testzero(page)) {
- homecache_change_page_home(page, order, PAGE_HOME_HASH);
- if (order == 0) {
- free_unref_page(page);
- } else {
- init_page_count(page);
- __free_pages(page, order);
- }
- }
-}
-EXPORT_SYMBOL(__homecache_free_pages);
-
-void homecache_free_pages(unsigned long addr, unsigned int order)
-{
- if (addr != 0) {
- VM_BUG_ON(!virt_addr_valid((void *)addr));
- __homecache_free_pages(virt_to_page((void *)addr), order);
- }
-}
-EXPORT_SYMBOL(homecache_free_pages);
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
deleted file mode 100644
index 0986d426a413..000000000000
--- a/arch/tile/mm/hugetlbpage.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE Huge TLB Page Support for Kernel.
- * Taken from i386 hugetlb implementation:
- * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/sched/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/sysctl.h>
-#include <linux/mman.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/setup.h>
-
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
-
-/*
- * Provide an additional huge page size (in addition to the regular default
- * huge page size) if no "hugepagesz" arguments are specified.
- * Note that it must be smaller than the default huge page size so
- * that it's possible to allocate them on demand from the buddy allocator.
- * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
- * or not define it at all.
- */
-#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
-
-/* "Extra" page-size multipliers, one per level of the page table. */
-int huge_shift[HUGE_SHIFT_ENTRIES] = {
-#ifdef ADDITIONAL_HUGE_SIZE
-#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
- [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
-#endif
-};
-
-#endif
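
How the extra shift is derived, in user space: with an assumed 4 KiB base page and the 1 MiB additional size, __builtin_ctzl(1M / 4K) yields 8, i.e. 256 base pages per additional huge page:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096UL;           /* assumed base page */
        unsigned long huge_size = 1024 * 1024UL;    /* ADDITIONAL_HUGE_SIZE */
        int shift = __builtin_ctzl(huge_size / page_size);

        printf("extra shift = %d (%lu base pages)\n", shift, 1UL << shift);
        return 0;
}
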
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
-
- addr &= -sz; /* Mask off any low bits in the address. */
-
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
-
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
- if (sz >= PGDIR_SIZE) {
- BUG_ON(sz != PGDIR_SIZE &&
- sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
- return (pte_t *)pud;
- } else {
- pmd_t *pmd = pmd_alloc(mm, pud, addr);
- if (sz >= PMD_SIZE) {
- BUG_ON(sz != PMD_SIZE &&
- sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
- return (pte_t *)pmd;
-		} else {
- if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
- panic("Unexpected page size %#lx\n", sz);
- return pte_alloc_map(mm, pmd, addr);
- }
- }
-#else
- BUG_ON(sz != PMD_SIZE);
- return (pte_t *) pmd_alloc(mm, pud, addr);
-#endif
-}
-
-static pte_t *get_pte(pte_t *base, int index, int level)
-{
- pte_t *ptep = base + index;
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
- if (!pte_present(*ptep) && huge_shift[level] != 0) {
- unsigned long mask = -1UL << huge_shift[level];
- pte_t *super_ptep = base + (index & mask);
- pte_t pte = *super_ptep;
- if (pte_present(pte) && pte_super(pte))
- ptep = super_ptep;
- }
-#endif
- return ptep;
-}
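
The index rounding that get_pte() performs, stand-alone: mask = -1UL << shift clears the low bits, mapping every PTE index to the first entry of its super-page group. The shift value is an assumed example:

#include <stdio.h>

int main(void)
{
        int shift = 3;                      /* assumed huge_shift[level] */
        unsigned long mask = -1UL << shift;

        for (int index = 5; index <= 13; index += 4)
                printf("index %d -> leader %lu\n", index, index & mask);
        return 0;   /* leaders printed: 0, 8, 8 */
}
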
-
-pte_t *huge_pte_offset(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
- pte_t *pte;
-#endif
-
- /* Get the top-level page table entry. */
- pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
-
- /* We don't have four levels. */
- pud = pud_offset(pgd, addr);
-#ifndef __PAGETABLE_PUD_FOLDED
-# error support fourth page table level
-#endif
- if (!pud_present(*pud))
- return NULL;
-
- /* Check for an L0 huge PTE, if we have three levels. */
-#ifndef __PAGETABLE_PMD_FOLDED
- if (pud_huge(*pud))
- return (pte_t *)pud;
-
- pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
- pmd_index(addr), 1);
- if (!pmd_present(*pmd))
- return NULL;
-#else
- pmd = pmd_offset(pud, addr);
-#endif
-
- /* Check for an L1 huge PTE. */
- if (pmd_huge(*pmd))
- return (pte_t *)pmd;
-
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
- /* Check for an L2 huge PTE. */
- pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
- if (!pte_present(*pte))
- return NULL;
- if (pte_super(*pte))
- return pte;
-#endif
-
- return NULL;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
-}
-
-int pud_huge(pud_t pud)
-{
- return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
-}
-
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
-
- info.flags = 0;
- info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- return vm_unmapped_area(&info);
-}
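
What PAGE_MASK & ~huge_page_mask(h) computes for align_mask above: the address bits that must be zero for huge-page alignment, minus the always-zero page-offset bits. 4 KiB pages and 2 MiB huge pages are assumed for the example:

#include <stdio.h>

int main(void)
{
        unsigned long page_mask = ~(4096UL - 1);            /* PAGE_MASK */
        unsigned long huge_mask = ~(2UL * 1024 * 1024 - 1); /* huge_page_mask(h) */

        printf("align_mask = %#lx\n", page_mask & ~huge_mask);  /* 0x1ff000 */
        return 0;
}
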
-
-static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
- unsigned long addr0, unsigned long len,
- unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
- unsigned long addr;
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = PAGE_SIZE;
- info.high_limit = current->mm->mmap_base;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- addr = vm_unmapped_area(&info);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- if (addr & ~PAGE_MASK) {
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
- addr = vm_unmapped_area(&info);
- }
-
- return addr;
-}
-
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
- if (current->mm->get_unmapped_area == arch_get_unmapped_area)
- return hugetlb_get_unmapped_area_bottomup(file, addr, len,
- pgoff, flags);
- else
- return hugetlb_get_unmapped_area_topdown(file, addr, len,
- pgoff, flags);
-}
-#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
-
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
-static __init int __setup_hugepagesz(unsigned long ps)
-{
- int log_ps = __builtin_ctzl(ps);
- int level, base_shift;
-
- if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
- pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
- ps);
- return -EINVAL;
- }
-
- if (ps > 64*1024*1024*1024UL) {
- pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
- ps >> 20);
- return -EINVAL;
- } else if (ps >= PUD_SIZE) {
- static long hv_jpage_size;
- if (hv_jpage_size == 0)
- hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
- if (hv_jpage_size != PUD_SIZE) {
- pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
- PUD_SIZE >> 20, hv_jpage_size);
- return -EINVAL;
- }
- level = 0;
- base_shift = PUD_SHIFT;
- } else if (ps >= PMD_SIZE) {
- level = 1;
- base_shift = PMD_SHIFT;
- } else if (ps > PAGE_SIZE) {
- level = 2;
- base_shift = PAGE_SHIFT;
- } else {
- pr_err("hugepagesz: huge page size %ld too small\n", ps);
- return -EINVAL;
- }
-
- if (log_ps != base_shift) {
- int shift_val = log_ps - base_shift;
- if (huge_shift[level] != 0) {
- int old_shift = base_shift + huge_shift[level];
- pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
- ps >> 20, (1UL << old_shift) >> 20);
- return -EINVAL;
- }
- if (hv_set_pte_super_shift(level, shift_val) != 0) {
- pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
- ps >> 20);
- return -EINVAL;
- }
- printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
- huge_shift[level] = shift_val;
- }
-
- hugetlb_add_hstate(log_ps - PAGE_SHIFT);
-
- return 0;
-}
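
The power-of-four test at the top of __setup_hugepagesz(), stand-alone: the size must have exactly one bit set and an even log2. Sample sizes are illustrative:

#include <stdio.h>

static int is_power_of_four(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);

        return (1UL << log_ps) == ps && (log_ps & 1) == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               is_power_of_four(16UL * 1024 * 1024),   /* 16 MB: 1 */
               is_power_of_four(2UL * 1024 * 1024),    /* 2 MB: 0 (odd log) */
               is_power_of_four(3UL * 1024 * 1024));   /* 3 MB: 0 (not 2^n) */
        return 0;
}
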
-
-static bool saw_hugepagesz;
-
-static __init int setup_hugepagesz(char *opt)
-{
- int rc;
-
- if (!saw_hugepagesz) {
- saw_hugepagesz = true;
- memset(huge_shift, 0, sizeof(huge_shift));
- }
- rc = __setup_hugepagesz(memparse(opt, NULL));
- if (rc)
- hugetlb_bad_size();
- return rc;
-}
-__setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef ADDITIONAL_HUGE_SIZE
-/*
- * Provide an additional huge page size if no "hugepagesz" args are given.
- * In that case, all the cores have properly set up their hv super_shift
- * already, but we need to notify the hugetlb code to enable the
- * new huge page size from the Linux point of view.
- */
-static __init int add_default_hugepagesz(void)
-{
- if (!saw_hugepagesz) {
- BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
- ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
- BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
- ADDITIONAL_HUGE_SIZE);
- BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
- hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
- }
- return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
-
-#endif /* CONFIG_HUGETLB_SUPER_PAGES */
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
deleted file mode 100644
index 5f757e04bcd2..000000000000
--- a/arch/tile/mm/init.c
+++ /dev/null
@@ -1,956 +0,0 @@
-/*
- * Copyright (C) 1995 Linus Torvalds
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/poison.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/efi.h>
-#include <linux/memory_hotplug.h>
-#include <linux/uaccess.h>
-#include <asm/mmu_context.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/dma.h>
-#include <asm/fixmap.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/homecache.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-
-#include "migrate.h"
-
-#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
-
-#ifndef __tilegx__
-unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
-EXPORT_SYMBOL(VMALLOC_RESERVE);
-#endif
-
-/* Create an L2 page table */
-static pte_t * __init alloc_pte(void)
-{
- return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
-}
-
-/*
- * L2 page tables per controller. We allocate these all at once from
- * the bootmem allocator and store them here. This saves on kernel L2
- * page table memory, compared to allocating a full 64K page per L2
- * page table, and also means that in cases where we use huge pages,
- * we are guaranteed to later be able to shatter those huge pages and
- * switch to using these page tables instead, without requiring
- * further allocation. Each l2_ptes[] entry points to the first page
- * table for the first hugepage-size piece of memory on the
- * controller; other page tables are just indexed directly, i.e. the
- * L2 page tables are contiguous in memory for each controller.
- */
-static pte_t *l2_ptes[MAX_NUMNODES];
-static int num_l2_ptes[MAX_NUMNODES];
-
-static void init_prealloc_ptes(int node, int pages)
-{
- BUG_ON(pages & (PTRS_PER_PTE - 1));
- if (pages) {
- num_l2_ptes[node] = pages;
- l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
- HV_PAGE_TABLE_ALIGN, 0);
- }
-}
-
-pte_t *get_prealloc_pte(unsigned long pfn)
-{
- int node = pfn_to_nid(pfn);
- pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
- BUG_ON(node >= MAX_NUMNODES);
- BUG_ON(pfn >= num_l2_ptes[node]);
- return &l2_ptes[node][pfn];
-}
-
-/*
- * What caching do we expect pages from the heap to have when
- * they are allocated during bootup? (Once we've installed the
- * "real" swapper_pg_dir.)
- */
-static int initial_heap_home(void)
-{
- if (hash_default)
- return PAGE_HOME_HASH;
- return smp_processor_id();
-}
-
-/*
- * Place a pointer to an L2 page table in a middle page
- * directory entry.
- */
-static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
-{
- phys_addr_t pa = __pa(page_table);
- unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
- pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
- BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
- pteval = pte_set_home(pteval, initial_heap_home());
- *(pte_t *)pmd = pteval;
- if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
- BUG();
-}
-
-#ifdef __tilegx__
-
-static inline pmd_t *alloc_pmd(void)
-{
- return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
-}
-
-static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
-{
- assign_pte((pmd_t *)pud, (pte_t *)pmd);
-}
-
-#endif /* __tilegx__ */
-
-/* Replace the given pmd with a full PTE table. */
-void __init shatter_pmd(pmd_t *pmd)
-{
- pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
- assign_pte(pmd, pte);
-}
-
-#ifdef __tilegx__
-static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
-{
- pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
- if (pud_none(*pud))
- assign_pmd(pud, alloc_pmd());
- return pmd_offset(pud, va);
-}
-#else
-static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
-{
- return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
-}
-#endif
-
-/*
- * This function initializes a certain range of kernel virtual memory
- * with new bootmem page tables, everywhere page tables are missing in
- * the given range.
- */
-
-/*
- * NOTE: The pagetables are allocated contiguous on the physical space
- * so we can cache the place of the first one and move around without
- * checking the pgd every time.
- */
-static void __init page_table_range_init(unsigned long start,
- unsigned long end, pgd_t *pgd)
-{
- unsigned long vaddr;
- start = round_down(start, PMD_SIZE);
- end = round_up(end, PMD_SIZE);
- for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
- pmd_t *pmd = get_pmd(pgd, vaddr);
- if (pmd_none(*pmd))
- assign_pte(pmd, alloc_pte());
- }
-}
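
round_down()/round_up() as used by page_table_range_init(), with a 2 MiB granule assumed for the example; both require a power-of-two granule:

#include <stdio.h>

#define GRANULE (2UL * 1024 * 1024)     /* assumed PMD_SIZE */

static unsigned long round_down_to(unsigned long x)
{
        return x & ~(GRANULE - 1);
}

static unsigned long round_up_to(unsigned long x)
{
        return (x + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
        printf("%#lx %#lx\n", round_down_to(0x30100000UL),
               round_up_to(0x30100000UL));  /* 0x30000000 0x30200000 */
        return 0;
}
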
-
-
-static int __initdata ktext_hash = 1; /* .text pages */
-static int __initdata kdata_hash = 1; /* .data and .bss pages */
-int __ro_after_init hash_default = 1; /* kernel allocator pages */
-EXPORT_SYMBOL(hash_default);
-int __ro_after_init kstack_hash = 1; /* if no homecaching, use h4h */
-
-/*
- * CPUs to use for striping the pages of kernel data. If hash-for-home
- * is available, this is only relevant if kcache_hash sets up the
- * .data and .bss to be page-homed, and we don't want the default mode
- * of using the full set of kernel cpus for the striping.
- */
-static __initdata struct cpumask kdata_mask;
-static __initdata int kdata_arg_seen;
-
-int __ro_after_init kdata_huge; /* if no homecaching, small pages */
-
-
-/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
-static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
-{
- prot = pte_set_home(prot, home);
- if (home == PAGE_HOME_IMMUTABLE) {
- if (ktext_hash)
- prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
- else
- prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
- }
- return prot;
-}
-
-/*
- * For a given kernel data VA, how should it be cached?
- * We return the complete pgprot_t with caching bits set.
- */
-static pgprot_t __init init_pgprot(ulong address)
-{
- int cpu;
- unsigned long page;
- enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
-
- /* For kdata=huge, everything is just hash-for-home. */
- if (kdata_huge)
- return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-
- /*
- * We map the aliased pages of permanent text so we can
- * update them if necessary, for ftrace, etc.
- */
- if (address < (ulong) _sinittext - CODE_DELTA)
- return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-
- /* We map read-only data non-coherent for performance. */
- if ((address >= (ulong) __start_rodata &&
- address < (ulong) __end_rodata) ||
- address == (ulong) empty_zero_page) {
- return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
- }
-
-#ifndef __tilegx__
- /* Force the atomic_locks[] array page to be hash-for-home. */
- if (address == (ulong) atomic_locks)
- return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-#endif
-
- /*
- * Everything else that isn't data or bss is heap, so mark it
- * with the initial heap home (hash-for-home, or this cpu). This
- * includes any addresses after the loaded image and any address before
- * __init_end, since we already captured the case of text before
- * _sinittext, and __pa(einittext) is approximately __pa(__init_begin).
- *
- * All the LOWMEM pages that we mark this way will get their
- * struct page homecache properly marked later, in set_page_homes().
- * We leave the HIGHMEM pages with a default home of zero; since
- * their free_time is also zero, we don't have to actually do a
- * flush action the first time we use them, either.
- */
- if (address >= (ulong) _end || address < (ulong) __init_end)
- return construct_pgprot(PAGE_KERNEL, initial_heap_home());
-
- /* Use hash-for-home if requested for data/bss. */
- if (kdata_hash)
- return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-
- /*
- * Otherwise we just hand out consecutive cpus. To avoid
- * requiring this function to hold state, we just walk forward from
- * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
- * reach the requested address, while walking cpu home around
- * kdata_mask. This is typically no more than a dozen or so iterations.
- */
- page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
- BUG_ON(address < page || address >= (ulong)_end);
- cpu = cpumask_first(&kdata_mask);
- for (; page < address; page += PAGE_SIZE) {
- if (page >= (ulong)&init_thread_union &&
- page < (ulong)&init_thread_union + THREAD_SIZE)
- continue;
- if (page == (ulong)empty_zero_page)
- continue;
-#ifndef __tilegx__
- if (page == (ulong)atomic_locks)
- continue;
-#endif
- cpu = cpumask_next(cpu, &kdata_mask);
- if (cpu == NR_CPUS)
- cpu = cpumask_first(&kdata_mask);
- }
- return construct_pgprot(PAGE_KERNEL, cpu);
-}
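
The stateless striping walk above, in miniature: each call re-walks from a fixed base, advancing round-robin through the cpu list while reserved pages consume no slot. Page numbers and the reserved set are made up for the example:

#include <stdio.h>

#define NCPUS 4         /* assumed kdata_mask weight */

/* Which cpu does page 'target' land on? Re-walk from page 0 each call,
 * skipping reserved pages (2 and 5 here) that consume no cpu slot. */
static int home_for_page(int target)
{
        int cpu = 0;

        for (int page = 0; page < target; page++) {
                if (page == 2 || page == 5)
                        continue;
                cpu = (cpu + 1) % NCPUS;
        }
        return cpu;
}

int main(void)
{
        for (int p = 0; p < 8; p++)
                printf("page %d -> cpu %d\n", p, home_for_page(p));
        return 0;
}
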
-
-/*
- * This function sets up how we cache the kernel text. If we have
- * hash-for-home support, normally that is used instead (see the
- * kcache_hash boot flag for more information). But if we end up
- * using a page-based caching technique, this option sets up the
- * details of that. In addition, the "ktext=nocache" option may
- * always be used to disable local caching of text pages, if desired.
- */
-
-static int __initdata ktext_arg_seen;
-static int __initdata ktext_small;
-static int __initdata ktext_local;
-static int __initdata ktext_all;
-static int __initdata ktext_nondataplane;
-static int __initdata ktext_nocache;
-static struct cpumask __initdata ktext_mask;
-
-static int __init setup_ktext(char *str)
-{
- if (str == NULL)
- return -EINVAL;
-
- /* If you have a leading "nocache", turn off ktext caching */
- if (strncmp(str, "nocache", 7) == 0) {
- ktext_nocache = 1;
- pr_info("ktext: disabling local caching of kernel text\n");
- str += 7;
- if (*str == ',')
- ++str;
- if (*str == '\0')
- return 0;
- }
-
- ktext_arg_seen = 1;
-
- /* Default setting: use a huge page */
- if (strcmp(str, "huge") == 0)
- pr_info("ktext: using one huge locally cached page\n");
-
- /* Pay TLB cost but get no cache benefit: cache small pages locally */
- else if (strcmp(str, "local") == 0) {
- ktext_small = 1;
- ktext_local = 1;
- pr_info("ktext: using small pages with local caching\n");
- }
-
- /* Neighborhood cache ktext pages on all cpus. */
- else if (strcmp(str, "all") == 0) {
- ktext_small = 1;
- ktext_all = 1;
- pr_info("ktext: using maximal caching neighborhood\n");
- }
-
-	/* Neighborhood-cache ktext pages on the specified mask. */
- else if (cpulist_parse(str, &ktext_mask) == 0) {
- if (cpumask_weight(&ktext_mask) > 1) {
- ktext_small = 1;
- pr_info("ktext: using caching neighborhood %*pbl with small pages\n",
- cpumask_pr_args(&ktext_mask));
- } else {
- pr_info("ktext: caching on cpu %*pbl with one huge page\n",
- cpumask_pr_args(&ktext_mask));
- }
- }
-
- else if (*str)
- return -EINVAL;
-
- return 0;
-}
-
-early_param("ktext", setup_ktext);
-
-
-static inline pgprot_t ktext_set_nocache(pgprot_t prot)
-{
- if (!ktext_nocache)
- prot = hv_pte_set_nc(prot);
- else
- prot = hv_pte_set_no_alloc_l2(prot);
- return prot;
-}
-
-/* Temporary page table we use for staging. */
-static pgd_t pgtables[PTRS_PER_PGD]
- __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
-
-/*
- * This maps the physical memory to kernel virtual address space, a total
- * of max_low_pfn pages, by creating page tables starting from address
- * PAGE_OFFSET.
- *
- * This routine transitions us from using a set of compiled-in large
- * pages to using some more precise caching, including removing access
- * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
- * marking read-only data as locally cacheable, striping the remaining
- * .data and .bss across all the available tiles, and removing access
- * to pages above the top of RAM (thus ensuring a page fault from a bad
- * virtual address rather than a hypervisor shoot down for accessing
- * memory outside the assigned limits).
- */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-{
- unsigned long long irqmask;
- unsigned long address, pfn;
- pmd_t *pmd;
- pte_t *pte;
- int pte_ofs;
- const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
- struct cpumask kstripe_mask;
- int rc, i;
-
- if (ktext_arg_seen && ktext_hash) {
- pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
- ktext_small = 0;
- }
-
- if (kdata_arg_seen && kdata_hash) {
- pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
- }
-
- if (kdata_huge && !hash_default) {
- pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
- kdata_huge = 0;
- }
-
- /*
- * Set up a mask for cpus to use for kernel striping.
- * This is normally all cpus, but minus dataplane cpus if any.
- * If the dataplane covers the whole chip, we stripe over
- * the whole chip too.
- */
- cpumask_copy(&kstripe_mask, cpu_possible_mask);
- if (!kdata_arg_seen)
- kdata_mask = kstripe_mask;
-
- /* Allocate and fill in L2 page tables */
- for (i = 0; i < MAX_NUMNODES; ++i) {
-#ifdef CONFIG_HIGHMEM
- unsigned long end_pfn = node_lowmem_end_pfn[i];
-#else
- unsigned long end_pfn = node_end_pfn[i];
-#endif
- unsigned long end_huge_pfn = 0;
-
- /* Pre-shatter the last huge page to allow per-cpu pages. */
- if (kdata_huge)
- end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);
-
- pfn = node_start_pfn[i];
-
- /* Allocate enough memory to hold L2 page tables for node. */
- init_prealloc_ptes(i, end_pfn - pfn);
-
- address = (unsigned long) pfn_to_kaddr(pfn);
- while (pfn < end_pfn) {
- BUG_ON(address & (HPAGE_SIZE-1));
- pmd = get_pmd(pgtables, address);
- pte = get_prealloc_pte(pfn);
- if (pfn < end_huge_pfn) {
- pgprot_t prot = init_pgprot(address);
- *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
- for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
- pfn++, pte_ofs++, address += PAGE_SIZE)
- pte[pte_ofs] = pfn_pte(pfn, prot);
- } else {
- if (kdata_huge)
- printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
- address);
- for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
- pfn++, pte_ofs++, address += PAGE_SIZE) {
- pgprot_t prot = init_pgprot(address);
- pte[pte_ofs] = pfn_pte(pfn, prot);
- }
- assign_pte(pmd, pte);
- }
- }
- }
-
- /*
- * Set or check ktext_map now that we have cpu_possible_mask
- * and kstripe_mask to work with.
- */
- if (ktext_all)
- cpumask_copy(&ktext_mask, cpu_possible_mask);
- else if (ktext_nondataplane)
- ktext_mask = kstripe_mask;
- else if (!cpumask_empty(&ktext_mask)) {
- /* Sanity-check any mask that was requested */
- struct cpumask bad;
- cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
- cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
- if (!cpumask_empty(&bad))
- pr_info("ktext: not using unavailable cpus %*pbl\n",
- cpumask_pr_args(&bad));
- if (cpumask_empty(&ktext_mask)) {
- pr_warn("ktext: no valid cpus; caching on %d\n",
- smp_processor_id());
- cpumask_copy(&ktext_mask,
- cpumask_of(smp_processor_id()));
- }
- }
-
- address = MEM_SV_START;
- pmd = get_pmd(pgtables, address);
- pfn = 0; /* code starts at PA 0 */
- if (ktext_small) {
- /* Allocate an L2 PTE for the kernel text */
- int cpu = 0;
- pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
- PAGE_HOME_IMMUTABLE);
-
- if (ktext_local) {
- if (ktext_nocache)
- prot = hv_pte_set_mode(prot,
- HV_PTE_MODE_UNCACHED);
- else
- prot = hv_pte_set_mode(prot,
- HV_PTE_MODE_CACHE_NO_L3);
- } else {
- prot = hv_pte_set_mode(prot,
- HV_PTE_MODE_CACHE_TILE_L3);
- cpu = cpumask_first(&ktext_mask);
-
- prot = ktext_set_nocache(prot);
- }
-
- BUG_ON(address != (unsigned long)_text);
- pte = NULL;
- for (; address < (unsigned long)_einittext;
- pfn++, address += PAGE_SIZE) {
- pte_ofs = pte_index(address);
- if (pte_ofs == 0) {
- if (pte)
- assign_pte(pmd++, pte);
- pte = alloc_pte();
- }
- if (!ktext_local) {
- prot = set_remote_cache_cpu(prot, cpu);
- cpu = cpumask_next(cpu, &ktext_mask);
- if (cpu == NR_CPUS)
- cpu = cpumask_first(&ktext_mask);
- }
- pte[pte_ofs] = pfn_pte(pfn, prot);
- }
- if (pte)
- assign_pte(pmd, pte);
- } else {
- pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
- pteval = pte_mkhuge(pteval);
- if (ktext_hash) {
- pteval = hv_pte_set_mode(pteval,
- HV_PTE_MODE_CACHE_HASH_L3);
- pteval = ktext_set_nocache(pteval);
- } else
- if (cpumask_weight(&ktext_mask) == 1) {
- pteval = set_remote_cache_cpu(pteval,
- cpumask_first(&ktext_mask));
- pteval = hv_pte_set_mode(pteval,
- HV_PTE_MODE_CACHE_TILE_L3);
- pteval = ktext_set_nocache(pteval);
- } else if (ktext_nocache)
- pteval = hv_pte_set_mode(pteval,
- HV_PTE_MODE_UNCACHED);
- else
- pteval = hv_pte_set_mode(pteval,
- HV_PTE_MODE_CACHE_NO_L3);
- for (; address < (unsigned long)_einittext;
- pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
- *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
- }
-
- /* Set swapper_pgprot here so it is flushed to memory right away. */
- swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);
-
- /*
- * Since we may be changing the caching of the stack and page
- * table itself, we invoke an assembly helper to do the
- * following steps:
- *
- * - flush the cache so we start with an empty slate
- * - install pgtables[] as the real page table
- * - flush the TLB so the new page table takes effect
- */
- irqmask = interrupt_mask_save_mask();
- interrupt_mask_set_mask(-1ULL);
- rc = flush_and_install_context(__pa(pgtables),
- init_pgprot((unsigned long)pgtables),
- __this_cpu_read(current_asid),
- cpumask_bits(my_cpu_mask));
- interrupt_mask_restore_mask(irqmask);
- BUG_ON(rc != 0);
-
- /* Copy the page table back to the normal swapper_pg_dir. */
- memcpy(pgd_base, pgtables, sizeof(pgtables));
- __install_page_table(pgd_base, __this_cpu_read(current_asid),
- swapper_pgprot);
-
- /*
- * We just read swapper_pgprot and thus brought it into the cache,
- * with its new home & caching mode. When we start the other CPUs,
- * they're going to reference swapper_pgprot via their initial fake
- * VA-is-PA mappings, which cache everything locally. At that
- * time, if it's in our cache with a conflicting home, the
- * simulator's coherence checker will complain. So, flush it out
- * of our cache; we're not going to ever use it again anyway.
- */
- __insn_finv(&swapper_pgprot);
-}
-
-/*
- * devmem_is_allowed() checks whether /dev/mem access to a certain
- * address is valid. The argument is a physical page number.
- *
- * On Tile, the only valid things for which we can just hand out unchecked
- * PTEs are the kernel code and data. Anything else might change its
- * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
- * Note that init_thread_union is released to heap soon after boot,
- * so we include it in the init data.
- *
- * For TILE-Gx, we might want to consider allowing access to PA
- * regions corresponding to PCI space, etc.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
- return pagenr < kaddr_to_pfn(_end) &&
- !(pagenr >= kaddr_to_pfn(&init_thread_union) ||
- pagenr < kaddr_to_pfn(__init_end)) &&
- !(pagenr >= kaddr_to_pfn(_sinittext) ||
- pagenr <= kaddr_to_pfn(_einittext-1));
-}
-
-#ifdef CONFIG_HIGHMEM
-static void __init permanent_kmaps_init(pgd_t *pgd_base)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long vaddr;
-
- vaddr = PKMAP_BASE;
- page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-
- pgd = swapper_pg_dir + pgd_index(vaddr);
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
-}
-#endif /* CONFIG_HIGHMEM */
-
-
-#ifndef CONFIG_64BIT
-static void __init init_free_pfn_range(unsigned long start, unsigned long end)
-{
- unsigned long pfn;
- struct page *page = pfn_to_page(start);
-
- for (pfn = start; pfn < end; ) {
- /* Optimize by freeing pages in large batches */
- int order = __ffs(pfn);
- int count, i;
- struct page *p;
-
- if (order >= MAX_ORDER)
- order = MAX_ORDER-1;
- count = 1 << order;
- while (pfn + count > end) {
- count >>= 1;
- --order;
- }
- for (p = page, i = 0; i < count; ++i, ++p) {
- __ClearPageReserved(p);
- /*
- * Hacky direct set to avoid unnecessary
- * lock take/release for EVERY page here.
- */
- p->_refcount.counter = 0;
- p->_mapcount.counter = -1;
- }
- init_page_count(page);
- __free_pages(page, order);
- adjust_managed_page_count(page, count);
-
- page += count;
- pfn += count;
- }
-}
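The batch size above is the largest naturally aligned power-of-two block starting at the current pfn (__ffs gives the pfn's alignment), clipped so it neither exceeds MAX_ORDER-1 nor runs past end. A worked example:

	start = 0x1234, end = 0x1300:
	  pfn 0x1234: __ffs = 2 -> free   4 pages -> 0x1238
	  pfn 0x1238: __ffs = 3 -> free   8 pages -> 0x1240
	  pfn 0x1240: __ffs = 6 -> free  64 pages -> 0x1280
	  pfn 0x1280: __ffs = 7 -> free 128 pages -> 0x1300 == end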
-
-static void __init set_non_bootmem_pages_init(void)
-{
- struct zone *z;
- for_each_zone(z) {
- unsigned long start, end;
- int nid = z->zone_pgdat->node_id;
-#ifdef CONFIG_HIGHMEM
- int idx = zone_idx(z);
-#endif
-
- start = z->zone_start_pfn;
- end = start + z->spanned_pages;
- start = max(start, node_free_pfn[nid]);
- start = max(start, max_low_pfn);
-
-#ifdef CONFIG_HIGHMEM
- if (idx == ZONE_HIGHMEM)
- totalhigh_pages += z->spanned_pages;
-#endif
- if (kdata_huge) {
- unsigned long percpu_pfn = node_percpu_pfn[nid];
- if (start < percpu_pfn && end > percpu_pfn)
- end = percpu_pfn;
- }
-#ifdef CONFIG_PCI
- if (start <= pci_reserve_start_pfn &&
- end > pci_reserve_start_pfn) {
- if (end > pci_reserve_end_pfn)
- init_free_pfn_range(pci_reserve_end_pfn, end);
- end = pci_reserve_start_pfn;
- }
-#endif
- init_free_pfn_range(start, end);
- }
-}
-#endif
-
-/*
- * paging_init() sets up the page tables - note that all of lowmem is
- * already mapped by head.S.
- */
-void __init paging_init(void)
-{
-#ifdef __tilegx__
- pud_t *pud;
-#endif
- pgd_t *pgd_base = swapper_pg_dir;
-
- kernel_physical_mapping_init(pgd_base);
-
- /* Fixed mappings, only the page table structure has to be created. */
- page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
- FIXADDR_TOP, pgd_base);
-
-#ifdef CONFIG_HIGHMEM
- permanent_kmaps_init(pgd_base);
-#endif
-
-#ifdef __tilegx__
- /*
- * Since GX allocates just one pmd_t array worth of vmalloc space,
- * we go ahead and allocate it statically here, then share it
- * globally. As a result we don't have to worry about any task
- * changing init_mm once we get up and running, and there's no
- * need for e.g. vmalloc_sync_all().
- */
- BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
- pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
- assign_pmd(pud, alloc_pmd());
-#endif
-}
-
-
-/*
- * Walk the kernel page tables and derive the page_home() from
- * the PTEs, so that set_pte() can properly validate the caching
- * of all PTEs it sees.
- */
-void __init set_page_homes(void)
-{
-}
-
-static void __init set_max_mapnr_init(void)
-{
-#ifdef CONFIG_FLATMEM
- max_mapnr = max_low_pfn;
-#endif
-}
-
-void __init mem_init(void)
-{
- int i;
-#ifndef __tilegx__
- void *last;
-#endif
-
-#ifdef CONFIG_FLATMEM
- BUG_ON(!mem_map);
-#endif
-
-#ifdef CONFIG_HIGHMEM
- /* check that fixmap and pkmap do not overlap */
- if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
- pr_err("fixmap and kmap areas overlap - this will crash\n");
- pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
- BUG();
- }
-#endif
-
- set_max_mapnr_init();
-
- /* this will put all bootmem onto the freelists */
- free_all_bootmem();
-
-#ifndef CONFIG_64BIT
- /* count all remaining LOWMEM and give all HIGHMEM to page allocator */
- set_non_bootmem_pages_init();
-#endif
-
- mem_init_print_info(NULL);
-
- /*
- * In debug mode, dump some interesting memory mappings.
- */
-#ifdef CONFIG_HIGHMEM
- printk(KERN_DEBUG " KMAP %#lx - %#lx\n",
- FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
- printk(KERN_DEBUG " PKMAP %#lx - %#lx\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
-#endif
- printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n",
- _VMALLOC_START, _VMALLOC_END - 1);
-#ifdef __tilegx__
- for (i = MAX_NUMNODES-1; i >= 0; --i) {
- struct pglist_data *node = &node_data[i];
- if (node->node_present_pages) {
- unsigned long start = (unsigned long)
- pfn_to_kaddr(node->node_start_pfn);
- unsigned long end = start +
- (node->node_present_pages << PAGE_SHIFT);
- printk(KERN_DEBUG " MEM%d %#lx - %#lx\n",
- i, start, end - 1);
- }
- }
-#else
- last = high_memory;
- for (i = MAX_NUMNODES-1; i >= 0; --i) {
- if ((unsigned long)vbase_map[i] != -1UL) {
- printk(KERN_DEBUG " LOWMEM%d %#lx - %#lx\n",
- i, (unsigned long) (vbase_map[i]),
- (unsigned long) (last-1));
- last = vbase_map[i];
- }
- }
-#endif
-
-#ifndef __tilegx__
- /*
- * Convert from using one lock for all atomic operations to
- * one per cpu.
- */
- __init_atomic_per_cpu();
-#endif
-}
-
-struct kmem_cache *pgd_cache;
-
-void __init pgtable_cache_init(void)
-{
- pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
- if (!pgd_cache)
- panic("pgtable_cache_init(): Cannot create pgd cache");
-}
-
-static long __ro_after_init initfree = 1;
-static bool __ro_after_init set_initfree_done;
-
-/* Select whether to free (1) or mark unusable (0) the __init pages. */
-static int __init set_initfree(char *str)
-{
- long val;
- if (kstrtol(str, 0, &val) == 0) {
- set_initfree_done = true;
- initfree = val;
- pr_info("initfree: %s free init pages\n",
- initfree ? "will" : "won't");
- }
- return 1;
-}
-__setup("initfree=", set_initfree);
-
-static void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr = (unsigned long) begin;
-
- /* Prefer user request first */
- if (!set_initfree_done) {
- if (debug_pagealloc_enabled())
- initfree = 0;
- }
- if (kdata_huge && !initfree) {
- pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
- initfree = 1;
- }
- end = (end + PAGE_SIZE - 1) & PAGE_MASK;
- local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- /*
- * Note we just reset the home here directly in the
- * page table. We know this is safe because our caller
- * just flushed the caches on all the other cpus,
- * and they won't be touching any of these pages.
- */
- int pfn = kaddr_to_pfn((void *)addr);
- struct page *page = pfn_to_page(pfn);
- pte_t *ptep = virt_to_kpte(addr);
- if (!initfree) {
- /*
- * If debugging page accesses then do not free
- * this memory but mark them not present - any
- * buggy init-section access will create a
- * kernel page fault:
- */
- pte_clear(&init_mm, addr, ptep);
- continue;
- }
- if (pte_huge(*ptep))
- BUG_ON(!kdata_huge);
- else
- set_pte_at(&init_mm, addr, ptep,
- pfn_pte(pfn, PAGE_KERNEL));
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_reserved_page(page);
- }
- pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
-void free_initmem(void)
-{
- const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;
-
- /*
- * Evict the cache on all cores to avoid incoherence.
- * We are guaranteed that no one will touch the init pages any more.
- */
- homecache_evict(&cpu_cacheable_map);
-
- /* Free the data pages that we won't use again after init. */
- free_init_pages("unused kernel data",
- (unsigned long)__init_begin,
- (unsigned long)__init_end);
-
- /*
- * Free the pages mapped from 0xc0000000 that correspond to code
- * pages from MEM_SV_START that we won't use again after init.
- */
- free_init_pages("unused kernel text",
- (unsigned long)_sinittext - text_delta,
- (unsigned long)_einittext - text_delta);
- /* Do a global TLB flush so everyone sees the changes. */
- flush_tlb_all();
-}
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h
deleted file mode 100644
index 91683d97917e..000000000000
--- a/arch/tile/mm/migrate.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Structure definitions for migration, exposed here for use by
- * arch/tile/kernel/asm-offsets.c.
- */
-
-#ifndef MM_MIGRATE_H
-#define MM_MIGRATE_H
-
-#include <linux/cpumask.h>
-#include <hv/hypervisor.h>
-
-/*
- * This function is used as a helper when setting up the initial
- * page table (swapper_pg_dir).
- *
- * You must mask ALL interrupts prior to invoking this code, since
- * you can't legally touch the stack during the cache flush.
- */
-extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
- HV_ASID asid,
- const unsigned long *cpumask);
-
-/*
- * This function supports migration as a "helper" as follows:
- *
- * - Set the stack PTE itself to "migrating".
- * - Do a global TLB flush for (va,length) and the specified ASIDs.
- * - Do a cache-evict on all necessary cpus.
- * - Write the new stack PTE.
- *
- * Note that any non-NULL pointers must not point to the page that
- * is handled by the stack_pte itself.
- *
- * You must mask ALL interrupts prior to invoking this code, since
- * you can't legally touch the stack during the cache flush.
- */
-extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
- size_t length, pte_t *stack_ptep,
- const struct cpumask *cache_cpumask,
- const struct cpumask *tlb_cpumask,
- HV_Remote_ASID *asids,
- int asidcount);
-
-#endif /* MM_MIGRATE_H */
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
deleted file mode 100644
index 772085491bf9..000000000000
--- a/arch/tile/mm/migrate_32.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This routine is a helper for migrating the home of a set of pages to
- * a new cpu. See the documentation in homecache.c for more information.
- */
-
-#include <linux/linkage.h>
-#include <linux/threads.h>
-#include <asm/page.h>
-#include <asm/thread_info.h>
-#include <asm/types.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-
- .text
-
-/*
- * First, some definitions that apply to all the code in the file.
- */
-
-/* Locals (caller-save) */
-#define r_tmp r10
-#define r_save_sp r11
-
-/* What we save where in the stack frame; must include all callee-saves. */
-#define FRAME_SP 4
-#define FRAME_R30 8
-#define FRAME_R31 12
-#define FRAME_R32 16
-#define FRAME_R33 20
-#define FRAME_R34 24
-#define FRAME_SIZE 28
-
-
-
-
-/*
- * On entry:
- *
- * r0 low word of the new context PA to install (moved to r_context_lo)
- * r1 high word of the new context PA to install (moved to r_context_hi)
- * r2 low word of PTE to use for context access (moved to r_access_lo)
- * r3 high word of PTE to use for context access (moved to r_access_lo)
- * r4 ASID to use for new context (moved to r_asid)
- * r5 pointer to cpumask with just this cpu set in it (r_my_cpumask)
- */
-
-/* Arguments (caller-save) */
-#define r_context_lo_in r0
-#define r_context_hi_in r1
-#define r_access_lo_in r2
-#define r_access_hi_in r3
-#define r_asid_in r4
-#define r_my_cpumask r5
-
-/* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_context_lo r30
-#define r_context_hi r31
-#define r_access_lo r32
-#define r_access_hi r33
-#define r_asid r34
-
-STD_ENTRY(flush_and_install_context)
- /*
- * Create a stack frame; we can't touch it once we flush the
- * cache until we install the new page table and flush the TLB.
- */
- {
- move r_save_sp, sp
- sw sp, lr
- addi sp, sp, -FRAME_SIZE
- }
- addi r_tmp, sp, FRAME_SP
- {
- sw r_tmp, r_save_sp
- addi r_tmp, sp, FRAME_R30
- }
- {
- sw r_tmp, r30
- addi r_tmp, sp, FRAME_R31
- }
- {
- sw r_tmp, r31
- addi r_tmp, sp, FRAME_R32
- }
- {
- sw r_tmp, r32
- addi r_tmp, sp, FRAME_R33
- }
- {
- sw r_tmp, r33
- addi r_tmp, sp, FRAME_R34
- }
- sw r_tmp, r34
-
- /* Move some arguments to callee-save registers. */
- {
- move r_context_lo, r_context_lo_in
- move r_context_hi, r_context_hi_in
- }
- {
- move r_access_lo, r_access_lo_in
- move r_access_hi, r_access_hi_in
- }
- move r_asid, r_asid_in
-
- /* First, flush our L2 cache. */
- {
- move r0, zero /* cache_pa */
- move r1, zero
- }
- {
- auli r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */
- move r3, r_my_cpumask /* cache_cpumask */
- }
- {
- move r4, zero /* tlb_va */
- move r5, zero /* tlb_length */
- }
- {
- move r6, zero /* tlb_pgsize */
- move r7, zero /* tlb_cpumask */
- }
- {
- move r8, zero /* asids */
- move r9, zero /* asidcount */
- }
- jal _hv_flush_remote
- bnz r0, .Ldone
-
- /* Now install the new page table. */
- {
- move r0, r_context_lo
- move r1, r_context_hi
- }
- {
- move r2, r_access_lo
- move r3, r_access_hi
- }
- {
- move r4, r_asid
- moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
- }
- jal _hv_install_context
- bnz r0, .Ldone
-
- /* Finally, flush the TLB. */
- {
- movei r0, 0 /* preserve_global */
- jal hv_flush_all
- }
-
-.Ldone:
- /* Restore the callee-saved registers and return. */
- addli lr, sp, FRAME_SIZE
- {
- lw lr, lr
- addli r_tmp, sp, FRAME_R30
- }
- {
- lw r30, r_tmp
- addli r_tmp, sp, FRAME_R31
- }
- {
- lw r31, r_tmp
- addli r_tmp, sp, FRAME_R32
- }
- {
- lw r32, r_tmp
- addli r_tmp, sp, FRAME_R33
- }
- {
- lw r33, r_tmp
- addli r_tmp, sp, FRAME_R34
- }
- {
- lw r34, r_tmp
- addi sp, sp, FRAME_SIZE
- }
- jrp lr
- STD_ENDPROC(flush_and_install_context)
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
deleted file mode 100644
index a49eee38f872..000000000000
--- a/arch/tile/mm/migrate_64.S
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This routine is a helper for migrating the home of a set of pages to
- * a new cpu. See the documentation in homecache.c for more information.
- */
-
-#include <linux/linkage.h>
-#include <linux/threads.h>
-#include <asm/page.h>
-#include <asm/thread_info.h>
-#include <asm/types.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-
- .text
-
-/*
- * First, some definitions that apply to all the code in the file.
- */
-
-/* Locals (caller-save) */
-#define r_tmp r10
-#define r_save_sp r11
-
-/* What we save where in the stack frame; must include all callee-saves. */
-#define FRAME_SP 8
-#define FRAME_R30 16
-#define FRAME_R31 24
-#define FRAME_R32 32
-#define FRAME_SIZE 40
-
-
-
-
-/*
- * On entry:
- *
- * r0 the new context PA to install (moved to r_context)
- * r1 PTE to use for context access (moved to r_access)
- * r2 ASID to use for new context (moved to r_asid)
- * r3 pointer to cpumask with just this cpu set in it (r_my_cpumask)
- */
-
-/* Arguments (caller-save) */
-#define r_context_in r0
-#define r_access_in r1
-#define r_asid_in r2
-#define r_my_cpumask r3
-
-/* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_context r30
-#define r_access r31
-#define r_asid r32
-
-/*
- * Caller-save locals and frame constants are the same as
- * for homecache_migrate_stack_and_flush.
- */
-
-STD_ENTRY(flush_and_install_context)
- /*
- * Create a stack frame; we can't touch it once we flush the
- * cache until we install the new page table and flush the TLB.
- */
- {
- move r_save_sp, sp
- st sp, lr
- addi sp, sp, -FRAME_SIZE
- }
- addi r_tmp, sp, FRAME_SP
- {
- st r_tmp, r_save_sp
- addi r_tmp, sp, FRAME_R30
- }
- {
- st r_tmp, r30
- addi r_tmp, sp, FRAME_R31
- }
- {
- st r_tmp, r31
- addi r_tmp, sp, FRAME_R32
- }
- st r_tmp, r32
-
- /* Move some arguments to callee-save registers. */
- {
- move r_context, r_context_in
- move r_access, r_access_in
- }
- move r_asid, r_asid_in
-
- /* First, flush our L2 cache. */
- {
- move r0, zero /* cache_pa */
- moveli r1, hw2_last(HV_FLUSH_EVICT_L2) /* cache_control */
- }
- {
- shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
- move r2, r_my_cpumask /* cache_cpumask */
- }
- {
- shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2)
- move r3, zero /* tlb_va */
- }
- {
- move r4, zero /* tlb_length */
- move r5, zero /* tlb_pgsize */
- }
- {
- move r6, zero /* tlb_cpumask */
- move r7, zero /* asids */
- }
- {
- move r8, zero /* asidcount */
- jal _hv_flush_remote
- }
- bnez r0, 1f
-
- /* Now install the new page table. */
- {
- move r0, r_context
- move r1, r_access
- }
- {
- move r2, r_asid
- moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
- }
- jal _hv_install_context
- bnez r0, 1f
-
- /* Finally, flush the TLB. */
- {
- movei r0, 0 /* preserve_global */
- jal hv_flush_all
- }
-
-1: /* Restore the callee-saved registers and return. */
- addli lr, sp, FRAME_SIZE
- {
- ld lr, lr
- addli r_tmp, sp, FRAME_R30
- }
- {
- ld r30, r_tmp
- addli r_tmp, sp, FRAME_R31
- }
- {
- ld r31, r_tmp
- addli r_tmp, sp, FRAME_R32
- }
- {
- ld r32, r_tmp
- addi sp, sp, FRAME_SIZE
- }
- jrp lr
- STD_ENDPROC(flush_and_install_context)
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c
deleted file mode 100644
index 8ab28167c44b..000000000000
--- a/arch/tile/mm/mmap.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Taken from the i386 architecture and simplified.
- */
-
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/limits.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/mman.h>
-#include <linux/compat.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(struct mm_struct *mm)
-{
- unsigned long gap = rlimit(RLIMIT_STACK);
- unsigned long random_factor = 0;
-
- if (current->flags & PF_RANDOMIZE)
- random_factor = get_random_int() % (1024*1024);
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
-}
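A worked example of the clamp above, assuming a typical 8 MB stack rlimit: 8 MB is below MIN_GAP, so the gap becomes 128 MB and the top-down base lands at

	base = PAGE_ALIGN(TASK_SIZE - 128 MB - r),  r < 1 MB if PF_RANDOMIZE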
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-#if !defined(__tilegx__)
- int is_32bit = 1;
-#elif defined(CONFIG_COMPAT)
- int is_32bit = is_compat_task();
-#else
- int is_32bit = 0;
-#endif
- unsigned long random_factor = 0UL;
-
- /*
- * 8 bits of randomness in 32bit mmaps, 24 address space bits
- * 12 bits of randomness in 64bit mmaps, 28 address space bits
- */
- if (current->flags & PF_RANDOMIZE) {
- if (is_32bit)
- random_factor = get_random_int() % (1<<8);
- else
- random_factor = get_random_int() % (1<<12);
-
- random_factor <<= PAGE_SHIFT;
- }
-
- /*
- * Use standard layout if the expected stack growth is unlimited
- * or we are running a native 64-bit process.
- */
- if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(mm);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
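Concretely, and assuming 4 KB pages for the arithmetic (tile also supports larger page sizes): 1<<8 pages of randomization spreads a 32-bit task's mmap base over 1 MB, and 1<<12 pages spreads a 64-bit task's over 16 MB.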
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- return randomize_page(mm->brk, 0x02000000);
-}
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
deleted file mode 100644
index ec5576fd3a86..000000000000
--- a/arch/tile/mm/pgtable.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/spinlock.h>
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/vmalloc.h>
-#include <linux/smp.h>
-
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/fixmap.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-
-#define K(x) ((x) << (PAGE_SHIFT-10))
-
-/**
- * shatter_huge_page() - ensure a given address is mapped by a small page.
- *
- * This function converts a huge PTE mapping kernel LOWMEM into a bunch
- * of small PTEs with the same caching. No cache flush required, but we
- * must do a global TLB flush.
- *
- * Any caller that wishes to modify a kernel mapping that might
- * have been made with a huge page should call this function,
- * since doing so properly avoids race conditions with installing the
- * newly-shattered page and then flushing all the TLB entries.
- *
- * @addr: Address at which to shatter any existing huge page.
- */
-void shatter_huge_page(unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- unsigned long flags = 0; /* happy compiler */
-#ifdef __PAGETABLE_PMD_FOLDED
- struct list_head *pos;
-#endif
-
- /* Get a pointer to the pmd entry that we need to change. */
- addr &= HPAGE_MASK;
- BUG_ON(pgd_addr_invalid(addr));
- BUG_ON(addr < PAGE_OFFSET); /* only for kernel LOWMEM */
- pgd = swapper_pg_dir + pgd_index(addr);
- pud = pud_offset(pgd, addr);
- BUG_ON(!pud_present(*pud));
- pmd = pmd_offset(pud, addr);
- BUG_ON(!pmd_present(*pmd));
- if (!pmd_huge_page(*pmd))
- return;
-
- spin_lock_irqsave(&init_mm.page_table_lock, flags);
- if (!pmd_huge_page(*pmd)) {
- /* Lost the race to convert the huge page. */
- spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
- return;
- }
-
- /* Shatter the huge page into the preallocated L2 page table. */
- pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));
-
-#ifdef __PAGETABLE_PMD_FOLDED
- /* Walk every pgd on the system and update the pmd there. */
- spin_lock(&pgd_lock);
- list_for_each(pos, &pgd_list) {
- pmd_t *copy_pmd;
- pgd = list_to_pgd(pos) + pgd_index(addr);
- pud = pud_offset(pgd, addr);
- copy_pmd = pmd_offset(pud, addr);
- __set_pmd(copy_pmd, *pmd);
- }
- spin_unlock(&pgd_lock);
-#endif
-
- /* Tell every cpu to notice the change. */
- flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
- cpu_possible_mask, NULL, 0);
-
- /* Hold the lock until the TLB flush is finished to avoid races. */
- spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
-}
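A sketch of the calling pattern the comment above describes — shatter first, then edit the small-page PTE. The helper name is hypothetical; virt_to_kpte(), set_pte(), kaddr_to_pfn() and flush_tlb_all() are the port's own primitives seen elsewhere in this patch:

	/* Sketch only: change the protection of one small kernel page
	 * that may currently sit under a huge LOWMEM mapping. */
	static void set_kernel_page_prot(unsigned long addr, pgprot_t prot)
	{
		pte_t *ptep;

		shatter_huge_page(addr);	/* no-op if already small */
		ptep = virt_to_kpte(addr);	/* bottom-level PTE now exists */
		set_pte(ptep, pfn_pte(kaddr_to_pfn((void *)addr), prot));
		flush_tlb_all();		/* make every cpu notice */
	}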
-
-/*
- * List of all pgd's needed so it can invalidate entries in both cached
- * and uncached pgd's. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- *
- * The lock is always taken with interrupts disabled, unlike on x86
- * and other platforms, because we need to take the lock in
- * shatter_huge_page(), which may be called from an interrupt context.
- * We are not at risk from the tlbflush IPI deadlock that was seen on
- * x86, since we use the flush_remote() API to have the hypervisor do
- * the TLB flushes regardless of irq disabling.
- */
-DEFINE_SPINLOCK(pgd_lock);
-LIST_HEAD(pgd_list);
-
-static inline void pgd_list_add(pgd_t *pgd)
-{
- list_add(pgd_to_list(pgd), &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
- list_del(pgd_to_list(pgd));
-}
-
-#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)
-
-static void pgd_ctor(pgd_t *pgd)
-{
- unsigned long flags;
-
- memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
- spin_lock_irqsave(&pgd_lock, flags);
-
-#ifndef __tilegx__
- /*
- * Check that the user interrupt vector has no L2.
- * It never should for the swapper, and new page tables
- * should always start with an empty user interrupt vector.
- */
- BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
-#endif
-
- memcpy(pgd + KERNEL_PGD_INDEX_START,
- swapper_pg_dir + KERNEL_PGD_INDEX_START,
- KERNEL_PGD_PTRS * sizeof(pgd_t));
-
- pgd_list_add(pgd);
- spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static void pgd_dtor(pgd_t *pgd)
-{
- unsigned long flags; /* can be called from interrupt context */
-
- spin_lock_irqsave(&pgd_lock, flags);
- pgd_list_del(pgd);
- spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
- if (pgd)
- pgd_ctor(pgd);
- return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- pgd_dtor(pgd);
- kmem_cache_free(pgd_cache, pgd);
-}
-
-
-#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
-
-struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
- int order)
-{
- gfp_t flags = GFP_KERNEL|__GFP_ZERO;
- struct page *p;
- int i;
-
- p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
- if (p == NULL)
- return NULL;
-
- if (!pgtable_page_ctor(p)) {
- __free_pages(p, L2_USER_PGTABLE_ORDER);
- return NULL;
- }
-
- /*
- * Make every page have a page_count() of one, not just the first.
- * We don't use __GFP_COMP since it doesn't look like it works
- * correctly with tlb_remove_page().
- */
- for (i = 1; i < order; ++i) {
- init_page_count(p+i);
- inc_zone_page_state(p+i, NR_PAGETABLE);
- }
-
- return p;
-}
-
-/*
- * Free page immediately (used in __pte_alloc if we raced with another
- * process). We have to correct whatever pte_alloc_one() did before
- * returning the pages to the allocator.
- */
-void pgtable_free(struct mm_struct *mm, struct page *p, int order)
-{
- int i;
-
- pgtable_page_dtor(p);
- __free_page(p);
-
- for (i = 1; i < order; ++i) {
- __free_page(p+i);
- dec_zone_page_state(p+i, NR_PAGETABLE);
- }
-}
-
-void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address, int order)
-{
- int i;
-
- pgtable_page_dtor(pte);
- tlb_remove_page(tlb, pte);
-
- for (i = 1; i < order; ++i) {
- tlb_remove_page(tlb, pte + i);
- dec_zone_page_state(pte + i, NR_PAGETABLE);
- }
-}
-
-#ifndef __tilegx__
-
-/*
- * FIXME: needs to be atomic vs hypervisor writes. For now we make the
- * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
- */
-int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
-#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
-# error Code assumes HV_PTE "accessed" bit in second byte
-#endif
- u8 *tmp = (u8 *)ptep;
- u8 second_byte = tmp[1];
- if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
- return 0;
- tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
- return 1;
-}
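For instance, if HV_PTE_INDEX_ACCESSED were 10 (a hypothetical value inside the asserted [8,16) range), the accessed flag would be bit 2 of byte 1 of the PTE, and the code above tests and clears it with a single one-byte load and store — only the other bits of that one byte remain exposed to a racing hypervisor write.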
-
-/*
- * This implementation is atomic vs hypervisor writes, since the hypervisor
- * always writes the low word (where "accessed" and "dirty" are) and this
- * routine only writes the high word.
- */
-void ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
-#if HV_PTE_INDEX_WRITABLE < 32
-# error Code assumes HV_PTE "writable" bit in high word
-#endif
- u32 *tmp = (u32 *)ptep;
- tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
-}
-
-#endif
-
-/*
- * Return a pointer to the PTE that corresponds to the given
- * address in the given page table. A NULL page table just uses
- * the standard kernel page table; the preferred API in this case
- * is virt_to_kpte().
- *
- * The returned pointer can point to a huge page in other levels
- * of the page table than the bottom, if the huge page is present
- * in the page table. For bottom-level PTEs, the returned pointer
- * can point to a PTE that is either present or not.
- */
-pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- if (pgd_addr_invalid(addr))
- return NULL;
-
- pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud))
- return NULL;
- if (pud_huge_page(*pud))
- return (pte_t *)pud;
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
- return NULL;
- if (pmd_huge_page(*pmd))
- return (pte_t *)pmd;
- return pte_offset_kernel(pmd, addr);
-}
-EXPORT_SYMBOL(virt_to_pte);
-
-pte_t *virt_to_kpte(unsigned long kaddr)
-{
- BUG_ON(kaddr < PAGE_OFFSET);
- return virt_to_pte(NULL, kaddr);
-}
-EXPORT_SYMBOL(virt_to_kpte);
-
-pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
-{
- unsigned int width = smp_width;
- int x = cpu % width;
- int y = cpu / width;
- BUG_ON(y >= smp_height);
- BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
- BUG_ON(cpu < 0 || cpu >= NR_CPUS);
- BUG_ON(!cpu_is_valid_lotar(cpu));
- return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
-}
-
-int get_remote_cache_cpu(pgprot_t prot)
-{
- HV_LOTAR lotar = hv_pte_get_lotar(prot);
- int x = HV_LOTAR_X(lotar);
- int y = HV_LOTAR_Y(lotar);
- BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
- return x + y * smp_width;
-}
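A worked round trip, assuming smp_width = 8: cpu 13 maps to lotar coordinates x = 13 % 8 = 5 and y = 13 / 8 = 1, and get_remote_cache_cpu() recovers 5 + 1 * 8 = 13.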
-
-/*
- * Convert a kernel VA to a PA and homing information.
- */
-int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
-{
- struct page *page = virt_to_page(va);
- pte_t null_pte = { 0 };
-
- *cpa = __pa(va);
-
- /* Note that this is not writing a page table, just returning a pte. */
- *pte = pte_set_home(null_pte, page_home(page));
-
- return 0; /* return non-zero if not hfh? */
-}
-EXPORT_SYMBOL(va_to_cpa_and_pte);
-
-void __set_pte(pte_t *ptep, pte_t pte)
-{
-#ifdef __tilegx__
- *ptep = pte;
-#else
-# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
-# error Must write the present and migrating bits last
-# endif
- if (pte_present(pte)) {
- ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
- barrier();
- ((u32 *)ptep)[0] = (u32)(pte_val(pte));
- } else {
- ((u32 *)ptep)[0] = (u32)(pte_val(pte));
- barrier();
- ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
- }
-#endif /* __tilegx__ */
-}
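The ordering matters because the present and migrating bits sit in the low word on 32-bit tile; the two interleavings the code above allows are:

	making a PTE present:  high word first, then low (present appears last)
	making a PTE absent:   low word first (present vanishes), then high

so no observer ever sees a present PTE whose two halves disagree.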
-
-void set_pte(pte_t *ptep, pte_t pte)
-{
- if (pte_present(pte) &&
- (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
- /* The PTE actually references physical memory. */
- unsigned long pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- /* Update the home of the PTE from the struct page. */
- pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
- } else if (hv_pte_get_mode(pte) == 0) {
- /* remap_pfn_range(), etc, must supply PTE mode. */
- panic("set_pte(): out-of-range PFN and mode 0\n");
- }
- }
-
- __set_pte(ptep, pte);
-}
-
-/* Can this mm load a PTE with cached_priority set? */
-static inline int mm_is_priority_cached(struct mm_struct *mm)
-{
- return mm->context.priority_cached != 0;
-}
-
-/*
- * Add a priority mapping to an mm_context and
- * notify the hypervisor if this is the first one.
- */
-void start_mm_caching(struct mm_struct *mm)
-{
- if (!mm_is_priority_cached(mm)) {
- mm->context.priority_cached = -1UL;
- hv_set_caching(-1UL);
- }
-}
-
-/*
- * Validate and return the priority_cached flag. We know if it's zero
- * that we don't need to scan, since we immediately set it non-zero
- * when we first consider a MAP_CACHE_PRIORITY mapping.
- *
- * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
- * since we're in an interrupt context (servicing switch_mm) we don't
- * worry about it and don't unset the "priority_cached" field.
- * Presumably we'll come back later and have more luck and clear
- * the value then; for now we'll just keep the cache marked for priority.
- */
-static unsigned long update_priority_cached(struct mm_struct *mm)
-{
- if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
- struct vm_area_struct *vm;
- for (vm = mm->mmap; vm; vm = vm->vm_next) {
- if (hv_pte_get_cached_priority(vm->vm_page_prot))
- break;
- }
- if (vm == NULL)
- mm->context.priority_cached = 0;
- up_write(&mm->mmap_sem);
- }
- return mm->context.priority_cached;
-}
-
-/* Set caching correctly for an mm that we are switching to. */
-void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
-{
- if (!mm_is_priority_cached(next)) {
- /*
- * If the new mm doesn't use priority caching, just see if we
- * need the hv_set_caching(), or can assume it's already zero.
- */
- if (mm_is_priority_cached(prev))
- hv_set_caching(0);
- } else {
- hv_set_caching(update_priority_cached(next));
- }
-}
-
-#if CHIP_HAS_MMIO()
-
-/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
-void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
- pgprot_t home)
-{
- void *addr;
- struct vm_struct *area;
- unsigned long offset, last_addr;
- pgprot_t pgprot;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /* Create a read/write, MMIO VA mapping homed at the requested shim. */
- pgprot = PAGE_KERNEL;
- pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
- pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
- /*
- * Ok, go for it.
- */
- area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
- if (!area)
- return NULL;
- area->phys_addr = phys_addr;
- addr = area->addr;
- if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
- phys_addr, pgprot)) {
- free_vm_area(area);
- return NULL;
- }
- return (__force void __iomem *) (offset + (char *)addr);
-}
-EXPORT_SYMBOL(ioremap_prot);
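A minimal usage sketch with a made-up physical address and register offset; per the code above, the home argument is consulted only for its lotar:

	/* Sketch only: map 4 KB of MMIO, set one bit in a register,
	 * and unmap again (accessors from <linux/io.h>). */
	static int poke_device(pgprot_t home)
	{
		void __iomem *regs =
			ioremap_prot(0x1000000000ULL, 0x1000, home);

		if (!regs)
			return -ENOMEM;
		writel(readl(regs + 0x10) | 1, regs + 0x10);
		iounmap(regs);
		return 0;
	}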
-
-#if !defined(CONFIG_PCI) || !defined(CONFIG_TILEGX)
-/* ioremap is conditionally declared in pci_gx.c */
-
-void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
-{
- return NULL;
-}
-EXPORT_SYMBOL(ioremap);
-
-#endif
-
-/* Unmap an MMIO VA mapping. */
-void iounmap(volatile void __iomem *addr_in)
-{
- volatile void __iomem *addr = (volatile void __iomem *)
- (PAGE_MASK & (unsigned long __force)addr_in);
-#if 1
- vunmap((void * __force)addr);
-#else
- /* x86 uses this complicated flow instead of vunmap(). Is
- * there any particular reason we should do the same? */
- struct vm_struct *p, *o;
-
- /* Use the vm area unlocked, assuming the caller
- ensures there isn't another iounmap for the same address
- in parallel. Reuse of the virtual address is prevented by
- leaving it in the global lists until we're done with it.
- cpa takes care of the direct mappings. */
- p = find_vm_area((void *)addr);
-
- if (!p) {
- pr_err("iounmap: bad address %p\n", addr);
- dump_stack();
- return;
- }
-
- /* Finally remove it */
- o = remove_vm_area((void *)addr);
- BUG_ON(p != o || o == NULL);
- kfree(p);
-#endif
-}
-EXPORT_SYMBOL(iounmap);
-
-#endif /* CHIP_HAS_MMIO() */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8b14bd326d4a..5b997138e092 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2319,25 +2319,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
-#ifdef CONFIG_TILEPRO
-/*
- * The Tilera TILEmpower tilepro platform needs to set the link speed
- * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
- * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe
- * capability register of the PEX8624 PCIe switch. The switch
- * supports link speed auto negotiation, but falsely sets
- * the link speed to 5GT/s.
- */
-static void quirk_tile_plx_gen1(struct pci_dev *dev)
-{
- if (tile_plx_gen1) {
- pci_write_config_dword(dev, 0x98, 0x1);
- mdelay(50);
- }
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
-#endif /* CONFIG_TILEPRO */
-
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index 67de3b774bc9..02be8984c32f 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -38,10 +38,6 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs)
pr_info("<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
#endif
-#ifdef CONFIG_TILEGX
- pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, ex1 = 0x%lx\n",
- p->symbol_name, p->addr, regs->pc, regs->ex1);
-#endif
#ifdef CONFIG_ARM64
pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx,"
" pstate = 0x%lx\n",
@@ -72,10 +68,6 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_status);
#endif
-#ifdef CONFIG_TILEGX
- pr_info("<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
- p->symbol_name, p->addr, regs->ex1);
-#endif
#ifdef CONFIG_ARM64
pr_info("<%s> post_handler: p->addr = 0x%p, pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pstate);
diff --git a/tools/arch/tile/include/asm/barrier.h b/tools/arch/tile/include/asm/barrier.h
deleted file mode 100644
index 7ad02a591b43..000000000000
--- a/tools/arch/tile/include/asm/barrier.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _TOOLS_LINUX_ASM_TILE_BARRIER_H
-#define _TOOLS_LINUX_ASM_TILE_BARRIER_H
-/*
- * FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
- * in 620830b6954913647b7c7f68920cf48eddf6ad92, more work needed to make it
- * more closely follow the Linux kernel arch/tile/include/asm/barrier.h file.
- * Probably when we continue work on tools/ Kconfig support to have all the
- * CONFIG_ needed for properly doing that.
- */
-
-#define mb() asm volatile ("mf" ::: "memory")
-#define wmb() mb()
-#define rmb() mb()
-
-#endif /* _TOOLS_LINUX_ASM_TILE_BARRIER_H */
diff --git a/tools/arch/tile/include/uapi/asm/bitsperlong.h b/tools/arch/tile/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 57cca78c0fbb..000000000000
--- a/tools/arch/tile/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_BITSPERLONG_H
-#define _ASM_TILE_BITSPERLONG_H
-
-#ifdef __LP64__
-# define __BITS_PER_LONG 64
-#else
-# define __BITS_PER_LONG 32
-#endif
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* _ASM_TILE_BITSPERLONG_H */
diff --git a/tools/arch/tile/include/uapi/asm/mman.h b/tools/arch/tile/include/uapi/asm/mman.h
deleted file mode 100644
index 65ec92925c6c..000000000000
--- a/tools/arch/tile/include/uapi/asm/mman.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef TOOLS_ARCH_TILE_UAPI_ASM_MMAN_FIX_H
-#define TOOLS_ARCH_TILE_UAPI_ASM_MMAN_FIX_H
-#define MAP_DENYWRITE 0x0800
-#define MAP_EXECUTABLE 0x1000
-#define MAP_GROWSDOWN 0x0100
-#define MAP_HUGETLB 0x4000
-#define MAP_LOCKED 0x0200
-#define MAP_NONBLOCK 0x0080
-#define MAP_NORESERVE 0x0400
-#define MAP_POPULATE 0x0040
-#define MAP_STACK MAP_GROWSDOWN
-#include <uapi/asm-generic/mman-common.h>
-/* MAP_32BIT is undefined on tile, fix it for perf */
-#define MAP_32BIT 0
-#endif
diff --git a/tools/scripts/Makefile.arch b/tools/scripts/Makefile.arch
index 78d90a249e88..b10b7a27c33f 100644
--- a/tools/scripts/Makefile.arch
+++ b/tools/scripts/Makefile.arch
@@ -4,8 +4,7 @@ HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-e /arm64/!s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
- -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
- -e s/tile.*/tile/ )
+ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
ifndef ARCH
ARCH := $(HOSTARCH)
@@ -34,14 +33,6 @@ ifeq ($(ARCH),sh64)
SRCARCH := sh
endif
-# Additional ARCH settings for tile
-ifeq ($(ARCH),tilepro)
- SRCARCH := tile
-endif
-ifeq ($(ARCH),tilegx)
- SRCARCH := tile
-endif
-
LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
ifeq ($(LP64), 1)
IS_64_BIT := 1
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 0c8b61f8398e..8809f244bb7c 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -3683,8 +3683,6 @@ sub read_depends {
# what directory to look at.
if ($arch eq "i386" || $arch eq "x86_64") {
$arch = "x86";
- } elsif ($arch =~ /^tile/) {
- $arch = "tile";
}
my $kconfig = "$builddir/arch/$arch/Kconfig";