From 3307e2363a812e4f68d02ec3b0114a9b510702b7 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Jun 2013 16:20:21 +0100 Subject: [PATCH 01/13] linux-user: Allow getdents to be provided by getdents64 Newer architectures may only implement the getdents64 syscall, not getdents. Provide an implementation of getdents in terms of getdents64 so that we can run getdents-using targets on a getdents64-only host. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Tested-by: Claudio Fontana Message-id: 1370344377-27445-1-git-send-email-peter.maydell@linaro.org Message-id: 1370193044-24535-1-git-send-email-peter.maydell@linaro.org --- linux-user/syscall.c | 61 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 0099d64a9c..4151c78622 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -223,8 +223,11 @@ static int gettid(void) { return -ENOSYS; } #endif +#ifdef __NR_getdents _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); -#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) +#endif +#if !defined(__NR_getdents) || \ + (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); #endif #if defined(TARGET_NR__llseek) && defined(__NR_llseek) @@ -7123,6 +7126,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, break; #endif case TARGET_NR_getdents: +#ifdef __NR_getdents #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 { struct target_dirent *target_dirp; @@ -7194,6 +7198,61 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, } unlock_user(dirp, arg2, ret); } +#endif +#else + /* Implement getdents in terms of getdents64 */ + { + struct linux_dirent64 *dirp; + abi_long count = arg3; + + dirp = lock_user(VERIFY_WRITE, arg2, count, 0); + if (!dirp) { + goto efault; + } + ret = get_errno(sys_getdents64(arg1, dirp, count)); + if (!is_error(ret)) { + /* Convert the dirent64 structs to target dirent. We do this + * in-place, since we can guarantee that a target_dirent is no + * larger than a dirent64; however this means we have to be + * careful to read everything before writing in the new format. 
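+             * (Editorial aside, illustrative only: a linux_dirent64 record
+             * has 64-bit d_ino/d_off plus a d_type byte before d_name,
+             * while target_dirent uses at-most-abi_long-sized fields and
+             * tucks d_type into the record's final byte, so every converted
+             * entry is no longer than its source and the write cursor (tde)
+             * can never overrun the read cursor (de).)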
+ */ + struct linux_dirent64 *de; + struct target_dirent *tde; + int len = ret; + int tlen = 0; + + de = dirp; + tde = (struct target_dirent *)dirp; + while (len > 0) { + int namelen, treclen; + int reclen = de->d_reclen; + uint64_t ino = de->d_ino; + int64_t off = de->d_off; + uint8_t type = de->d_type; + + namelen = strlen(de->d_name); + treclen = offsetof(struct target_dirent, d_name) + + namelen + 2; + treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); + + memmove(tde->d_name, de->d_name, namelen + 1); + tde->d_ino = tswapal(ino); + tde->d_off = tswapal(off); + tde->d_reclen = tswap16(treclen); + /* The target_dirent type is in what was formerly a padding + * byte at the end of the structure: + */ + *(((char *)tde) + treclen - 1) = type; + + de = (struct linux_dirent64 *)((char *)de + reclen); + tde = (struct target_dirent *)((char *)tde + treclen); + len -= reclen; + tlen += treclen; + } + ret = tlen; + } + unlock_user(dirp, arg2, ret); + } #endif break; #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) From c0d472b12e8c5ba81c69b28a1088ff52a59933f2 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Jun 2013 16:20:21 +0100 Subject: [PATCH 02/13] linux-user: Drop direct use of openat etc syscalls The linux-user syscall emulation layer currently supports the openat family of syscalls via two mechanisms: simply calling the corresponding libc functions, and making direct syscalls. Since glibc has supported these functions since at least glibc 2.5, there's no real need to retain the (essentially untested) direct syscall fallback code, so simply delete it. This allows us to remove some ifdeffery that was attempting to disable provision of some of the syscalls if the host didn't seem to support them, which in some cases was actually wrong (eg where there are several flavours of the syscall and we only need one of them, not necessarily the exact one the guest has, as with the fstatat* calls). 
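[Editor's note, illustrative aside: the fstatat point is that glibc exports a
single fstatat() wrapper and itself selects whichever of __NR_fstatat64 or
__NR_newfstatat the host kernel provides, so the emulator no longer has to
mirror that choice. The pattern the patch converges on, as in the hunks
below (argument names here are illustrative), is simply:

    struct stat st;
    /* works on fstatat64-only and newfstatat-only hosts alike */
    ret = get_errno(fstatat(dirfd, path(p), &st, flags));
]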
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Tested-by: Claudio Fontana Message-id: 1370126121-22975-2-git-send-email-peter.maydell@linaro.org --- linux-user/syscall.c | 218 +++++-------------------------------------- 1 file changed, 24 insertions(+), 194 deletions(-) diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 4151c78622..cdd0c28fff 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -181,29 +181,14 @@ static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ #define __NR_sys_uname __NR_uname -#define __NR_sys_faccessat __NR_faccessat -#define __NR_sys_fchmodat __NR_fchmodat -#define __NR_sys_fchownat __NR_fchownat -#define __NR_sys_fstatat64 __NR_fstatat64 -#define __NR_sys_futimesat __NR_futimesat #define __NR_sys_getcwd1 __NR_getcwd #define __NR_sys_getdents __NR_getdents #define __NR_sys_getdents64 __NR_getdents64 #define __NR_sys_getpriority __NR_getpriority -#define __NR_sys_linkat __NR_linkat -#define __NR_sys_mkdirat __NR_mkdirat -#define __NR_sys_mknodat __NR_mknodat -#define __NR_sys_newfstatat __NR_newfstatat -#define __NR_sys_openat __NR_openat -#define __NR_sys_readlinkat __NR_readlinkat -#define __NR_sys_renameat __NR_renameat #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo -#define __NR_sys_symlinkat __NR_symlinkat #define __NR_sys_syslog __NR_syslog #define __NR_sys_tgkill __NR_tgkill #define __NR_sys_tkill __NR_tkill -#define __NR_sys_unlinkat __NR_unlinkat -#define __NR_sys_utimensat __NR_utimensat #define __NR_sys_futex __NR_futex #define __NR_sys_inotify_init __NR_inotify_init #define __NR_sys_inotify_add_watch __NR_inotify_add_watch @@ -339,72 +324,6 @@ static int sys_getcwd1(char *buf, size_t size) return strlen(buf)+1; } -#ifdef CONFIG_ATFILE -/* - * Host system seems to have atfile syscall stubs available. We - * now enable them one by one as specified by target syscall_nr.h. 
- */ - -#ifdef TARGET_NR_faccessat -static int sys_faccessat(int dirfd, const char *pathname, int mode) -{ - return (faccessat(dirfd, pathname, mode, 0)); -} -#endif -#ifdef TARGET_NR_fchmodat -static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode) -{ - return (fchmodat(dirfd, pathname, mode, 0)); -} -#endif -#if defined(TARGET_NR_fchownat) -static int sys_fchownat(int dirfd, const char *pathname, uid_t owner, - gid_t group, int flags) -{ - return (fchownat(dirfd, pathname, owner, group, flags)); -} -#endif -#ifdef __NR_fstatat64 -static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf, - int flags) -{ - return (fstatat(dirfd, pathname, buf, flags)); -} -#endif -#ifdef __NR_newfstatat -static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf, - int flags) -{ - return (fstatat(dirfd, pathname, buf, flags)); -} -#endif -#ifdef TARGET_NR_futimesat -static int sys_futimesat(int dirfd, const char *pathname, - const struct timeval times[2]) -{ - return (futimesat(dirfd, pathname, times)); -} -#endif -#ifdef TARGET_NR_linkat -static int sys_linkat(int olddirfd, const char *oldpath, - int newdirfd, const char *newpath, int flags) -{ - return (linkat(olddirfd, oldpath, newdirfd, newpath, flags)); -} -#endif -#ifdef TARGET_NR_mkdirat -static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode) -{ - return (mkdirat(dirfd, pathname, mode)); -} -#endif -#ifdef TARGET_NR_mknodat -static int sys_mknodat(int dirfd, const char *pathname, mode_t mode, - dev_t dev) -{ - return (mknodat(dirfd, pathname, mode, dev)); -} -#endif #ifdef TARGET_NR_openat static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) { @@ -418,91 +337,6 @@ static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode) return (openat(dirfd, pathname, flags)); } #endif -#ifdef TARGET_NR_readlinkat -static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz) -{ - return (readlinkat(dirfd, pathname, buf, bufsiz)); -} -#endif -#ifdef TARGET_NR_renameat -static int sys_renameat(int olddirfd, const char *oldpath, - int newdirfd, const char *newpath) -{ - return (renameat(olddirfd, oldpath, newdirfd, newpath)); -} -#endif -#ifdef TARGET_NR_symlinkat -static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath) -{ - return (symlinkat(oldpath, newdirfd, newpath)); -} -#endif -#ifdef TARGET_NR_unlinkat -static int sys_unlinkat(int dirfd, const char *pathname, int flags) -{ - return (unlinkat(dirfd, pathname, flags)); -} -#endif -#else /* !CONFIG_ATFILE */ - -/* - * Try direct syscalls instead - */ -#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) -_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) -#endif -#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) -_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) -#endif -#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) -_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, - uid_t,owner,gid_t,group,int,flags) -#endif -#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ - defined(__NR_fstatat64) -_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname, - struct stat *,buf,int,flags) -#endif -#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) -_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, - const struct timeval *,times) -#endif -#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \ - defined(__NR_newfstatat) 
-_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, - struct stat *,buf,int,flags) -#endif -#if defined(TARGET_NR_linkat) && defined(__NR_linkat) -_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, - int,newdirfd,const char *,newpath,int,flags) -#endif -#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) -_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) -#endif -#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) -_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, - mode_t,mode,dev_t,dev) -#endif -#if defined(TARGET_NR_openat) && defined(__NR_openat) -_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) -#endif -#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) -_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, - char *,buf,size_t,bufsize) -#endif -#if defined(TARGET_NR_renameat) && defined(__NR_renameat) -_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, - int,newdirfd,const char *,newpath) -#endif -#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) -_syscall3(int,sys_symlinkat,const char *,oldpath, - int,newdirfd,const char *,newpath) -#endif -#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) -_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) -#endif - -#endif /* CONFIG_ATFILE */ #ifdef CONFIG_UTIMENSAT static int sys_utimensat(int dirfd, const char *pathname, @@ -5345,7 +5179,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, unlock_user(p, arg1, 0); } break; -#if defined(TARGET_NR_linkat) && defined(__NR_linkat) +#if defined(TARGET_NR_linkat) case TARGET_NR_linkat: { void * p2 = NULL; @@ -5356,7 +5190,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, if (!p || !p2) ret = -TARGET_EFAULT; else - ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); + ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); unlock_user(p, arg2, 0); unlock_user(p2, arg4, 0); } @@ -5368,11 +5202,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, ret = get_errno(unlink(p)); unlock_user(p, arg1, 0); break; -#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) +#if defined(TARGET_NR_unlinkat) case TARGET_NR_unlinkat: if (!(p = lock_user_string(arg2))) goto efault; - ret = get_errno(sys_unlinkat(arg1, p, arg3)); + ret = get_errno(unlinkat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif @@ -5490,11 +5324,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, ret = get_errno(mknod(p, arg2, arg3)); unlock_user(p, arg1, 0); break; -#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) +#if defined(TARGET_NR_mknodat) case TARGET_NR_mknodat: if (!(p = lock_user_string(arg2))) goto efault; - ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); + ret = get_errno(mknodat(arg1, p, arg3, arg4)); unlock_user(p, arg2, 0); break; #endif @@ -5625,7 +5459,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, unlock_user(p, arg1, 0); } break; -#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) +#if defined(TARGET_NR_futimesat) case TARGET_NR_futimesat: { struct timeval *tvp, tv[2]; @@ -5640,7 +5474,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, } if (!(p = lock_user_string(arg2))) goto efault; - ret = get_errno(sys_futimesat(arg1, path(p), tvp)); + ret = get_errno(futimesat(arg1, path(p), tvp)); unlock_user(p, arg2, 0); } break; @@ -5663,7 +5497,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, case TARGET_NR_faccessat: if (!(p = lock_user_string(arg2))) goto efault; 
- ret = get_errno(sys_faccessat(arg1, p, arg3)); + ret = get_errno(faccessat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); break; #endif @@ -5696,7 +5530,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, unlock_user(p, arg1, 0); } break; -#if defined(TARGET_NR_renameat) && defined(__NR_renameat) +#if defined(TARGET_NR_renameat) case TARGET_NR_renameat: { void *p2; @@ -5705,7 +5539,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, if (!p || !p2) ret = -TARGET_EFAULT; else - ret = get_errno(sys_renameat(arg1, p, arg3, p2)); + ret = get_errno(renameat(arg1, p, arg3, p2)); unlock_user(p2, arg4, 0); unlock_user(p, arg2, 0); } @@ -5717,11 +5551,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, ret = get_errno(mkdir(p, arg2)); unlock_user(p, arg1, 0); break; -#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) +#if defined(TARGET_NR_mkdirat) case TARGET_NR_mkdirat: if (!(p = lock_user_string(arg2))) goto efault; - ret = get_errno(sys_mkdirat(arg1, p, arg3)); + ret = get_errno(mkdirat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif @@ -6407,7 +6241,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, unlock_user(p, arg1, 0); } break; -#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) +#if defined(TARGET_NR_symlinkat) case TARGET_NR_symlinkat: { void *p2; @@ -6416,7 +6250,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, if (!p || !p2) ret = -TARGET_EFAULT; else - ret = get_errno(sys_symlinkat(p, arg2, p2)); + ret = get_errno(symlinkat(p, arg2, p2)); unlock_user(p2, arg3, 0); unlock_user(p, arg1, 0); } @@ -6447,7 +6281,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, unlock_user(p, arg1, 0); } break; -#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) +#if defined(TARGET_NR_readlinkat) case TARGET_NR_readlinkat: { void *p2; @@ -6456,7 +6290,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, if (!p || !p2) ret = -TARGET_EFAULT; else - ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); + ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); unlock_user(p2, arg3, ret); unlock_user(p, arg2, 0); } @@ -6591,11 +6425,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, case TARGET_NR_fchmod: ret = get_errno(fchmod(arg1, arg2)); break; -#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) +#if defined(TARGET_NR_fchmodat) case TARGET_NR_fchmodat: if (!(p = lock_user_string(arg2))) goto efault; - ret = get_errno(sys_fchmodat(arg1, p, arg3)); + ret = get_errno(fchmodat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); break; #endif @@ -7739,8 +7573,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif -#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \ - (defined(__NR_fstatat64) || defined(__NR_newfstatat)) +#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) #ifdef TARGET_NR_fstatat64 case TARGET_NR_fstatat64: #endif @@ -7749,11 +7582,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, #endif if (!(p = lock_user_string(arg2))) goto efault; -#ifdef __NR_fstatat64 - ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); -#else - ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); -#endif + ret = get_errno(fstatat(arg1, path(p), &st, arg4)); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg3, &st); break; @@ -7835,11 +7664,12 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, case TARGET_NR_fchown: ret = 
get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); break; -#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) +#if defined(TARGET_NR_fchownat) case TARGET_NR_fchownat: if (!(p = lock_user_string(arg2))) goto efault; - ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); + ret = get_errno(fchownat(arg1, p, low2highuid(arg3), + low2highgid(arg4), arg5)); unlock_user(p, arg2, 0); break; #endif From 66926895433a56b657f79d14f371831cf79fd43e Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Jun 2013 16:20:22 +0100 Subject: [PATCH 03/13] configure: Drop CONFIG_ATFILE test Nobody uses the CONFIG_ATFILE test now, so just drop it. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Tested-by: Claudio Fontana Message-id: 1370126121-22975-3-git-send-email-peter.maydell@linaro.org --- configure | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/configure b/configure index 1654413762..bb413be26c 100755 --- a/configure +++ b/configure @@ -2557,29 +2557,6 @@ EOF fi fi -# -# Check for xxxat() functions when we are building linux-user -# emulator. This is done because older glibc versions don't -# have syscall stubs for these implemented. -# -atfile=no -cat > $TMPC << EOF -#define _ATFILE_SOURCE -#include -#include -#include - -int -main(void) -{ - /* try to unlink nonexisting file */ - return (unlinkat(AT_FDCWD, "nonexistent_file", 0)); -} -EOF -if compile_prog "" "" ; then - atfile=yes -fi - # Check for inotify functions when we are building linux-user # emulator. This is done because older glibc versions don't # have syscall stubs for these implemented. In that case we @@ -3722,9 +3699,6 @@ fi if test "$curses" = "yes" ; then echo "CONFIG_CURSES=y" >> $config_host_mak fi -if test "$atfile" = "yes" ; then - echo "CONFIG_ATFILE=y" >> $config_host_mak -fi if test "$utimens" = "yes" ; then echo "CONFIG_UTIMENSAT=y" >> $config_host_mak fi From 1d256776c77c211a6f60a36e700f549f3a544cc6 Mon Sep 17 00:00:00 2001 From: Claudio Fontana Date: Wed, 12 Jun 2013 16:20:22 +0100 Subject: [PATCH 04/13] include/elf.h: add aarch64 ELF machine and relocs we will use the 26bit relative relocs in the aarch64 tcg target. Reviewed-by: Peter Maydell Signed-off-by: Claudio Fontana Reviewed-by: Richard Henderson Message-id: 51A5C52A.4080001@huawei.com Signed-off-by: Peter Maydell --- include/elf.h | 129 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/include/elf.h b/include/elf.h index a21ea535bd..cf0d3e2bd6 100644 --- a/include/elf.h +++ b/include/elf.h @@ -129,6 +129,8 @@ typedef int64_t Elf64_Sxword; #define EM_XTENSA 94 /* Tensilica Xtensa */ +#define EM_AARCH64 183 + /* This is the info that is needed to parse the dynamic section of the file */ #define DT_NULL 0 #define DT_NEEDED 1 @@ -616,6 +618,133 @@ typedef struct { /* Keep this the last entry. 
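   (Editorial aside: of the R_AARCH64_* reloc numbers added below, the new
    tcg/aarch64 backend in this series only generates R_AARCH64_JUMP26,
    R_AARCH64_CALL26 and R_AARCH64_CONDBR19 -- see patch_reloc() in patch 05.)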
*/ #define R_ARM_NUM 256 +/* ARM Aarch64 relocation types */ +#define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */ +/* static data relocations */ +#define R_AARCH64_ABS64 257 +#define R_AARCH64_ABS32 258 +#define R_AARCH64_ABS16 259 +#define R_AARCH64_PREL64 260 +#define R_AARCH64_PREL32 261 +#define R_AARCH64_PREL16 262 +/* static aarch64 group relocations */ +/* group relocs to create unsigned data value or address inline */ +#define R_AARCH64_MOVW_UABS_G0 263 +#define R_AARCH64_MOVW_UABS_G0_NC 264 +#define R_AARCH64_MOVW_UABS_G1 265 +#define R_AARCH64_MOVW_UABS_G1_NC 266 +#define R_AARCH64_MOVW_UABS_G2 267 +#define R_AARCH64_MOVW_UABS_G2_NC 268 +#define R_AARCH64_MOVW_UABS_G3 269 +/* group relocs to create signed data or offset value inline */ +#define R_AARCH64_MOVW_SABS_G0 270 +#define R_AARCH64_MOVW_SABS_G1 271 +#define R_AARCH64_MOVW_SABS_G2 272 +/* relocs to generate 19, 21, and 33 bit PC-relative addresses */ +#define R_AARCH64_LD_PREL_LO19 273 +#define R_AARCH64_ADR_PREL_LO21 274 +#define R_AARCH64_ADR_PREL_PG_HI21 275 +#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 +#define R_AARCH64_ADD_ABS_LO12_NC 277 +#define R_AARCH64_LDST8_ABS_LO12_NC 278 +#define R_AARCH64_LDST16_ABS_LO12_NC 284 +#define R_AARCH64_LDST32_ABS_LO12_NC 285 +#define R_AARCH64_LDST64_ABS_LO12_NC 286 +#define R_AARCH64_LDST128_ABS_LO12_NC 299 +/* relocs for control-flow - all offsets as multiple of 4 */ +#define R_AARCH64_TSTBR14 279 +#define R_AARCH64_CONDBR19 280 +#define R_AARCH64_JUMP26 282 +#define R_AARCH64_CALL26 283 +/* group relocs to create pc-relative offset inline */ +#define R_AARCH64_MOVW_PREL_G0 287 +#define R_AARCH64_MOVW_PREL_G0_NC 288 +#define R_AARCH64_MOVW_PREL_G1 289 +#define R_AARCH64_MOVW_PREL_G1_NC 290 +#define R_AARCH64_MOVW_PREL_G2 291 +#define R_AARCH64_MOVW_PREL_G2_NC 292 +#define R_AARCH64_MOVW_PREL_G3 293 +/* group relocs to create a GOT-relative offset inline */ +#define R_AARCH64_MOVW_GOTOFF_G0 300 +#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 +#define R_AARCH64_MOVW_GOTOFF_G1 302 +#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 +#define R_AARCH64_MOVW_GOTOFF_G2 304 +#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 +#define R_AARCH64_MOVW_GOTOFF_G3 306 +/* GOT-relative data relocs */ +#define R_AARCH64_GOTREL64 307 +#define R_AARCH64_GOTREL32 308 +/* GOT-relative instr relocs */ +#define R_AARCH64_GOT_LD_PREL19 309 +#define R_AARCH64_LD64_GOTOFF_LO15 310 +#define R_AARCH64_ADR_GOT_PAGE 311 +#define R_AARCH64_LD64_GOT_LO12_NC 312 +#define R_AARCH64_LD64_GOTPAGE_LO15 313 +/* General Dynamic TLS relocations */ +#define R_AARCH64_TLSGD_ADR_PREL21 512 +#define R_AARCH64_TLSGD_ADR_PAGE21 513 +#define R_AARCH64_TLSGD_ADD_LO12_NC 514 +#define R_AARCH64_TLSGD_MOVW_G1 515 +#define R_AARCH64_TLSGD_MOVW_G0_NC 516 +/* Local Dynamic TLS relocations */ +#define R_AARCH64_TLSLD_ADR_PREL21 517 +#define R_AARCH64_TLSLD_ADR_PAGE21 518 +#define R_AARCH64_TLSLD_ADD_LO12_NC 519 +#define R_AARCH64_TLSLD_MOVW_G1 520 +#define R_AARCH64_TLSLD_MOVW_G0_NC 521 +#define R_AARCH64_TLSLD_LD_PREL19 522 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527 +#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 +#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 +#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 +#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 +#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 +#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 +#define 
R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 +#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 +#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 +#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 +#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 +/* initial exec TLS relocations */ +#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 +#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 +#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 +#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 +#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 +/* local exec TLS relocations */ +#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 +#define R_AARCH64_TLSLE_MOVW_TPREL_G1 545 +#define R_AARCH64_TLSLE_MOVW_TPREL_G1_NC 546 +#define R_AARCH64_TLSLE_MOVW_TPREL_G0 547 +#define R_AARCH64_TLSLE_MOVW_TPREL_G0_NC 548 +#define R_AARCH64_TLSLE_ADD_TPREL_HI12 549 +#define R_AARCH64_TLSLE_ADD_TPREL_LO12 550 +#define R_AARCH64_TLSLE_ADD_TPREL_LO12_NC 551 +#define R_AARCH64_TLSLE_LDST8_TPREL_LO12 552 +#define R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC 553 +#define R_AARCH64_TLSLE_LDST16_TPREL_LO12 554 +#define R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC 555 +#define R_AARCH64_TLSLE_LDST32_TPREL_LO12 556 +#define R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC 557 +#define R_AARCH64_TLSLE_LDST64_TPREL_LO12 558 +#define R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC 559 +/* Dynamic Relocations */ +#define R_AARCH64_COPY 1024 +#define R_AARCH64_GLOB_DAT 1025 +#define R_AARCH64_JUMP_SLOT 1026 +#define R_AARCH64_RELATIVE 1027 +#define R_AARCH64_TLS_DTPREL64 1028 +#define R_AARCH64_TLS_DTPMOD64 1029 +#define R_AARCH64_TLS_TPREL64 1030 +#define R_AARCH64_TLS_DTPREL32 1031 +#define R_AARCH64_TLS_DTPMOD32 1032 +#define R_AARCH64_TLS_TPREL32 1033 + /* s390 relocations defined by the ABIs */ #define R_390_NONE 0 /* No reloc. */ #define R_390_8 1 /* Direct 8 bit. */ From 4a136e0a6b4ceac177bc2ab29502161553e25ae2 Mon Sep 17 00:00:00 2001 From: Claudio Fontana Date: Wed, 12 Jun 2013 16:20:22 +0100 Subject: [PATCH 05/13] tcg/aarch64: implement new TCG target for aarch64 add preliminary support for TCG target aarch64. 
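[Editor's note, for orientation: the new backend fills in the standard TCG
target hooks visible in the diff below -- patch_reloc() for the
JUMP26/CALL26/CONDBR19 relocations, tcg_out_op() as the opcode dispatcher,
tcg_target_init() for the register sets, and tcg_target_qemu_prologue() for
the host stack frame. The softmmu TLB lookup is still a TODO, so qemu_ld and
qemu_st always call the helper functions.]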
Signed-off-by: Claudio Fontana Reviewed-by: Richard Henderson Reviewed-by: Peter Maydell Message-id: 51A5C596.3090108@huawei.com Signed-off-by: Peter Maydell --- include/exec/exec-all.h | 5 +- tcg/aarch64/tcg-target.c | 1161 ++++++++++++++++++++++++++++++++++++++ tcg/aarch64/tcg-target.h | 99 ++++ translate-all.c | 2 + 4 files changed, 1266 insertions(+), 1 deletion(-) create mode 100644 tcg/aarch64/tcg-target.c create mode 100644 tcg/aarch64/tcg-target.h diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 17fde25c74..b2162a4ec4 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -128,7 +128,7 @@ static inline void tlb_flush(CPUArchState *env, int flush_global) #if defined(__arm__) || defined(_ARCH_PPC) \ || defined(__x86_64__) || defined(__i386__) \ - || defined(__sparc__) \ + || defined(__sparc__) || defined(__aarch64__) \ || defined(CONFIG_TCG_INTERPRETER) #define USE_DIRECT_JUMP #endif @@ -230,6 +230,9 @@ static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) *(uint32_t *)jmp_addr = addr - (jmp_addr + 4); /* no need to flush icache explicitly */ } +#elif defined(__aarch64__) +void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); +#define tb_set_jmp_target1 aarch64_tb_set_jmp_target #elif defined(__arm__) static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) { diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c new file mode 100644 index 0000000000..ff626eb837 --- /dev/null +++ b/tcg/aarch64/tcg-target.c @@ -0,0 +1,1161 @@ +/* + * Initial TCG Implementation for aarch64 + * + * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH + * Written by Claudio Fontana + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + */ + +#include "qemu/bitops.h" + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7", + "%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15", + "%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23", + "%x24", "%x25", "%x26", "%x27", "%x28", + "%fp", /* frame pointer */ + "%lr", /* link register */ + "%sp", /* stack pointer */ +}; +#endif /* NDEBUG */ + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, + TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, + TCG_REG_X28, + + TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, + TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X16, TCG_REG_X17, + + TCG_REG_X18, TCG_REG_X19, /* will not use these, see tcg_target_init */ + + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + + TCG_REG_X8, /* will not use, see tcg_target_init */ +}; + +static const int tcg_target_call_iarg_regs[8] = { + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7 +}; +static const int tcg_target_call_oarg_regs[1] = { + TCG_REG_X0 +}; + +#define TCG_REG_TMP TCG_REG_X8 + +static inline void reloc_pc26(void *code_ptr, tcg_target_long target) +{ + tcg_target_long offset; uint32_t insn; + offset = (target - (tcg_target_long)code_ptr) / 4; + /* read instruction, mask away previous PC_REL26 parameter contents, + set the proper offset, then write back the instruction. 
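+       For example (illustrative): a B/BL landing 8 bytes ahead needs
+       offset = 8 / 4 = 2, and deposit32(insn, 0, 26, 2) rewrites just
+       the imm26 field, giving a reach of +/-128MB from the instruction.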
*/ + insn = *(uint32_t *)code_ptr; + insn = deposit32(insn, 0, 26, offset); + *(uint32_t *)code_ptr = insn; +} + +static inline void reloc_pc19(void *code_ptr, tcg_target_long target) +{ + tcg_target_long offset; uint32_t insn; + offset = (target - (tcg_target_long)code_ptr) / 4; + /* read instruction, mask away previous PC_REL19 parameter contents, + set the proper offset, then write back the instruction. */ + insn = *(uint32_t *)code_ptr; + insn = deposit32(insn, 5, 19, offset); + *(uint32_t *)code_ptr = insn; +} + +static inline void patch_reloc(uint8_t *code_ptr, int type, + tcg_target_long value, tcg_target_long addend) +{ + value += addend; + + switch (type) { + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + reloc_pc26(code_ptr, value); + break; + case R_AARCH64_CONDBR19: + reloc_pc19(code_ptr, value); + break; + + default: + tcg_abort(); + } +} + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, + const char **pct_str) +{ + const char *ct_str = *pct_str; + + switch (ct_str[0]) { + case 'r': + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); + break; + case 'l': /* qemu_ld / qemu_st address, data_reg */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); +#ifdef CONFIG_SOFTMMU + /* x0 and x1 will be overwritten when reading the tlb entry, + and x2, and x3 for helper args, better to avoid using them. */ + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); +#endif + break; + default: + return -1; + } + + ct_str++; + *pct_str = ct_str; + return 0; +} + +static inline int tcg_target_const_match(tcg_target_long val, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + + if (ct & TCG_CT_CONST) { + return 1; + } + + return 0; +} + +enum aarch64_cond_code { + COND_EQ = 0x0, + COND_NE = 0x1, + COND_CS = 0x2, /* Unsigned greater or equal */ + COND_HS = COND_CS, /* ALIAS greater or equal */ + COND_CC = 0x3, /* Unsigned less than */ + COND_LO = COND_CC, /* ALIAS Lower */ + COND_MI = 0x4, /* Negative */ + COND_PL = 0x5, /* Zero or greater */ + COND_VS = 0x6, /* Overflow */ + COND_VC = 0x7, /* No overflow */ + COND_HI = 0x8, /* Unsigned greater than */ + COND_LS = 0x9, /* Unsigned less or equal */ + COND_GE = 0xa, + COND_LT = 0xb, + COND_GT = 0xc, + COND_LE = 0xd, + COND_AL = 0xe, + COND_NV = 0xf, /* behaves like COND_AL here */ +}; + +static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { + [TCG_COND_EQ] = COND_EQ, + [TCG_COND_NE] = COND_NE, + [TCG_COND_LT] = COND_LT, + [TCG_COND_GE] = COND_GE, + [TCG_COND_LE] = COND_LE, + [TCG_COND_GT] = COND_GT, + /* unsigned */ + [TCG_COND_LTU] = COND_LO, + [TCG_COND_GTU] = COND_HI, + [TCG_COND_GEU] = COND_HS, + [TCG_COND_LEU] = COND_LS, +}; + +/* opcodes for LDR / STR instructions with base + simm9 addressing */ +enum aarch64_ldst_op_data { /* size of the data moved */ + LDST_8 = 0x38, + LDST_16 = 0x78, + LDST_32 = 0xb8, + LDST_64 = 0xf8, +}; +enum aarch64_ldst_op_type { /* type of operation */ + LDST_ST = 0x0, /* store */ + LDST_LD = 0x4, /* load */ + LDST_LD_S_X = 0x8, /* load and sign-extend into Xt */ + LDST_LD_S_W = 0xc, /* load and sign-extend into Wt */ +}; + +enum aarch64_arith_opc { + ARITH_ADD = 0x0b, + ARITH_SUB = 0x4b, + ARITH_AND = 0x0a, + ARITH_OR = 0x2a, + ARITH_XOR = 0x4a +}; + +enum aarch64_srr_opc { + SRR_SHL = 0x0, + SRR_SHR = 0x4, + SRR_SAR = 0x8, + 
SRR_ROR = 0xc +}; + +static inline enum aarch64_ldst_op_data +aarch64_ldst_get_data(TCGOpcode tcg_op) +{ + switch (tcg_op) { + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + return LDST_8; + + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + return LDST_16; + + case INDEX_op_ld_i32: + case INDEX_op_st_i32: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_st32_i64: + return LDST_32; + + case INDEX_op_ld_i64: + case INDEX_op_st_i64: + return LDST_64; + + default: + tcg_abort(); + } +} + +static inline enum aarch64_ldst_op_type +aarch64_ldst_get_type(TCGOpcode tcg_op) +{ + switch (tcg_op) { + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return LDST_ST; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + case INDEX_op_ld_i64: + return LDST_LD; + + case INDEX_op_ld8s_i32: + case INDEX_op_ld16s_i32: + return LDST_LD_S_W; + + case INDEX_op_ld8s_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32s_i64: + return LDST_LD_S_X; + + default: + tcg_abort(); + } +} + +static inline uint32_t tcg_in32(TCGContext *s) +{ + uint32_t v = *(uint32_t *)s->code_ptr; + return v; +} + +static inline void tcg_out_ldst_9(TCGContext *s, + enum aarch64_ldst_op_data op_data, + enum aarch64_ldst_op_type op_type, + TCGReg rd, TCGReg rn, tcg_target_long offset) +{ + /* use LDUR with BASE register with 9bit signed unscaled offset */ + unsigned int mod, off; + + if (offset < 0) { + off = (256 + offset); + mod = 0x1; + } else { + off = offset; + mod = 0x0; + } + + mod |= op_type; + tcg_out32(s, op_data << 24 | mod << 20 | off << 12 | rn << 5 | rd); +} + +static inline void tcg_out_movr(TCGContext *s, int ext, TCGReg rd, TCGReg src) +{ + /* register to register move using MOV (shifted register with no shift) */ + /* using MOV 0x2a0003e0 | (shift).. */ + unsigned int base = ext ? 0xaa0003e0 : 0x2a0003e0; + tcg_out32(s, base | src << 16 | rd); +} + +static inline void tcg_out_movi_aux(TCGContext *s, + TCGReg rd, uint64_t value) +{ + uint32_t half, base, shift, movk = 0; + /* construct halfwords of the immediate with MOVZ/MOVK with LSL */ + /* using MOVZ 0x52800000 | extended reg.. */ + base = (value > 0xffffffff) ? 0xd2800000 : 0x52800000; + /* count trailing zeros in 16 bit steps, mapping 64 to 0. Emit the + first MOVZ with the half-word immediate skipping the zeros, with a shift + (LSL) equal to this number. Then morph all next instructions into MOVKs. + Zero the processed half-word in the value, continue until empty. + We build the final result 16bits at a time with up to 4 instructions, + but do not emit instructions for 16bit zero holes. 
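+       Illustrative example: value 0x0000ffff00001234 takes two insns,
+       MOVZ rd, #0x1234, LSL #0 then MOVK rd, #0xffff, LSL #32; the
+       all-zero 16-bit field at bits 16..31 costs no instruction.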
*/ + do { + shift = ctz64(value) & (63 & -16); + half = (value >> shift) & 0xffff; + tcg_out32(s, base | movk | shift << 17 | half << 5 | rd); + movk = 0x20000000; /* morph next MOVZs into MOVKs */ + value &= ~(0xffffUL << shift); + } while (value); +} + +static inline void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg rd, tcg_target_long value) +{ + if (type == TCG_TYPE_I64) { + tcg_out_movi_aux(s, rd, value); + } else { + tcg_out_movi_aux(s, rd, value & 0xffffffff); + } +} + +static inline void tcg_out_ldst_r(TCGContext *s, + enum aarch64_ldst_op_data op_data, + enum aarch64_ldst_op_type op_type, + TCGReg rd, TCGReg base, TCGReg regoff) +{ + /* load from memory to register using base + 64bit register offset */ + /* using f.e. STR Wt, [Xn, Xm] 0xb8600800|(regoff << 16)|(base << 5)|rd */ + /* the 0x6000 is for the "no extend field" */ + tcg_out32(s, 0x00206800 + | op_data << 24 | op_type << 20 | regoff << 16 | base << 5 | rd); +} + +/* solve the whole ldst problem */ +static inline void tcg_out_ldst(TCGContext *s, enum aarch64_ldst_op_data data, + enum aarch64_ldst_op_type type, + TCGReg rd, TCGReg rn, tcg_target_long offset) +{ + if (offset >= -256 && offset < 256) { + tcg_out_ldst_9(s, data, type, rd, rn, offset); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); + tcg_out_ldst_r(s, data, type, rd, rn, TCG_REG_TMP); + } +} + +/* mov alias implemented with add immediate, useful to move to/from SP */ +static inline void tcg_out_movr_sp(TCGContext *s, int ext, TCGReg rd, TCGReg rn) +{ + /* using ADD 0x11000000 | (ext) | rn << 5 | rd */ + unsigned int base = ext ? 0x91000000 : 0x11000000; + tcg_out32(s, base | rn << 5 | rd); +} + +static inline void tcg_out_mov(TCGContext *s, + TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret != arg) { + tcg_out_movr(s, type == TCG_TYPE_I64, ret, arg); + } +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_LD, + arg, arg1, arg2); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_ST, + arg, arg1, arg2); +} + +static inline void tcg_out_arith(TCGContext *s, enum aarch64_arith_opc opc, + int ext, TCGReg rd, TCGReg rn, TCGReg rm) +{ + /* Using shifted register arithmetic operations */ + /* if extended registry operation (64bit) just OR with 0x80 << 24 */ + unsigned int base = ext ? (0x80 | opc) << 24 : opc << 24; + tcg_out32(s, base | rm << 16 | rn << 5 | rd); +} + +static inline void tcg_out_mul(TCGContext *s, int ext, + TCGReg rd, TCGReg rn, TCGReg rm) +{ + /* Using MADD 0x1b000000 with Ra = wzr alias MUL 0x1b007c00 */ + unsigned int base = ext ? 0x9b007c00 : 0x1b007c00; + tcg_out32(s, base | rm << 16 | rn << 5 | rd); +} + +static inline void tcg_out_shiftrot_reg(TCGContext *s, + enum aarch64_srr_opc opc, int ext, + TCGReg rd, TCGReg rn, TCGReg rm) +{ + /* using 2-source data processing instructions 0x1ac02000 */ + unsigned int base = ext ? 0x9ac02000 : 0x1ac02000; + tcg_out32(s, base | rm << 16 | opc << 8 | rn << 5 | rd); +} + +static inline void tcg_out_ubfm(TCGContext *s, int ext, TCGReg rd, TCGReg rn, + unsigned int a, unsigned int b) +{ + /* Using UBFM 0x53000000 Wd, Wn, a, b */ + unsigned int base = ext ? 
0xd3400000 : 0x53000000; + tcg_out32(s, base | a << 16 | b << 10 | rn << 5 | rd); +} + +static inline void tcg_out_sbfm(TCGContext *s, int ext, TCGReg rd, TCGReg rn, + unsigned int a, unsigned int b) +{ + /* Using SBFM 0x13000000 Wd, Wn, a, b */ + unsigned int base = ext ? 0x93400000 : 0x13000000; + tcg_out32(s, base | a << 16 | b << 10 | rn << 5 | rd); +} + +static inline void tcg_out_extr(TCGContext *s, int ext, TCGReg rd, + TCGReg rn, TCGReg rm, unsigned int a) +{ + /* Using EXTR 0x13800000 Wd, Wn, Wm, a */ + unsigned int base = ext ? 0x93c00000 : 0x13800000; + tcg_out32(s, base | rm << 16 | a << 10 | rn << 5 | rd); +} + +static inline void tcg_out_shl(TCGContext *s, int ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int bits, max; + bits = ext ? 64 : 32; + max = bits - 1; + tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); +} + +static inline void tcg_out_shr(TCGContext *s, int ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_ubfm(s, ext, rd, rn, m & max, max); +} + +static inline void tcg_out_sar(TCGContext *s, int ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_sbfm(s, ext, rd, rn, m & max, max); +} + +static inline void tcg_out_rotr(TCGContext *s, int ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_extr(s, ext, rd, rn, rn, m & max); +} + +static inline void tcg_out_rotl(TCGContext *s, int ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int bits, max; + bits = ext ? 64 : 32; + max = bits - 1; + tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max)); +} + +static inline void tcg_out_cmp(TCGContext *s, int ext, TCGReg rn, TCGReg rm) +{ + /* Using CMP alias SUBS wzr, Wn, Wm */ + unsigned int base = ext ? 0xeb00001f : 0x6b00001f; + tcg_out32(s, base | rm << 16 | rn << 5); +} + +static inline void tcg_out_cset(TCGContext *s, int ext, TCGReg rd, TCGCond c) +{ + /* Using CSET alias of CSINC 0x1a800400 Xd, XZR, XZR, invert(cond) */ + unsigned int base = ext ? 0x9a9f07e0 : 0x1a9f07e0; + tcg_out32(s, base | tcg_cond_to_aarch64[tcg_invert_cond(c)] << 12 | rd); +} + +static inline void tcg_out_goto(TCGContext *s, tcg_target_long target) +{ + tcg_target_long offset; + offset = (target - (tcg_target_long)s->code_ptr) / 4; + + if (offset < -0x02000000 || offset >= 0x02000000) { + /* out of 26bit range */ + tcg_abort(); + } + + tcg_out32(s, 0x14000000 | (offset & 0x03ffffff)); +} + +static inline void tcg_out_goto_noaddr(TCGContext *s) +{ + /* We pay attention here to not modify the branch target by + reading from the buffer. This ensure that caches and memory are + kept coherent during retranslation. + Mask away possible garbage in the high bits for the first translation, + while keeping the offset bits for retranslation. 
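+       (Illustrative: tcg_in32() reads back the word about to be written;
+       on first translation its low 26 bits are arbitrary but harmless,
+       on retranslation they are the previously patched offset, which is
+       why the branch target survives without being recomputed.)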
*/ + uint32_t insn; + insn = (tcg_in32(s) & 0x03ffffff) | 0x14000000; + tcg_out32(s, insn); +} + +static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c) +{ + /* see comments in tcg_out_goto_noaddr */ + uint32_t insn; + insn = tcg_in32(s) & (0x07ffff << 5); + insn |= 0x54000000 | tcg_cond_to_aarch64[c]; + tcg_out32(s, insn); +} + +static inline void tcg_out_goto_cond(TCGContext *s, TCGCond c, + tcg_target_long target) +{ + tcg_target_long offset; + offset = (target - (tcg_target_long)s->code_ptr) / 4; + + if (offset < -0x40000 || offset >= 0x40000) { + /* out of 19bit range */ + tcg_abort(); + } + + offset &= 0x7ffff; + tcg_out32(s, 0x54000000 | tcg_cond_to_aarch64[c] | offset << 5); +} + +static inline void tcg_out_callr(TCGContext *s, TCGReg reg) +{ + tcg_out32(s, 0xd63f0000 | reg << 5); +} + +static inline void tcg_out_gotor(TCGContext *s, TCGReg reg) +{ + tcg_out32(s, 0xd61f0000 | reg << 5); +} + +static inline void tcg_out_call(TCGContext *s, tcg_target_long target) +{ + tcg_target_long offset; + + offset = (target - (tcg_target_long)s->code_ptr) / 4; + + if (offset < -0x02000000 || offset >= 0x02000000) { /* out of 26bit rng */ + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, target); + tcg_out_callr(s, TCG_REG_TMP); + } else { + tcg_out32(s, 0x94000000 | (offset & 0x03ffffff)); + } +} + +static inline void tcg_out_ret(TCGContext *s) +{ + /* emit RET { LR } */ + tcg_out32(s, 0xd65f03c0); +} + +void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) +{ + tcg_target_long target, offset; + target = (tcg_target_long)addr; + offset = (target - (tcg_target_long)jmp_addr) / 4; + + if (offset < -0x02000000 || offset >= 0x02000000) { + /* out of 26bit range */ + tcg_abort(); + } + + patch_reloc((uint8_t *)jmp_addr, R_AARCH64_JUMP26, target, 0); + flush_icache_range(jmp_addr, jmp_addr + 4); +} + +static inline void tcg_out_goto_label(TCGContext *s, int label_index) +{ + TCGLabel *l = &s->labels[label_index]; + + if (!l->has_value) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, label_index, 0); + tcg_out_goto_noaddr(s); + } else { + tcg_out_goto(s, l->u.value); + } +} + +static inline void tcg_out_goto_label_cond(TCGContext *s, + TCGCond c, int label_index) +{ + TCGLabel *l = &s->labels[label_index]; + + if (!l->has_value) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label_index, 0); + tcg_out_goto_cond_noaddr(s, c); + } else { + tcg_out_goto_cond(s, c, l->u.value); + } +} + +#ifdef CONFIG_SOFTMMU +#include "exec/softmmu_defs.h" + +/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, + int mmu_idx) */ +static const void * const qemu_ld_helpers[4] = { + helper_ldb_mmu, + helper_ldw_mmu, + helper_ldl_mmu, + helper_ldq_mmu, +}; + +/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, + uintxx_t val, int mmu_idx) */ +static const void * const qemu_st_helpers[4] = { + helper_stb_mmu, + helper_stw_mmu, + helper_stl_mmu, + helper_stq_mmu, +}; + +#endif /* CONFIG_SOFTMMU */ + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) +{ + TCGReg addr_reg, data_reg; +#ifdef CONFIG_SOFTMMU + int mem_index, s_bits; +#endif + data_reg = args[0]; + addr_reg = args[1]; + +#ifdef CONFIG_SOFTMMU + mem_index = args[2]; + s_bits = opc & 3; + + /* TODO: insert TLB lookup here */ + + /* all arguments passed via registers */ + tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0); + tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, addr_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, mem_index); + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, + 
(tcg_target_long)qemu_ld_helpers[s_bits]); + tcg_out_callr(s, TCG_REG_TMP); + + if (opc & 0x04) { /* sign extend */ + unsigned int bits = 8 * (1 << s_bits) - 1; + tcg_out_sbfm(s, 1, data_reg, TCG_REG_X0, 0, bits); /* 7|15|31 */ + } else { + tcg_out_movr(s, 1, data_reg, TCG_REG_X0); + } + +#else /* !CONFIG_SOFTMMU */ + tcg_abort(); /* TODO */ +#endif +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) +{ + TCGReg addr_reg, data_reg; +#ifdef CONFIG_SOFTMMU + int mem_index, s_bits; +#endif + data_reg = args[0]; + addr_reg = args[1]; + +#ifdef CONFIG_SOFTMMU + mem_index = args[2]; + s_bits = opc & 3; + + /* TODO: insert TLB lookup here */ + + /* all arguments passed via registers */ + tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0); + tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, addr_reg); + tcg_out_movr(s, 1, TCG_REG_X2, data_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, mem_index); + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, + (tcg_target_long)qemu_st_helpers[s_bits]); + tcg_out_callr(s, TCG_REG_TMP); + +#else /* !CONFIG_SOFTMMU */ + tcg_abort(); /* TODO */ +#endif +} + +static uint8_t *tb_ret_addr; + +/* callee stack use example: + stp x29, x30, [sp,#-32]! + mov x29, sp + stp x1, x2, [sp,#16] + ... + ldp x1, x2, [sp,#16] + ldp x29, x30, [sp],#32 + ret +*/ + +/* push r1 and r2, and alloc stack space for a total of + alloc_n elements (1 element=16 bytes, must be between 1 and 31. */ +static inline void tcg_out_push_pair(TCGContext *s, TCGReg addr, + TCGReg r1, TCGReg r2, int alloc_n) +{ + /* using indexed scaled simm7 STP 0x28800000 | (ext) | 0x01000000 (pre-idx) + | alloc_n * (-1) << 16 | r2 << 10 | addr << 5 | r1 */ + assert(alloc_n > 0 && alloc_n < 0x20); + alloc_n = (-alloc_n) & 0x3f; + tcg_out32(s, 0xa9800000 | alloc_n << 16 | r2 << 10 | addr << 5 | r1); +} + +/* dealloc stack space for a total of alloc_n elements and pop r1, r2. */ +static inline void tcg_out_pop_pair(TCGContext *s, TCGReg addr, + TCGReg r1, TCGReg r2, int alloc_n) +{ + /* using indexed scaled simm7 LDP 0x28c00000 | (ext) | nothing (post-idx) + | alloc_n << 16 | r2 << 10 | addr << 5 | r1 */ + assert(alloc_n > 0 && alloc_n < 0x20); + tcg_out32(s, 0xa8c00000 | alloc_n << 16 | r2 << 10 | addr << 5 | r1); +} + +static inline void tcg_out_store_pair(TCGContext *s, TCGReg addr, + TCGReg r1, TCGReg r2, int idx) +{ + /* using register pair offset simm7 STP 0x29000000 | (ext) + | idx << 16 | r2 << 10 | addr << 5 | r1 */ + assert(idx > 0 && idx < 0x20); + tcg_out32(s, 0xa9000000 | idx << 16 | r2 << 10 | addr << 5 | r1); +} + +static inline void tcg_out_load_pair(TCGContext *s, TCGReg addr, + TCGReg r1, TCGReg r2, int idx) +{ + /* using register pair offset simm7 LDP 0x29400000 | (ext) + | idx << 16 | r2 << 10 | addr << 5 | r1 */ + assert(idx > 0 && idx < 0x20); + tcg_out32(s, 0xa9400000 | idx << 16 | r2 << 10 | addr << 5 | r1); +} + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + /* ext will be set in the switch below, which will fall through to the + common code. It triggers the use of extended regs where appropriate. 
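+       (Illustrative: INDEX_op_add_i64 sets ext = 1 and falls through to
+       INDEX_op_add_i32, so a single tcg_out_arith() call serves both
+       widths, with ext selecting the 64-bit form of the encoding.)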
*/ + int ext = 0; + + switch (opc) { + case INDEX_op_exit_tb: + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, args[0]); + tcg_out_goto(s, (tcg_target_long)tb_ret_addr); + break; + + case INDEX_op_goto_tb: +#ifndef USE_DIRECT_JUMP +#error "USE_DIRECT_JUMP required for aarch64" +#endif + assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */ + s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; + /* actual branch destination will be patched by + aarch64_tb_set_jmp_target later, beware retranslation. */ + tcg_out_goto_noaddr(s); + s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; + break; + + case INDEX_op_call: + if (const_args[0]) { + tcg_out_call(s, args[0]); + } else { + tcg_out_callr(s, args[0]); + } + break; + + case INDEX_op_br: + tcg_out_goto_label(s, args[0]); + break; + + case INDEX_op_ld_i32: + case INDEX_op_ld_i64: + case INDEX_op_st_i32: + case INDEX_op_st_i64: + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + tcg_out_ldst(s, aarch64_ldst_get_data(opc), aarch64_ldst_get_type(opc), + args[0], args[1], args[2]); + break; + + case INDEX_op_mov_i64: + ext = 1; /* fall through */ + case INDEX_op_mov_i32: + tcg_out_movr(s, ext, args[0], args[1]); + break; + + case INDEX_op_movi_i64: + tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]); + break; + case INDEX_op_movi_i32: + tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]); + break; + + case INDEX_op_add_i64: + ext = 1; /* fall through */ + case INDEX_op_add_i32: + tcg_out_arith(s, ARITH_ADD, ext, args[0], args[1], args[2]); + break; + + case INDEX_op_sub_i64: + ext = 1; /* fall through */ + case INDEX_op_sub_i32: + tcg_out_arith(s, ARITH_SUB, ext, args[0], args[1], args[2]); + break; + + case INDEX_op_and_i64: + ext = 1; /* fall through */ + case INDEX_op_and_i32: + tcg_out_arith(s, ARITH_AND, ext, args[0], args[1], args[2]); + break; + + case INDEX_op_or_i64: + ext = 1; /* fall through */ + case INDEX_op_or_i32: + tcg_out_arith(s, ARITH_OR, ext, args[0], args[1], args[2]); + break; + + case INDEX_op_xor_i64: + ext = 1; /* fall through */ + case INDEX_op_xor_i32: + tcg_out_arith(s, ARITH_XOR, ext, args[0], args[1], args[2]); + break; + + case INDEX_op_mul_i64: + ext = 1; /* fall through */ + case INDEX_op_mul_i32: + tcg_out_mul(s, ext, args[0], args[1], args[2]); + break; + + case INDEX_op_shl_i64: + ext = 1; /* fall through */ + case INDEX_op_shl_i32: + if (const_args[2]) { /* LSL / UBFM Wd, Wn, (32 - m) */ + tcg_out_shl(s, ext, args[0], args[1], args[2]); + } else { /* LSL / LSLV */ + tcg_out_shiftrot_reg(s, SRR_SHL, ext, args[0], args[1], args[2]); + } + break; + + case INDEX_op_shr_i64: + ext = 1; /* fall through */ + case INDEX_op_shr_i32: + if (const_args[2]) { /* LSR / UBFM Wd, Wn, m, 31 */ + tcg_out_shr(s, ext, args[0], args[1], args[2]); + } else { /* LSR / LSRV */ + tcg_out_shiftrot_reg(s, SRR_SHR, ext, args[0], args[1], args[2]); + } + break; + + case INDEX_op_sar_i64: + ext = 1; /* fall through */ + case INDEX_op_sar_i32: + if (const_args[2]) { /* ASR / SBFM Wd, Wn, m, 31 */ + tcg_out_sar(s, ext, args[0], args[1], args[2]); + } else { /* ASR / ASRV */ + tcg_out_shiftrot_reg(s, SRR_SAR, ext, args[0], args[1], args[2]); + } + break; + + case INDEX_op_rotr_i64: + ext = 1; 
/* fall through */
+    case INDEX_op_rotr_i32:
+        if (const_args[2]) {    /* ROR / EXTR Wd, Wm, Wm, m */
+            tcg_out_rotr(s, ext, args[0], args[1], args[2]);
+        } else { /* ROR / RORV */
+            tcg_out_shiftrot_reg(s, SRR_ROR, ext, args[0], args[1], args[2]);
+        }
+        break;
+
+    case INDEX_op_rotl_i64:
+        ext = 1; /* fall through */
+    case INDEX_op_rotl_i32: /* same as rotate right by (32 - m) */
+        if (const_args[2]) {    /* ROR / EXTR Wd, Wm, Wm, 32 - m */
+            tcg_out_rotl(s, ext, args[0], args[1], args[2]);
+        } else {
+            tcg_out_arith(s, ARITH_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, args[2]);
+            tcg_out_shiftrot_reg(s, SRR_ROR, ext,
+                                 args[0], args[1], TCG_REG_TMP);
+        }
+        break;
+
+    case INDEX_op_brcond_i64:
+        ext = 1; /* fall through */
+    case INDEX_op_brcond_i32: /* CMP 0, 1, cond(2), label 3 */
+        tcg_out_cmp(s, ext, args[0], args[1]);
+        tcg_out_goto_label_cond(s, args[2], args[3]);
+        break;
+
+    case INDEX_op_setcond_i64:
+        ext = 1; /* fall through */
+    case INDEX_op_setcond_i32:
+        tcg_out_cmp(s, ext, args[1], args[2]);
+        tcg_out_cset(s, 0, args[0], args[3]);
+        break;
+
+    case INDEX_op_qemu_ld8u:
+        tcg_out_qemu_ld(s, args, 0 | 0);
+        break;
+    case INDEX_op_qemu_ld8s:
+        tcg_out_qemu_ld(s, args, 4 | 0);
+        break;
+    case INDEX_op_qemu_ld16u:
+        tcg_out_qemu_ld(s, args, 0 | 1);
+        break;
+    case INDEX_op_qemu_ld16s:
+        tcg_out_qemu_ld(s, args, 4 | 1);
+        break;
+    case INDEX_op_qemu_ld32u:
+        tcg_out_qemu_ld(s, args, 0 | 2);
+        break;
+    case INDEX_op_qemu_ld32s:
+        tcg_out_qemu_ld(s, args, 4 | 2);
+        break;
+    case INDEX_op_qemu_ld32:
+        tcg_out_qemu_ld(s, args, 0 | 2);
+        break;
+    case INDEX_op_qemu_ld64:
+        tcg_out_qemu_ld(s, args, 0 | 3);
+        break;
+    case INDEX_op_qemu_st8:
+        tcg_out_qemu_st(s, args, 0);
+        break;
+    case INDEX_op_qemu_st16:
+        tcg_out_qemu_st(s, args, 1);
+        break;
+    case INDEX_op_qemu_st32:
+        tcg_out_qemu_st(s, args, 2);
+        break;
+    case INDEX_op_qemu_st64:
+        tcg_out_qemu_st(s, args, 3);
+        break;
+
+    default:
+        tcg_abort(); /* opcode not implemented */
+    }
+}
+
+static const TCGTargetOpDef aarch64_op_defs[] = {
+    { INDEX_op_exit_tb, { } },
+    { INDEX_op_goto_tb, { } },
+    { INDEX_op_call, { "ri" } },
+    { INDEX_op_br, { } },
+
+    { INDEX_op_mov_i32, { "r", "r" } },
+    { INDEX_op_mov_i64, { "r", "r" } },
+
+    { INDEX_op_movi_i32, { "r" } },
+    { INDEX_op_movi_i64, { "r" } },
+
+    { INDEX_op_ld8u_i32, { "r", "r" } },
+    { INDEX_op_ld8s_i32, { "r", "r" } },
+    { INDEX_op_ld16u_i32, { "r", "r" } },
+    { INDEX_op_ld16s_i32, { "r", "r" } },
+    { INDEX_op_ld_i32, { "r", "r" } },
+    { INDEX_op_ld8u_i64, { "r", "r" } },
+    { INDEX_op_ld8s_i64, { "r", "r" } },
+    { INDEX_op_ld16u_i64, { "r", "r" } },
+    { INDEX_op_ld16s_i64, { "r", "r" } },
+    { INDEX_op_ld32u_i64, { "r", "r" } },
+    { INDEX_op_ld32s_i64, { "r", "r" } },
+    { INDEX_op_ld_i64, { "r", "r" } },
+
+    { INDEX_op_st8_i32, { "r", "r" } },
+    { INDEX_op_st16_i32, { "r", "r" } },
+    { INDEX_op_st_i32, { "r", "r" } },
+    { INDEX_op_st8_i64, { "r", "r" } },
+    { INDEX_op_st16_i64, { "r", "r" } },
+    { INDEX_op_st32_i64, { "r", "r" } },
+    { INDEX_op_st_i64, { "r", "r" } },
+
+    { INDEX_op_add_i32, { "r", "r", "r" } },
+    { INDEX_op_add_i64, { "r", "r", "r" } },
+    { INDEX_op_sub_i32, { "r", "r", "r" } },
+    { INDEX_op_sub_i64, { "r", "r", "r" } },
+    { INDEX_op_mul_i32, { "r", "r", "r" } },
+    { INDEX_op_mul_i64, { "r", "r", "r" } },
+    { INDEX_op_and_i32, { "r", "r", "r" } },
+    { INDEX_op_and_i64, { "r", "r", "r" } },
+    { INDEX_op_or_i32, { "r", "r", "r" } },
+    { INDEX_op_or_i64, { "r", "r", "r" } },
+    { INDEX_op_xor_i32, { "r", "r", "r" } },
+    { INDEX_op_xor_i64, { "r", "r", "r" } },
+
+    { INDEX_op_shl_i32, { "r", "r", "ri" } },
+    { INDEX_op_shr_i32, { "r", "r", "ri" } },
+    { INDEX_op_sar_i32, { "r", "r", "ri" } },
+    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
+    { INDEX_op_rotr_i32, { "r", "r", "ri" } },
+    { INDEX_op_shl_i64, { "r", "r", "ri" } },
+    { INDEX_op_shr_i64, { "r", "r", "ri" } },
+    { INDEX_op_sar_i64, { "r", "r", "ri" } },
+    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
+    { INDEX_op_rotr_i64, { "r", "r", "ri" } },
+
+    { INDEX_op_brcond_i32, { "r", "r" } },
+    { INDEX_op_setcond_i32, { "r", "r", "r" } },
+    { INDEX_op_brcond_i64, { "r", "r" } },
+    { INDEX_op_setcond_i64, { "r", "r", "r" } },
+
+    { INDEX_op_qemu_ld8u, { "r", "l" } },
+    { INDEX_op_qemu_ld8s, { "r", "l" } },
+    { INDEX_op_qemu_ld16u, { "r", "l" } },
+    { INDEX_op_qemu_ld16s, { "r", "l" } },
+    { INDEX_op_qemu_ld32u, { "r", "l" } },
+    { INDEX_op_qemu_ld32s, { "r", "l" } },
+
+    { INDEX_op_qemu_ld32, { "r", "l" } },
+    { INDEX_op_qemu_ld64, { "r", "l" } },
+
+    { INDEX_op_qemu_st8, { "l", "l" } },
+    { INDEX_op_qemu_st16, { "l", "l" } },
+    { INDEX_op_qemu_st32, { "l", "l" } },
+    { INDEX_op_qemu_st64, { "l", "l" } },
+    { -1 },
+};
+
+static void tcg_target_init(TCGContext *s)
+{
+#if !defined(CONFIG_USER_ONLY)
+    /* fail safe */
+    if ((1ULL << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
+        tcg_abort();
+    }
+#endif
+    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
+
+    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+                     (1 << TCG_REG_X0) | (1 << TCG_REG_X1) |
+                     (1 << TCG_REG_X2) | (1 << TCG_REG_X3) |
+                     (1 << TCG_REG_X4) | (1 << TCG_REG_X5) |
+                     (1 << TCG_REG_X6) | (1 << TCG_REG_X7) |
+                     (1 << TCG_REG_X8) | (1 << TCG_REG_X9) |
+                     (1 << TCG_REG_X10) | (1 << TCG_REG_X11) |
+                     (1 << TCG_REG_X12) | (1 << TCG_REG_X13) |
+                     (1 << TCG_REG_X14) | (1 << TCG_REG_X15) |
+                     (1 << TCG_REG_X16) | (1 << TCG_REG_X17) |
+                     (1 << TCG_REG_X18));
+
+    tcg_regset_clear(s->reserved_regs);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */
+
+    tcg_add_target_add_op_defs(aarch64_op_defs);
+}
+
+static inline void tcg_out_addi(TCGContext *s, int ext,
+                                TCGReg rd, TCGReg rn, unsigned int aimm)
+{
+    /* add immediate aimm unsigned 12bit value (we use LSL 0 - no shift) */
+    /* using ADD 0x11000000 | (ext) | (aimm << 10) | (rn << 5) | rd */
+    unsigned int base = ext ? 0x91000000 : 0x11000000;
+    assert(aimm <= 0xfff);
+    tcg_out32(s, base | (aimm << 10) | (rn << 5) | rd);
+}
+
+static inline void tcg_out_subi(TCGContext *s, int ext,
+                                TCGReg rd, TCGReg rn, unsigned int aimm)
+{
+    /* sub immediate aimm unsigned 12bit value (we use LSL 0 - no shift) */
+    /* using SUB 0x51000000 | (ext) | (aimm << 10) | (rn << 5) | rd */
+    unsigned int base = ext ? 0xd1000000 : 0x51000000;
+    assert(aimm <= 0xfff);
+    tcg_out32(s, base | (aimm << 10) | (rn << 5) | rd);
+}
+
+static void tcg_target_qemu_prologue(TCGContext *s)
+{
+    /* NB: frame sizes are in 16 byte stack units! */
+    int frame_size_callee_saved, frame_size_tcg_locals;
+    TCGReg r;
+
+    /* save pairs (FP, LR) and (X19, X20) .. (X27, X28) */
+    frame_size_callee_saved = (1) + (TCG_REG_X28 - TCG_REG_X19) / 2 + 1;
+
+    /* frame size requirement for TCG local variables */
+    frame_size_tcg_locals = TCG_STATIC_CALL_ARGS_SIZE +
+        CPU_TEMP_BUF_NLONGS * sizeof(long) +
+        (TCG_TARGET_STACK_ALIGN - 1);
+    frame_size_tcg_locals &= ~(TCG_TARGET_STACK_ALIGN - 1);
+    frame_size_tcg_locals /= TCG_TARGET_STACK_ALIGN;
+
+    /* push (FP, LR) and update sp */
+    tcg_out_push_pair(s, TCG_REG_SP,
+                      TCG_REG_FP, TCG_REG_LR, frame_size_callee_saved);
+
+    /* FP -> callee_saved */
+    tcg_out_movr_sp(s, 1, TCG_REG_FP, TCG_REG_SP);
+
+    /* store callee-preserved regs x19..x28 using FP -> callee_saved */
+    for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
+        int idx = (r - TCG_REG_X19) / 2 + 1;
+        tcg_out_store_pair(s, TCG_REG_FP, r, r + 1, idx);
+    }
+
+    /* make stack space for TCG locals */
+    tcg_out_subi(s, 1, TCG_REG_SP, TCG_REG_SP,
+                 frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
+    /* inform TCG about how to find TCG locals with register, offset, size */
+    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
+                  CPU_TEMP_BUF_NLONGS * sizeof(long));
+
+    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+    tcg_out_gotor(s, tcg_target_call_iarg_regs[1]);
+
+    tb_ret_addr = s->code_ptr;
+
+    /* remove TCG locals stack space */
+    tcg_out_addi(s, 1, TCG_REG_SP, TCG_REG_SP,
+                 frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
+
+    /* restore registers x19..x28.
+       FP must be preserved, so it still points to callee_saved area */
+    for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
+        int idx = (r - TCG_REG_X19) / 2 + 1;
+        tcg_out_load_pair(s, TCG_REG_FP, r, r + 1, idx);
+    }
+
+    /* pop (FP, LR), restore SP to previous frame, return */
+    tcg_out_pop_pair(s, TCG_REG_SP,
+                     TCG_REG_FP, TCG_REG_LR, frame_size_callee_saved);
+    tcg_out_ret(s);
+}
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
new file mode 100644
index 0000000000..075ab2afde
--- /dev/null
+++ b/tcg/aarch64/tcg-target.h
@@ -0,0 +1,99 @@
+/*
+ * Initial TCG Implementation for aarch64
+ *
+ * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
+ * Written by Claudio Fontana
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ */
+
+#ifndef TCG_TARGET_AARCH64
+#define TCG_TARGET_AARCH64 1
+
+#undef TCG_TARGET_WORDS_BIGENDIAN
+#undef TCG_TARGET_STACK_GROWSUP
+
+typedef enum {
+    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4,
+    TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, TCG_REG_X9,
+    TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14,
+    TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19,
+    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24,
+    TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28,
+    TCG_REG_FP, /* frame pointer */
+    TCG_REG_LR, /* link register */
+    TCG_REG_SP, /* stack pointer or zero register */
+    TCG_REG_XZR = TCG_REG_SP /* same register number */
+    /* program counter is not directly accessible! */
+} TCGReg;
+
+#define TCG_TARGET_NB_REGS 32
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* optional instructions */
+#define TCG_TARGET_HAS_div_i32 0
+#define TCG_TARGET_HAS_ext8s_i32 0
+#define TCG_TARGET_HAS_ext16s_i32 0
+#define TCG_TARGET_HAS_ext8u_i32 0
+#define TCG_TARGET_HAS_ext16u_i32 0
+#define TCG_TARGET_HAS_bswap16_i32 0
+#define TCG_TARGET_HAS_bswap32_i32 0
+#define TCG_TARGET_HAS_not_i32 0
+#define TCG_TARGET_HAS_neg_i32 0
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_andc_i32 0
+#define TCG_TARGET_HAS_orc_i32 0
+#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_deposit_i32 0
+#define TCG_TARGET_HAS_movcond_i32 0
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 0
+#define TCG_TARGET_HAS_muls2_i32 0
+
+#define TCG_TARGET_HAS_div_i64 0
+#define TCG_TARGET_HAS_ext8s_i64 0
+#define TCG_TARGET_HAS_ext16s_i64 0
+#define TCG_TARGET_HAS_ext32s_i64 0
+#define TCG_TARGET_HAS_ext8u_i64 0
+#define TCG_TARGET_HAS_ext16u_i64 0
+#define TCG_TARGET_HAS_ext32u_i64 0
+#define TCG_TARGET_HAS_bswap16_i64 0
+#define TCG_TARGET_HAS_bswap32_i64 0
+#define TCG_TARGET_HAS_bswap64_i64 0
+#define TCG_TARGET_HAS_not_i64 0
+#define TCG_TARGET_HAS_neg_i64 0
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_andc_i64 0
+#define TCG_TARGET_HAS_orc_i64 0
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_deposit_i64 0
+#define TCG_TARGET_HAS_movcond_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+
+enum {
+    TCG_AREG0 = TCG_REG_X19,
+};
+
+static inline void flush_icache_range(tcg_target_ulong start,
+                                      tcg_target_ulong stop)
+{
+    __builtin___clear_cache((char *)start, (char *)stop);
+}
+
+#endif /* TCG_TARGET_AARCH64 */
diff --git a/translate-all.c b/translate-all.c
index 40b8f3d2b5..df7c697692 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -460,6 +460,8 @@ static inline PageDesc *page_find(tb_page_addr_t index)
 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
 #elif defined(__sparc__)
 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
+#elif defined(__aarch64__)
+# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
 #elif defined(__arm__)
 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
 #elif defined(__s390x__)
From 36fac14a6416fe1f8f6f23bfac5f9e662be78d2b Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:22 +0100
Subject: [PATCH 06/13] tcg/aarch64: improve arith shifted regs operations

for arith operations, add SUBS, ANDS, ADDS and add a shift parameter
so that all arith instructions can make use of shifted registers.
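
[Editor's note: the sketch below is not part of the patch. It is a minimal
standalone illustration of the shifted-register operand encoding that the
new shift_imm parameter drives: the shift amount goes in instruction bits
15..10 and the shift type in bits 23..22, with the patch mapping
shift_imm > 0 to LSR and shift_imm < 0 to LSL. The helper name
arith_shifted_insn() is hypothetical; in the patch itself the equivalent
logic lives inside tcg_out_arith().]

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Build one AArch64 shifted-register arith insn word (sketch only). */
    static uint32_t arith_shifted_insn(unsigned opc, int ext, unsigned rd,
                                       unsigned rn, unsigned rm, int shift_imm)
    {
        uint32_t base = ext ? (0x80u | opc) << 24 : (uint32_t)opc << 24;
        uint32_t shift;

        if (shift_imm == 0) {
            shift = 0;                                     /* LSL #0 */
        } else if (shift_imm > 0) {
            shift = (uint32_t)shift_imm << 10 | 1u << 22;  /* LSR #shift_imm */
        } else {
            shift = (uint32_t)(-shift_imm) << 10;          /* LSL #-shift_imm */
        }
        assert((shift >> 10 & 0x3f) < (ext ? 64u : 32u));  /* amount in range */
        return base | rm << 16 | shift | rn << 5 | rd;
    }

    int main(void)
    {
        /* expect 0x8b021020, i.e. ADD X0, X1, X2, LSL #4 */
        printf("0x%08x\n", arith_shifted_insn(0x0b, 1, 0, 1, 2, -4));
        return 0;
    }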
Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Message-id: 51AC998B.7070506@huawei.com
Signed-off-by: Peter Maydell
---
 tcg/aarch64/tcg-target.c | 48 +++++++++++++++++++++++++---------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index ff626eb837..2aa9f75a25 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -186,11 +186,14 @@ enum aarch64_ldst_op_type { /* type of operation */
 };
 
 enum aarch64_arith_opc {
-    ARITH_ADD = 0x0b,
-    ARITH_SUB = 0x4b,
     ARITH_AND = 0x0a,
+    ARITH_ADD = 0x0b,
     ARITH_OR = 0x2a,
-    ARITH_XOR = 0x4a
+    ARITH_ADDS = 0x2b,
+    ARITH_XOR = 0x4a,
+    ARITH_SUB = 0x4b,
+    ARITH_ANDS = 0x6a,
+    ARITH_SUBS = 0x6b,
 };
 
 enum aarch64_srr_opc {
@@ -394,12 +397,20 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
 }
 
 static inline void tcg_out_arith(TCGContext *s, enum aarch64_arith_opc opc,
-                                 int ext, TCGReg rd, TCGReg rn, TCGReg rm)
+                                 int ext, TCGReg rd, TCGReg rn, TCGReg rm,
+                                 int shift_imm)
 {
     /* Using shifted register arithmetic operations */
-    /* if extended registry operation (64bit) just OR with 0x80 << 24 */
-    unsigned int base = ext ? (0x80 | opc) << 24 : opc << 24;
-    tcg_out32(s, base | rm << 16 | rn << 5 | rd);
+    /* if extended register operation (64bit) just OR with 0x80 << 24 */
+    unsigned int shift, base = ext ? (0x80 | opc) << 24 : opc << 24;
+    if (shift_imm == 0) {
+        shift = 0;
+    } else if (shift_imm > 0) {
+        shift = shift_imm << 10 | 1 << 22;
+    } else /* (shift_imm < 0) */ {
+        shift = (-shift_imm) << 10;
+    }
+    tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
 }
 
 static inline void tcg_out_mul(TCGContext *s, int ext,
@@ -482,11 +493,11 @@ static inline void tcg_out_rotl(TCGContext *s, int ext,
     tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
 }
 
-static inline void tcg_out_cmp(TCGContext *s, int ext, TCGReg rn, TCGReg rm)
+static inline void tcg_out_cmp(TCGContext *s, int ext, TCGReg rn, TCGReg rm,
+                               int shift_imm)
 {
     /* Using CMP alias SUBS wzr, Wn, Wm */
-    unsigned int base = ext ? 0xeb00001f : 0x6b00001f;
-    tcg_out32(s, base | rm << 16 | rn << 5);
+    tcg_out_arith(s, ARITH_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
 }
 
 static inline void tcg_out_cset(TCGContext *s, int ext, TCGReg rd, TCGCond c)
@@ -830,31 +841,31 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_add_i64:
         ext = 1; /* fall through */
     case INDEX_op_add_i32:
-        tcg_out_arith(s, ARITH_ADD, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_ADD, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_sub_i64:
         ext = 1; /* fall through */
     case INDEX_op_sub_i32:
-        tcg_out_arith(s, ARITH_SUB, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_SUB, ext, args[0], args[1], args[2], 0);
        break;
 
     case INDEX_op_and_i64:
         ext = 1; /* fall through */
     case INDEX_op_and_i32:
-        tcg_out_arith(s, ARITH_AND, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_AND, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_or_i64:
         ext = 1; /* fall through */
     case INDEX_op_or_i32:
-        tcg_out_arith(s, ARITH_OR, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_OR, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_xor_i64:
         ext = 1; /* fall through */
     case INDEX_op_xor_i32:
-        tcg_out_arith(s, ARITH_XOR, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_XOR, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_mul_i64:
@@ -909,7 +920,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (const_args[2]) {    /* ROR / EXTR Wd, Wm, Wm, 32 - m */
             tcg_out_rotl(s, ext, args[0], args[1], args[2]);
         } else {
-            tcg_out_arith(s, ARITH_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, args[2]);
+            tcg_out_arith(s, ARITH_SUB, 0,
+                          TCG_REG_TMP, TCG_REG_XZR, args[2], 0);
             tcg_out_shiftrot_reg(s, SRR_ROR, ext,
                                  args[0], args[1], TCG_REG_TMP);
         }
@@ -918,14 +930,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_brcond_i64:
         ext = 1; /* fall through */
     case INDEX_op_brcond_i32: /* CMP 0, 1, cond(2), label 3 */
-        tcg_out_cmp(s, ext, args[0], args[1]);
+        tcg_out_cmp(s, ext, args[0], args[1], 0);
         tcg_out_goto_label_cond(s, args[2], args[3]);
         break;
 
     case INDEX_op_setcond_i64:
         ext = 1; /* fall through */
     case INDEX_op_setcond_i32:
-        tcg_out_cmp(s, ext, args[1], args[2]);
+        tcg_out_cmp(s, ext, args[1], args[2], 0);
         tcg_out_cset(s, 0, args[0], args[3]);
         break;
From 7deea126b24508e8ffa7aa4aecfa6fa97eddc384 Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:22 +0100
Subject: [PATCH 07/13] tcg/aarch64: implement AND/TEST immediate pattern

add functions to AND/TEST registers with immediate patterns.

[Editor's note: a small standalone checker for the (M, R) pattern examples
in this patch appears at the end of the series.]

Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Message-id: 51AC9A0C.3090303@huawei.com
Signed-off-by: Peter Maydell
---
 tcg/aarch64/tcg-target.c | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 2aa9f75a25..bb59794046 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -580,6 +580,40 @@ static inline void tcg_out_call(TCGContext *s, tcg_target_long target)
     }
 }
 
+/* encode a logical immediate, mapping user parameter
+   M=set bits pattern length to S=M-1 */
+static inline unsigned int
+aarch64_limm(unsigned int m, unsigned int r)
+{
+    assert(m > 0);
+    return r << 16 | (m - 1) << 10;
+}
+
+/* test a register against an immediate bit pattern made of
+   M set bits rotated right by R.
+   Examples:
+   to test a 32/64 reg against 0x00000007, pass M = 3, R = 0.
+   to test a 32/64 reg against 0x000000ff, pass M = 8, R = 0.
+   to test a 32bit reg against 0xff000000, pass M = 8, R = 8.
+   to test a 32bit reg against 0xff0000ff, pass M = 16, R = 8.
+ */
+static inline void tcg_out_tst(TCGContext *s, int ext, TCGReg rn,
+                               unsigned int m, unsigned int r)
+{
+    /* using TST alias of ANDS XZR, Xn,#bimm64 0x7200001f */
+    unsigned int base = ext ? 0xf240001f : 0x7200001f;
+    tcg_out32(s, base | aarch64_limm(m, r) | rn << 5);
+}
+
+/* and a register with a bit pattern, similarly to TST, no flags change */
+static inline void tcg_out_andi(TCGContext *s, int ext, TCGReg rd, TCGReg rn,
+                                unsigned int m, unsigned int r)
+{
+    /* using AND 0x12000000 */
+    unsigned int base = ext ? 0x92400000 : 0x12000000;
+    tcg_out32(s, base | aarch64_limm(m, r) | rn << 5 | rd);
+}
+
 static inline void tcg_out_ret(TCGContext *s)
 {
     /* emit RET { LR } */
From 9c4a059df3501ba53c00724287ef50bba80b5f80 Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:23 +0100
Subject: [PATCH 08/13] tcg/aarch64: implement byte swap operations

implement the optional byte swap operations with the dedicated
aarch64 instructions.

Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Message-id: 51AC9A33.9050003@huawei.com
Signed-off-by: Peter Maydell
---
 tcg/aarch64/tcg-target.c | 32 ++++++++++++++++++++++++++++++++
 tcg/aarch64/tcg-target.h | 10 +++++-----
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index bb59794046..ba65a62516 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -660,6 +660,20 @@ static inline void tcg_out_goto_label_cond(TCGContext *s,
     }
 }
 
+static inline void tcg_out_rev(TCGContext *s, int ext, TCGReg rd, TCGReg rm)
+{
+    /* using REV 0x5ac00800 */
+    unsigned int base = ext ? 0xdac00c00 : 0x5ac00800;
+    tcg_out32(s, base | rm << 5 | rd);
+}
+
+static inline void tcg_out_rev16(TCGContext *s, int ext, TCGReg rd, TCGReg rm)
+{
+    /* using REV16 0x5ac00400 */
+    unsigned int base = ext ? 0xdac00400 : 0x5ac00400;
+    tcg_out32(s, base | rm << 5 | rd);
+}
+
 #ifdef CONFIG_SOFTMMU
 
 #include "exec/softmmu_defs.h"
@@ -1012,6 +1026,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, 3);
         break;
 
+    case INDEX_op_bswap64_i64:
+        ext = 1; /* fall through */
+    case INDEX_op_bswap32_i64:
+    case INDEX_op_bswap32_i32:
+        tcg_out_rev(s, ext, args[0], args[1]);
+        break;
+    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap16_i32:
+        tcg_out_rev16(s, 0, args[0], args[1]);
+        break;
+
     default:
         tcg_abort(); /* opcode not implemented */
     }
@@ -1093,6 +1118,13 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_qemu_st16, { "l", "l" } },
     { INDEX_op_qemu_st32, { "l", "l" } },
     { INDEX_op_qemu_st64, { "l", "l" } },
+
+    { INDEX_op_bswap16_i32, { "r", "r" } },
+    { INDEX_op_bswap32_i32, { "r", "r" } },
+    { INDEX_op_bswap16_i64, { "r", "r" } },
+    { INDEX_op_bswap32_i64, { "r", "r" } },
+    { INDEX_op_bswap64_i64, { "r", "r" } },
+
     { -1 },
 };
 
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 075ab2afde..247ef4385d 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -44,8 +44,8 @@ typedef enum {
 #define TCG_TARGET_HAS_ext16s_i32 0
 #define TCG_TARGET_HAS_ext8u_i32 0
 #define TCG_TARGET_HAS_ext16u_i32 0
-#define TCG_TARGET_HAS_bswap16_i32 0
-#define TCG_TARGET_HAS_bswap32_i32 0
+#define TCG_TARGET_HAS_bswap16_i32 1
+#define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 0
 #define TCG_TARGET_HAS_neg_i32 0
 #define TCG_TARGET_HAS_rot_i32 1
@@ -68,9 +68,9 @@ typedef enum {
 #define TCG_TARGET_HAS_ext8u_i64 0
 #define TCG_TARGET_HAS_ext16u_i64 0
 #define TCG_TARGET_HAS_ext32u_i64 0
-#define TCG_TARGET_HAS_bswap16_i64 0
-#define TCG_TARGET_HAS_bswap32_i64 0
-#define TCG_TARGET_HAS_bswap64_i64 0
+#define TCG_TARGET_HAS_bswap16_i64 1
+#define TCG_TARGET_HAS_bswap32_i64 1
+#define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_not_i64 0
 #define TCG_TARGET_HAS_neg_i64 0
 #define TCG_TARGET_HAS_rot_i64 1
From 31f1275b90f4803ee5a2900020f21d3320ce62b7 Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:23 +0100
Subject: [PATCH 09/13] tcg/aarch64: implement sign/zero extend operations

implement the optional sign/zero extend operations with the dedicated
aarch64 instructions.
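
[Editor's note: the sketch below is not part of the patch. It shows how the
SXTB/SXTH/SXTW aliases used by the new tcg_out_sxt() reduce to
SBFM Rd, Rn, #0, #(8 << s_bits) - 1. The helper encode_sbfm() is a
hypothetical stand-in for the patch's tcg_out_sbfm(), built from the
architectural SBFM layout.]

    #include <stdint.h>
    #include <stdio.h>

    /* SBFM: sf 00 100110 N immr(21:16) imms(15:10) Rn(9:5) Rd(4:0);
       the 64-bit form sets both sf and N, hence base 0x93400000. */
    static uint32_t encode_sbfm(int ext, unsigned rd, unsigned rn,
                                unsigned immr, unsigned imms)
    {
        uint32_t base = ext ? 0x93400000u : 0x13000000u;
        return base | immr << 16 | imms << 10 | rn << 5 | rd;
    }

    int main(void)
    {
        for (int s_bits = 0; s_bits <= 2; s_bits++) {
            unsigned bits = 8 * (1 << s_bits) - 1;    /* 7, 15, 31 */
            printf("SXT%c x0, w1 = SBFM x0, x1, #0, #%u -> 0x%08x\n",
                   "BHW"[s_bits], bits, encode_sbfm(1, 0, 1, 0, bits));
        }
        return 0;
    }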
Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Message-id: 51AC9A58.40502@huawei.com
Signed-off-by: Peter Maydell
---
 tcg/aarch64/tcg-target.c | 58 ++++++++++++++++++++++++++++++++++++++--
 tcg/aarch64/tcg-target.h | 20 +++++++-------
 2 files changed, 66 insertions(+), 12 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index ba65a62516..934109efc5 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -674,6 +674,24 @@ static inline void tcg_out_rev16(TCGContext *s, int ext, TCGReg rd, TCGReg rm)
     tcg_out32(s, base | rm << 5 | rd);
 }
 
+static inline void tcg_out_sxt(TCGContext *s, int ext, int s_bits,
+                               TCGReg rd, TCGReg rn)
+{
+    /* using ALIASes SXTB 0x13001c00, SXTH 0x13003c00, SXTW 0x93407c00
+       of SBFM Xd, Xn, #0, #7|15|31 */
+    int bits = 8 * (1 << s_bits) - 1;
+    tcg_out_sbfm(s, ext, rd, rn, 0, bits);
+}
+
+static inline void tcg_out_uxt(TCGContext *s, int s_bits,
+                               TCGReg rd, TCGReg rn)
+{
+    /* using ALIASes UXTB 0x53001c00, UXTH 0x53003c00
+       of UBFM Wd, Wn, #0, #7|15 */
+    int bits = 8 * (1 << s_bits) - 1;
+    tcg_out_ubfm(s, 0, rd, rn, 0, bits);
+}
+
 #ifdef CONFIG_SOFTMMU
 
 #include "exec/softmmu_defs.h"
@@ -721,8 +739,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_callr(s, TCG_REG_TMP);
 
     if (opc & 0x04) { /* sign extend */
-        unsigned int bits = 8 * (1 << s_bits) - 1;
-        tcg_out_sbfm(s, 1, data_reg, TCG_REG_X0, 0, bits); /* 7|15|31 */
+        tcg_out_sxt(s, 1, s_bits, data_reg, TCG_REG_X0);
     } else {
         tcg_out_movr(s, 1, data_reg, TCG_REG_X0);
     }
@@ -1037,6 +1054,31 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_rev16(s, 0, args[0], args[1]);
         break;
 
+    case INDEX_op_ext8s_i64:
+        ext = 1; /* fall through */
+    case INDEX_op_ext8s_i32:
+        tcg_out_sxt(s, ext, 0, args[0], args[1]);
+        break;
+    case INDEX_op_ext16s_i64:
+        ext = 1; /* fall through */
+    case INDEX_op_ext16s_i32:
+        tcg_out_sxt(s, ext, 1, args[0], args[1]);
+        break;
+    case INDEX_op_ext32s_i64:
+        tcg_out_sxt(s, 1, 2, args[0], args[1]);
+        break;
+    case INDEX_op_ext8u_i64:
+    case INDEX_op_ext8u_i32:
+        tcg_out_uxt(s, 0, args[0], args[1]);
+        break;
+    case INDEX_op_ext16u_i64:
+    case INDEX_op_ext16u_i32:
+        tcg_out_uxt(s, 1, args[0], args[1]);
+        break;
+    case INDEX_op_ext32u_i64:
+        tcg_out_movr(s, 0, args[0], args[1]);
+        break;
+
     default:
         tcg_abort(); /* opcode not implemented */
     }
@@ -1125,6 +1167,18 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_bswap32_i64, { "r", "r" } },
     { INDEX_op_bswap64_i64, { "r", "r" } },
 
+    { INDEX_op_ext8s_i32, { "r", "r" } },
+    { INDEX_op_ext16s_i32, { "r", "r" } },
+    { INDEX_op_ext8u_i32, { "r", "r" } },
+    { INDEX_op_ext16u_i32, { "r", "r" } },
+
+    { INDEX_op_ext8s_i64, { "r", "r" } },
+    { INDEX_op_ext16s_i64, { "r", "r" } },
+    { INDEX_op_ext32s_i64, { "r", "r" } },
+    { INDEX_op_ext8u_i64, { "r", "r" } },
+    { INDEX_op_ext16u_i64, { "r", "r" } },
+    { INDEX_op_ext32u_i64, { "r", "r" } },
+
     { -1 },
 };
 
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 247ef4385d..97e4a5baa7 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -40,10 +40,10 @@ typedef enum {
 /* optional instructions */
 #define TCG_TARGET_HAS_div_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 0
-#define TCG_TARGET_HAS_ext16s_i32 0
-#define TCG_TARGET_HAS_ext8u_i32 0
-#define TCG_TARGET_HAS_ext16u_i32 0
+#define TCG_TARGET_HAS_ext8s_i32 1
+#define TCG_TARGET_HAS_ext16s_i32 1
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 0
@@ -62,12 +62,12 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i32 0
 
 #define TCG_TARGET_HAS_div_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 0
-#define TCG_TARGET_HAS_ext16s_i64 0
-#define TCG_TARGET_HAS_ext32s_i64 0
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 0
+#define TCG_TARGET_HAS_ext8s_i64 1
+#define TCG_TARGET_HAS_ext16s_i64 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
From f129061c6abfaee2133fcb55c384ec5f99028f62 Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:23 +0100
Subject: [PATCH 10/13] user-exec.c: aarch64 initial implementation of
 cpu_signal_handler

Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Reviewed-by: Peter Maydell
Message-id: 51AF4028.5030504@huawei.com
Signed-off-by: Peter Maydell
---
 user-exec.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/user-exec.c b/user-exec.c
index 71bd6c531c..fa7f1f1de9 100644
--- a/user-exec.c
+++ b/user-exec.c
@@ -448,6 +448,21 @@ int cpu_signal_handler(int host_signum, void *pinfo,
                              &uc->uc_sigmask, puc);
 }
 
+#elif defined(__aarch64__)
+
+int cpu_signal_handler(int host_signum, void *pinfo,
+                       void *puc)
+{
+    siginfo_t *info = pinfo;
+    struct ucontext *uc = puc;
+    uint64_t pc;
+    int is_write = 0; /* XXX how to determine? */
+
+    pc = uc->uc_mcontext.pc;
+    return handle_cpu_signal(pc, (uint64_t)info->si_addr,
+                             is_write, &uc->uc_sigmask, puc);
+}
+
 #elif defined(__mc68000)
 
 int cpu_signal_handler(int host_signum, void *pinfo,
From 6a91c7c978d77461cc2ed056a2869b90bebded3e Mon Sep 17 00:00:00 2001
From: Jani Kokkonen
Date: Wed, 12 Jun 2013 16:20:23 +0100
Subject: [PATCH 11/13] tcg/aarch64: implement user mode qemu ld/st

also put aarch64 in the list of archs that do not need an ldscript.

Signed-off-by: Jani Kokkonen
Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Reviewed-by: Peter Maydell
Message-id: 51AF40EE.1000104@huawei.com
Signed-off-by: Peter Maydell
---
 configure                |   2 +-
 tcg/aarch64/tcg-target.c | 121 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 117 insertions(+), 6 deletions(-)

diff --git a/configure b/configure
index bb413be26c..cb42269d45 100755
--- a/configure
+++ b/configure
@@ -4424,7 +4424,7 @@ fi
 if test "$target_linux_user" = "yes" -o "$target_bsd_user" = "yes" ; then
   case "$ARCH" in
-  alpha | s390x)
+  alpha | s390x | aarch64)
     # The default placement of the application is fine.
     ;;
   *)
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 934109efc5..562a549dab 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -24,10 +24,16 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 };
 #endif /* NDEBUG */
 
+#ifdef TARGET_WORDS_BIGENDIAN
+ #define TCG_LDST_BSWAP 1
+#else
+ #define TCG_LDST_BSWAP 0
+#endif
+
 static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
     TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
-    TCG_REG_X28,
+    TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */
 
     TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12,
     TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
@@ -51,6 +57,14 @@ static const int tcg_target_call_oarg_regs[1] = {
 
 #define TCG_REG_TMP TCG_REG_X8
 
+#ifndef CONFIG_SOFTMMU
+# if defined(CONFIG_USE_GUEST_BASE)
+# define TCG_REG_GUEST_BASE TCG_REG_X28
+# else
+# define TCG_REG_GUEST_BASE TCG_REG_XZR
+# endif
+#endif
+
 static inline void reloc_pc26(void *code_ptr, tcg_target_long target)
 {
     tcg_target_long offset; uint32_t insn;
@@ -713,6 +727,94 @@ static const void * const qemu_st_helpers[4] = {
     helper_stq_mmu,
 };
 
+#else /* !CONFIG_SOFTMMU */
+
+static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r,
+                                   TCGReg addr_r, TCGReg off_r)
+{
+    switch (opc) {
+    case 0:
+        tcg_out_ldst_r(s, LDST_8, LDST_LD, data_r, addr_r, off_r);
+        break;
+    case 0 | 4:
+        tcg_out_ldst_r(s, LDST_8, LDST_LD_S_X, data_r, addr_r, off_r);
+        break;
+    case 1:
+        tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r);
+        if (TCG_LDST_BSWAP) {
+            tcg_out_rev16(s, 0, data_r, data_r);
+        }
+        break;
+    case 1 | 4:
+        if (TCG_LDST_BSWAP) {
+            tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r);
+            tcg_out_rev16(s, 0, data_r, data_r);
+            tcg_out_sxt(s, 1, 1, data_r, data_r);
+        } else {
+            tcg_out_ldst_r(s, LDST_16, LDST_LD_S_X, data_r, addr_r, off_r);
+        }
+        break;
+    case 2:
+        tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r);
+        if (TCG_LDST_BSWAP) {
+            tcg_out_rev(s, 0, data_r, data_r);
+        }
+        break;
+    case 2 | 4:
+        if (TCG_LDST_BSWAP) {
+            tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r);
+            tcg_out_rev(s, 0, data_r, data_r);
+            tcg_out_sxt(s, 1, 2, data_r, data_r);
+        } else {
+            tcg_out_ldst_r(s, LDST_32, LDST_LD_S_X, data_r, addr_r, off_r);
+        }
+        break;
+    case 3:
+        tcg_out_ldst_r(s, LDST_64, LDST_LD, data_r, addr_r, off_r);
+        if (TCG_LDST_BSWAP) {
+            tcg_out_rev(s, 1, data_r, data_r);
+        }
+        break;
+    default:
+        tcg_abort();
+    }
+}
+
+static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data_r,
+                                   TCGReg addr_r, TCGReg off_r)
+{
+    switch (opc) {
+    case 0:
+        tcg_out_ldst_r(s, LDST_8, LDST_ST, data_r, addr_r, off_r);
+        break;
+    case 1:
+        if (TCG_LDST_BSWAP) {
+            tcg_out_rev16(s, 0, TCG_REG_TMP, data_r);
+            tcg_out_ldst_r(s, LDST_16, LDST_ST, TCG_REG_TMP, addr_r, off_r);
+        } else {
+            tcg_out_ldst_r(s, LDST_16, LDST_ST, data_r, addr_r, off_r);
+        }
+        break;
+    case 2:
+        if (TCG_LDST_BSWAP) {
+            tcg_out_rev(s, 0, TCG_REG_TMP, data_r);
+            tcg_out_ldst_r(s, LDST_32, LDST_ST, TCG_REG_TMP, addr_r, off_r);
+        } else {
+            tcg_out_ldst_r(s, LDST_32, LDST_ST, data_r, addr_r, off_r);
+        }
+        break;
+    case 3:
+        if (TCG_LDST_BSWAP) {
+            tcg_out_rev(s, 1, TCG_REG_TMP, data_r);
+            tcg_out_ldst_r(s, LDST_64, LDST_ST, TCG_REG_TMP, addr_r, off_r);
+        } else {
+            tcg_out_ldst_r(s, LDST_64, LDST_ST, data_r, addr_r, off_r);
+        }
+        break;
+    default:
+        tcg_abort();
+    }
+}
 #endif /* CONFIG_SOFTMMU */
 
 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
@@ -745,8 +847,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     }
 #else /* !CONFIG_SOFTMMU */
-    tcg_abort(); /* TODO */
-#endif
+    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg,
+                           GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
+#endif /* CONFIG_SOFTMMU */
 }
 
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
@@ -774,8 +877,9 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_callr(s, TCG_REG_TMP);
 #else /* !CONFIG_SOFTMMU */
-    tcg_abort(); /* TODO */
-#endif
+    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg,
+                           GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
+#endif /* CONFIG_SOFTMMU */
 }
 
 static uint8_t *tb_ret_addr;
 
@@ -1270,6 +1374,13 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 
+#if defined(CONFIG_USE_GUEST_BASE)
+    if (GUEST_BASE) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE);
+        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
+    }
+#endif
+
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
     tcg_out_gotor(s, tcg_target_call_iarg_regs[1]);
 
From 1f0803137df68c1fc02ebd0c5ec2e7aad54bbf3b Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:23 +0100
Subject: [PATCH 12/13] configure: permit compilation on arm aarch64

support compiling on aarch64.

Reviewed-by: Peter Maydell
Signed-off-by: Claudio Fontana
Reviewed-by: Richard Henderson
Message-id: 51A5C5ED.90103@huawei.com
Signed-off-by: Peter Maydell
---
 configure | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/configure b/configure
index cb42269d45..1bab634c43 100755
--- a/configure
+++ b/configure
@@ -386,6 +386,8 @@ elif check_define __s390__ ; then
   fi
 elif check_define __arm__ ; then
   cpu="arm"
+elif check_define __aarch64__ ; then
+  cpu="aarch64"
 elif check_define __hppa__ ; then
   cpu="hppa"
 else
@@ -408,6 +410,9 @@ case "$cpu" in
   armv*b|armv*l|arm)
     cpu="arm"
   ;;
+  aarch64)
+    cpu="aarch64"
+  ;;
   hppa|parisc|parisc64)
     cpu="hppa"
   ;;
@@ -4060,6 +4065,9 @@ if test "$linux" = "yes" ; then
   s390x)
     linux_arch=s390
     ;;
+  aarch64)
+    linux_arch=arm64
+    ;;
   *)
     # For most CPUs the kernel architecture name and QEMU CPU name match.
     linux_arch="$cpu"
From b25a464c6bddbe5d25b7552ba1fec1835269ac84 Mon Sep 17 00:00:00 2001
From: Claudio Fontana
Date: Wed, 12 Jun 2013 16:20:24 +0100
Subject: [PATCH 13/13] MAINTAINERS: add tcg/aarch64 maintainer

Signed-off-by: Claudio Fontana
Message-id: 51ACA0B2.80800@huawei.com
Signed-off-by: Peter Maydell
---
 MAINTAINERS | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 13c0cc590c..3412b07c59 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -767,6 +767,12 @@ M: qemu-devel@nongnu.org
 S: Maintained
 F: tcg/
 
+AArch64 target
+M: Claudio Fontana
+M: Claudio Fontana
+S: Maintained
+F: tcg/aarch64/
+
 ARM target
 M: Andrzej Zaborowski
 S: Maintained
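
[Editor's note: to close, a small standalone checker (not QEMU code) for the
(M, R) convention used by patch 07's aarch64_limm()/tcg_out_tst() above: the
immediate tested is a run of M set bits rotated right by R. It reproduces
the four examples given in that patch's comment.]

    #include <stdint.h>
    #include <stdio.h>

    /* M consecutive set bits, rotated right by R (32-bit view). */
    static uint32_t limm_pattern32(unsigned m, unsigned r)
    {
        uint32_t ones = (m >= 32) ? 0xffffffffu : (1u << m) - 1;
        return r ? (ones >> r) | (ones << (32 - r)) : ones;
    }

    int main(void)
    {
        printf("M=3,  R=0 -> 0x%08x\n", limm_pattern32(3, 0));  /* 0x00000007 */
        printf("M=8,  R=0 -> 0x%08x\n", limm_pattern32(8, 0));  /* 0x000000ff */
        printf("M=8,  R=8 -> 0x%08x\n", limm_pattern32(8, 8));  /* 0xff000000 */
        printf("M=16, R=8 -> 0x%08x\n", limm_pattern32(16, 8)); /* 0xff0000ff */
        return 0;
    }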