qemu/tests/tcg/hexagon/misc.c
/*
 *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <string.h>

typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
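/*
 * Test a new-value store with register-plus-shifted-register addressing:
 * the packet assigns r0 and stores it through ".new" in the same packet.
 */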
static inline void S4_storerhnew_rr(void *p, int index, uint16_t v)
{
  asm volatile("{\n\t"
               "    r0 = %0\n\t"
               "    memh(%1+%2<<#2) = r0.new\n\t"
               "}\n"
               :: "r"(v), "r"(p), "r"(index)
               : "r0", "memory");
}
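/*
 * Test new-value stores with absolute-set addressing: each packet
 * stores to ##data and, as a side effect, writes the effective
 * address into the output register.
 */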
static uint32_t data;
static inline void *S4_storerbnew_ap(uint8_t v)
{
  void *ret;
  asm volatile("{\n\t"
               "    r0 = %1\n\t"
               "    memb(%0 = ##data) = r0.new\n\t"
               "}\n"
               : "=r"(ret)
               : "r"(v)
               : "r0", "memory");
  return ret;
}

static inline void *S4_storerhnew_ap(uint16_t v)
{
  void *ret;
  asm volatile("{\n\t"
               "    r0 = %1\n\t"
               "    memh(%0 = ##data) = r0.new\n\t"
               "}\n"
               : "=r"(ret)
               : "r"(v)
               : "r0", "memory");
  return ret;
}

static inline void *S4_storerinew_ap(uint32_t v)
{
  void *ret;
  asm volatile("{\n\t"
               "    r0 = %1\n\t"
               "    memw(%0 = ##data) = r0.new\n\t"
               "}\n"
               : "=r"(ret)
               : "r"(v)
               : "r0", "memory");
  return ret;
}
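/*
 * Test predicated store-immediates: each function compares the pred
 * argument against 1 and conditionally stores #27, using either the
 * committed predicate (p0) or the same-packet result (p0.new).
 */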
static inline void S4_storeirbt_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (p0) memb(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirbf_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (!p0) memb(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirbtnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (p0.new) memb(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirbfnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (!p0.new) memb(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirht_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (p0) memh(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirhf_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (!p0) memh(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirhtnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (p0.new) memh(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirhfnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (!p0.new) memh(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirit_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (p0) memw(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirif_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (!p0) memw(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeiritnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (p0.new) memw(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirifnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (!p0.new) memw(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}
/*
 * Test that a compound compare-and-jump is executed in two parts:
 * first do all the compares in the packet, accounting for the
 * auto-anding of predicates, and only then take the predicated jump.
 */
static inline int cmpnd_cmp_jump(void)
{
    int retval;
    asm ("r5 = #7\n\t"
         "r6 = #9\n\t"
         "{\n\t"
         "    p0 = cmp.eq(r5, #7)\n\t"
         "    if (p0.new) jump:nt 1f\n\t"
         "    p0 = cmp.eq(r6, #7)\n\t"
         "}\n\t"
         "%0 = #12\n\t"
         "jump 2f\n\t"
         "1:\n\t"
         "%0 = #13\n\t"
         "2:\n\t"
         : "=r"(retval) :: "r5", "r6", "p0");
    return retval;
}
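/*
 * Test a conditional transfer predicated on p0.new: r5 is cleared only
 * when the compare in the same packet sets p0.
 */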
static inline int test_clrtnew(int arg1, int old_val)
{
  int ret;
  asm volatile("r5 = %2\n\t"
               "{\n\t"
               "    p0 = cmp.eq(%1, #1)\n\t"
               "    if (p0.new) r5 = #0\n\t"
               "}\n\t"
               "%0 = r5\n\t"
               : "=r"(ret)
               : "r"(arg1), "r"(old_val)
               : "p0", "r5");
  return ret;
}
int err;

static void check(int val, int expect)
{
    if (val != expect) {
        printf("ERROR: 0x%04x != 0x%04x\n", val, expect);
        err++;
    }
}

uint32_t init[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
uint32_t array[10];

uint32_t early_exit;
/*
 * Write this as a function because we can't guarantee the compiler will
 * allocate a frame with just the SL2_return_tnew packet.
 */
static void SL2_return_tnew(int x);
asm ("SL2_return_tnew:\n\t"
     "   allocframe(#0)\n\t"
     "   r1 = #1\n\t"
     "   memw(##early_exit) = r1\n\t"
     "   {\n\t"
     "       p0 = cmp.eq(r0, #1)\n\t"
     "       if (p0.new) dealloc_return:nt\n\t"    /* SL2_return_tnew */
     "   }\n\t"
     "   r1 = #0\n\t"
     "   memw(##early_exit) = r1\n\t"
     "   dealloc_return\n\t"
    );
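/*
 * Test a control-register-pair read: m0 and m1 are written
 * individually, then read back together as the aliased pair c7:6.
 */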
static long long creg_pair(int x, int y)
{
    long long retval;
    asm ("m0 = %1\n\t"
         "m1 = %2\n\t"
         "%0 = c7:6\n\t"
         : "=r"(retval) : "r"(x), "r"(y) : "m0", "m1");
    return retval;
}
int main()
{
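    /* index 4 is scaled by #2 to byte offset 16, i.e. &array[4] */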
    memcpy(array, init, sizeof(array));
    S4_storerhnew_rr(array, 4, 0xffff);
    check(array[4], 0xffff);

    data = ~0;
    check((uint32_t)S4_storerbnew_ap(0x12), (uint32_t)&data);
    check(data, 0xffffff12);

    data = ~0;
    check((uint32_t)S4_storerhnew_ap(0x1234), (uint32_t)&data);
    check(data, 0xffff1234);

    data = ~0;
    check((uint32_t)S4_storerinew_ap(0x12345678), (uint32_t)&data);
    check(data, 0x12345678);
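    /*
     * For each store-immediate variant, the first call's predicate
     * matches, so 27 is stored; the second call's does not, so the
     * value from init is preserved.
     */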
    /* Byte */
    memcpy(array, init, sizeof(array));
    S4_storeirbt_io(&array[1], 1);
    check(array[2], 27);
    S4_storeirbt_io(&array[2], 0);
    check(array[3], 3);

    memcpy(array, init, sizeof(array));
    S4_storeirbf_io(&array[3], 0);
    check(array[4], 27);
    S4_storeirbf_io(&array[4], 1);
    check(array[5], 5);

    memcpy(array, init, sizeof(array));
    S4_storeirbtnew_io(&array[5], 1);
    check(array[6], 27);
    S4_storeirbtnew_io(&array[6], 0);
    check(array[7], 7);

    memcpy(array, init, sizeof(array));
    S4_storeirbfnew_io(&array[7], 0);
    check(array[8], 27);
    S4_storeirbfnew_io(&array[8], 1);
    check(array[9], 9);

    /* Half word */
    memcpy(array, init, sizeof(array));
    S4_storeirht_io(&array[1], 1);
    check(array[2], 27);
    S4_storeirht_io(&array[2], 0);
    check(array[3], 3);

    memcpy(array, init, sizeof(array));
    S4_storeirhf_io(&array[3], 0);
    check(array[4], 27);
    S4_storeirhf_io(&array[4], 1);
    check(array[5], 5);

    memcpy(array, init, sizeof(array));
    S4_storeirhtnew_io(&array[5], 1);
    check(array[6], 27);
    S4_storeirhtnew_io(&array[6], 0);
    check(array[7], 7);

    memcpy(array, init, sizeof(array));
    S4_storeirhfnew_io(&array[7], 0);
    check(array[8], 27);
    S4_storeirhfnew_io(&array[8], 1);
    check(array[9], 9);

    /* Word */
    memcpy(array, init, sizeof(array));
    S4_storeirit_io(&array[1], 1);
    check(array[2], 27);
    S4_storeirit_io(&array[2], 0);
    check(array[3], 3);

    memcpy(array, init, sizeof(array));
    S4_storeirif_io(&array[3], 0);
    check(array[4], 27);
    S4_storeirif_io(&array[4], 1);
    check(array[5], 5);

    memcpy(array, init, sizeof(array));
    S4_storeiritnew_io(&array[5], 1);
    check(array[6], 27);
    S4_storeiritnew_io(&array[6], 0);
    check(array[7], 7);

    memcpy(array, init, sizeof(array));
    S4_storeirifnew_io(&array[7], 0);
    check(array[8], 27);
    S4_storeirifnew_io(&array[8], 1);
    check(array[9], 9);
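    /*
     * Both compares in the packet write p0, so the results are
     * auto-anded; the second compare fails, the jump is not taken,
     * and the fall-through value 12 is returned.
     */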
    int x = cmpnd_cmp_jump();
    check(x, 12);
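    /* early_exit stays set only when the predicated dealloc_return fires */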
    SL2_return_tnew(0);
    check(early_exit, 0);
    SL2_return_tnew(1);
    check(early_exit, 1);
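    /* the low word reads back m0, the high word m1 */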
    long long pair = creg_pair(5, 7);
    check((int)pair, 5);
    check((int)(pair >> 32), 7);
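    /* r5 (and thus the result) is cleared only when arg1 == 1 */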
    int res = test_clrtnew(1, 7);
    check(res, 0);
    res = test_clrtnew(2, 7);
    check(res, 7);

    puts(err ? "FAIL" : "PASS");
    return err;
}