linux/drivers/gpu/drm/via/via_verifier.c
/*
 * Copyright 2004 The Unichrome Project. All Rights Reserved.
 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Thomas Hellstrom 2004, 2005.
 * This code was written using docs obtained under NDA from VIA Inc.
 *
 * Don't run this code directly on an AGP buffer. Due to cache problems it will
 * be very slow.
 */

#include "via_3d_reg.h"
#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_verifier.h"
#include "via_drv.h"

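/*
 * Top-level parser states. state_command means the next dword is expected to
 * be a command header; the other states select which header type (header1
 * register writes, header2 3D command bursts, or the video DMA header5 /
 * header6 formats) is currently being processed.
 */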
typedef enum {
        state_command,
        state_header2,
        state_header1,
        state_vheader5,
        state_vheader6,
        state_error
} verifier_state_t;

typedef enum {
        no_check = 0,
        check_for_header2,
        check_for_header1,
        check_for_header2_err,
        check_for_header1_err,
        check_for_fire,
        check_z_buffer_addr0,
        check_z_buffer_addr1,
        check_z_buffer_addr_mode,
        check_destination_addr0,
        check_destination_addr1,
        check_destination_addr_mode,
        check_for_dummy,
        check_for_dd,
        check_texture_addr0,
        check_texture_addr1,
        check_texture_addr2,
        check_texture_addr3,
        check_texture_addr4,
        check_texture_addr5,
        check_texture_addr6,
        check_texture_addr7,
        check_texture_addr8,
        check_texture_addr_mode,
        check_for_vertex_count,
        check_number_texunits,
        forbidden_command
} hazard_t;

/*
 * Associates each hazard above with a possible multi-command
 * sequence. For example, an address that is split over multiple
 * commands needs to be checked at the first command that does
 * not include any part of the address.
 */

static drm_via_sequence_t seqs[] = {
        no_sequence,
        no_sequence,
        no_sequence,
        no_sequence,
        no_sequence,
        no_sequence,
        z_address,
        z_address,
        z_address,
        dest_address,
        dest_address,
        dest_address,
        no_sequence,
        no_sequence,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        no_sequence
};

typedef struct {
        unsigned int code;
        hazard_t hz;
} hz_init_t;

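/*
 * The tables below list, for each HC_ParaType handled by via_check_header2(),
 * the command sub-addresses (the high byte of a header2 data dword) that the
 * verifier knows about, together with the hazard check to apply.
 * setup_hazard_table() expands them into the full 256-entry lookup tables
 * table1/table2/table3; any sub-address not listed is forbidden_command.
 */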
static hz_init_t init_table1[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xee, check_for_fire},
        {0xcc, check_for_dummy},
        {0xdd, check_for_dd},
        {0x00, no_check},
        {0x10, check_z_buffer_addr0},
        {0x11, check_z_buffer_addr1},
        {0x12, check_z_buffer_addr_mode},
        {0x13, no_check},
        {0x14, no_check},
        {0x15, no_check},
        {0x23, no_check},
        {0x24, no_check},
        {0x33, no_check},
        {0x34, no_check},
        {0x35, no_check},
        {0x36, no_check},
        {0x37, no_check},
        {0x38, no_check},
        {0x39, no_check},
        {0x3A, no_check},
        {0x3B, no_check},
        {0x3C, no_check},
        {0x3D, no_check},
        {0x3E, no_check},
        {0x40, check_destination_addr0},
        {0x41, check_destination_addr1},
        {0x42, check_destination_addr_mode},
        {0x43, no_check},
        {0x44, no_check},
        {0x50, no_check},
        {0x51, no_check},
        {0x52, no_check},
        {0x53, no_check},
        {0x54, no_check},
        {0x55, no_check},
        {0x56, no_check},
        {0x57, no_check},
        {0x58, no_check},
        {0x70, no_check},
        {0x71, no_check},
        {0x78, no_check},
        {0x79, no_check},
        {0x7A, no_check},
        {0x7B, no_check},
        {0x7C, no_check},
        {0x7D, check_for_vertex_count}
};

static hz_init_t init_table2[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xee, check_for_fire},
        {0xcc, check_for_dummy},
        {0x00, check_texture_addr0},
        {0x01, check_texture_addr0},
        {0x02, check_texture_addr0},
        {0x03, check_texture_addr0},
        {0x04, check_texture_addr0},
        {0x05, check_texture_addr0},
        {0x06, check_texture_addr0},
        {0x07, check_texture_addr0},
        {0x08, check_texture_addr0},
        {0x09, check_texture_addr0},
        {0x20, check_texture_addr1},
        {0x21, check_texture_addr1},
        {0x22, check_texture_addr1},
        {0x23, check_texture_addr4},
        {0x2B, check_texture_addr3},
        {0x2C, check_texture_addr3},
        {0x2D, check_texture_addr3},
        {0x2E, check_texture_addr3},
        {0x2F, check_texture_addr3},
        {0x30, check_texture_addr3},
        {0x31, check_texture_addr3},
        {0x32, check_texture_addr3},
        {0x33, check_texture_addr3},
        {0x34, check_texture_addr3},
        {0x4B, check_texture_addr5},
        {0x4C, check_texture_addr6},
        {0x51, check_texture_addr7},
        {0x52, check_texture_addr8},
        {0x77, check_texture_addr2},
        {0x78, no_check},
        {0x79, no_check},
        {0x7A, no_check},
        {0x7B, check_texture_addr_mode},
        {0x7C, no_check},
        {0x7D, no_check},
        {0x7E, no_check},
        {0x7F, no_check},
        {0x80, no_check},
        {0x81, no_check},
        {0x82, no_check},
        {0x83, no_check},
        {0x85, no_check},
        {0x86, no_check},
        {0x87, no_check},
        {0x88, no_check},
        {0x89, no_check},
        {0x8A, no_check},
        {0x90, no_check},
        {0x91, no_check},
        {0x92, no_check},
        {0x93, no_check}
};

static hz_init_t init_table3[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xcc, check_for_dummy},
        {0x00, check_number_texunits}
};

static hazard_t table1[256];
static hazard_t table2[256];
static hazard_t table3[256];

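/*
 * Advance *buf by num_words dwords, failing if that would run past buf_end.
 */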
static __inline__ int
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
{
        if ((buf_end - *buf) >= num_words) {
                *buf += num_words;
                return 0;
        }
        DRM_ERROR("Illegal termination of DMA command buffer\n");
        return 1;
}

/*
 * Partially stolen from drm_memory.h
 */

static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
                                                    unsigned long offset,
                                                    unsigned long size,
                                                    struct drm_device * dev)
{
        struct drm_map_list *r_list;
        drm_local_map_t *map = seq->map_cache;

        if (map && map->offset <= offset
            && (offset + size) <= (map->offset + map->size)) {
                return map;
        }

        list_for_each_entry(r_list, &dev->maplist, head) {
                map = r_list->map;
                if (!map)
                        continue;
                if (map->offset <= offset
                    && (offset + size) <= (map->offset + map->size)
                    && !(map->flags & _DRM_RESTRICTED)
                    && (map->type == _DRM_AGP)) {
                        seq->map_cache = map;
                        return map;
                }
        }
        return NULL;
}

/*
 * Require that all AGP texture levels reside in the same AGP map, which should
 * be mappable by the client. This is not a big restriction.
 * FIXME: To actually enforce this security policy strictly, drm_rmmap
 * would have to wait for DMA quiescence before removing an AGP map.
 * In practice the via_drm_lookup_agp_map call seems to take
 * very little CPU time.
 */

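/*
 * Called when a multi-command sequence (see seqs[] above) has ended, i.e. the
 * current command no longer contributes to it. For AGP textures this is the
 * point where the accumulated per-level addresses are range-checked against a
 * single client-mappable AGP map.
 */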
static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
{
        switch (cur_seq->unfinished) {
        case z_address:
                DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
                break;
        case dest_address:
                DRM_DEBUG("Destination start address is 0x%x\n",
                          cur_seq->d_addr);
                break;
        case tex_address:
                if (cur_seq->agp_texture) {
                        unsigned start =
                            cur_seq->tex_level_lo[cur_seq->texture];
                        unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
                        unsigned long lo = ~0, hi = 0, tmp;
                        uint32_t *addr, *pitch, *height, tex;
                        unsigned i;
                        int npot;

                        if (end > 9)
                                end = 9;
                        if (start > 9)
                                start = 9;

                        addr =
                            &(cur_seq->t_addr[tex = cur_seq->texture][start]);
                        pitch = &(cur_seq->pitch[tex][start]);
                        height = &(cur_seq->height[tex][start]);
                        npot = cur_seq->tex_npot[tex];
                        for (i = start; i <= end; ++i) {
                                tmp = *addr++;
                                if (tmp < lo)
                                        lo = tmp;
                                if (i == 0 && npot)
                                        tmp += (*height++ * *pitch++);
                                else
                                        tmp += (*height++ << *pitch++);
                                if (tmp > hi)
                                        hi = tmp;
                        }

                        if (!via_drm_lookup_agp_map
                            (cur_seq, lo, hi - lo, cur_seq->dev)) {
                                DRM_ERROR
                                    ("AGP texture is not in allowed map\n");
                                return 2;
                        }
                }
                break;
        default:
                break;
        }
        cur_seq->unfinished = no_sequence;
        return 0;
}

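/*
 * Apply the hazard check for a single header2 data dword. Returns 0 when the
 * dword is acceptable, 1 when the dword starts a new header or fire command
 * (the caller backs up and re-parses it), and 2 on a security violation or a
 * malformed stream.
 */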
static __inline__ int
investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
{
        register uint32_t tmp, *tmp_addr;

        if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
                int ret;
                if ((ret = finish_current_sequence(cur_seq)))
                        return ret;
        }

        switch (hz) {
        case check_for_header2:
                if (cmd == HALCYON_HEADER2)
                        return 1;
                return 0;
        case check_for_header1:
                if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                        return 1;
                return 0;
        case check_for_header2_err:
                if (cmd == HALCYON_HEADER2)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
                break;
        case check_for_header1_err:
                if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
                break;
        case check_for_fire:
                if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
                break;
        case check_for_dummy:
                if (HC_DUMMY == cmd)
                        return 0;
                DRM_ERROR("Illegal DMA HC_DUMMY command\n");
                break;
        case check_for_dd:
                if (0xdddddddd == cmd)
                        return 0;
                DRM_ERROR("Illegal DMA 0xdddddddd command\n");
                break;
        case check_z_buffer_addr0:
                cur_seq->unfinished = z_address;
                cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
                    (cmd & 0x00FFFFFF);
                return 0;
        case check_z_buffer_addr1:
                cur_seq->unfinished = z_address;
                cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
                    ((cmd & 0xFF) << 24);
                return 0;
        case check_z_buffer_addr_mode:
                cur_seq->unfinished = z_address;
                if ((cmd & 0x0000C000) == 0)
                        return 0;
                DRM_ERROR("Attempt to place Z buffer in system memory\n");
                return 2;
        case check_destination_addr0:
                cur_seq->unfinished = dest_address;
                cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
                    (cmd & 0x00FFFFFF);
                return 0;
        case check_destination_addr1:
                cur_seq->unfinished = dest_address;
                cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
                    ((cmd & 0xFF) << 24);
                return 0;
        case check_destination_addr_mode:
                cur_seq->unfinished = dest_address;
                if ((cmd & 0x0000C000) == 0)
                        return 0;
                DRM_ERROR
                    ("Attempt to place 3D drawing buffer in system memory\n");
                return 2;
        case check_texture_addr0:
                cur_seq->unfinished = tex_address;
                tmp = (cmd >> 24);
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
                *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
                return 0;
        case check_texture_addr1:
                cur_seq->unfinished = tex_address;
                tmp = ((cmd >> 24) - 0x20);
                tmp += tmp << 1;
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
                tmp_addr++;
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
                tmp_addr++;
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
                return 0;
        case check_texture_addr2:
                cur_seq->unfinished = tex_address;
                cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
                cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
                return 0;
        case check_texture_addr3:
                cur_seq->unfinished = tex_address;
                tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
                if (tmp == 0 &&
                    (cmd & HC_HTXnEnPit_MASK)) {
                        cur_seq->pitch[cur_seq->texture][tmp] =
                                (cmd & HC_HTXnLnPit_MASK);
                        cur_seq->tex_npot[cur_seq->texture] = 1;
                } else {
                        cur_seq->pitch[cur_seq->texture][tmp] =
                                (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
                        cur_seq->tex_npot[cur_seq->texture] = 0;
                        if (cmd & 0x000FFFFF) {
                                DRM_ERROR
                                        ("Unimplemented texture level 0 pitch mode.\n");
                                return 2;
                        }
                }
                return 0;
        case check_texture_addr4:
                cur_seq->unfinished = tex_address;
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
                return 0;
        case check_texture_addr5:
        case check_texture_addr6:
                cur_seq->unfinished = tex_address;
                /*
                 * Texture width. We don't care since we have the pitch.
                 */
                return 0;
        case check_texture_addr7:
                cur_seq->unfinished = tex_address;
                tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
                tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
                tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
                tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
                tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
                tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
                tmp_addr[0] = 1 << (cmd & 0x0000000F);
                return 0;
        case check_texture_addr8:
                cur_seq->unfinished = tex_address;
                tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
                tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
                tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
                tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
                tmp_addr[6] = 1 << (cmd & 0x0000000F);
                return 0;
        case check_texture_addr_mode:
                cur_seq->unfinished = tex_address;
                if (2 == (tmp = cmd & 0x00000003)) {
                        DRM_ERROR
                            ("Attempt to fetch texture from system memory.\n");
                        return 2;
                }
                cur_seq->agp_texture = (tmp == 3);
                cur_seq->tex_palette_size[cur_seq->texture] =
                    (cmd >> 16) & 0x00000007;
                return 0;
        case check_for_vertex_count:
                cur_seq->vertex_count = cmd & 0x0000FFFF;
                return 0;
        case check_number_texunits:
                cur_seq->multitex = (cmd >> 3) & 1;
                return 0;
        default:
                DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
                return 2;
        }
        return 2;
}

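/*
 * Verify a HC_ParaType_CmdVdata primitive list: a B command describing the
 * vertex layout, an A command, the vertex data itself, and a terminating
 * fire command. The location of each fire command is recorded in
 * dev_priv->fire_offsets so that via_parse_command_stream() can later replay
 * the stream to the hardware.
 */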
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
                    drm_via_state_t * cur_seq)
{
        drm_via_private_t *dev_priv =
            (drm_via_private_t *) cur_seq->dev->dev_private;
        uint32_t a_fire, bcmd, dw_count;
        int ret = 0;
        int have_fire;
        const uint32_t *buf = *buffer;

        while (buf < buf_end) {
                have_fire = 0;
                if ((buf_end - buf) < 2) {
                        DRM_ERROR
                            ("Unexpected termination of primitive list.\n");
                        ret = 1;
                        break;
                }
                if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
                        break;
                bcmd = *buf++;
                if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
                        DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
                                  *buf);
                        ret = 1;
                        break;
                }
                a_fire =
                    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
                    HC_HE3Fire_MASK;

                /*
                 * How many dwords per vertex ?
                 */

                if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
                        DRM_ERROR("Illegal B command vertex data for AGP.\n");
                        ret = 1;
                        break;
                }

                dw_count = 0;
                if (bcmd & (1 << 7))
                        dw_count += (cur_seq->multitex) ? 2 : 1;
                if (bcmd & (1 << 8))
                        dw_count += (cur_seq->multitex) ? 2 : 1;
                if (bcmd & (1 << 9))
                        dw_count++;
                if (bcmd & (1 << 10))
                        dw_count++;
                if (bcmd & (1 << 11))
                        dw_count++;
                if (bcmd & (1 << 12))
                        dw_count++;
                if (bcmd & (1 << 13))
                        dw_count++;
                if (bcmd & (1 << 14))
                        dw_count++;

                while (buf < buf_end) {
                        if (*buf == a_fire) {
                                if (dev_priv->num_fire_offsets >=
                                    VIA_FIRE_BUF_SIZE) {
                                        DRM_ERROR("Fire offset buffer full.\n");
                                        ret = 1;
                                        break;
                                }
                                dev_priv->fire_offsets[dev_priv->
                                                       num_fire_offsets++] =
                                    buf;
                                have_fire = 1;
                                buf++;
                                if (buf < buf_end && *buf == a_fire)
                                        buf++;
                                break;
                        }
                        if ((*buf == HALCYON_HEADER2) ||
                            ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
                                DRM_ERROR("Missing Vertex Fire command, "
                                          "stray Vertex Fire command, or verifier "
                                          "lost sync.\n");
                                ret = 1;
                                break;
                        }
                        if ((ret = eat_words(&buf, buf_end, dw_count)))
                                break;
                }
                if (buf >= buf_end && !have_fire) {
                        DRM_ERROR("Missing Vertex Fire command or verifier "
                                  "lost sync.\n");
                        ret = 1;
                        break;
                }
                if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
                        DRM_ERROR("AGP Primitive list end misaligned.\n");
                        ret = 1;
                        break;
                }
        }
        *buffer = buf;
        return ret;
}

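/*
 * Verify a HALCYON_HEADER2 sequence. The dword following the header selects
 * the parameter type; vertex data is handed to via_check_prim_list(), and
 * register-style parameter blocks are checked dword by dword against the
 * hazard tables until a new header or fire command is found.
 */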
static __inline__ verifier_state_t
via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
                  drm_via_state_t * hc_state)
{
        uint32_t cmd;
        int hz_mode;
        hazard_t hz;
        const uint32_t *buf = *buffer;
        const hazard_t *hz_table;

        if ((buf_end - buf) < 2) {
                DRM_ERROR
                    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
                return state_error;
        }
        buf++;
        cmd = (*buf++ & 0xFFFF0000) >> 16;

        switch (cmd) {
        case HC_ParaType_CmdVdata:
                if (via_check_prim_list(&buf, buf_end, hc_state))
                        return state_error;
                *buffer = buf;
                return state_command;
        case HC_ParaType_NotTex:
                hz_table = table1;
                break;
        case HC_ParaType_Tex:
                hc_state->texture = 0;
                hz_table = table2;
                break;
        case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
                hc_state->texture = 1;
                hz_table = table2;
                break;
        case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
                hz_table = table3;
                break;
        case HC_ParaType_Auto:
                if (eat_words(&buf, buf_end, 2))
                        return state_error;
                *buffer = buf;
                return state_command;
        case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
                if (eat_words(&buf, buf_end, 32))
                        return state_error;
                *buffer = buf;
                return state_command;
        case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
        case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
                DRM_ERROR("Texture palettes are rejected because of "
                          "lack of info on how to determine their size.\n");
                return state_error;
        case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
                DRM_ERROR("Fog factor palettes are rejected because of "
                          "lack of info on how to determine their size.\n");
                return state_error;
        default:

                /*
                 * There are some unimplemented HC_ParaTypes here that
                 * need to be implemented if the Mesa driver is extended.
                 */

                DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
                          "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
                          cmd, *(buf - 2));
                *buffer = buf;
                return state_error;
        }

        while (buf < buf_end) {
                cmd = *buf++;
                if ((hz = hz_table[cmd >> 24])) {
                        if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
                                if (hz_mode == 1) {
                                        buf--;
                                        break;
                                }
                                return state_error;
                        }
                } else if (hc_state->unfinished &&
                           finish_current_sequence(hc_state)) {
                        return state_error;
                }
        }
        if (hc_state->unfinished && finish_current_sequence(hc_state)) {
                return state_error;
        }
        *buffer = buf;
        return state_command;
}

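/*
 * Replay an already verified header2 sequence to the hardware, writing the
 * data into the 3D engine's transfer space. Vertex data is emitted up to and
 * including each fire command previously recorded by the verifier.
 */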
static __inline__ verifier_state_t
via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
                  const uint32_t * buf_end, int *fire_count)
{
        uint32_t cmd;
        const uint32_t *buf = *buffer;
        const uint32_t *next_fire;
        int burst = 0;

        next_fire = dev_priv->fire_offsets[*fire_count];
        buf++;
        cmd = (*buf & 0xFFFF0000) >> 16;
        VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
        switch (cmd) {
        case HC_ParaType_CmdVdata:
                while ((buf < buf_end) &&
                       (*fire_count < dev_priv->num_fire_offsets) &&
                       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
                        while (buf <= next_fire) {
                                VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
                                          (burst & 63), *buf++);
                                burst += 4;
                        }
                        if ((buf < buf_end)
                            && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
                                buf++;

                        if (++(*fire_count) < dev_priv->num_fire_offsets)
                                next_fire = dev_priv->fire_offsets[*fire_count];
                }
                break;
        default:
                while (buf < buf_end) {

                        if (*buf == HC_HEADER2 ||
                            (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
                            (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
                            (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                break;

                        VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
                                  (burst & 63), *buf++);
                        burst += 4;
                }
        }
        *buffer = buf;
        return state_command;
}

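/*
 * Video DMA commands may only target a limited window of the MMIO space:
 * addresses 0x400-0xBFF (3D and command burst area), 0xD00-0x12FF (PCI DMA
 * area) and anything above 0x13FF (VGA registers) are rejected.
 */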
static __inline__ int verify_mmio_address(uint32_t address)
{
        if ((address > 0x3FF) && (address < 0xC00)) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access 3D- or command burst area.\n");
                return 1;
        } else if ((address > 0xCFF) && (address < 0x1300)) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access PCI DMA area.\n");
                return 1;
        } else if (address > 0x13FF) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access VGA registers.\n");
                return 1;
        }
        return 0;
}

static __inline__ int
verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
                  uint32_t dwords)
{
        const uint32_t *buf = *buffer;

        if (buf_end - buf < dwords) {
                DRM_ERROR("Illegal termination of video command.\n");
                return 1;
        }
        while (dwords--) {
                if (*buf++) {
                        DRM_ERROR("Illegal video command tail.\n");
                        return 1;
                }
        }
        *buffer = buf;
        return 0;
}

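/*
 * Header1 commands are simple register writes: one dword selecting the
 * register followed by one data dword. Only registers outside the
 * 3D/command-burst and VGA ranges may be written this way.
 */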
static __inline__ verifier_state_t
via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t cmd;
        const uint32_t *buf = *buffer;
        verifier_state_t ret = state_command;

        while (buf < buf_end) {
                cmd = *buf;
                if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
                    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
                        if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                                break;
                        DRM_ERROR("Invalid HALCYON_HEADER1 command. "
                                  "Attempt to access 3D- or command burst area.\n");
                        ret = state_error;
                        break;
                } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
                        if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                                break;
                        DRM_ERROR("Invalid HALCYON_HEADER1 command. "
                                  "Attempt to access VGA registers.\n");
                        ret = state_error;
                        break;
                } else {
                        buf += 2;
                }
        }
        *buffer = buf;
        return ret;
}

static __inline__ verifier_state_t
via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
                  const uint32_t * buf_end)
{
        register uint32_t cmd;
        const uint32_t *buf = *buffer;

        while (buf < buf_end) {
                cmd = *buf;
                if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                        break;
                VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
                buf++;
        }
        *buffer = buf;
        return state_command;
}

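/*
 * A video header5 command is a register burst: the header dword carrying the
 * target MMIO address, a dword count, the fixed marker 0x00F50000, a zero
 * dword, and then 'count' data dwords, zero-padded up to a multiple of four
 * dwords.
 */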
static __inline__ verifier_state_t
via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t data;
        const uint32_t *buf = *buffer;

        if (buf_end - buf < 4) {
                DRM_ERROR("Illegal termination of video header5 command\n");
                return state_error;
        }

        data = *buf++ & ~VIA_VIDEOMASK;
        if (verify_mmio_address(data))
                return state_error;

        data = *buf++;
        if (*buf++ != 0x00F50000) {
                DRM_ERROR("Illegal header5 header data\n");
                return state_error;
        }
        if (*buf++ != 0x00000000) {
                DRM_ERROR("Illegal header5 header data\n");
                return state_error;
        }
        if (eat_words(&buf, buf_end, data))
                return state_error;
        if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
                return state_error;
        *buffer = buf;
        return state_command;

}

static __inline__ verifier_state_t
via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
                   const uint32_t * buf_end)
{
        uint32_t addr, count, i;
        const uint32_t *buf = *buffer;

        addr = *buf++ & ~VIA_VIDEOMASK;
        i = count = *buf;
        buf += 3;
        while (i--) {
                VIA_WRITE(addr, *buf++);
        }
        if (count & 3)
                buf += 4 - (count & 3);
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t data;
        const uint32_t *buf = *buffer;
        uint32_t i;

        if (buf_end - buf < 4) {
                DRM_ERROR("Illegal termination of video header6 command\n");
                return state_error;
        }
        buf++;
        data = *buf++;
        if (*buf++ != 0x00F60000) {
                DRM_ERROR("Illegal header6 header data\n");
                return state_error;
        }
        if (*buf++ != 0x00000000) {
                DRM_ERROR("Illegal header6 header data\n");
                return state_error;
        }
        if ((buf_end - buf) < (data << 1)) {
                DRM_ERROR("Illegal termination of video header6 command\n");
                return state_error;
        }
        for (i = 0; i < data; ++i) {
                if (verify_mmio_address(*buf++))
                        return state_error;
                buf++;
        }
        data <<= 1;
        if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
                return state_error;
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
                   const uint32_t * buf_end)
{

        uint32_t addr, count, i;
        const uint32_t *buf = *buffer;

        i = count = *++buf;
        buf += 3;
        while (i--) {
                addr = *buf++;
                VIA_WRITE(addr, *buf++);
        }
        count <<= 1;
        if (count & 3)
                buf += 4 - (count & 3);
        *buffer = buf;
        return state_command;
}

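/*
 * Main entry point of the verifier: check 'size' bytes of a user-submitted
 * command stream before it is handed to the hardware. On any violation the
 * saved verifier state is restored and -EINVAL is returned. Video commands
 * (header5/6) and 3D commands are only accepted on chipsets that support
 * them.
 */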
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
                          struct drm_device * dev, int agp)
{

        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        drm_via_state_t *hc_state = &dev_priv->hc_state;
        drm_via_state_t saved_state = *hc_state;
        uint32_t cmd;
        const uint32_t *buf_end = buf + (size >> 2);
        verifier_state_t state = state_command;
        int cme_video;
        int supported_3d;

        cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
                     dev_priv->chipset == VIA_DX9_0);

        supported_3d = dev_priv->chipset != VIA_DX9_0;

        hc_state->dev = dev;
        hc_state->unfinished = no_sequence;
        hc_state->map_cache = NULL;
        hc_state->agp = agp;
        hc_state->buf_start = buf;
        dev_priv->num_fire_offsets = 0;

        while (buf < buf_end) {

                switch (state) {
                case state_header2:
                        state = via_check_header2(&buf, buf_end, hc_state);
                        break;
                case state_header1:
                        state = via_check_header1(&buf, buf_end);
                        break;
                case state_vheader5:
                        state = via_check_vheader5(&buf, buf_end);
                        break;
                case state_vheader6:
                        state = via_check_vheader6(&buf, buf_end);
                        break;
                case state_command:
                        if ((HALCYON_HEADER2 == (cmd = *buf)) &&
                            supported_3d)
                                state = state_header2;
                        else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                                state = state_header1;
                        else if (cme_video
                                 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
                                state = state_vheader5;
                        else if (cme_video
                                 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                state = state_vheader6;
                        else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
                                DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
                                state = state_error;
                        } else {
                                DRM_ERROR
                                    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
                                     cmd);
                                state = state_error;
                        }
                        break;
                case state_error:
                default:
                        *hc_state = saved_state;
                        return -EINVAL;
                }
        }
        if (state == state_error) {
                *hc_state = saved_state;
                return -EINVAL;
        }
        return 0;
}

int
via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
                         unsigned int size)
{

        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        uint32_t cmd;
        const uint32_t *buf_end = buf + (size >> 2);
        verifier_state_t state = state_command;
        int fire_count = 0;

        while (buf < buf_end) {

                switch (state) {
                case state_header2:
                        state =
                            via_parse_header2(dev_priv, &buf, buf_end,
                                              &fire_count);
                        break;
                case state_header1:
                        state = via_parse_header1(dev_priv, &buf, buf_end);
                        break;
                case state_vheader5:
                        state = via_parse_vheader5(dev_priv, &buf, buf_end);
                        break;
                case state_vheader6:
                        state = via_parse_vheader6(dev_priv, &buf, buf_end);
                        break;
                case state_command:
                        if (HALCYON_HEADER2 == (cmd = *buf))
                                state = state_header2;
                        else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                                state = state_header1;
                        else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
                                state = state_vheader5;
                        else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                state = state_vheader6;
                        else {
                                DRM_ERROR
                                    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
                                     cmd);
                                state = state_error;
                        }
                        break;
                case state_error:
                default:
                        return -EINVAL;
                }
        }
        if (state == state_error) {
                return -EINVAL;
        }
        return 0;
}

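/*
 * Expand a compact hz_init_t list into a full 256-entry lookup table,
 * treating every command code that is not explicitly listed as
 * forbidden_command.
 */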
static void
setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
        int i;

        for (i = 0; i < 256; ++i) {
                table[i] = forbidden_command;
        }

        for (i = 0; i < size; ++i) {
                table[init_table[i].code] = init_table[i].hz;
        }
}

void via_init_command_verifier(void)
{
        setup_hazard_table(init_table1, table1,
                           sizeof(init_table1) / sizeof(hz_init_t));
        setup_hazard_table(init_table2, table2,
                           sizeof(init_table2) / sizeof(hz_init_t));
        setup_hazard_table(init_table3, table3,
                           sizeof(init_table3) / sizeof(hz_init_t));
}