/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * The Queue Management Unit (QMU) is designed to offload the software
 * effort of servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links data buffers and triggers QMU to send data to / receive data
 * from the host.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
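
/*
 * A rough overview of the functions implemented here:
 *
 *   mtu3_qmu_init()       - create the GPD dma_pool
 *   mtu3_gpd_ring_alloc() - allocate a GPD ring for an endpoint
 *   mtu3_qmu_start()      - program the ring address and start the queue
 *   mtu3_insert_gpd()     - queue a request as a TX or RX GPD
 *   mtu3_qmu_isr()        - complete finished GPDs and handle errors
 *   mtu3_qmu_stop()/mtu3_qmu_flush() - stop the queue and reset the ring
 *   mtu3_gpd_ring_free()  - release the ring
 *   mtu3_qmu_exit()       - destroy the dma_pool
 */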

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"

#define QMU_CHECKSUM_LEN        16

#define GPD_FLAGS_HWO   BIT(0)
#define GPD_FLAGS_BDP   BIT(1)
#define GPD_FLAGS_BPS   BIT(2)
#define GPD_FLAGS_IOC   BIT(7)

#define GPD_EXT_FLAG_ZLP        BIT(5)

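/* map a GPD's DMA address within the ring back to its virtual address */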
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
                dma_addr_t dma_addr)
{
        dma_addr_t dma_base = ring->dma;
        struct qmu_gpd *gpd_head = ring->start;
        u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

        if (offset >= MAX_GPD_NUM)
                return NULL;

        return gpd_head + offset;
}

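/* map a GPD's virtual address back to its DMA address; 0 if out of range */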
static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
                struct qmu_gpd *gpd)
{
        dma_addr_t dma_base = ring->dma;
        struct qmu_gpd *gpd_head = ring->start;
        u32 offset;

        offset = gpd - gpd_head;
        if (offset >= MAX_GPD_NUM)
                return 0;

        return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
        ring->start = gpd;
        ring->enqueue = gpd;
        ring->dequeue = gpd;
        ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->start;

        if (gpd) {
                gpd->flag &= ~GPD_FLAGS_HWO;
                gpd_ring_init(ring, gpd);
        }
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
        struct qmu_gpd *gpd;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;

        /* software owns all GPDs by default */
        gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
        if (gpd == NULL)
                return -ENOMEM;

        gpd_ring_init(ring, gpd);

        return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;

        dma_pool_free(mep->mtu->qmu_gpd_pool,
                        ring->start, ring->dma);
        memset(ring, 0, sizeof(*ring));
}

/*
 * Calculate the checksum of a GPD or BD.
 * "noinline" and "mb()" are added to prevent a wrong calculation.
 */
static noinline u8 qmu_calc_checksum(u8 *data)
{
        u8 chksum = 0;
        int i;

        data[1] = 0x0;  /* set checksum to 0 */

        mb();   /* ensure the gpd/bd is really up-to-date */
        for (i = 0; i < QMU_CHECKSUM_LEN; i++)
                chksum += data[i];

        /* Default: HWO=1, @flag[bit0] */
        chksum += 1;

        return 0xFF - chksum;
}

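/* resume the queue; write QMU_Q_RESUME again if it does not become active */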
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        int epnum = mep->epnum;
        u32 offset;

        offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

        mtu3_writel(mbase, offset, QMU_Q_RESUME);
        if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
                mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

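/* advance the enqueue/dequeue pointer, wrapping around at the ring end */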
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
        if (ring->enqueue < ring->end)
                ring->enqueue++;
        else
                ring->enqueue = ring->start;

        return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
        if (ring->dequeue < ring->end)
                ring->dequeue++;
        else
                ring->dequeue = ring->start;

        return ring->dequeue;
}

/* check if a ring is empty */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
        struct qmu_gpd *enq = ring->enqueue;
        struct qmu_gpd *next;

        if (ring->enqueue < ring->end)
                next = enq + 1;
        else
                next = ring->start;

        /* one gpd is reserved to simplify gpd preparation */
        return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
        return gpd_ring_empty(&mep->gpd_ring);
}

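/*
 * Fill the current enqueue GPD for a TX request: program the buffer address
 * and length, link it to the next (reserved) GPD, then write the checksum
 * and set the HWO flag last, so HW only ever sees a fully prepared GPD.
 */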
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        struct qmu_gpd *enq;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->enqueue;
        struct usb_request *req = &mreq->request;

        /* set all fields to zero as default value */
        memset(gpd, 0, sizeof(*gpd));

        gpd->buffer = cpu_to_le32((u32)req->dma);
        gpd->buf_len = cpu_to_le16(req->length);
        gpd->flag |= GPD_FLAGS_IOC;

        /* get the next GPD */
        enq = advance_enq_gpd(ring);
        dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
                mep->epnum, gpd, enq);

        enq->flag &= ~GPD_FLAGS_HWO;
        gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));

        if (req->zero)
                gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

        gpd->chksum = qmu_calc_checksum((u8 *)gpd);
        gpd->flag |= GPD_FLAGS_HWO;

        mreq->gpd = gpd;

        return 0;
}

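/*
 * Same as the TX preparation above, except that the request length is
 * programmed into data_buf_len (the buffer length allowed for receiving)
 * and no ZLP flag is needed.
 */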
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        struct qmu_gpd *enq;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->enqueue;
        struct usb_request *req = &mreq->request;

        /* set all fields to zero as default value */
        memset(gpd, 0, sizeof(*gpd));

        gpd->buffer = cpu_to_le32((u32)req->dma);
        gpd->data_buf_len = cpu_to_le16(req->length);
        gpd->flag |= GPD_FLAGS_IOC;

        /* get the next GPD */
        enq = advance_enq_gpd(ring);
        dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
                mep->epnum, gpd, enq);

        enq->flag &= ~GPD_FLAGS_HWO;
        gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
        gpd->chksum = qmu_calc_checksum((u8 *)gpd);
        gpd->flag |= GPD_FLAGS_HWO;

        mreq->gpd = gpd;

        return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        if (mep->is_in)
                mtu3_prepare_tx_gpd(mep, mreq);
        else
                mtu3_prepare_rx_gpd(mep, mreq);
}

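/*
 * Program the queue start address, enable DMA request and checksum check,
 * unmask the queue error interrupts, then start the queue unless it is
 * already active.
 */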
int mtu3_qmu_start(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        u8 epnum = mep->epnum;

        if (mep->is_in) {
                /* set QMU start address */
                mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
                mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
                mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
                /* send zero length packet according to ZLP flag in GPD */
                mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
                mtu3_writel(mbase, U3D_TQERRIESR0,
                                QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

                if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
                        dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
                        return 0;
                }
                mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

        } else {
                mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
                mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
                mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
                /* don't expect ZLP */
                mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
                /* move to the next GPD when a ZLP is received */
                mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
                mtu3_writel(mbase, U3D_RQERRIESR0,
                                QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
                mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

                if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
                        dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
                        return 0;
                }
                mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
        }

        return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        int epnum = mep->epnum;
        u32 value = 0;
        u32 qcsr;
        int ret;

        qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

        if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
                dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
                return;
        }
        mtu3_writel(mbase, qcsr, QMU_Q_STOP);

        ret = readl_poll_timeout_atomic(mbase + qcsr, value,
                        !(value & QMU_Q_ACTIVE), 1, 1000);
        if (ret) {
                dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
                return;
        }

        dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
        dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
                ((mep->is_in) ? "TX" : "RX"));

        /* stop QMU */
        mtu3_qmu_stop(mep);
        reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero-length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
 * a length error interrupt and let the ISR send the ZLP via the BMU.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->in_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd_current = NULL;
        dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
        struct usb_request *req = NULL;
        struct mtu3_request *mreq;
        u32 txcsr = 0;
        int ret;

        mreq = next_request(mep);
        if (mreq && mreq->request.length == 0)
                req = &mreq->request;
        else
                return;

        gpd_current = gpd_dma_to_virt(ring, gpd_dma);

        if (le16_to_cpu(gpd_current->buf_len) != 0) {
                dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
                return;
        }

        dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);

        mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

        ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
                        txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
        if (ret) {
                dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
                return;
        }
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

        /* bypass the current GPD */
        gpd_current->flag |= GPD_FLAGS_BPS;
        gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
        gpd_current->flag |= GPD_FLAGS_HWO;

        /* enable DMAREQEN, switch back to QMU mode */
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
        mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet (meanwhile, the second request is also
 * transferred and the tasklet completes both of them) --> qmu_interrupt
 * for the second request.
 * To avoid this case, call qmu_done_tx() directly from the ISR.
 */
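/*
 * Complete the TX requests whose GPDs the HW has finished (HWO cleared),
 * walking the ring from the dequeue pointer up to the GPD currently
 * pointed to by the TQCPR register.
 */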
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->in_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd = ring->dequeue;
        struct qmu_gpd *gpd_current = NULL;
        dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
        struct usb_request *request = NULL;
        struct mtu3_request *mreq;

        /* translate the phy address from the QMU register to a virtual one */
        gpd_current = gpd_dma_to_virt(ring, gpd_dma);

        dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                __func__, epnum, gpd, gpd_current, ring->enqueue);

        while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

                mreq = next_request(mep);

                if (mreq == NULL || mreq->gpd != gpd) {
                        dev_err(mtu->dev, "no correct TX req is found\n");
                        break;
                }

                request = &mreq->request;
                request->actual = le16_to_cpu(gpd->buf_len);
                mtu3_req_complete(mep, request, 0);

                gpd = advance_deq_gpd(ring);
        }

        dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
                __func__, epnum, ring->dequeue, ring->enqueue);
}

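/* RX counterpart of qmu_done_tx(): complete RX requests finished by the HW */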
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->out_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd = ring->dequeue;
        struct qmu_gpd *gpd_current = NULL;
        dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
        struct usb_request *req = NULL;
        struct mtu3_request *mreq;

        gpd_current = gpd_dma_to_virt(ring, gpd_dma);

        dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                __func__, epnum, gpd, gpd_current, ring->enqueue);

        while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

                mreq = next_request(mep);

                if (mreq == NULL || mreq->gpd != gpd) {
                        dev_err(mtu->dev, "no correct RX req is found\n");
                        break;
                }
                req = &mreq->request;

                req->actual = le16_to_cpu(gpd->buf_len);
                mtu3_req_complete(mep, req, 0);

                gpd = advance_deq_gpd(ring);
        }

        dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
                __func__, epnum, ring->dequeue, ring->enqueue);
}

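/* dispatch TX/RX done handling to each endpoint flagged in the done status */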
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
        int i;

        for (i = 1; i < mtu->num_eps; i++) {
                if (done_status & QMU_RX_DONE_INT(i))
                        qmu_done_rx(mtu, i);
                if (done_status & QMU_TX_DONE_INT(i))
                        qmu_done_tx(mtu, i);
        }
}

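/*
 * Handle QMU error interrupts: checksum and length errors in both
 * directions and RX ZLP events; a TX length error is used to send a
 * pending ZLP, see qmu_tx_zlp_error_handler().
 */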
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
        void __iomem *mbase = mtu->mac_base;
        u32 errval;
        int i;

        if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
                errval = mtu3_readl(mbase, U3D_RQERRIR0);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_RX_CS_ERR(i))
                                dev_err(mtu->dev, "Rx %d CS error!\n", i);

                        if (errval & QMU_RX_LEN_ERR(i))
                                dev_err(mtu->dev, "RX %d Length error\n", i);
                }
                mtu3_writel(mbase, U3D_RQERRIR0, errval);
        }

        if (qmu_status & RXQ_ZLPERR_INT) {
                errval = mtu3_readl(mbase, U3D_RQERRIR1);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_RX_ZLP_ERR(i))
                                dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
                }
                mtu3_writel(mbase, U3D_RQERRIR1, errval);
        }

        if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
                errval = mtu3_readl(mbase, U3D_TQERRIR0);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_TX_CS_ERR(i))
                                dev_err(mtu->dev, "Tx %d checksum error!\n", i);

                        if (errval & QMU_TX_LEN_ERR(i))
                                qmu_tx_zlp_error_handler(mtu, i);
                }
                mtu3_writel(mbase, U3D_TQERRIR0, errval);
        }
}

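/* QMU interrupt handler: handle the done interrupts first, then the errors */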
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
        void __iomem *mbase = mtu->mac_base;
        u32 qmu_status;
        u32 qmu_done_status;

        /* U3D_QISAR1 is updated when read */
        qmu_status = mtu3_readl(mbase, U3D_QISAR1);
        qmu_status &= mtu3_readl(mbase, U3D_QIER1);

        qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
        qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
        mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
        dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
                (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
                qmu_status);

        if (qmu_done_status)
                qmu_done_isr(mtu, qmu_done_status);

        if (qmu_status)
                qmu_exception_isr(mtu, qmu_status);

        return IRQ_HANDLED;
}

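/* create the dma_pool used to allocate GPD rings for the endpoints */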
int mtu3_qmu_init(struct mtu3 *mtu)
{
        compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

        mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
                        QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

        if (!mtu->qmu_gpd_pool)
                return -ENOMEM;

        return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
        dma_pool_destroy(mtu->qmu_gpd_pool);
}