linux/kernel/bpf/bpf_struct_ops.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

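/* A struct_ops map value moves through a simple lifecycle:
 *
 *   INIT --update_elem()--> INUSE --delete_elem()--> TOBEFREE
 *
 * INIT:     map created; nothing registered yet.
 * INUSE:    kvalue.data has been registered with the kernel subsystem.
 * TOBEFREE: unregistered; the map is freed once the last refcnt drops.
 */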
enum bpf_struct_ops_state {
        BPF_STRUCT_OPS_STATE_INIT,
        BPF_STRUCT_OPS_STATE_INUSE,
        BPF_STRUCT_OPS_STATE_TOBEFREE,
};

#define BPF_STRUCT_OPS_COMMON_VALUE                     \
        refcount_t refcnt;                              \
        enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
        BPF_STRUCT_OPS_COMMON_VALUE;
        char data[0] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
        struct bpf_map map;
        const struct bpf_struct_ops *st_ops;
        /* protect map_update */
        struct mutex lock;
        /* progs holds the bpf_progs that populate the func ptrs
         * of the kernel's struct (in kvalue.data).
         */
        struct bpf_prog **progs;
        /* image is a page holding the trampolines that save the
         * func args before calling the bpf_prog.  A PAGE_SIZE
         * "image" is enough to store all the trampolines for
         * "progs[]".
         */
        void *image;
        /* uvalue->data stores a copy of the kernel struct
         * (e.g. tcp_congestion_ops) in a form more useful to
         * userspace than the kvalue.  For example, a bpf_prog's
         * id is stored instead of the kernel address of its
         * func ptr.
         */
        struct bpf_struct_ops_value *uvalue;
        /* kvalue.data stores the actual kernel's struct
         * (e.g. tcp_congestion_ops) that will be
         * registered to the kernel subsystem.
         */
        struct bpf_struct_ops_value kvalue;
};

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value type exposed to userspace, and its BTF type id is
 * stored in map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)                              \
extern struct bpf_struct_ops bpf_##_name;                       \
                                                                \
struct bpf_struct_ops_##_name {                                 \
        BPF_STRUCT_OPS_COMMON_VALUE;                            \
        struct _name data ____cacheline_aligned_in_smp;         \
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
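
/* For example, with BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) listed in
 * bpf_struct_ops_types.h, the above expands to:
 *
 *      extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *      struct bpf_struct_ops_tcp_congestion_ops {
 *              refcount_t refcnt;
 *              enum bpf_struct_ops_state state;
 *              struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *      };
 */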
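/* Give each type in bpf_struct_ops_types.h an index; it is used below
 * to build the bpf_struct_ops[] table that the find() helpers scan.
 */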
enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
        __NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)                              \
        [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

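/* BTF type of "struct module"; resolved once in bpf_struct_ops_init()
 * and used in map_update_elem() to recognize the "owner" member.
 */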
static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
        s32 type_id, value_id, module_id;
        const struct btf_member *member;
        struct bpf_struct_ops *st_ops;
        const struct btf_type *t;
        char value_name[128];
        const char *mname;
        u32 i, j;

        /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

        module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
        if (module_id < 0) {
                pr_warn("Cannot find struct module in btf_vmlinux\n");
                return;
        }
        module_type = btf_type_by_id(btf, module_id);

        for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
                st_ops = bpf_struct_ops[i];

                if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
                    sizeof(value_name)) {
                        pr_warn("struct_ops name %s is too long\n",
                                st_ops->name);
                        continue;
                }
                sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

                value_id = btf_find_by_name_kind(btf, value_name,
                                                 BTF_KIND_STRUCT);
                if (value_id < 0) {
                        pr_warn("Cannot find struct %s in btf_vmlinux\n",
                                value_name);
                        continue;
                }

                type_id = btf_find_by_name_kind(btf, st_ops->name,
                                                BTF_KIND_STRUCT);
                if (type_id < 0) {
                        pr_warn("Cannot find struct %s in btf_vmlinux\n",
                                st_ops->name);
                        continue;
                }
                t = btf_type_by_id(btf, type_id);
                if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
                        pr_warn("Cannot support #%u members in struct %s\n",
                                btf_type_vlen(t), st_ops->name);
                        continue;
                }

                for_each_member(j, t, member) {
                        const struct btf_type *func_proto;

                        mname = btf_name_by_offset(btf, member->name_off);
                        if (!*mname) {
                                pr_warn("anon member in struct %s is not supported\n",
                                        st_ops->name);
                                break;
                        }

                        if (btf_member_bitfield_size(t, member)) {
                                pr_warn("bit field member %s in struct %s is not supported\n",
                                        mname, st_ops->name);
                                break;
                        }

                        func_proto = btf_type_resolve_func_ptr(btf,
                                                               member->type,
                                                               NULL);
                        if (func_proto &&
                            btf_distill_func_proto(log, btf,
                                                   func_proto, mname,
                                                   &st_ops->func_models[j])) {
                                pr_warn("Error in parsing func ptr %s in struct %s\n",
                                        mname, st_ops->name);
                                break;
                        }
                }

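                /* j == vlen means the loop above did not "break":
                 * every member was accepted.
                 */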
                if (j == btf_type_vlen(t)) {
                        if (st_ops->init(btf)) {
                                pr_warn("Error in init bpf_struct_ops %s\n",
                                        st_ops->name);
                        } else {
                                st_ops->type_id = type_id;
                                st_ops->type = t;
                                st_ops->value_id = value_id;
                                st_ops->value_type = btf_type_by_id(btf,
                                                                    value_id);
                        }
                }
        }
}

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
        unsigned int i;

        if (!value_id || !btf_vmlinux)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
                if (bpf_struct_ops[i]->value_id == value_id)
                        return bpf_struct_ops[i];
        }

        return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
        unsigned int i;

        if (!type_id || !btf_vmlinux)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
                if (bpf_struct_ops[i]->type_id == type_id)
                        return bpf_struct_ops[i];
        }

        return NULL;
}

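/* A struct_ops map has exactly one element, at key 0, so iterating
 * either reports that key or -ENOENT.
 */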
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
                                           void *next_key)
{
        if (key && *(u32 *)key == 0)
                return -ENOENT;

        *(u32 *)next_key = 0;
        return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
                                       void *value)
{
        struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
        struct bpf_struct_ops_value *uvalue, *kvalue;
        enum bpf_struct_ops_state state;

        if (unlikely(*(u32 *)key != 0))
                return -ENOENT;

        kvalue = &st_map->kvalue;
        /* Pair with smp_store_release() during map_update */
        state = smp_load_acquire(&kvalue->state);
        if (state == BPF_STRUCT_OPS_STATE_INIT) {
                memset(value, 0, map->value_size);
                return 0;
        }

        /* No lock is needed: state and refcnt do not have to form a
         * consistent snapshot for this read-only copy to userspace.
         */
        uvalue = (struct bpf_struct_ops_value *)value;
        memcpy(uvalue, st_map->uvalue, map->value_size);
        uvalue->state = state;
        refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

        return 0;
}

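/* Lookup from a BPF program is not supported; only the syscall path
 * (bpf_struct_ops_map_sys_lookup_elem() above) can read the value.
 */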
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
        const struct btf_type *t = st_map->st_ops->type;
        u32 i;

        for (i = 0; i < btf_type_vlen(t); i++) {
                if (st_map->progs[i]) {
                        bpf_prog_put(st_map->progs[i]);
                        st_map->progs[i] = NULL;
                }
        }
}

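/* Verify that any padding (hole) between or after members is zeroed,
 * so userspace cannot pass data the kernel does not know about.
 */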
static int check_zero_holes(const struct btf_type *t, void *data)
{
        const struct btf_member *member;
        u32 i, moff, msize, prev_mend = 0;
        const struct btf_type *mtype;

        for_each_member(i, t, member) {
                moff = btf_member_bit_offset(t, member) / 8;
                if (moff > prev_mend &&
                    memchr_inv(data + prev_mend, 0, moff - prev_mend))
                        return -EINVAL;

                mtype = btf_type_by_id(btf_vmlinux, member->type);
                mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
                                         NULL, NULL);
                if (IS_ERR(mtype))
                        return PTR_ERR(mtype);
                prev_mend = moff + msize;
        }

        if (t->size > prev_mend &&
            memchr_inv(data + prev_mend, 0, t->size - prev_mend))
                return -EINVAL;

        return 0;
}

static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                                          void *value, u64 flags)
{
        struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
        const struct bpf_struct_ops *st_ops = st_map->st_ops;
        struct bpf_struct_ops_value *uvalue, *kvalue;
        const struct btf_member *member;
        const struct btf_type *t = st_ops->type;
        void *udata, *kdata;
        int prog_fd, err = 0;
        void *image;
        u32 i;

        if (flags)
                return -EINVAL;

        if (*(u32 *)key != 0)
                return -E2BIG;

        err = check_zero_holes(st_ops->value_type, value);
        if (err)
                return err;

        uvalue = (struct bpf_struct_ops_value *)value;
        err = check_zero_holes(t, uvalue->data);
        if (err)
                return err;

        if (uvalue->state || refcount_read(&uvalue->refcnt))
                return -EINVAL;

        uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
        kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

        mutex_lock(&st_map->lock);

        if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
                err = -EBUSY;
                goto unlock;
        }

        memcpy(uvalue, value, map->value_size);

        udata = &uvalue->data;
        kdata = &kvalue->data;
        image = st_map->image;

        for_each_member(i, t, member) {
                const struct btf_type *mtype, *ptype;
                struct bpf_prog *prog;
                u32 moff;

                moff = btf_member_bit_offset(t, member) / 8;
                ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
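                /* Userspace must leave the "struct module *owner" member
                 * NULL; the kernel fills in BPF_MODULE_OWNER itself.
                 */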
                if (ptype == module_type) {
                        if (*(void **)(udata + moff)) {
                                err = -EINVAL;
                                goto reset_unlock;
                        }
                        *(void **)(kdata + moff) = BPF_MODULE_OWNER;
                        continue;
                }

                err = st_ops->init_member(t, member, kdata, udata);
                if (err < 0)
                        goto reset_unlock;

                /* The ->init_member() has handled this member */
                if (err > 0)
                        continue;

                /* If st_ops->init_member does not handle it,
                 * we will only handle func ptrs and zero-ed members
                 * here.  Reject everything else.
                 */

                /* All non-func-ptr members must be 0 */
                if (!ptype || !btf_type_is_func_proto(ptype)) {
                        u32 msize;

                        mtype = btf_type_by_id(btf_vmlinux, member->type);
                        mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
                                                 NULL, NULL);
                        if (IS_ERR(mtype)) {
                                err = PTR_ERR(mtype);
                                goto reset_unlock;
                        }

                        if (memchr_inv(udata + moff, 0, msize)) {
                                err = -EINVAL;
                                goto reset_unlock;
                        }

                        continue;
                }

                prog_fd = (int)(*(unsigned long *)(udata + moff));
                /* Similar check to attr->attach_prog_fd: fd 0 means
                 * the member is unset.
                 */
                if (!prog_fd)
                        continue;

                prog = bpf_prog_get(prog_fd);
                if (IS_ERR(prog)) {
                        err = PTR_ERR(prog);
                        goto reset_unlock;
                }
                st_map->progs[i] = prog;

                if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
                    prog->aux->attach_btf_id != st_ops->type_id ||
                    prog->expected_attach_type != i) {
                        err = -EINVAL;
                        goto reset_unlock;
                }

                err = arch_prepare_bpf_trampoline(image,
                                                  st_map->image + PAGE_SIZE,
                                                  &st_ops->func_models[i], 0,
                                                  &prog, 1, NULL, 0, NULL);
                if (err < 0)
                        goto reset_unlock;

                *(void **)(kdata + moff) = image;
                image += err;

                /* store the prog's id (not a kernel address) in udata */
                *(unsigned long *)(udata + moff) = prog->aux->id;
        }

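        /* Hold one refcnt on kvalue and one reference on the map while
         * kdata is registered; the map ref is dropped when the refcnt
         * falls to zero (map_delete_elem() / bpf_struct_ops_put()).
         */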
        refcount_set(&kvalue->refcnt, 1);
        bpf_map_inc(map);

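        /* The trampoline page is fully written: seal it read-only and
         * executable before publishing kdata to the subsystem.
         */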
        set_memory_ro((long)st_map->image, 1);
        set_memory_x((long)st_map->image, 1);
        err = st_ops->reg(kdata);
        if (likely(!err)) {
                /* Pair with smp_load_acquire() during lookup_elem().
                 * It ensures the above udata updates (e.g. prog->aux->id)
                 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
                 */
                smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
                goto unlock;
        }

        /* Error during st_ops->reg().  This is very unlikely since
         * the above init_member() should have caught it earlier
         * before reg().  The only possibility is a race with another
         * map registering the same struct_ops (under the same name)
         * to the sub-system.
         */
        set_memory_nx((long)st_map->image, 1);
        set_memory_rw((long)st_map->image, 1);
        bpf_map_put(map);

reset_unlock:
        bpf_struct_ops_map_put_progs(st_map);
        memset(uvalue, 0, map->value_size);
        memset(kvalue, 0, map->value_size);
unlock:
        mutex_unlock(&st_map->lock);
        return err;
}

static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
        enum bpf_struct_ops_state prev_state;
        struct bpf_struct_ops_map *st_map;

        st_map = (struct bpf_struct_ops_map *)map;
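        /* Only one deleter can win the INUSE -> TOBEFREE transition;
         * everyone else observes the old state and errors out below.
         */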
        prev_state = cmpxchg(&st_map->kvalue.state,
                             BPF_STRUCT_OPS_STATE_INUSE,
                             BPF_STRUCT_OPS_STATE_TOBEFREE);
        switch (prev_state) {
        case BPF_STRUCT_OPS_STATE_INUSE:
                st_map->st_ops->unreg(&st_map->kvalue.data);
                if (refcount_dec_and_test(&st_map->kvalue.refcnt))
                        bpf_map_put(map);
                return 0;
        case BPF_STRUCT_OPS_STATE_TOBEFREE:
                return -EINPROGRESS;
        case BPF_STRUCT_OPS_STATE_INIT:
                return -ENOENT;
        default:
                WARN_ON_ONCE(1);
                /* Should never happen.  Treat it as not found. */
                return -ENOENT;
        }
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
                                             struct seq_file *m)
{
        void *value;
        int err;

        value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                return;

        err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
        if (!err) {
                btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
                                  value, m);
                seq_puts(m, "\n");
        }

        kfree(value);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
        struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

        if (st_map->progs)
                bpf_struct_ops_map_put_progs(st_map);
        bpf_map_area_free(st_map->progs);
        bpf_jit_free_exec(st_map->image);
        bpf_map_area_free(st_map->uvalue);
        bpf_map_area_free(st_map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
        if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
            attr->map_flags || !attr->btf_vmlinux_value_type_id)
                return -EINVAL;
        return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
        const struct bpf_struct_ops *st_ops;
        size_t map_total_size, st_map_size;
        struct bpf_struct_ops_map *st_map;
        const struct btf_type *t, *vt;
        struct bpf_map_memory mem;
        struct bpf_map *map;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);

        st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
        if (!st_ops)
                return ERR_PTR(-ENOTSUPP);

        vt = st_ops->value_type;
        if (attr->value_size != vt->size)
                return ERR_PTR(-EINVAL);

        t = st_ops->type;

        st_map_size = sizeof(*st_map) +
                /* kvalue stores the
                 * struct bpf_struct_ops_tcp_congestion_ops
                 */
                (vt->size - sizeof(struct bpf_struct_ops_value));
        map_total_size = st_map_size +
                /* uvalue */
                vt->size +
                /* struct bpf_prog **progs */
                btf_type_vlen(t) * sizeof(struct bpf_prog *);
        err = bpf_map_charge_init(&mem, map_total_size);
        if (err < 0)
                return ERR_PTR(err);

        st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
        if (!st_map) {
                bpf_map_charge_finish(&mem);
                return ERR_PTR(-ENOMEM);
        }
        st_map->st_ops = st_ops;
        map = &st_map->map;

        st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
        st_map->progs =
                bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
                                   NUMA_NO_NODE);
        st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
        if (!st_map->uvalue || !st_map->progs || !st_map->image) {
                bpf_struct_ops_map_free(map);
                bpf_map_charge_finish(&mem);
                return ERR_PTR(-ENOMEM);
        }

        mutex_init(&st_map->lock);
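        /* Ensure the image page's W^X permissions are reset in the
         * direct map when it is eventually freed via bpf_jit_free_exec().
         */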
        set_vm_flush_reset_perms(st_map->image);
        bpf_map_init_from_attr(map, attr);
        bpf_map_charge_move(&map->memory, &mem);

        return map;
}

const struct bpf_map_ops bpf_struct_ops_map_ops = {
        .map_alloc_check = bpf_struct_ops_map_alloc_check,
        .map_alloc = bpf_struct_ops_map_alloc,
        .map_free = bpf_struct_ops_map_free,
        .map_get_next_key = bpf_struct_ops_map_get_next_key,
        .map_lookup_elem = bpf_struct_ops_map_lookup_elem,
        .map_delete_elem = bpf_struct_ops_map_delete_elem,
        .map_update_elem = bpf_struct_ops_map_update_elem,
        .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
};

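/* A sketch of how a subsystem is expected to pin kdata while using it
 * (hypothetical caller; "ops" is a registered struct_ops kdata ptr):
 *
 *      if (bpf_struct_ops_get(ops)) {
 *              ... safe to use ops here ...
 *              bpf_struct_ops_put(ops);
 *      }
 */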
/* "const void *" because some subsystems pass in a const
 * (e.g. const struct tcp_congestion_ops *).
 */
bool bpf_struct_ops_get(const void *kdata)
{
        struct bpf_struct_ops_value *kvalue;

        kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

        return refcount_inc_not_zero(&kvalue->refcnt);
}

void bpf_struct_ops_put(const void *kdata)
{
        struct bpf_struct_ops_value *kvalue;

        kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
        if (refcount_dec_and_test(&kvalue->refcnt)) {
                struct bpf_struct_ops_map *st_map;

                st_map = container_of(kvalue, struct bpf_struct_ops_map,
                                      kvalue);
                bpf_map_put(&st_map->map);
        }
}