00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036 #ifndef lint
00037 static const char RCSnmg[] = "@(#)$Header: /cvsroot/brlcad/brlcad/src/librt/g_nmg.c,v 14.9 2006/09/16 02:04:24 lbutler Exp $ (BRL)";
00038 #endif
00039
00040 #include "common.h"
00041
00042 #include <stdlib.h>
00043 #include <stdio.h>
00044 #include <math.h>
00045
00046 #ifdef HAVE_STRING_H
00047 # include <string.h>
00048 #else
00049 # include <strings.h>
00050 #endif
00051
00052 #include "machine.h"
00053 #include "vmath.h"
00054 #include "db.h"
00055 #include "nmg.h"
00056 #include "raytrace.h"
00057 #include "nurb.h"
00058 #include "./debug.h"
00059
00060
00061
00062
/* Sentinel values bracketing struct nmg_specific; checked at shot
 * time to detect memory corruption of the private data. */
#define NMG_SPEC_START_MAGIC 6014061
#define NMG_SPEC_END_MAGIC 7013061

/* Ray-tracing private data hung off soltab->st_specific for an NMG
 * solid (allocated by rt_nmg_prep(), released by rt_nmg_free()). */
struct nmg_specific {
	int nmg_smagic;			/* struct START magic */
	struct model *nmg_model;	/* the NMG model; ownership taken from the rt_db_internal at prep time */
	char *manifolds;		/* manifold classification table built by nmg_manifolds() */
	vect_t nmg_invdir;		/* per-shot reciprocal of the ray direction */
	int nmg_emagic;			/* struct END magic */
};

/* Scratch pairing of a point with its (possibly shared) NMG vertex. */
struct tmp_v {
	point_t pt;
	struct vertex *v;
};
00079
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
/**
 * R T _ N M G _ P R E P
 *
 * Prepare an NMG model for ray tracing: take ownership of the
 * in-memory model, compute the solid's bounding box and sphere from
 * the per-region bounding boxes, and build the manifold
 * classification table used by rt_nmg_shot().
 *
 * Returns 0 (this routine either succeeds or bombs).
 */
int
rt_nmg_prep(struct soltab *stp, struct rt_db_internal *ip, struct rt_i *rtip)
{
	struct model *m;
	register struct nmg_specific *nmg_s;
	struct nmgregion *rp;
	vect_t work;

	RT_CK_DB_INTERNAL(ip);
	m = (struct model *)ip->idb_ptr;
	NMG_CK_MODEL(m);

	BU_GETSTRUCT( nmg_s, nmg_specific );
	stp->st_specific = (genptr_t)nmg_s;
	nmg_s->nmg_model = m;
	/* Steal the model from the rt_db_internal so the caller does
	 * not free it; rt_nmg_free() releases it instead. */
	ip->idb_ptr = (genptr_t)NULL;
	nmg_s->nmg_smagic = NMG_SPEC_START_MAGIC;
	nmg_s->nmg_emagic = NMG_SPEC_END_MAGIC;

	/* Start with an inverted (empty) bounding box, then grow it. */
	VSETALL(stp->st_min, MAX_FASTF);
	VSETALL(stp->st_max, -MAX_FASTF);

	/* Expand the solid's bounds by each region's bounding box. */
	for (BU_LIST_FOR(rp, nmgregion, &m->r_hd )) {
		NMG_CK_REGION(rp);
		NMG_CK_REGION_A(rp->ra_p);

		VMINMAX(stp->st_min, stp->st_max, rp->ra_p->min_pt);
		VMINMAX(stp->st_min, stp->st_max, rp->ra_p->max_pt);

		/* Sanity-check region vertices against its bounds. */
		nmg_ck_vs_in_region( rp , &rtip->rti_tol );

	}

	/* Bounding sphere: centered in the box, radius = half diagonal. */
	VADD2SCALE( stp->st_center, stp->st_min, stp->st_max, 0.5 );
	VSUB2SCALE( work, stp->st_max, stp->st_min, 0.5 );
	stp->st_aradius = stp->st_bradius = MAGNITUDE(work);

	/* Build the manifold-dimension table consulted by the
	 * ray/NMG intersector at shot time. */
	nmg_s->manifolds = nmg_manifolds(m);

	return(0);
}
00144
00145
00146
00147
00148 void
00149 rt_nmg_print(register const struct soltab *stp)
00150 {
00151 register struct model *m =
00152 (struct model *)stp->st_specific;
00153
00154 NMG_CK_MODEL(m);
00155 nmg_pr_m(m);
00156 }
00157
00158
00159
00160
00161
00162
00163
00164
00165
00166
00167
00168
00169
00170
/**
 * R T _ N M G _ S H O T
 *
 * Intersect a ray with the NMG solid.  Hit segments are appended to
 * seghead; returns the status from nmg_ray_segs() (0 means miss).
 */
int
rt_nmg_shot(struct soltab *stp, register struct xray *rp, struct application *ap, struct seg *seghead)
{
	struct ray_data rd;	/* per-shot state handed to the intersector */
	int status;
	struct nmg_specific *nmg =
		(struct nmg_specific *)stp->st_specific;

	if(rt_g.NMG_debug & DEBUG_NMGRT) {
		bu_log("rt_nmg_shot()\n\t");
		rt_pr_tol(&ap->a_rt_i->rti_tol);
	}

	/* Check the sentinel magics to catch corruption of the
	 * private structure before trusting its contents. */
	if (nmg->nmg_smagic != NMG_SPEC_START_MAGIC)
		rt_bomb("start of NMG st_specific structure corrupted\n");

	if (nmg->nmg_emagic != NMG_SPEC_END_MAGIC)
		rt_bomb("end of NMG st_specific structure corrupted\n");

	/* Precompute the reciprocal of each ray-direction component.
	 * Near-zero components are snapped to exactly 0.0 (this
	 * deliberately mutates the caller's ray) and their inverse
	 * set to INFINITY so axis tests stay well defined. */
	if( !NEAR_ZERO( rp->r_dir[X], SQRT_SMALL_FASTF ) ) {
		nmg->nmg_invdir[X]=1.0/rp->r_dir[X];
	} else {
		nmg->nmg_invdir[X] = INFINITY;
		rp->r_dir[X] = 0.0;
	}
	if( !NEAR_ZERO( rp->r_dir[Y], SQRT_SMALL_FASTF ) ) {
		nmg->nmg_invdir[Y]=1.0/rp->r_dir[Y];
	} else {
		nmg->nmg_invdir[Y] = INFINITY;
		rp->r_dir[Y] = 0.0;
	}
	if( !NEAR_ZERO( rp->r_dir[Z], SQRT_SMALL_FASTF ) ) {
		nmg->nmg_invdir[Z]=1.0/rp->r_dir[Z];
	} else {
		nmg->nmg_invdir[Z] = INFINITY;
		rp->r_dir[Z] = 0.0;
	}

	/* Package everything the ray/NMG intersector needs. */
	rd.rd_m = nmg->nmg_model;
	rd.manifolds = nmg->manifolds;
	VMOVE(rd.rd_invdir, nmg->nmg_invdir);
	rd.rp = rp;
	rd.tol = &ap->a_rt_i->rti_tol;
	rd.ap = ap;
	rd.stp = stp;
	rd.seghead = seghead;
	rd.classifying_ray = 0;

	/* One zeroed hit/miss slot per structure in the model
	 * (maxindex bounds all NMG structure indices). */
	rd.hitmiss = (struct hitmiss **)bu_calloc( rd.rd_m->maxindex,
		sizeof(struct hitmiss *), "nmg geom hit list");

	/* Initialize the hit and miss lists empty. */
	BU_LIST_INIT(&rd.rd_hit);
	BU_LIST_INIT(&rd.rd_miss);
	rd.magic = NMG_RAY_DATA_MAGIC;

	/* Intersect the ray with the geometry. */
	nmg_isect_ray_model(&rd);

	/* Build ray segments from the collected hit points. */
	status = nmg_ray_segs(&rd);

	/* Release the per-shot hit/miss table. */
	bu_free( (char *)rd.hitmiss, "free nmg geom hit list");

	return(status);
}
00251
00252
/* Mark a segment as a miss by clearing its soltab pointer. */
#define RT_NMG_SEG_MISS(SEG) (SEG).seg_stp=RT_SOLTAB_NULL
00254
00255
00256
00257
00258
00259
/**
 * R T _ N M G _ V S H O T
 *
 * Vectorized shot interface.  NMG has no native bundled-ray
 * intersector, so the whole bundle of 'n' rays is delegated to the
 * generic rt_vstub() fallback.
 */
void
rt_nmg_vshot(struct soltab **stp, struct xray **rp, struct seg *segp, int n, struct application *ap)
{
	/* Defer to the generic scalar-fallback driver. */
	rt_vstub( stp, rp, segp, n, ap );
}
00270
00271
00272
00273
00274
00275
00276 void
00277 rt_nmg_norm(register struct hit *hitp, struct soltab *stp, register struct xray *rp)
00278 {
00279 VJOIN1( hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir );
00280 }
00281
00282
00283
00284
00285
00286
00287 void
00288 rt_nmg_curve(register struct curvature *cvp, register struct hit *hitp, struct soltab *stp)
00289 {
00290
00291
00292
00293 cvp->crv_c1 = cvp->crv_c2 = 0;
00294
00295
00296 bn_vec_ortho( cvp->crv_pdir, hitp->hit_normal );
00297 }
00298
00299
00300
00301
00302
00303
00304
00305
00306
/**
 * R T _ N M G _ U V
 *
 * Surface (u,v) parameterization hook.  Deliberately a no-op for
 * NMG solids; *uvp is left unmodified.
 */
void
rt_nmg_uv(struct application *ap, struct soltab *stp, register struct hit *hitp, register struct uvcoord *uvp)
{
	/* Intentionally empty. */
}
00313
00314
00315
00316
00317 void
00318 rt_nmg_free(register struct soltab *stp)
00319 {
00320 register struct nmg_specific *nmg =
00321 (struct nmg_specific *)stp->st_specific;
00322
00323 nmg_km( nmg->nmg_model );
00324 bu_free( (char *)nmg, "nmg_specific" );
00325 }
00326
00327
00328
00329
/**
 * R T _ N M G _ C L A S S
 *
 * Point-classification hook; not implemented for NMG solids, so it
 * unconditionally reports 0.
 */
int
rt_nmg_class(void)
{
	return 0;
}
00335
00336
00337
00338
00339
00340 int
00341 rt_nmg_plot(struct bu_list *vhead, struct rt_db_internal *ip, const struct rt_tess_tol *ttol, const struct bn_tol *tol)
00342 {
00343 LOCAL struct model *m;
00344
00345 RT_CK_DB_INTERNAL(ip);
00346 m = (struct model *)ip->idb_ptr;
00347 NMG_CK_MODEL(m);
00348
00349 nmg_m_to_vlist( vhead, m, 0 );
00350
00351 return(0);
00352 }
00353
00354
00355
00356
00357
00358
00359
00360
00361
00362
00363
00364
00365
/**
 * R T _ N M G _ T E S S
 *
 * "Tessellate" an NMG into an NMG: the database object already is
 * an NMG, so merge all of its regions into one and transplant that
 * region into the caller's model 'm'.  On success *r points at the
 * resulting (single) region and 0 is returned; -1 is returned when
 * the source model has no regions.
 *
 * The source model's structures are absorbed into 'm' and the
 * rt_db_internal's pointer is cleared — the database copy of the
 * object is consumed by this call.
 */
int
rt_nmg_tess(struct nmgregion **r, struct model *m, struct rt_db_internal *ip, const struct rt_tess_tol *ttol, const struct bn_tol *tol)
{
	LOCAL struct model *lm;

	NMG_CK_MODEL(m);

	RT_CK_DB_INTERNAL(ip);
	lm = (struct model *)ip->idb_ptr;
	NMG_CK_MODEL(lm);

	if( BU_LIST_IS_EMPTY( &(lm->r_hd) ) ) {
		/* No regions to hand back. */
		*r = (struct nmgregion *)NULL;
		return -1;
	}

	/* Start with the first region; fold every following region
	 * into it so the model ends up with exactly one. */
	*r = BU_LIST_FIRST(nmgregion, &(lm->r_hd) );
	NMG_CK_REGION(*r);
	if( BU_LIST_NEXT_NOT_HEAD( *r, &(lm->r_hd) ) ) {
		struct nmgregion *r2;

		r2 = BU_LIST_PNEXT( nmgregion, &((*r)->l) );
		while( BU_LIST_NOT_HEAD( &r2->l, &(lm->r_hd) ) )
		{
			struct nmgregion *next_r;

			/* Grab the successor before r2 is absorbed. */
			next_r = BU_LIST_PNEXT( nmgregion, &r2->l );
			nmg_merge_regions( *r, r2, tol );

			r2 = next_r;
		}
	}

	/* Move the now single-region model into the caller's model
	 * and surrender ownership of the database copy. */
	nmg_merge_models(m, lm);
	ip->idb_ptr = GENPTR_NULL;

	return(0);
}
00413
/**
 * Verify the 4-byte big-endian magic number at _cp against _magic,
 * bombing with a diagnostic on mis-match.
 *
 * Fixed: the expansion is wrapped in do { } while(0) so the macro
 * behaves as a single statement (the bare `if` form was unsafe in
 * an unbraced if/else), and the value returned by bu_glong() (an
 * unsigned long) is printed with %lx instead of %x.
 */
#define RT_CK_DISKMAGIC(_cp,_magic) \
	do { \
		if( bu_glong(_cp) != _magic ) { \
			bu_log("RT_CK_DISKMAGIC: magic mis-match, got x%lx, s/b x%x, file %s, line %d\n", \
				bu_glong(_cp), _magic, __FILE__, __LINE__); \
			rt_bomb("bad magic\n"); \
		} \
	} while(0)
00420
00421
00422
00423
00424
00425
00426
00427
00428
00429
/*
 * On-disk (serialized) format of an NMG.
 *
 * Each in-memory structure is stored as a fixed-size,
 * architecture-neutral record: 4-byte big-endian integers and
 * subscripts, and 8-byte network-order IEEE doubles (hence the
 * [3*8]/[4*8] byte arrays for points, planes, etc.).  Pointers are
 * replaced by disk_index_t subscripts; bu_list links become
 * disk_rt_list subscript pairs.
 */
#define DISK_INDEX_NULL 0		/* subscript standing for a NULL pointer */
#define DISK_INDEX_LISTHEAD -1		/* subscript flagging a list head */

#define DISK_MODEL_VERSION 1		/* current on-disk format version */

typedef unsigned char disk_index_t[4];	/* 4-byte big-endian subscript */
struct disk_rt_list {
	disk_index_t forw;
	disk_index_t back;
};

#define DISK_MODEL_MAGIC 0x4e6d6f64	/* "Nmod" */
struct disk_model {
	unsigned char magic[4];
	unsigned char version[4];	/* unused? -- see rt_nmg_edisk() note */
	struct disk_rt_list r_hd;	/* list of regions */
};

#define DISK_REGION_MAGIC 0x4e726567	/* "Nreg" */
struct disk_nmgregion {
	unsigned char magic[4];
	struct disk_rt_list l;		/* regions, in model's list */
	disk_index_t m_p;		/* owning model */
	disk_index_t ra_p;		/* attributes (bounding box) */
	struct disk_rt_list s_hd;	/* list of shells */
};

#define DISK_REGION_A_MAGIC 0x4e725f61	/* "Nr_a" */
struct disk_nmgregion_a {
	unsigned char magic[4];
	unsigned char min_pt[3*8];	/* bounding box, network doubles */
	unsigned char max_pt[3*8];
};

#define DISK_SHELL_MAGIC 0x4e73686c	/* "Nshl" */
struct disk_shell {
	unsigned char magic[4];
	struct disk_rt_list l;		/* shells, in region's list */
	disk_index_t r_p;		/* owning region */
	disk_index_t sa_p;		/* attributes (bounding box) */
	struct disk_rt_list fu_hd;	/* faceuses */
	struct disk_rt_list lu_hd;	/* wire loopuses */
	struct disk_rt_list eu_hd;	/* wire edgeuses */
	disk_index_t vu_p;		/* lone vertexuse, or null */
};

#define DISK_SHELL_A_MAGIC 0x4e735f61	/* "Ns_a" */
struct disk_shell_a {
	unsigned char magic[4];
	unsigned char min_pt[3*8];	/* bounding box, network doubles */
	unsigned char max_pt[3*8];
};

#define DISK_FACE_MAGIC 0x4e666163	/* "Nfac" */
struct disk_face {
	unsigned char magic[4];
	struct disk_rt_list l;		/* faces sharing this geometry */
	disk_index_t fu_p;		/* one faceuse of this face */
	disk_index_t g;			/* geometry: plane or snurb */
	unsigned char flip[4];		/* !0 -> flip normal of fg */
};

#define DISK_FACE_G_PLANE_MAGIC 0x4e666770	/* "Nfgp" */
struct disk_face_g_plane {
	unsigned char magic[4];
	struct disk_rt_list f_hd;	/* faces using this plane */
	unsigned char N[4*8];		/* plane equation, network doubles */
};

#define DISK_FACE_G_SNURB_MAGIC 0x4e666773	/* "Nfgs" */
struct disk_face_g_snurb {
	unsigned char magic[4];
	struct disk_rt_list f_hd;	/* faces using this surface */
	unsigned char u_order[4];
	unsigned char v_order[4];
	unsigned char u_size[4];	/* u.k_size */
	unsigned char v_size[4];	/* v.k_size */
	disk_index_t u_knots;		/* subscript of double array */
	disk_index_t v_knots;		/* subscript of double array */
	unsigned char us_size[4];
	unsigned char vs_size[4];
	unsigned char pt_type[4];
	disk_index_t ctl_points;	/* subscript of double array */
};

#define DISK_FACEUSE_MAGIC 0x4e667520	/* "Nfu " */
struct disk_faceuse {
	unsigned char magic[4];
	struct disk_rt_list l;		/* faceuses, in shell's list */
	disk_index_t s_p;		/* owning shell */
	disk_index_t fumate_p;		/* opposite-side faceuse */
	unsigned char orientation[4];
	disk_index_t f_p;		/* the face */
	disk_index_t fua_p;		/* attributes (unused here) */
	struct disk_rt_list lu_hd;	/* loopuses in this face */
};

#define DISK_LOOP_MAGIC 0x4e6c6f70	/* "Nlop" */
struct disk_loop {
	unsigned char magic[4];
	disk_index_t lu_p;		/* one loopuse of this loop */
	disk_index_t lg_p;		/* geometry (bounding box) */
};

#define DISK_LOOP_G_MAGIC 0x4e6c5f67	/* "Nl_g" */
struct disk_loop_g {
	unsigned char magic[4];
	unsigned char min_pt[3*8];	/* bounding box, network doubles */
	unsigned char max_pt[3*8];
};

#define DISK_LOOPUSE_MAGIC 0x4e6c7520	/* "Nlu " */
struct disk_loopuse {
	unsigned char magic[4];
	struct disk_rt_list l;		/* loopuses, in parent's list */
	disk_index_t up;		/* parent: shell or faceuse */
	disk_index_t lumate_p;		/* mate loopuse */
	unsigned char orientation[4];
	disk_index_t l_p;		/* the loop */
	disk_index_t lua_p;		/* attributes (unused here) */
	struct disk_rt_list down_hd;	/* children: edgeuses or a vertexuse */
};

#define DISK_EDGE_MAGIC 0x4e656467	/* "Nedg" */
struct disk_edge {
	unsigned char magic[4];
	disk_index_t eu_p;		/* one edgeuse of this edge */
	unsigned char is_real[4];	/* !0 -> edge appeared in original data */
};

#define DISK_EDGE_G_LSEG_MAGIC 0x4e65676c	/* "Negl" */
struct disk_edge_g_lseg {
	unsigned char magic[4];
	struct disk_rt_list eu_hd2;	/* edgeuses on this line */
	unsigned char e_pt[3*8];	/* point on line, network doubles */
	unsigned char e_dir[3*8];	/* line direction, network doubles */
};

#define DISK_EDGE_G_CNURB_MAGIC 0x4e656763	/* "Negc" */
struct disk_edge_g_cnurb {
	unsigned char magic[4];
	struct disk_rt_list eu_hd2;	/* edgeuses on this curve */
	unsigned char order[4];		/* 0 -> implicit curve on surface */
	unsigned char k_size[4];
	disk_index_t knots;		/* subscript of double array */
	unsigned char c_size[4];
	unsigned char pt_type[4];
	disk_index_t ctl_points;	/* subscript of double array */
};

#define DISK_EDGEUSE_MAGIC 0x4e657520	/* "Neu " */
struct disk_edgeuse {
	unsigned char magic[4];
	struct disk_rt_list l;		/* edgeuses, in parent's list */
	struct disk_rt_list l2;		/* edgeuses on same edge geometry */
	disk_index_t up;		/* parent: shell or loopuse */
	disk_index_t eumate_p;		/* mate edgeuse */
	disk_index_t radial_p;		/* radially adjacent edgeuse */
	disk_index_t e_p;		/* the edge */
	disk_index_t eua_p;		/* attributes (unused here) */
	unsigned char orientation[4];
	disk_index_t vu_p;		/* starting vertexuse */
	disk_index_t g;			/* geometry: lseg or cnurb */
};

#define DISK_VERTEX_MAGIC 0x4e767274	/* "Nvrt" */
struct disk_vertex {
	unsigned char magic[4];
	struct disk_rt_list vu_hd;	/* vertexuses of this vertex */
	disk_index_t vg_p;		/* geometry (coordinates) */
};

#define DISK_VERTEX_G_MAGIC 0x4e765f67	/* "Nv_g" */
struct disk_vertex_g {
	unsigned char magic[4];
	unsigned char coord[3*8];	/* coordinates, network doubles */
};

#define DISK_VERTEXUSE_MAGIC 0x4e767520	/* "Nvu " */
struct disk_vertexuse {
	unsigned char magic[4];
	struct disk_rt_list l;		/* vertexuses, in vertex's list */
	disk_index_t up;		/* parent: shell, loopuse, or edgeuse */
	disk_index_t v_p;		/* the vertex */
	disk_index_t a;			/* attributes: plane or cnurb, or null */
};

#define DISK_VERTEXUSE_A_PLANE_MAGIC 0x4e767561	/* "Nvua" */
struct disk_vertexuse_a_plane {
	unsigned char magic[4];
	unsigned char N[3*8];		/* normal at vertex, network doubles */
};

#define DISK_VERTEXUSE_A_CNURB_MAGIC 0x4e766163	/* "Nvac" */
struct disk_vertexuse_a_cnurb {
	unsigned char magic[4];
	unsigned char param[3*8];	/* parameters on curve, network doubles */
};

#define DISK_DOUBLE_ARRAY_MAGIC 0x4e666172	/* "Nfar" */
struct disk_double_array {
	unsigned char magic[4];
	unsigned char ndouble[4];	/* number of doubles that follow */
	unsigned char vals[1*8];	/* variable-length payload */
};
00635
00636
00637
/* Disk record type codes ("kinds").  These index the parallel
 * tables rt_nmg_disk_sizes[] and rt_nmg_kind_names[], so all three
 * must be kept in the same order. */
#define NMG_KIND_MODEL 0
#define NMG_KIND_NMGREGION 1
#define NMG_KIND_NMGREGION_A 2
#define NMG_KIND_SHELL 3
#define NMG_KIND_SHELL_A 4
#define NMG_KIND_FACEUSE 5
#define NMG_KIND_FACE 6
#define NMG_KIND_FACE_G_PLANE 7
#define NMG_KIND_FACE_G_SNURB 8
#define NMG_KIND_LOOPUSE 9
#define NMG_KIND_LOOP 10
#define NMG_KIND_LOOP_G 11
#define NMG_KIND_EDGEUSE 12
#define NMG_KIND_EDGE 13
#define NMG_KIND_EDGE_G_LSEG 14
#define NMG_KIND_EDGE_G_CNURB 15
#define NMG_KIND_VERTEXUSE 16
#define NMG_KIND_VERTEXUSE_A_PLANE 17
#define NMG_KIND_VERTEXUSE_A_CNURB 18
#define NMG_KIND_VERTEX 19
#define NMG_KIND_VERTEX_G 20

/* Kinds 21-24 are unused placeholders. */
#define NMG_KIND_DOUBLE_ARRAY 25

/* Total number of kinds, including the unused slots. */
#define NMG_N_KINDS 26
00666
/* On-disk byte size of each record kind, indexed by NMG_KIND_xxx.
 * Entries 21-25 are zero: 21-24 are unused, and the double-array
 * kind (25) is variable-length. */
const int rt_nmg_disk_sizes[NMG_N_KINDS] = {
	sizeof(struct disk_model),		/*  0 */
	sizeof(struct disk_nmgregion),		/*  1 */
	sizeof(struct disk_nmgregion_a),	/*  2 */
	sizeof(struct disk_shell),		/*  3 */
	sizeof(struct disk_shell_a),		/*  4 */
	sizeof(struct disk_faceuse),		/*  5 */
	sizeof(struct disk_face),		/*  6 */
	sizeof(struct disk_face_g_plane),	/*  7 */
	sizeof(struct disk_face_g_snurb),	/*  8 */
	sizeof(struct disk_loopuse),		/*  9 */
	sizeof(struct disk_loop),		/* 10 */
	sizeof(struct disk_loop_g),		/* 11 */
	sizeof(struct disk_edgeuse),		/* 12 */
	sizeof(struct disk_edge),		/* 13 */
	sizeof(struct disk_edge_g_lseg),	/* 14 */
	sizeof(struct disk_edge_g_cnurb),	/* 15 */
	sizeof(struct disk_vertexuse),		/* 16 */
	sizeof(struct disk_vertexuse_a_plane),	/* 17 */
	sizeof(struct disk_vertexuse_a_cnurb),	/* 18 */
	sizeof(struct disk_vertex),		/* 19 */
	sizeof(struct disk_vertex_g),		/* 20 */
	0,					/* 21 unused */
	0,					/* 22 unused */
	0,					/* 23 unused */
	0,					/* 24 unused */
	0					/* 25 double array (variable) */
};
/* Human-readable name of each kind (order matches NMG_KIND_xxx);
 * two extra off-the-end entries guard sloppy subscripts. */
const char rt_nmg_kind_names[NMG_N_KINDS+2][18] = {
	"model",		/*  0 */
	"nmgregion",		/*  1 */
	"nmgregion_a",		/*  2 */
	"shell",		/*  3 */
	"shell_a",		/*  4 */
	"faceuse",		/*  5 */
	"face",			/*  6 */
	"face_g_plane",		/*  7 */
	"face_g_snurb",		/*  8 */
	"loopuse",		/*  9 */
	"loop",			/* 10 */
	"loop_g",		/* 11 */
	"edgeuse",		/* 12 */
	"edge",			/* 13 */
	"edge_g_lseg",		/* 14 */
	"edge_g_cnurb",		/* 15 */
	"vertexuse",		/* 16 */
	"vertexuse_a_plane",	/* 17 */
	"vertexuse_a_cnurb",	/* 18 */
	"vertex",		/* 19 */
	"vertex_g",		/* 20 */
	"k21",			/* 21 unused */
	"k22",			/* 22 unused */
	"k23",			/* 23 unused */
	"k24",			/* 24 unused */
	"double_array",		/* 25 */
	"k26-OFF_END",
	"k27-OFF_END"
};
00725
00726
00727
00728
00729
00730
00731
00732 int
00733 rt_nmg_magic_to_kind(register long int magic)
00734 {
00735 switch(magic) {
00736 case NMG_MODEL_MAGIC:
00737 return NMG_KIND_MODEL;
00738 case NMG_REGION_MAGIC:
00739 return NMG_KIND_NMGREGION;
00740 case NMG_REGION_A_MAGIC:
00741 return NMG_KIND_NMGREGION_A;
00742 case NMG_SHELL_MAGIC:
00743 return NMG_KIND_SHELL;
00744 case NMG_SHELL_A_MAGIC:
00745 return NMG_KIND_SHELL_A;
00746 case NMG_FACEUSE_MAGIC:
00747 return NMG_KIND_FACEUSE;
00748 case NMG_FACE_MAGIC:
00749 return NMG_KIND_FACE;
00750 case NMG_FACE_G_PLANE_MAGIC:
00751 return NMG_KIND_FACE_G_PLANE;
00752 case NMG_FACE_G_SNURB_MAGIC:
00753 return NMG_KIND_FACE_G_SNURB;
00754 case NMG_LOOPUSE_MAGIC:
00755 return NMG_KIND_LOOPUSE;
00756 case NMG_LOOP_G_MAGIC:
00757 return NMG_KIND_LOOP_G;
00758 case NMG_LOOP_MAGIC:
00759 return NMG_KIND_LOOP;
00760 case NMG_EDGEUSE_MAGIC:
00761 return NMG_KIND_EDGEUSE;
00762 case NMG_EDGE_MAGIC:
00763 return NMG_KIND_EDGE;
00764 case NMG_EDGE_G_LSEG_MAGIC:
00765 return NMG_KIND_EDGE_G_LSEG;
00766 case NMG_EDGE_G_CNURB_MAGIC:
00767 return NMG_KIND_EDGE_G_CNURB;
00768 case NMG_VERTEXUSE_MAGIC:
00769 return NMG_KIND_VERTEXUSE;
00770 case NMG_VERTEXUSE_A_PLANE_MAGIC:
00771 return NMG_KIND_VERTEXUSE_A_PLANE;
00772 case NMG_VERTEXUSE_A_CNURB_MAGIC:
00773 return NMG_KIND_VERTEXUSE_A_CNURB;
00774 case NMG_VERTEX_MAGIC:
00775 return NMG_KIND_VERTEX;
00776 case NMG_VERTEX_G_MAGIC:
00777 return NMG_KIND_VERTEX_G;
00778 }
00779
00780 bu_log("magic = x%x\n", magic);
00781 rt_bomb("rt_nmg_magic_to_kind: bad magic");
00782 return -1;
00783 }
00784
00785
00786
/* Export/import bookkeeping: one entry per in-memory structure
 * index, recording where that structure lands in the disk file. */
struct nmg_exp_counts {
	long new_subscript;	/* disk subscript assigned to this struct (see rt_nmg_reindex()) */
	long per_struct_index;	/* position within this kind's output array (see rt_nmg_edisk()) */
	int kind;		/* NMG_KIND_xxx code */
	long first_fastf_relpos;	/* NOTE(review): apparently the subscript of the first fastf_t array for snurb/cnurb records -- used by code outside this view; confirm */
	long byte_offset;	/* byte offset of this record from the import base (used for NMG_KIND_DOUBLE_ARRAY) */
};

/* Output cursor for rt_nmg_export_fastf(): next write position in
 * the export buffer and the subscript it will be assigned. */
static unsigned char *rt_nmg_fastf_p;
static unsigned int rt_nmg_cur_fastf_subscript;
00798
00799
00800
00801
00802
00803
00804
00805
00806
00807
00808
00809
00810
00811
00812
00813
00814
00815
00816
00817
00818
00819
00820
00821
00822
00823
00824
00825
00826
00827
00828 int
00829 rt_nmg_export_fastf(const fastf_t *fp, int count, int pt_type, double scale)
00830
00831
00832
00833
00834 {
00835 register unsigned char *cp;
00836
00837 if( pt_type )
00838 count *= RT_NURB_EXTRACT_COORDS(pt_type);
00839
00840 cp = rt_nmg_fastf_p;
00841 (void)bu_plong( cp + 0, DISK_DOUBLE_ARRAY_MAGIC );
00842 (void)bu_plong( cp + 4, count );
00843 if( pt_type == 0 || scale == 1.0 ) {
00844 htond( cp + (4+4), (unsigned char *)fp, count );
00845 } else {
00846 fastf_t *new;
00847
00848
00849 new = (fastf_t *)bu_malloc( count*sizeof(fastf_t), "rt_nmg_export_fastf" );
00850 if( RT_NURB_IS_PT_RATIONAL(pt_type) ) {
00851
00852 register int i;
00853 int nelem;
00854
00855 nelem = RT_NURB_EXTRACT_COORDS(pt_type);
00856 for( i = 0; i < count; i += nelem ) {
00857 VSCALEN( &new[i], &fp[i], scale, nelem-1 );
00858 new[i+nelem-1] = fp[i+nelem-1];
00859 }
00860 } else {
00861
00862 VSCALEN( new, fp, scale, count );
00863 }
00864 htond( cp + (4+4), (unsigned char *)new, count );
00865 bu_free( (char *)new, "rt_nmg_export_fastf" );
00866 }
00867 cp += (4+4) + count * 8;
00868 rt_nmg_fastf_p = cp;
00869 return rt_nmg_cur_fastf_subscript++;
00870 }
00871
00872
00873
00874
00875 fastf_t *
00876 rt_nmg_import_fastf(const unsigned char *base, struct nmg_exp_counts *ecnt, long int subscript, const matp_t mat, int len, int pt_type)
00877
00878
00879
00880
00881
00882
00883 {
00884 const unsigned char *cp;
00885 register int count;
00886 fastf_t *ret;
00887 fastf_t *tmp;
00888
00889 if( ecnt[subscript].byte_offset <= 0 || ecnt[subscript].kind != NMG_KIND_DOUBLE_ARRAY ) {
00890 bu_log("subscript=%d, byte_offset=%d, kind=%d (expected %d)\n",
00891 subscript, ecnt[subscript].byte_offset,
00892 ecnt[subscript].kind, NMG_KIND_DOUBLE_ARRAY );
00893 rt_bomb("rt_nmg_import_fastf() bad ecnt table\n");
00894 }
00895
00896
00897 cp = base + ecnt[subscript].byte_offset;
00898 if( bu_glong( cp ) != DISK_DOUBLE_ARRAY_MAGIC ) {
00899 bu_log("magic mis-match, got x%x, s/b x%x, file %s, line %d\n",
00900 bu_glong(cp), DISK_DOUBLE_ARRAY_MAGIC, __FILE__, __LINE__);
00901 bu_log("subscript=%d, byte_offset=%d\n",
00902 subscript, ecnt[subscript].byte_offset);
00903 rt_bomb("rt_nmg_import_fastf() bad magic\n");
00904 }
00905
00906 if( pt_type )
00907 len *= RT_NURB_EXTRACT_COORDS(pt_type);
00908
00909 count = bu_glong( cp + 4 );
00910 if( count != len ) {
00911 bu_log("rt_nmg_import_fastf() subscript=%d, expected len=%d, got=%d\n",
00912 subscript, len, count );
00913 rt_bomb("rt_nmg_import_fastf()\n");
00914 }
00915 ret = (fastf_t *)bu_malloc( count * sizeof(fastf_t), "rt_nmg_import_fastf[]" );
00916 if( !mat ) {
00917 ntohd( (unsigned char *)ret, cp + (4+4), count );
00918 return ret;
00919 }
00920
00921
00922
00923
00924
00925
00926 tmp = (fastf_t *)bu_malloc( count * sizeof(fastf_t), "rt_nmg_import_fastf tmp[]" );
00927 ntohd( (unsigned char *)tmp, cp + (4+4), count );
00928 switch( RT_NURB_EXTRACT_COORDS(pt_type) ) {
00929 case 3:
00930 if( RT_NURB_IS_PT_RATIONAL(pt_type) ) rt_bomb("rt_nmg_import_fastf() Rational 3-tuple?\n");
00931 for( count -= 3 ; count >= 0; count -= 3 ) {
00932 MAT4X3PNT( &ret[count], mat, &tmp[count] );
00933 }
00934 break;
00935 case 4:
00936 if( !RT_NURB_IS_PT_RATIONAL(pt_type) ) rt_bomb("rt_nmg_import_fastf() non-rational 4-tuple?\n");
00937 for( count -= 4 ; count >= 0; count -= 4 ) {
00938 MAT4X4PNT( &ret[count], mat, &tmp[count] );
00939 }
00940 break;
00941 default:
00942 rt_bomb("rt_nmg_import_fastf() unsupported # of coords in ctl_point\n");
00943 }
00944 bu_free( (char *)tmp, "rt_nmg_import_fastf tmp[]" );
00945 return ret;
00946 }
00947
00948
00949
00950
00951
00952
00953
00954
00955
00956
00957
00958 int
00959 rt_nmg_reindex(genptr_t p, struct nmg_exp_counts *ecnt)
00960 {
00961 int index;
00962 int ret=0;
00963
00964
00965 if( p == 0 ) {
00966 ret = 0;
00967 index = 0;
00968 } else {
00969 index = nmg_index_of_struct((long *)(p));
00970 if( index == -1 ) {
00971 ret = DISK_INDEX_LISTHEAD;
00972 } else if( index < -1 ) {
00973 rt_bomb("rt_nmg_reindex(): unable to obtain struct index\n");
00974 } else {
00975 ret = ecnt[index].new_subscript;
00976 if( ecnt[index].kind < 0 ) {
00977 bu_log("rt_nmg_reindex(p=x%x), p->index=%d, ret=%d, kind=%d\n", p, index, ret, ecnt[index].kind);
00978 rt_bomb("rt_nmg_reindex() This index not found in ecnt[]\n");
00979 }
00980
00981 if( ret < 0 || ret > ecnt[0].byte_offset ) {
00982 bu_log("rt_nmg_reindex(p=x%x) %s, p->index=%d, ret=%d, maxindex=%d\n",
00983 p,
00984 bu_identify_magic(*(long *)p),
00985 index, ret, ecnt[0].byte_offset);
00986 rt_bomb("rt_nmg_reindex() subscript out of range\n");
00987 }
00988 }
00989 }
00990
00991 return( ret );
00992 }
00993
00994
/* Export helpers used by rt_nmg_edisk():
 *   INDEX    - store the disk subscript of pointer (i)->elem into
 *              the 4-byte field (o)->elem.
 *   INDEXL   - store both links of a bu_list, warning if the forward
 *              link maps to DISK_INDEX_NULL (lists should never).
 *   PUTMAGIC - store the given disk magic into d->magic. */
#define INDEX(o,i,elem) \
	(void)bu_plong(&(o)->elem[0], rt_nmg_reindex((genptr_t)((i)->elem), ecnt))
#define INDEXL(oo,ii,elem) { \
	register long _f = rt_nmg_reindex((genptr_t)((ii)->elem.forw), ecnt); \
	if( _f == DISK_INDEX_NULL ) bu_log("Warning rt_nmg_edisk: reindex forw to null?\n"); \
	(void)bu_plong( (oo)->elem.forw, _f ); \
	(void)bu_plong( (oo)->elem.back, rt_nmg_reindex((genptr_t)((ii)->elem.back), ecnt) ); }
#define PUTMAGIC(_magic) (void)bu_plong( &d->magic[0], _magic )
01003
01004
01005
01006
01007
01008
01009
01010
/**
 * R T _ N M G _ E D I S K
 *
 * Export one NMG structure ('ip', whose kind is recorded in
 * ecnt[index]) into its fixed-size disk record inside the output
 * area 'op'.  Pointers become subscripts (via rt_nmg_reindex()),
 * geometric values are scaled by local2mm and converted to
 * network-order doubles with htond().
 */
void
rt_nmg_edisk(genptr_t op, genptr_t ip, struct nmg_exp_counts *ecnt, int index, double local2mm)
{
	int oindex;	/* position within this kind's output array */

	oindex = ecnt[index].per_struct_index;
	switch(ecnt[index].kind) {
	case NMG_KIND_MODEL:
		{
			struct model *m = (struct model *)ip;
			struct disk_model *d;
			d = &((struct disk_model *)op)[oindex];
			NMG_CK_MODEL(m);
			PUTMAGIC( DISK_MODEL_MAGIC );
			/* NOTE(review): writes version 0 although
			 * DISK_MODEL_VERSION is 1 -- confirm against the
			 * importer's version check. */
			bu_plong( d->version, 0 );
			INDEXL( d, m, r_hd );
		}
		return;
	case NMG_KIND_NMGREGION:
		{
			struct nmgregion *r = (struct nmgregion *)ip;
			struct disk_nmgregion *d;
			d = &((struct disk_nmgregion *)op)[oindex];
			NMG_CK_REGION(r);
			PUTMAGIC( DISK_REGION_MAGIC );
			INDEXL( d, r, l );
			INDEX( d, r, m_p );
			INDEX( d, r, ra_p );
			INDEXL( d, r, s_hd );
		}
		return;
	case NMG_KIND_NMGREGION_A:
		{
			struct nmgregion_a *r = (struct nmgregion_a *)ip;
			struct disk_nmgregion_a *d;
			point_t min, max;
			d = &((struct disk_nmgregion_a *)op)[oindex];
			NMG_CK_REGION_A(r);
			PUTMAGIC( DISK_REGION_A_MAGIC );
			/* Bounding box is scaled to millimeters. */
			VSCALE( min, r->min_pt, local2mm );
			VSCALE( max, r->max_pt, local2mm );
			htond( d->min_pt, (unsigned char *)min, 3 );
			htond( d->max_pt, (unsigned char *)max, 3 );
		}
		return;
	case NMG_KIND_SHELL:
		{
			struct shell *s = (struct shell *)ip;
			struct disk_shell *d;
			d = &((struct disk_shell *)op)[oindex];
			NMG_CK_SHELL(s);
			PUTMAGIC( DISK_SHELL_MAGIC );
			INDEXL( d, s, l );
			INDEX( d, s, r_p );
			INDEX( d, s, sa_p );
			INDEXL( d, s, fu_hd );
			INDEXL( d, s, lu_hd );
			INDEXL( d, s, eu_hd );
			INDEX( d, s, vu_p );
		}
		return;
	case NMG_KIND_SHELL_A:
		{
			struct shell_a *sa = (struct shell_a *)ip;
			struct disk_shell_a *d;
			point_t min, max;
			d = &((struct disk_shell_a *)op)[oindex];
			NMG_CK_SHELL_A(sa);
			PUTMAGIC( DISK_SHELL_A_MAGIC );
			VSCALE( min, sa->min_pt, local2mm );
			VSCALE( max, sa->max_pt, local2mm );
			htond( d->min_pt, (unsigned char *)min, 3 );
			htond( d->max_pt, (unsigned char *)max, 3 );
		}
		return;
	case NMG_KIND_FACEUSE:
		{
			struct faceuse *fu = (struct faceuse *)ip;
			struct disk_faceuse *d;
			d = &((struct disk_faceuse *)op)[oindex];
			NMG_CK_FACEUSE(fu);
			NMG_CK_FACEUSE(fu->fumate_p);
			NMG_CK_FACE(fu->f_p);
			/* A faceuse and its mate must share one face. */
			if( fu->f_p != fu->fumate_p->f_p ) bu_log("faceuse export, differing faces\n");
			PUTMAGIC( DISK_FACEUSE_MAGIC );
			INDEXL( d, fu, l );
			INDEX( d, fu, s_p );
			INDEX( d, fu, fumate_p );
			bu_plong( d->orientation, fu->orientation );
			INDEX( d, fu, f_p );
			INDEXL( d, fu, lu_hd );
		}
		return;
	case NMG_KIND_FACE:
		{
			struct face *f = (struct face *)ip;
			struct disk_face *d;
			d = &((struct disk_face *)op)[oindex];
			NMG_CK_FACE(f);
			PUTMAGIC( DISK_FACE_MAGIC );
			INDEXL( d, f, l );
			INDEX( d, f, fu_p );
			/* Geometry union (plane or snurb) via its magic. */
			bu_plong( d->g, rt_nmg_reindex((genptr_t)(f->g.magic_p), ecnt) );
			bu_plong( d->flip, f->flip );
		}
		return;
	case NMG_KIND_FACE_G_PLANE:
		{
			struct face_g_plane *fg = (struct face_g_plane *)ip;
			struct disk_face_g_plane *d;
			plane_t plane;
			d = &((struct disk_face_g_plane *)op)[oindex];
			NMG_CK_FACE_G_PLANE(fg);
			PUTMAGIC( DISK_FACE_G_PLANE_MAGIC );
			INDEXL( d, fg, f_hd );
			/* Only the distance term scales with units. */
			VMOVE( plane, fg->N );
			plane[3] = fg->N[3] * local2mm;
			htond( d->N, (unsigned char *)plane, 4 );
		}
		return;
	case NMG_KIND_FACE_G_SNURB:
		{
			struct face_g_snurb *fg = (struct face_g_snurb *)ip;
			struct disk_face_g_snurb *d;

			d = &((struct disk_face_g_snurb *)op)[oindex];
			NMG_CK_FACE_G_SNURB(fg);
			PUTMAGIC( DISK_FACE_G_SNURB_MAGIC );
			INDEXL( d, fg, f_hd );
			bu_plong( d->u_order, fg->order[0] );
			bu_plong( d->v_order, fg->order[1] );
			bu_plong( d->u_size, fg->u.k_size );
			bu_plong( d->v_size, fg->v.k_size );
			/* Knot vectors are dimensionless: scale 1.0. */
			bu_plong( d->u_knots,
				rt_nmg_export_fastf( fg->u.knots,
					fg->u.k_size, 0, 1.0 ) );
			bu_plong( d->v_knots,
				rt_nmg_export_fastf( fg->v.knots,
					fg->v.k_size, 0, 1.0 ) );
			bu_plong( d->us_size, fg->s_size[0] );
			bu_plong( d->vs_size, fg->s_size[1] );
			bu_plong( d->pt_type, fg->pt_type );
			/* Control points carry model units: scale them. */
			bu_plong( d->ctl_points,
				rt_nmg_export_fastf( fg->ctl_points,
					fg->s_size[0] * fg->s_size[1],
					fg->pt_type,
					local2mm ) );
		}
		return;
	case NMG_KIND_LOOPUSE:
		{
			struct loopuse *lu = (struct loopuse *)ip;
			struct disk_loopuse *d;
			d = &((struct disk_loopuse *)op)[oindex];
			NMG_CK_LOOPUSE(lu);
			PUTMAGIC( DISK_LOOPUSE_MAGIC );
			INDEXL( d, lu, l );
			bu_plong( d->up, rt_nmg_reindex((genptr_t)(lu->up.magic_p), ecnt) );
			INDEX( d, lu, lumate_p );
			bu_plong( d->orientation, lu->orientation );
			INDEX( d, lu, l_p );
			INDEXL( d, lu, down_hd );
		}
		return;
	case NMG_KIND_LOOP:
		{
			struct loop *loop = (struct loop *)ip;
			struct disk_loop *d;
			d = &((struct disk_loop *)op)[oindex];
			NMG_CK_LOOP(loop);
			PUTMAGIC( DISK_LOOP_MAGIC );
			INDEX( d, loop, lu_p );
			INDEX( d, loop, lg_p );
		}
		return;
	case NMG_KIND_LOOP_G:
		{
			struct loop_g *lg = (struct loop_g *)ip;
			struct disk_loop_g *d;
			point_t min, max;
			d = &((struct disk_loop_g *)op)[oindex];
			NMG_CK_LOOP_G(lg);
			PUTMAGIC( DISK_LOOP_G_MAGIC );
			VSCALE( min, lg->min_pt, local2mm );
			VSCALE( max, lg->max_pt, local2mm );
			htond( d->min_pt, (unsigned char *)min, 3 );
			htond( d->max_pt, (unsigned char *)max, 3 );
		}
		return;
	case NMG_KIND_EDGEUSE:
		{
			struct edgeuse *eu = (struct edgeuse *)ip;
			struct disk_edgeuse *d;
			d = &((struct disk_edgeuse *)op)[oindex];
			NMG_CK_EDGEUSE(eu);
			PUTMAGIC( DISK_EDGEUSE_MAGIC );
			INDEXL( d, eu, l );
			/* l2 chains edgeuses sharing the same edge
			 * geometry; exported the same way as l. */
			INDEXL( d, eu, l2 );
			bu_plong( d->up, rt_nmg_reindex((genptr_t)(eu->up.magic_p), ecnt) );
			INDEX( d, eu, eumate_p );
			INDEX( d, eu, radial_p );
			INDEX( d, eu, e_p );
			bu_plong( d->orientation, eu->orientation);
			INDEX( d, eu, vu_p );
			bu_plong( d->g, rt_nmg_reindex((genptr_t)(eu->g.magic_p), ecnt) );
		}
		return;
	case NMG_KIND_EDGE:
		{
			struct edge *e = (struct edge *)ip;
			struct disk_edge *d;
			d = &((struct disk_edge *)op)[oindex];
			NMG_CK_EDGE(e);
			PUTMAGIC( DISK_EDGE_MAGIC );
			bu_plong( d->is_real, e->is_real );
			INDEX( d, e, eu_p );
		}
		return;
	case NMG_KIND_EDGE_G_LSEG:
		{
			struct edge_g_lseg *eg = (struct edge_g_lseg *)ip;
			struct disk_edge_g_lseg *d;
			point_t pt;
			d = &((struct disk_edge_g_lseg *)op)[oindex];
			NMG_CK_EDGE_G_LSEG(eg);
			PUTMAGIC( DISK_EDGE_G_LSEG_MAGIC );
			INDEXL( d, eg, eu_hd2 );
			/* The anchor point scales; the direction does not. */
			VSCALE( pt, eg->e_pt, local2mm );
			htond( d->e_pt, (unsigned char *)pt, 3);
			htond( d->e_dir, (unsigned char *)eg->e_dir, 3);
		}
		return;
	case NMG_KIND_EDGE_G_CNURB:
		{
			struct edge_g_cnurb *eg = (struct edge_g_cnurb *)ip;
			struct disk_edge_g_cnurb *d;
			d = &((struct disk_edge_g_cnurb *)op)[oindex];
			NMG_CK_EDGE_G_CNURB(eg);
			PUTMAGIC( DISK_EDGE_G_CNURB_MAGIC );
			INDEXL( d, eg, eu_hd2 );
			bu_plong( d->order, eg->order );
			/* Order 0 marks an implicit curve on the surface:
			 * no knots or control points are stored. */
			if( eg->order == 0 ) return;

			bu_plong( d->k_size, eg->k.k_size );
			bu_plong( d->knots,
				rt_nmg_export_fastf( eg->k.knots,
					eg->k.k_size, 0, 1.0 ) );
			bu_plong( d->c_size, eg->c_size );
			bu_plong( d->pt_type, eg->pt_type );
			/* UV-space control points are dimensionless;
			 * all others carry model units. */
			bu_plong( d->ctl_points,
				rt_nmg_export_fastf( eg->ctl_points,
					eg->c_size,
					eg->pt_type,
					RT_NURB_EXTRACT_PT_TYPE( eg->pt_type ) == RT_NURB_PT_UV ?
					1.0 : local2mm ) );
		}
		return;
	case NMG_KIND_VERTEXUSE:
		{
			struct vertexuse *vu = (struct vertexuse *)ip;
			struct disk_vertexuse *d;
			d = &((struct disk_vertexuse *)op)[oindex];
			NMG_CK_VERTEXUSE(vu);
			PUTMAGIC( DISK_VERTEXUSE_MAGIC );
			INDEXL( d, vu, l );
			bu_plong( d->up,
				rt_nmg_reindex((genptr_t)(vu->up.magic_p), ecnt) );
			INDEX( d, vu, v_p );
			/* Attribute union is optional; verify if present. */
			if(vu->a.magic_p)NMG_CK_VERTEXUSE_A_EITHER(vu->a.magic_p);
			bu_plong( d->a,
				rt_nmg_reindex((genptr_t)(vu->a.magic_p), ecnt) );
		}
		return;
	case NMG_KIND_VERTEXUSE_A_PLANE:
		{
			struct vertexuse_a_plane *vua = (struct vertexuse_a_plane *)ip;
			struct disk_vertexuse_a_plane *d;
			d = &((struct disk_vertexuse_a_plane *)op)[oindex];
			NMG_CK_VERTEXUSE_A_PLANE(vua);
			PUTMAGIC( DISK_VERTEXUSE_A_PLANE_MAGIC );
			/* Unit normal: no unit scaling applied. */
			htond( d->N, (unsigned char *)vua->N, 3 );
		}
		return;
	case NMG_KIND_VERTEXUSE_A_CNURB:
		{
			struct vertexuse_a_cnurb *vua = (struct vertexuse_a_cnurb *)ip;
			struct disk_vertexuse_a_cnurb *d;

			d = &((struct disk_vertexuse_a_cnurb *)op)[oindex];
			NMG_CK_VERTEXUSE_A_CNURB(vua);
			PUTMAGIC( DISK_VERTEXUSE_A_CNURB_MAGIC );
			/* Curve parameters are dimensionless. */
			htond( d->param, (unsigned char *)vua->param, 3 );
		}
		return;
	case NMG_KIND_VERTEX:
		{
			struct vertex *v = (struct vertex *)ip;
			struct disk_vertex *d;
			d = &((struct disk_vertex *)op)[oindex];
			NMG_CK_VERTEX(v);
			PUTMAGIC( DISK_VERTEX_MAGIC );
			INDEXL( d, v, vu_hd );
			INDEX( d, v, vg_p );
		}
		return;
	case NMG_KIND_VERTEX_G:
		{
			struct vertex_g *vg = (struct vertex_g *)ip;
			struct disk_vertex_g *d;
			point_t pt;
			d = &((struct disk_vertex_g *)op)[oindex];
			NMG_CK_VERTEX_G(vg);
			PUTMAGIC( DISK_VERTEX_G_MAGIC );
			VSCALE( pt, vg->coord, local2mm );
			htond( d->coord, (unsigned char *)pt, 3 );
		}
		return;
	}
	/* Unknown kind: diagnose and fall through (nothing written). */
	bu_log("rt_nmg_edisk kind=%d unknown\n", ecnt[index].kind);
}
01351 #undef INDEX
01352 #undef INDEXL
01353
01354
01355
01356
01357
01358
01359
/*
 * Reconstruct a pointer from a disk-encoded subscript: the on-disk
 * field (o)->elem holds a network-order index into ptrs[], the table
 * that maps export subscripts back to the in-core structures built by
 * rt_nmg_ialloc().
 */
#define INDEX(o,i,ty,elem)	(i)->elem = (struct ty *)ptrs[bu_glong((o)->elem)]

/*
 * Reconstruct one doubly-linked list link pair.  A negative disk
 * subscript is the encoding for "points at the list head", so the
 * link is aimed at the supplied head (hd); otherwise it is aimed at
 * the structure found through ptrs[].
 */
#define INDEXL_HD(oo,ii,elem,hd)	{ \
	register int	sub; \
	if( (sub = bu_glong((oo)->elem.forw)) < 0 ) \
		(ii)->elem.forw = &(hd); \
	else	(ii)->elem.forw = (struct bu_list *)ptrs[sub]; \
	if( (sub = bu_glong((oo)->elem.back)) < 0 ) \
		(ii)->elem.back = &(hd); \
	else	(ii)->elem.back = (struct bu_list *)ptrs[sub]; }

/*
 * Like INDEXL_HD, but for the secondary (l2) edgeuse list.  The disk
 * subscript names an edgeuse structure, yet the list is chained
 * through each edgeuse's embedded l2 member, so the rebuilt link must
 * be aimed at &eu2->l2 rather than at the start of the structure.
 */
#define INDEXL_HD2(oo,ii,elem,hd)	{ \
	register int	sub; \
	register struct edgeuse	*eu2; \
	if( (sub = bu_glong((oo)->elem.forw)) < 0 ) { \
		(ii)->elem.forw = &(hd); \
	} else { \
		eu2 = (struct edgeuse *)ptrs[sub]; \
		NMG_CK_EDGEUSE(eu2); \
		(ii)->elem.forw = &eu2->l2; \
	} \
	if( (sub = bu_glong((oo)->elem.back)) < 0 ) { \
		(ii)->elem.back = &(hd); \
	} else { \
		eu2 = (struct edgeuse *)ptrs[sub]; \
		NMG_CK_EDGEUSE(eu2); \
		(ii)->elem.back = &eu2->l2; \
	} }
01389
01390
01391
01392
01393
01394
01395
01396
01397
/*
 *			R T _ N M G _ I D I S K
 *
 *  Import one NMG structure from its disk format (ip) into the
 *  pre-allocated in-core structure (op), using ptrs[] to convert
 *  disk subscripts back into real pointers, and applying the modeling
 *  transformation 'mat' to all geometric (coordinate-bearing) fields.
 *
 *  op	   - ptr to in-core structure (allocated by rt_nmg_ialloc)
 *  ip	   - base of the on-disk array for this structure's kind
 *  ecnt   - per-subscript bookkeeping (gives the kind of 'index')
 *  index  - overall subscript of the structure being imported
 *  ptrs   - subscript-to-pointer translation table
 *  mat	   - modeling transformation to apply to geometry
 *  basep  - base of the export record, for locating fastf_t arrays
 *
 *  Returns 0 on success, -1 if the structure kind is unrecognized.
 */
int
rt_nmg_idisk(genptr_t op, genptr_t ip, struct nmg_exp_counts *ecnt, int index, long int **ptrs, const fastf_t *mat, const unsigned char *basep)
{
	int	iindex;		/* index within the disk array; always 0 here */

	iindex = 0;
	switch(ecnt[index].kind)  {
	case NMG_KIND_MODEL:
		{
			struct model	*m = (struct model *)op;
			struct disk_model	*d;
			d = &((struct disk_model *)ip)[iindex];
			NMG_CK_MODEL(m);
			RT_CK_DISKMAGIC( d->magic, DISK_MODEL_MAGIC );
			INDEXL_HD( d, m, r_hd, m->r_hd );
		}
		return 0;
	case NMG_KIND_NMGREGION:
		{
			struct nmgregion	*r = (struct nmgregion *)op;
			struct disk_nmgregion	*d;
			d = &((struct disk_nmgregion *)ip)[iindex];
			NMG_CK_REGION(r);
			RT_CK_DISKMAGIC( d->magic, DISK_REGION_MAGIC );
			INDEX( d, r, model, m_p );
			INDEX( d, r, nmgregion_a, ra_p );
			INDEXL_HD( d, r, s_hd, r->s_hd );
			INDEXL_HD( d, r, l, r->m_p->r_hd );	/* do after m_p */
			NMG_CK_MODEL(r->m_p);
		}
		return 0;
	case NMG_KIND_NMGREGION_A:
		{
			struct nmgregion_a	*r = (struct nmgregion_a *)op;
			struct disk_nmgregion_a	*d;
			point_t			min, max;
			d = &((struct disk_nmgregion_a *)ip)[iindex];
			NMG_CK_REGION_A(r);
			RT_CK_DISKMAGIC( d->magic, DISK_REGION_A_MAGIC );
			/* Bounding box must be rotated along with the model */
			ntohd( (unsigned char *)min, d->min_pt, 3 );
			ntohd( (unsigned char *)max, d->max_pt, 3 );
			bn_rotate_bbox( r->min_pt, r->max_pt, mat, min, max );
		}
		return 0;
	case NMG_KIND_SHELL:
		{
			struct shell	*s = (struct shell *)op;
			struct disk_shell	*d;
			d = &((struct disk_shell *)ip)[iindex];
			NMG_CK_SHELL(s);
			RT_CK_DISKMAGIC( d->magic, DISK_SHELL_MAGIC );
			INDEX( d, s, nmgregion, r_p );
			INDEX( d, s, shell_a, sa_p );
			INDEXL_HD( d, s, fu_hd, s->fu_hd );
			INDEXL_HD( d, s, lu_hd, s->lu_hd );
			INDEXL_HD( d, s, eu_hd, s->eu_hd );
			INDEX( d, s, vertexuse, vu_p );
			NMG_CK_REGION(s->r_p);
			INDEXL_HD( d, s, l, s->r_p->s_hd );	/* after r_p */
		}
		return 0;
	case NMG_KIND_SHELL_A:
		{
			struct shell_a	*sa = (struct shell_a *)op;
			struct disk_shell_a	*d;
			point_t			min, max;
			d = &((struct disk_shell_a *)ip)[iindex];
			NMG_CK_SHELL_A(sa);
			RT_CK_DISKMAGIC( d->magic, DISK_SHELL_A_MAGIC );
			ntohd( (unsigned char *)min, d->min_pt, 3 );
			ntohd( (unsigned char *)max, d->max_pt, 3 );
			bn_rotate_bbox( sa->min_pt, sa->max_pt, mat, min, max );
		}
		return 0;
	case NMG_KIND_FACEUSE:
		{
			struct faceuse	*fu = (struct faceuse *)op;
			struct disk_faceuse	*d;
			d = &((struct disk_faceuse *)ip)[iindex];
			NMG_CK_FACEUSE(fu);
			RT_CK_DISKMAGIC( d->magic, DISK_FACEUSE_MAGIC );
			INDEX( d, fu, shell, s_p );
			INDEX( d, fu, faceuse, fumate_p );
			fu->orientation = bu_glong( d->orientation );
			INDEX( d, fu, face, f_p );
			INDEXL_HD( d, fu, lu_hd, fu->lu_hd );
			INDEXL_HD( d, fu, l, fu->s_p->fu_hd );	/* after s_p */
			NMG_CK_FACE(fu->f_p);
			NMG_CK_FACEUSE(fu->fumate_p);
		}
		return 0;
	case NMG_KIND_FACE:
		{
			struct face	*f = (struct face *)op;
			struct disk_face	*d;
			int		g_index;

			d = &((struct disk_face *)ip)[iindex];
			NMG_CK_FACE(f);
			RT_CK_DISKMAGIC( d->magic, DISK_FACE_MAGIC );
			INDEX( d, f, faceuse, fu_p );
			/* Geometry may be either plane or snurb; resolve via
			 * the shared magic_p member of the union. */
			g_index = bu_glong(d->g);
			f->g.magic_p = (long *)ptrs[g_index];
			f->flip = bu_glong( d->flip );
			/* Enroll this face on the geometry's f_hd list */
			NMG_CK_FACE_G_EITHER(f->g.magic_p);	/* sanity */
			INDEXL_HD( d, f, l, f->g.plane_p->f_hd ); /* after g */
			NMG_CK_FACEUSE(f->fu_p);
		}
		return 0;
	case NMG_KIND_FACE_G_PLANE:
		{
			struct face_g_plane	*fg = (struct face_g_plane *)op;
			struct disk_face_g_plane	*d;
			plane_t			plane;
			d = &((struct disk_face_g_plane *)ip)[iindex];
			NMG_CK_FACE_G_PLANE(fg);
			RT_CK_DISKMAGIC( d->magic, DISK_FACE_G_PLANE_MAGIC );
			INDEXL_HD( d, fg, f_hd, fg->f_hd );
			/* Plane equation requires the 4-element rotation */
			ntohd( (unsigned char *)plane, d->N, 4 );
			bn_rotate_plane( fg->N, mat, plane );
		}
		return 0;
	case NMG_KIND_FACE_G_SNURB:
		{
			struct face_g_snurb	*fg = (struct face_g_snurb *)op;
			struct disk_face_g_snurb	*d;
			d = &((struct disk_face_g_snurb *)ip)[iindex];
			NMG_CK_FACE_G_SNURB(fg);
			RT_CK_DISKMAGIC( d->magic, DISK_FACE_G_SNURB_MAGIC );
			INDEXL_HD( d, fg, f_hd, fg->f_hd );
			fg->order[0] = bu_glong( d->u_order );
			fg->order[1] = bu_glong( d->v_order );
			/* Knot vectors are non-geometric: no transform (NULL mat) */
			fg->u.k_size = bu_glong( d->u_size );
			fg->u.knots = rt_nmg_import_fastf( basep, ecnt,
				bu_glong( d->u_knots ), (matp_t)NULL,
				fg->u.k_size, 0 );
			fg->v.k_size = bu_glong( d->v_size );
			fg->v.knots = rt_nmg_import_fastf( basep, ecnt,
				bu_glong( d->v_knots ), (matp_t)NULL,
				fg->v.k_size, 0 );
			fg->s_size[0] = bu_glong( d->us_size );
			fg->s_size[1] = bu_glong( d->vs_size );
			fg->pt_type = bu_glong( d->pt_type );
			/* Transform ctl_points by 'mat' */
			fg->ctl_points = rt_nmg_import_fastf( basep, ecnt,
				bu_glong( d->ctl_points ), (matp_t)mat,
				fg->s_size[0] * fg->s_size[1],
				fg->pt_type );
		}
		return 0;
	case NMG_KIND_LOOPUSE:
		{
			struct loopuse	*lu = (struct loopuse *)op;
			struct disk_loopuse	*d;
			int		up_index;
			int		up_kind;

			d = &((struct disk_loopuse *)ip)[iindex];
			NMG_CK_LOOPUSE(lu);
			RT_CK_DISKMAGIC( d->magic, DISK_LOOPUSE_MAGIC );
			up_index = bu_glong(d->up);
			lu->up.magic_p = (long *)ptrs[up_index];
			INDEX( d, lu, loopuse, lumate_p );
			lu->orientation = bu_glong( d->orientation );
			INDEX( d, lu, loop, l_p );
			/* The parent may be either a faceuse or a shell;
			 * choose the correct list head accordingly. */
			up_kind = ecnt[up_index].kind;
			if( up_kind == NMG_KIND_FACEUSE )  {
				INDEXL_HD( d, lu, l, lu->up.fu_p->lu_hd );
			} else if( up_kind == NMG_KIND_SHELL )  {
				INDEXL_HD( d, lu, l, lu->up.s_p->lu_hd );
			} else bu_log("bad loopuse up, index=%d, kind=%d\n", up_index, up_kind);
			INDEXL_HD( d, lu, down_hd, lu->down_hd );
			if( lu->down_hd.forw == BU_LIST_NULL )
				rt_bomb("rt_nmg_idisk: null loopuse down_hd.forw\n");
			NMG_CK_LOOP(lu->l_p);
		}
		return 0;
	case NMG_KIND_LOOP:
		{
			struct loop	*loop = (struct loop *)op;
			struct disk_loop	*d;
			d = &((struct disk_loop *)ip)[iindex];
			NMG_CK_LOOP(loop);
			RT_CK_DISKMAGIC( d->magic, DISK_LOOP_MAGIC );
			INDEX( d, loop, loopuse, lu_p );
			INDEX( d, loop, loop_g, lg_p );
			NMG_CK_LOOPUSE(loop->lu_p);
		}
		return 0;
	case NMG_KIND_LOOP_G:
		{
			struct loop_g	*lg = (struct loop_g *)op;
			struct disk_loop_g	*d;
			point_t		min, max;
			d = &((struct disk_loop_g *)ip)[iindex];
			NMG_CK_LOOP_G(lg);
			RT_CK_DISKMAGIC( d->magic, DISK_LOOP_G_MAGIC );
			ntohd( (unsigned char *)min, d->min_pt, 3 );
			ntohd( (unsigned char *)max, d->max_pt, 3 );
			bn_rotate_bbox( lg->min_pt, lg->max_pt, mat, min, max );
		}
		return 0;
	case NMG_KIND_EDGEUSE:
		{
			struct edgeuse	*eu = (struct edgeuse *)op;
			struct disk_edgeuse	*d;
			int		up_index;
			int		up_kind;

			d = &((struct disk_edgeuse *)ip)[iindex];
			NMG_CK_EDGEUSE(eu);
			RT_CK_DISKMAGIC( d->magic, DISK_EDGEUSE_MAGIC );
			up_index = bu_glong(d->up);
			eu->up.magic_p = (long *)ptrs[up_index];
			INDEX( d, eu, edgeuse, eumate_p );
			INDEX( d, eu, edgeuse, radial_p );
			INDEX( d, eu, edge, e_p );
			eu->orientation = bu_glong( d->orientation );
			INDEX( d, eu, vertexuse, vu_p );
			/* Parent may be a loopuse or a wire-edge shell */
			up_kind = ecnt[up_index].kind;
			if( up_kind == NMG_KIND_LOOPUSE )  {
				INDEXL_HD( d, eu, l, eu->up.lu_p->down_hd );
			} else if( up_kind == NMG_KIND_SHELL )  {
				INDEXL_HD( d, eu, l, eu->up.s_p->eu_hd );
			} else bu_log("bad edgeuse up, index=%d, kind=%d\n", up_index, up_kind);
			eu->g.magic_p = (long *)ptrs[bu_glong(d->g)];
			NMG_CK_EDGE(eu->e_p);
			NMG_CK_EDGEUSE(eu->eumate_p);
			NMG_CK_EDGEUSE(eu->radial_p);
			NMG_CK_VERTEXUSE(eu->vu_p);
			if( eu->g.magic_p != NULL )
			{
				NMG_CK_EDGE_G_EITHER(eu->g.magic_p);
				/* The l2 disk subscripts name edgeuses, so
				 * INDEXL_HD2 must be used to aim the links
				 * at each edgeuse's embedded l2 member. */
				INDEXL_HD2( d, eu, l2, eu->g.lseg_p->eu_hd2 );
			}
			else
			{
				/* No geometry: l2 becomes a self-loop */
				eu->l2.forw = &eu->l2;
				eu->l2.back = &eu->l2;
			}
		}
		return 0;
	case NMG_KIND_EDGE:
		{
			struct edge	*e = (struct edge *)op;
			struct disk_edge	*d;
			d = &((struct disk_edge *)ip)[iindex];
			NMG_CK_EDGE(e);
			RT_CK_DISKMAGIC( d->magic, DISK_EDGE_MAGIC );
			e->is_real = bu_glong( d->is_real );
			INDEX( d, e, edgeuse, eu_p );
			NMG_CK_EDGEUSE(e->eu_p);
		}
		return 0;
	case NMG_KIND_EDGE_G_LSEG:
		{
			struct edge_g_lseg	*eg = (struct edge_g_lseg *)op;
			struct disk_edge_g_lseg	*d;
			point_t		pt;
			vect_t		dir;

			d = &((struct disk_edge_g_lseg *)ip)[iindex];
			NMG_CK_EDGE_G_LSEG(eg);
			RT_CK_DISKMAGIC( d->magic, DISK_EDGE_G_LSEG_MAGIC );
			/* eu_hd2 subscripts name edgeuses; use HD2 variant */
			INDEXL_HD2( d, eg, eu_hd2, eg->eu_hd2 );
			ntohd((unsigned char *)pt, d->e_pt, 3);
			ntohd((unsigned char *)dir, d->e_dir, 3);
			/* Point transforms as a point, direction as a vector */
			MAT4X3PNT( eg->e_pt, mat, pt );
			MAT4X3VEC( eg->e_dir, mat, dir );
		}
		return 0;
	case NMG_KIND_EDGE_G_CNURB:
		{
			struct edge_g_cnurb	*eg = (struct edge_g_cnurb *)op;
			struct disk_edge_g_cnurb	*d;
			d = &((struct disk_edge_g_cnurb *)ip)[iindex];
			NMG_CK_EDGE_G_CNURB(eg);
			RT_CK_DISKMAGIC( d->magic, DISK_EDGE_G_CNURB_MAGIC );
			INDEXL_HD2( d, eg, eu_hd2, eg->eu_hd2 );
			eg->order = bu_glong( d->order );
			/* Zero order is the "edge on snurb surface" special
			 * case: it carries no knots or control points. */
			if( eg->order == 0 )  return 0;

			eg->k.k_size = bu_glong( d->k_size );
			eg->k.knots = rt_nmg_import_fastf( basep, ecnt,
				bu_glong( d->knots ), (matp_t)NULL,
				eg->k.k_size, 0 );
			eg->c_size = bu_glong( d->c_size );
			eg->pt_type = bu_glong( d->pt_type );
			/* UV-space control points live in parameter space and
			 * must NOT be transformed; XYZ-space ones must be. */
			if( RT_NURB_EXTRACT_PT_TYPE(eg->pt_type) == RT_NURB_PT_UV )  {
				/* UV coords on snurb surface: no xform */
				eg->ctl_points = rt_nmg_import_fastf( basep,
					ecnt,
					bu_glong( d->ctl_points ), (matp_t)NULL,
					eg->c_size, eg->pt_type );
			} else {
				/* XYZ coords in model space: apply mat */
				eg->ctl_points = rt_nmg_import_fastf( basep,
					ecnt,
					bu_glong( d->ctl_points ), (matp_t)mat,
					eg->c_size, eg->pt_type );
			}
		}
		return 0;
	case NMG_KIND_VERTEXUSE:
		{
			struct vertexuse	*vu = (struct vertexuse *)op;
			struct disk_vertexuse	*d;
			d = &((struct disk_vertexuse *)ip)[iindex];
			NMG_CK_VERTEXUSE(vu);
			RT_CK_DISKMAGIC( d->magic, DISK_VERTEXUSE_MAGIC );
			vu->up.magic_p = (long *)ptrs[bu_glong(d->up)];
			INDEX( d, vu, vertex, v_p );
			/* Attribute may be absent (NULL via subscript 0) */
			vu->a.magic_p = (long *)ptrs[bu_glong(d->a)];
			NMG_CK_VERTEX(vu->v_p);
			if(vu->a.magic_p)NMG_CK_VERTEXUSE_A_EITHER(vu->a.magic_p);
			INDEXL_HD( d, vu, l, vu->v_p->vu_hd );	/* after v_p */
		}
		return 0;
	case NMG_KIND_VERTEXUSE_A_PLANE:
		{
			struct vertexuse_a_plane	*vua = (struct vertexuse_a_plane *)op;
			struct disk_vertexuse_a_plane	*d;
			vect_t			norm;
			d = &((struct disk_vertexuse_a_plane *)ip)[iindex];
			NMG_CK_VERTEXUSE_A_PLANE(vua);
			RT_CK_DISKMAGIC( d->magic, DISK_VERTEXUSE_A_PLANE_MAGIC );
			/* Normal transforms as a vector */
			ntohd( (unsigned char *)norm, d->N, 3 );
			MAT4X3VEC( vua->N, mat, norm );
		}
		return 0;
	case NMG_KIND_VERTEXUSE_A_CNURB:
		{
			struct vertexuse_a_cnurb	*vua = (struct vertexuse_a_cnurb *)op;
			struct disk_vertexuse_a_cnurb	*d;
			d = &((struct disk_vertexuse_a_cnurb *)ip)[iindex];
			NMG_CK_VERTEXUSE_A_CNURB(vua);
			RT_CK_DISKMAGIC( d->magic, DISK_VERTEXUSE_A_CNURB_MAGIC );
			/* Parameter-space values: no transformation applied */
			ntohd( (unsigned char *)vua->param, d->param, 3 );
		}
		return 0;
	case NMG_KIND_VERTEX:
		{
			struct vertex	*v = (struct vertex *)op;
			struct disk_vertex	*d;
			d = &((struct disk_vertex *)ip)[iindex];
			NMG_CK_VERTEX(v);
			RT_CK_DISKMAGIC( d->magic, DISK_VERTEX_MAGIC );
			INDEXL_HD( d, v, vu_hd, v->vu_hd );
			INDEX( d, v, vertex_g, vg_p );
		}
		return 0;
	case NMG_KIND_VERTEX_G:
		{
			struct vertex_g	*vg = (struct vertex_g *)op;
			struct disk_vertex_g	*d;
			point_t		pt;
			d = &((struct disk_vertex_g *)ip)[iindex];
			NMG_CK_VERTEX_G(vg);
			RT_CK_DISKMAGIC( d->magic, DISK_VERTEX_G_MAGIC );
			ntohd( (unsigned char *)pt, d->coord, 3 );
			MAT4X3PNT( vg->coord, mat, pt );
		}
		return 0;
	}
	bu_log("rt_nmg_idisk kind=%d unknown\n", ecnt[index].kind);
	return -1;
}
01784
01785
01786
01787
01788
01789
01790
01791
01792
/*
 *			R T _ N M G _ I A L L O C
 *
 *  Allocate storage for all the in-core NMG structures in preparation
 *  for importation, using the GET_xxx() macros so that m->maxindex
 *  and friends are handled properly.  Fills in ptrs[] (subscript to
 *  in-core pointer) and ecnt[] (subscript to kind bookkeeping) for
 *  every subscript except the variable-length double arrays, which
 *  are handled later by rt_nmg_i2alloc().
 *
 *  Returns the freshly created (still empty) model.
 */
struct model *
rt_nmg_ialloc(long int **ptrs, struct nmg_exp_counts *ecnt, int *kind_counts)
{
	struct model	*m = (struct model *)0;
	int		subscript;
	int		kind;
	int		j;

	/* Subscript 0 is reserved for the NULL pointer */
	subscript = 1;
	for( kind = 0; kind < NMG_N_KINDS; kind++ )  {
		/* fastf_t arrays are allocated by rt_nmg_i2alloc() */
		if( kind == NMG_KIND_DOUBLE_ARRAY )  continue;
		for( j = 0; j < kind_counts[kind]; j++ )  {
			ecnt[subscript].kind = kind;
			ecnt[subscript].per_struct_index = 0; /* unused on import */
			switch( kind )  {
			case NMG_KIND_MODEL:
				if( m )  rt_bomb("multiple models?");
				m = nmg_mm();
				/* bump past the model's own index; see the
				 * exporter's subscript numbering.
				 * NOTE(review): presumably compensates for
				 * nmg_mm()'s initial maxindex -- confirm. */
				m->maxindex++;
				ptrs[subscript] = (long *)m;
				break;
			case NMG_KIND_NMGREGION:
				{
					struct nmgregion	*r;
					GET_REGION( r, m );
					r->l.magic = NMG_REGION_MAGIC;
					BU_LIST_INIT( &r->s_hd );
					ptrs[subscript] = (long *)r;
				}
				break;
			case NMG_KIND_NMGREGION_A:
				{
					struct nmgregion_a	*ra;
					GET_REGION_A( ra, m );
					ra->magic = NMG_REGION_A_MAGIC;
					ptrs[subscript] = (long *)ra;
				}
				break;
			case NMG_KIND_SHELL:
				{
					struct shell	*s;
					GET_SHELL( s, m );
					s->l.magic = NMG_SHELL_MAGIC;
					BU_LIST_INIT( &s->fu_hd );
					BU_LIST_INIT( &s->lu_hd );
					BU_LIST_INIT( &s->eu_hd );
					ptrs[subscript] = (long *)s;
				}
				break;
			case NMG_KIND_SHELL_A:
				{
					struct shell_a	*sa;
					GET_SHELL_A( sa, m );
					sa->magic = NMG_SHELL_A_MAGIC;
					ptrs[subscript] = (long *)sa;
				}
				break;
			case NMG_KIND_FACEUSE:
				{
					struct faceuse	*fu;
					GET_FACEUSE( fu, m );
					fu->l.magic = NMG_FACEUSE_MAGIC;
					BU_LIST_INIT( &fu->lu_hd );
					ptrs[subscript] = (long *)fu;
				}
				break;
			case NMG_KIND_FACE:
				{
					struct face	*f;
					GET_FACE( f, m );
					f->l.magic = NMG_FACE_MAGIC;
					ptrs[subscript] = (long *)f;
				}
				break;
			case NMG_KIND_FACE_G_PLANE:
				{
					struct face_g_plane	*fg;
					GET_FACE_G_PLANE( fg, m );
					fg->magic = NMG_FACE_G_PLANE_MAGIC;
					BU_LIST_INIT( &fg->f_hd );
					ptrs[subscript] = (long *)fg;
				}
				break;
			case NMG_KIND_FACE_G_SNURB:
				{
					struct face_g_snurb	*fg;
					GET_FACE_G_SNURB( fg, m );
					fg->l.magic = NMG_FACE_G_SNURB_MAGIC;
					BU_LIST_INIT( &fg->f_hd );
					ptrs[subscript] = (long *)fg;
				}
				break;
			case NMG_KIND_LOOPUSE:
				{
					struct loopuse	*lu;
					GET_LOOPUSE( lu, m );
					lu->l.magic = NMG_LOOPUSE_MAGIC;
					BU_LIST_INIT( &lu->down_hd );
					ptrs[subscript] = (long *)lu;
				}
				break;
			case NMG_KIND_LOOP:
				{
					struct loop	*l;
					GET_LOOP( l, m );
					l->magic = NMG_LOOP_MAGIC;
					ptrs[subscript] = (long *)l;
				}
				break;
			case NMG_KIND_LOOP_G:
				{
					struct loop_g	*lg;
					GET_LOOP_G( lg, m );
					lg->magic = NMG_LOOP_G_MAGIC;
					ptrs[subscript] = (long *)lg;
				}
				break;
			case NMG_KIND_EDGEUSE:
				{
					struct edgeuse	*eu;
					GET_EDGEUSE( eu, m );
					eu->l.magic = NMG_EDGEUSE_MAGIC;
					eu->l2.magic = NMG_EDGEUSE2_MAGIC;
					ptrs[subscript] = (long *)eu;
				}
				break;
			case NMG_KIND_EDGE:
				{
					struct edge	*e;
					GET_EDGE( e, m );
					e->magic = NMG_EDGE_MAGIC;
					ptrs[subscript] = (long *)e;
				}
				break;
			case NMG_KIND_EDGE_G_LSEG:
				{
					struct edge_g_lseg	*eg;
					GET_EDGE_G_LSEG( eg, m );
					eg->l.magic = NMG_EDGE_G_LSEG_MAGIC;
					BU_LIST_INIT( &eg->eu_hd2 );
					ptrs[subscript] = (long *)eg;
				}
				break;
			case NMG_KIND_EDGE_G_CNURB:
				{
					struct edge_g_cnurb	*eg;
					GET_EDGE_G_CNURB( eg, m );
					eg->l.magic = NMG_EDGE_G_CNURB_MAGIC;
					BU_LIST_INIT( &eg->eu_hd2 );
					ptrs[subscript] = (long *)eg;
				}
				break;
			case NMG_KIND_VERTEXUSE:
				{
					struct vertexuse	*vu;
					GET_VERTEXUSE( vu, m );
					vu->l.magic = NMG_VERTEXUSE_MAGIC;
					ptrs[subscript] = (long *)vu;
				}
				break;
			case NMG_KIND_VERTEXUSE_A_PLANE:
				{
					struct vertexuse_a_plane	*vua;
					GET_VERTEXUSE_A_PLANE( vua, m );
					vua->magic = NMG_VERTEXUSE_A_PLANE_MAGIC;
					ptrs[subscript] = (long *)vua;
				}
				break;
			case NMG_KIND_VERTEXUSE_A_CNURB:
				{
					struct vertexuse_a_cnurb	*vua;
					GET_VERTEXUSE_A_CNURB( vua, m );
					vua->magic = NMG_VERTEXUSE_A_CNURB_MAGIC;
					ptrs[subscript] = (long *)vua;
				}
				break;
			case NMG_KIND_VERTEX:
				{
					struct vertex	*v;
					GET_VERTEX( v, m );
					v->magic = NMG_VERTEX_MAGIC;
					BU_LIST_INIT( &v->vu_hd );
					ptrs[subscript] = (long *)v;
				}
				break;
			case NMG_KIND_VERTEX_G:
				{
					struct vertex_g	*vg;
					GET_VERTEX_G( vg, m );
					vg->magic = NMG_VERTEX_G_MAGIC;
					ptrs[subscript] = (long *)vg;
				}
				break;
			default:
				bu_log("bad kind = %d\n", kind);
				ptrs[subscript] = (long *)0;
				break;
			}

			/* Record the in-core index assigned by the GET_
			 * macro, so old disk subscripts can be mapped to
			 * new in-core indices. */
			ecnt[subscript].new_subscript = nmg_index_of_struct(ptrs[subscript]);
			subscript++;
		}
	}
	return(m);
}
02000
02001
02002
02003
02004
02005
02006
02007
02008
02009
02010
02011
02012
02013 void
02014 rt_nmg_i2alloc(struct nmg_exp_counts *ecnt, unsigned char *cp, int *kind_counts, int maxindex)
02015 {
02016 register int kind;
02017 int nkind;
02018 int subscript;
02019 int offset;
02020 int i;
02021
02022 nkind = kind_counts[NMG_KIND_DOUBLE_ARRAY];
02023 if( nkind <= 0 ) return;
02024
02025
02026 subscript = 1;
02027 offset = 0;
02028 for( kind = 0; kind < NMG_N_KINDS; kind++ ) {
02029 if( kind == NMG_KIND_DOUBLE_ARRAY ) continue;
02030 offset += rt_nmg_disk_sizes[kind] * kind_counts[kind];
02031 subscript += kind_counts[kind];
02032 }
02033
02034
02035 RT_CK_DISKMAGIC( cp + offset, DISK_DOUBLE_ARRAY_MAGIC );
02036 for( i=0; i < nkind; i++ ) {
02037 int ndouble;
02038 RT_CK_DISKMAGIC( cp + offset, DISK_DOUBLE_ARRAY_MAGIC );
02039 ndouble = bu_glong( cp + offset + 4 );
02040 ecnt[subscript].kind = NMG_KIND_DOUBLE_ARRAY;
02041
02042 ecnt[subscript].byte_offset = offset;
02043 offset += (4+4) + 8*ndouble;
02044 subscript++;
02045 }
02046 }
02047
02048
02049
02050
02051
02052
02053
02054
02055
02056
02057
02058
02059 int
02060 rt_nmg_import_internal(struct rt_db_internal *ip, const struct bu_external *ep, register const fastf_t *mat, int rebound, const struct bn_tol *tol)
02061 {
02062 struct model *m;
02063 union record *rp;
02064 int kind_counts[NMG_N_KINDS];
02065 unsigned char *cp;
02066 long **real_ptrs;
02067 long **ptrs;
02068 struct nmg_exp_counts *ecnt;
02069 int i;
02070 int maxindex;
02071 int kind;
02072 static long bad_magic = 0x999;
02073
02074 BU_CK_EXTERNAL( ep );
02075 BN_CK_TOL( tol );
02076 rp = (union record *)ep->ext_buf;
02077
02078 if( rp->u_id != DBID_NMG ) {
02079 bu_log("rt_nmg_import: defective record\n");
02080 return(-1);
02081 }
02082
02083
02084
02085
02086
02087 if( rp->nmg.N_version != DISK_MODEL_VERSION ) {
02088 bu_log("rt_nmg_import: expected NMG '.g' format version %d, got version %d, aborting.\n",
02089 DISK_MODEL_VERSION,
02090 rp->nmg.N_version );
02091 return -1;
02092 }
02093
02094
02095 maxindex = 1;
02096 for( kind = 0; kind < NMG_N_KINDS; kind++ ) {
02097 kind_counts[kind] = bu_glong( rp->nmg.N_structs+4*kind );
02098 maxindex += kind_counts[kind];
02099 }
02100
02101
02102 ecnt = (struct nmg_exp_counts *)bu_calloc( maxindex+3,
02103 sizeof(struct nmg_exp_counts), "ecnt[]" );
02104 real_ptrs = (long **)bu_calloc( maxindex+3,
02105 sizeof(long *), "ptrs[]" );
02106
02107 ptrs = real_ptrs+1;
02108 ptrs[-1] = &bad_magic;
02109 ptrs[0] = (long *)0;
02110 ptrs[maxindex] = &bad_magic;
02111 ptrs[maxindex+1] = &bad_magic;
02112
02113
02114 m = rt_nmg_ialloc( ptrs, ecnt, kind_counts );
02115
02116
02117 cp = (unsigned char *)(rp+1);
02118 rt_nmg_i2alloc( ecnt, cp, kind_counts, maxindex );
02119
02120
02121 for( i=1; i < maxindex; i++ ) {
02122
02123 if( ecnt[i].kind == NMG_KIND_DOUBLE_ARRAY ) break;
02124 if( rt_nmg_idisk( (genptr_t)(ptrs[i]), (genptr_t)cp,
02125 ecnt, i, ptrs, mat, (unsigned char *)(rp+1) ) < 0 )
02126 return -1;
02127 cp += rt_nmg_disk_sizes[ecnt[i].kind];
02128 }
02129
02130 if( rebound ) {
02131
02132 nmg_rebound(m, tol);
02133 } else {
02134
02135
02136
02137
02138
02139 for( i=1; i < maxindex; i++ ) {
02140 if( ecnt[i].kind != NMG_KIND_FACE ) continue;
02141 nmg_face_bb( (struct face *)ptrs[i], tol );
02142 }
02143 }
02144
02145 RT_CK_DB_INTERNAL( ip );
02146 ip->idb_major_type = DB5_MAJORTYPE_BRLCAD;
02147 ip->idb_type = ID_NMG;
02148 ip->idb_meth = &rt_functab[ID_NMG];
02149 ip->idb_ptr = (genptr_t)m;
02150
02151 bu_free( (char *)ecnt, "ecnt[]" );
02152 bu_free( (char *)real_ptrs, "ptrs[]" );
02153
02154 return(0);
02155 }
02156
02157
02158
02159
02160
02161
02162
02163
02164
02165
02166
02167
02168
02169
02170
02171
02172
02173
02174
02175
02176
02177
02178
02179
02180
02181
02182
02183
02184
02185
02186
02187
02188
02189
02190
02191
02192
02193
02194
02195
02196
/*
 *			R T _ N M G _ E X P O R T _ I N T E R N A L
 *
 *  Export an NMG model to the v4 database disk format.  When
 *  'compact' is non-zero the bounding-box attribute structures
 *  (nmgregion_a, shell_a, loop_g) are omitted to save space; they can
 *  be recomputed at import time.
 *
 *  Returns 0 on success, -1 if ip does not hold an NMG.
 */
int
rt_nmg_export_internal(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, int compact)
{
	struct model			*m;
	union record			*rp;
	struct nmg_struct_counts	cntbuf;
	long				**ptrs;
	struct nmg_exp_counts		*ecnt;
	int				i;
	int				subscript;
	int				kind_counts[NMG_N_KINDS];
	genptr_t			disk_arrays[NMG_N_KINDS];
	int				additional_grans;
	int				tot_size;
	int				kind;
	char				*cp;
	int				double_count;
	int				fastf_byte_count;

	RT_CK_DB_INTERNAL(ip);
	if( ip->idb_type != ID_NMG )  return(-1);
	m = (struct model *)ip->idb_ptr;
	NMG_CK_MODEL(m);

	/* Count and tabulate every structure reachable in the model */
	bzero( (char *)&cntbuf, sizeof(cntbuf) );
	ptrs = nmg_m_struct_count( &cntbuf, m );

	/* First pass: classify each structure by kind, and account for
	 * the variable-length fastf_t arrays carried by NURBS geometry. */
	ecnt = (struct nmg_exp_counts *)bu_calloc( m->maxindex+1,
		sizeof(struct nmg_exp_counts), "ecnt[]" );
	for( i = 0; i < NMG_N_KINDS; i++ )
		kind_counts[i] = 0;
	subscript = 1;
	double_count = 0;
	fastf_byte_count = 0;
	for( i=0; i < m->maxindex; i++ )  {
		if( ptrs[i] == (long *)0 )  {
			ecnt[i].kind = -1;	/* unused index */
			continue;
		}
		kind = rt_nmg_magic_to_kind( *(ptrs[i]) );
		ecnt[i].per_struct_index = kind_counts[kind]++;
		ecnt[i].kind = kind;
		switch(kind)  {
		case NMG_KIND_FACE_G_SNURB:
			{
				struct face_g_snurb	*fg;
				int			ndouble;
				fg = (struct face_g_snurb *)ptrs[i];
				ecnt[i].first_fastf_relpos = kind_counts[NMG_KIND_DOUBLE_ARRAY];
				/* u knots + v knots + control points */
				kind_counts[NMG_KIND_DOUBLE_ARRAY] += 3;
				ndouble = fg->u.k_size +
					fg->v.k_size +
					fg->s_size[0] * fg->s_size[1] *
					RT_NURB_EXTRACT_COORDS(fg->pt_type);
				double_count += ndouble;
				ecnt[i].byte_offset = fastf_byte_count;
				/* each array: 4-byte magic + 4-byte count */
				fastf_byte_count += 3*(4+4) + 8*ndouble;
			}
			break;
		case NMG_KIND_EDGE_G_CNURB:
			{
				struct edge_g_cnurb	*eg;
				int			ndouble;
				eg = (struct edge_g_cnurb *)ptrs[i];
				ecnt[i].first_fastf_relpos = kind_counts[NMG_KIND_DOUBLE_ARRAY];
				/* order 0 curves carry no knots/ctl points */
				if( eg->order == 0 )  break;
				kind_counts[NMG_KIND_DOUBLE_ARRAY] += 2;
				ndouble = eg->k.k_size + eg->c_size *
					RT_NURB_EXTRACT_COORDS(eg->pt_type);
				double_count += ndouble;
				ecnt[i].byte_offset = fastf_byte_count;
				fastf_byte_count += 2*(4+4) + 8*ndouble;
			}
			break;
		}
	}
	/* Compact mode drops the recomputable bounding-box structures */
	if( compact )  {
		kind_counts[NMG_KIND_NMGREGION_A] = 0;
		kind_counts[NMG_KIND_SHELL_A] = 0;
		kind_counts[NMG_KIND_LOOP_G] = 0;
	}

	/* Assign new disk subscripts, grouped by kind */
	for( kind = 0; kind < NMG_N_KINDS; kind++ )  {
		if( compact && ( kind == NMG_KIND_NMGREGION_A ||
		    kind == NMG_KIND_SHELL_A ||
		    kind == NMG_KIND_LOOP_G ) )  {
			/*
			 *  Omitted structures are mapped to the NULL
			 *  subscript, so references to them import as
			 *  NULL pointers.
			 */
			for( i=0; i < m->maxindex; i++ )  {
				if( ptrs[i] == (long *)0 )  continue;
				if( ecnt[i].kind != kind )  continue;
				ecnt[i].new_subscript = DISK_INDEX_NULL;
			}
			continue;
		}
		for( i=0; i < m->maxindex; i++ )  {
			if( ptrs[i] == (long *)0 )  continue;
			if( ecnt[i].kind != kind )  continue;
			ecnt[i].new_subscript = subscript++;
		}
	}
	/* The double arrays are given the last subscripts of all */
	rt_nmg_cur_fastf_subscript = subscript;
	subscript += kind_counts[NMG_KIND_DOUBLE_ARRAY];

	/* Sanity checking: indices and kinds must be self-consistent */
	for( i=0; i < m->maxindex; i++ )  {
		if( ptrs[i] == (long *)0 )  continue;
		if( nmg_index_of_struct(ptrs[i]) != i )  {
			bu_log("***ERROR, ptrs[%d]->index = %d\n",
				i, nmg_index_of_struct(ptrs[i]) );
		}
		if( rt_nmg_magic_to_kind(*ptrs[i]) != ecnt[i].kind )  {
			bu_log("@@@ERROR, ptrs[%d] kind(%d) != %d\n",
				i, rt_nmg_magic_to_kind(*ptrs[i]),
				ecnt[i].kind);
		}
	}

	/* Compute the total export size in bytes */
	tot_size = 0;
	for( i = 0; i < NMG_N_KINDS; i++ )  {
		if( kind_counts[i] <= 0 )  {
			disk_arrays[i] = GENPTR_NULL;
			continue;
		}
		tot_size += kind_counts[i] * rt_nmg_disk_sizes[i];
	}
	/* Variable-length arrays: (magic+count) header plus doubles */
	tot_size += kind_counts[NMG_KIND_DOUBLE_ARRAY] * (4+4) +
		double_count * 8;

	/* Stash the total subscript count in the otherwise-unused
	 * slot 0 (subscripts are assigned starting at 1). */
	ecnt[0].byte_offset = subscript;

	additional_grans = (tot_size + sizeof(union record)-1) / sizeof(union record);
	BU_CK_EXTERNAL(ep);
	ep->ext_nbytes = (1 + additional_grans) * sizeof(union record);
	ep->ext_buf = (genptr_t)bu_calloc( 1, ep->ext_nbytes, "nmg external");
	rp = (union record *)ep->ext_buf;
	rp->nmg.N_id = DBID_NMG;
	rp->nmg.N_version = DISK_MODEL_VERSION;
	(void)bu_plong( rp->nmg.N_count, additional_grans );

	/* Record the per-kind counts in the record header */
	for( kind = 0; kind < NMG_N_KINDS; kind++ )  {
		(void)bu_plong( rp->nmg.N_structs+4*kind, kind_counts[kind] );
	}

	/* Partition the output buffer into one array per kind */
	cp = (char *)(rp+1);	/* advance past record header */
	for( i=0; i < NMG_N_KINDS; i++ )  {
		disk_arrays[i] = (genptr_t)cp;
		cp += kind_counts[i] * rt_nmg_disk_sizes[i];
	}
	/* The fastf_t arrays go at the end of the buffer */
	rt_nmg_fastf_p = (unsigned char *)disk_arrays[NMG_KIND_DOUBLE_ARRAY];

	/* Convert all the structures to their disk versions.
	 * NOTE(review): exported in reverse index order -- presumably
	 * deliberate ordering for the fastf_t array emission; confirm
	 * before changing. */
	for( i = m->maxindex-1; i >= 0; i-- )  {
		if( ptrs[i] == (long *)0 )  continue;
		kind = ecnt[i].kind;
		if( kind_counts[kind] <= 0 )  continue;
		rt_nmg_edisk( (genptr_t)(disk_arrays[kind]),
			(genptr_t)(ptrs[i]), ecnt, i, local2mm );
	}

	bu_free( (char *)ptrs, "ptrs[]" );
	bu_free( (char *)ecnt, "ecnt[]" );

	return(0);
}
02373
02374
02375
02376
02377
02378
02379
02380 int
02381 rt_nmg_import(struct rt_db_internal *ip, const struct bu_external *ep, register const fastf_t *mat, const struct db_i *dbip)
02382 {
02383 struct model *m;
02384 union record *rp;
02385 struct bn_tol tol;
02386
02387 BU_CK_EXTERNAL( ep );
02388 rp = (union record *)ep->ext_buf;
02389
02390 if( rp->u_id != DBID_NMG ) {
02391 bu_log("rt_nmg_import: defective record\n");
02392 return(-1);
02393 }
02394
02395
02396
02397
02398
02399
02400 tol.magic = BN_TOL_MAGIC;
02401 tol.dist = 0.005;
02402 tol.dist_sq = tol.dist * tol.dist;
02403 tol.perp = 1e-6;
02404 tol.para = 1 - tol.perp;
02405
02406 if( rt_nmg_import_internal( ip, ep, mat, 1, &tol ) < 0 )
02407 return(-1);
02408
02409 m = (struct model *)ip->idb_ptr;
02410 NMG_CK_MODEL(m);
02411
02412 if( RT_G_DEBUG || rt_g.NMG_debug )
02413 nmg_vmodel(m);
02414
02415 return(0);
02416 }
02417
02418
02419
02420
02421 int
02422 rt_nmg_import5( struct rt_db_internal *ip,
02423 struct bu_external *ep,
02424 register const mat_t mat,
02425 const struct db_i *dbip )
02426 {
02427 struct model *m;
02428 struct bn_tol tol;
02429 int maxindex;
02430 int kind;
02431 int kind_counts[NMG_N_KINDS];
02432 unsigned char *dp;
02433 genptr_t startdata;
02434 long **real_ptrs;
02435 long **ptrs;
02436 struct nmg_exp_counts *ecnt;
02437 register int i;
02438 static long bad_magic = 0x999;
02439
02440 BU_CK_EXTERNAL( ep );
02441 dp = (genptr_t)ep->ext_buf;
02442
02443 tol.magic = BN_TOL_MAGIC;
02444 tol.dist = 0.005;
02445 tol.dist_sq = tol.dist * tol.dist;
02446 tol.perp = 1e-6;
02447 tol.para = 1 - tol.perp;
02448
02449 {
02450 int version;
02451 version = bu_glong(dp);
02452 dp+= SIZEOF_NETWORK_LONG;
02453 if (version != DISK_MODEL_VERSION ) {
02454 bu_log("rt_nmg_import: expected NMG '.g' format version %d, got %d, aborting nmg solid import\n",
02455 DISK_MODEL_VERSION, version);
02456 return -1;
02457 }
02458 }
02459 maxindex = 1;
02460 for (kind =0 ; kind < NMG_N_KINDS; kind++) {
02461 kind_counts[kind] = bu_glong( dp );
02462 dp+= SIZEOF_NETWORK_LONG;
02463 maxindex += kind_counts[kind];
02464 }
02465
02466 startdata = dp;
02467
02468
02469 ecnt = (struct nmg_exp_counts *) bu_calloc( maxindex+3,
02470 sizeof(struct nmg_exp_counts), "ecnt[]");
02471 real_ptrs = (long **)bu_calloc( maxindex+3, sizeof(long *), "ptrs[]");
02472
02473 ptrs = real_ptrs+1;
02474 ptrs[-1] = &bad_magic;
02475 ptrs[0] = (long *)0;
02476 ptrs[maxindex] = &bad_magic;
02477 ptrs[maxindex+1] = &bad_magic;
02478
02479 m = rt_nmg_ialloc( ptrs, ecnt, kind_counts );
02480
02481 rt_nmg_i2alloc( ecnt, dp, kind_counts, maxindex );
02482
02483
02484 for (i=1; i < maxindex; i++) {
02485
02486 if (ecnt[i].kind == NMG_KIND_DOUBLE_ARRAY) break;
02487 if (rt_nmg_idisk( (genptr_t)(ptrs[i]), (genptr_t)dp, ecnt,
02488 i, ptrs, mat, (unsigned char *)startdata) < 0) {
02489 return -1;
02490 }
02491 dp += rt_nmg_disk_sizes[ecnt[i].kind];
02492 }
02493
02494
02495 nmg_rebound(m, &tol);
02496
02497 RT_CK_DB_INTERNAL( ip );
02498 ip->idb_major_type = DB5_MAJORTYPE_BRLCAD;
02499 ip->idb_type = ID_NMG;
02500 ip->idb_meth = &rt_functab[ ID_NMG ];
02501 ip->idb_ptr = (genptr_t)m;
02502 NMG_CK_MODEL(m);
02503 bu_free( (char *)ecnt, "ecnt[]");
02504 bu_free( (char *)real_ptrs, "ptrs[]");
02505
02506 if ( RT_G_DEBUG || rt_g.NMG_debug ) {
02507 nmg_vmodel(m);
02508 }
02509 return 0;
02510 }
02511
02512
02513
02514
02515
02516
02517
02518 int
02519 rt_nmg_export(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, const struct db_i *dbip)
02520 {
02521 struct model *m;
02522
02523 RT_CK_DB_INTERNAL(ip);
02524 if( ip->idb_type != ID_NMG ) return(-1);
02525 m = (struct model *)ip->idb_ptr;
02526 NMG_CK_MODEL(m);
02527
02528
02529 nmg_vmodel(m);
02530
02531
02532 return rt_nmg_export_internal( ep, ip, local2mm, 1 );
02533 }
02534
02535
02536
02537
02538 int
02539 rt_nmg_export5(
02540 struct bu_external *ep,
02541 const struct rt_db_internal *ip,
02542 double local2mm,
02543 const struct db_i *dbip)
02544 {
02545 struct model *m;
02546 char *dp;
02547 long **ptrs;
02548 struct nmg_struct_counts cntbuf;
02549 struct nmg_exp_counts *ecnt;
02550 int kind_counts[NMG_N_KINDS];
02551 genptr_t disk_arrays[NMG_N_KINDS];
02552 int tot_size;
02553 int kind;
02554 int double_count;
02555 register int i;
02556 int subscript, fastf_byte_count;
02557
02558 RT_CK_DB_INTERNAL(ip);
02559 if (ip->idb_type != ID_NMG) return -1;
02560 m = (struct model *)ip->idb_ptr;
02561 NMG_CK_MODEL(m);
02562
02563 bzero((char *)&cntbuf, sizeof(cntbuf));
02564 ptrs = nmg_m_struct_count( &cntbuf, m);
02565
02566 ecnt = (struct nmg_exp_counts *)bu_calloc( m->maxindex+1,
02567 sizeof(struct nmg_exp_counts), "ecnt[]");
02568 for (i=0; i<NMG_N_KINDS; i++) {
02569 kind_counts[i] = 0;
02570 }
02571 subscript = 1;
02572 double_count = 0;
02573 fastf_byte_count = 0;
02574 for (i=0; i< m->maxindex; i++) {
02575 if (ptrs[i] == (long *)0 ) {
02576 ecnt[i].kind = -1;
02577 continue;
02578 }
02579
02580 kind = rt_nmg_magic_to_kind( *(ptrs[i]) );
02581 ecnt[i].per_struct_index = kind_counts[kind]++;
02582 ecnt[i].kind = kind;
02583
02584
02585
02586
02587
02588 if (kind == NMG_KIND_FACE_G_SNURB) {
02589 struct face_g_snurb *fg;
02590 int ndouble;
02591 fg = (struct face_g_snurb *)ptrs[i];
02592 ecnt[i].first_fastf_relpos = kind_counts[NMG_KIND_DOUBLE_ARRAY];
02593 kind_counts[NMG_KIND_DOUBLE_ARRAY] += 3;
02594 ndouble = fg->u.k_size +
02595 fg->v.k_size +
02596 fg->s_size[0] * fg->s_size[1] *
02597 RT_NURB_EXTRACT_COORDS(fg->pt_type);
02598 double_count += ndouble;
02599 ecnt[i].byte_offset = fastf_byte_count;
02600 fastf_byte_count += 3*(4*4) + 89*ndouble;
02601 } else if (kind == NMG_KIND_EDGE_G_CNURB) {
02602 struct edge_g_cnurb *eg;
02603 int ndouble;
02604 eg = (struct edge_g_cnurb *)ptrs[i];
02605 ecnt[i].first_fastf_relpos =
02606 kind_counts[NMG_KIND_DOUBLE_ARRAY];
02607 if (eg->order != 0) {
02608 kind_counts[NMG_KIND_DOUBLE_ARRAY] += 2;
02609 ndouble = eg->k.k_size +eg->c_size *
02610 RT_NURB_EXTRACT_COORDS(eg->pt_type);
02611 double_count += ndouble;
02612 ecnt[i].byte_offset = fastf_byte_count;
02613 fastf_byte_count += 2*(4+4) + 8*ndouble;
02614 }
02615 }
02616 }
02617 #if 1
02618 kind_counts[NMG_KIND_NMGREGION_A] = 0;
02619 kind_counts[NMG_KIND_SHELL_A] = 0;
02620 kind_counts[NMG_KIND_LOOP_G] = 0;
02621 #endif
02622
02623 for (kind=0; kind < NMG_N_KINDS; kind++) {
02624 #if 1
02625 if ( kind == NMG_KIND_NMGREGION_A ||
02626 kind == NMG_KIND_SHELL_A ||
02627 kind == NMG_KIND_LOOP_G ) {
02628 for (i=0; i<m->maxindex; i++) {
02629 if (ptrs[i] == (long *)0) continue;
02630 if (ecnt[i].kind != kind) continue;
02631 ecnt[i].new_subscript = DISK_INDEX_NULL;
02632 }
02633 continue;
02634 }
02635 #endif
02636 for (i=0; i< m->maxindex;i++) {
02637 if (ptrs[i] == (long *)0) continue;
02638 if (ecnt[i].kind != kind) continue;
02639 ecnt[i].new_subscript = subscript++;
02640 }
02641 }
02642
02643 rt_nmg_cur_fastf_subscript = subscript;
02644 subscript += kind_counts[NMG_KIND_DOUBLE_ARRAY];
02645
02646
02647 for (i=0; i<m->maxindex; i++) {
02648 if (ptrs[i] == (long *)0) continue;
02649
02650 if (nmg_index_of_struct(ptrs[i]) != i) {
02651 bu_log("***ERROR, ptrs[%d]->index = %d\n",
02652 i, nmg_index_of_struct(ptrs[i]));
02653 }
02654 if (rt_nmg_magic_to_kind(*ptrs[i]) != ecnt[i].kind ) {
02655 bu_log("***ERROR, ptrs[%d] kind(%d) != %d\n",
02656 i, rt_nmg_magic_to_kind(*ptrs[i]),
02657 ecnt[i].kind);
02658 }
02659
02660 }
02661
02662 tot_size = 0;
02663 for (i=0; i< NMG_N_KINDS; i++) {
02664 if (kind_counts[i] <= 0) {
02665 disk_arrays[i] = GENPTR_NULL;
02666 continue;
02667 }
02668 tot_size += kind_counts[i] * rt_nmg_disk_sizes[i];
02669 }
02670
02671
02672 tot_size += kind_counts[NMG_KIND_DOUBLE_ARRAY] * (4+4) +
02673 double_count*8;
02674
02675 ecnt[0].byte_offset = subscript;
02676 tot_size += SIZEOF_NETWORK_LONG*(NMG_N_KINDS + 1);
02677 BU_CK_EXTERNAL(ep);
02678 ep->ext_nbytes = tot_size;
02679 ep->ext_buf = (genptr_t)bu_calloc(1, ep->ext_nbytes, "nmg external5");
02680 dp = ep->ext_buf;
02681 (void)bu_plong((unsigned char *)dp, DISK_MODEL_VERSION);
02682 dp+=SIZEOF_NETWORK_LONG;
02683
02684 for (kind=0; kind <NMG_N_KINDS; kind++) {
02685 (void)bu_plong((unsigned char *) dp, kind_counts[kind]);
02686 dp+=SIZEOF_NETWORK_LONG;
02687 }
02688 for (i=0; i< NMG_N_KINDS; i++) {
02689 disk_arrays[i] = (genptr_t)dp;
02690 dp += kind_counts[i] * rt_nmg_disk_sizes[i];
02691 }
02692 rt_nmg_fastf_p = (unsigned char*)disk_arrays[NMG_KIND_DOUBLE_ARRAY];
02693
02694 for (i = m->maxindex-1;i >=0; i--) {
02695 if (ptrs[i] == (long *)0) continue;
02696 kind = ecnt[i].kind;
02697 if (kind_counts[kind] <= 0) continue;
02698 rt_nmg_edisk((genptr_t)(disk_arrays[kind]),
02699 (genptr_t)(ptrs[i]), ecnt, i, local2mm);
02700 }
02701
02702 bu_free((char *)ptrs, "ptrs[]");
02703 bu_free((char *)ecnt, "ecnt[]");
02704 return 0;
02705 }
02706
02707
02708
02709
02710
02711
02712
02713
02714 int
02715 rt_nmg_describe(struct bu_vls *str, const struct rt_db_internal *ip, int verbose, double mm2local)
02716 {
02717 register struct model *m =
02718 (struct model *)ip->idb_ptr;
02719
02720 NMG_CK_MODEL(m);
02721 bu_vls_printf( str, "n-Manifold Geometry solid (NMG) maxindex=%ld\n",
02722 (long)m->maxindex);
02723
02724 if( !verbose ) return(0);
02725
02726 #if 0
02727 {
02728 struct nmg_struct_counts count;
02729 long **ptrs;
02730
02731 ptrs = nmg_m_struct_count( &count, m );
02732
02733
02734 nmg_vls_struct_counts( str, &count );
02735
02736 bu_free( (char *)ptrs, "struct_count *ptrs[]" );
02737 }
02738 #endif
02739
02740 return(0);
02741 }
02742
02743
02744
02745
02746
02747
02748 void
02749 rt_nmg_ifree(struct rt_db_internal *ip)
02750 {
02751 register struct model *m;
02752
02753 RT_CK_DB_INTERNAL(ip);
02754 if (ip->idb_ptr) {
02755 m = (struct model *)ip->idb_ptr;
02756 NMG_CK_MODEL(m);
02757 nmg_km( m );
02758 }
02759
02760 ip->idb_ptr = GENPTR_NULL;
02761 }
02762
02763 int
02764 rt_nmg_tclget(Tcl_Interp *interp, const struct rt_db_internal *intern, const char *attr)
02765 {
02766 register struct model *m=(struct model *)intern->idb_ptr;
02767 Tcl_DString ds;
02768 struct bu_vls vls;
02769 struct bu_ptbl verts;
02770 struct nmgregion *r;
02771 struct shell *s;
02772 struct faceuse *fu;
02773 struct loopuse *lu;
02774 struct edgeuse *eu;
02775 struct vertexuse *vu;
02776 struct vertex *v;
02777 struct vertex_g *vg;
02778 int i;
02779
02780 NMG_CK_MODEL( m );
02781
02782 Tcl_DStringInit( &ds );
02783 bu_vls_init( &vls );
02784
02785 if( attr == (char *)NULL )
02786 {
02787 bu_vls_strcpy( &vls, "nmg" );
02788 bu_ptbl_init( &verts, 256, "nmg verts" );
02789 nmg_vertex_tabulate( &verts, &m->magic );
02790
02791
02792 bu_vls_strcat( &vls, " V {" );
02793 for( i=0 ; i<BU_PTBL_LEN( &verts ) ; i++ ) {
02794 v = (struct vertex *) BU_PTBL_GET( &verts, i );
02795 NMG_CK_VERTEX( v );
02796 vg = v->vg_p;
02797 if( !vg ) {
02798 Tcl_SetResult( interp, "Vertex has no geometry\n", TCL_STATIC );
02799 bu_ptbl_free( &verts );
02800 bu_vls_free( &vls );
02801 return( TCL_ERROR );
02802 }
02803 bu_vls_printf( &vls, " { %.25g %.25g %.25g }", V3ARGS( vg->coord ) );
02804 }
02805 bu_vls_strcat( &vls, " }" );
02806
02807
02808
02809 for( BU_LIST_FOR_BACKWARDS( r, nmgregion, &m->r_hd ) ) {
02810
02811
02812
02813 for( BU_LIST_FOR_BACKWARDS( s, shell, &r->s_hd ) ) {
02814
02815
02816
02817 if( BU_LIST_NON_EMPTY( &s->fu_hd ) ) {
02818 for( BU_LIST_FOR_BACKWARDS( fu, faceuse, &s->fu_hd ) ) {
02819 if( fu->orientation != OT_SAME )
02820 continue;
02821
02822 bu_vls_strcat( &vls, " F {" );
02823
02824
02825 for( BU_LIST_FOR_BACKWARDS( lu, loopuse, &fu->lu_hd ) ) {
02826
02827 if( BU_LIST_FIRST_MAGIC( &lu->down_hd ) == NMG_VERTEXUSE_MAGIC ) {
02828 vu = BU_LIST_FIRST( vertexuse, &lu->down_hd );
02829 bu_vls_printf( &vls, " %d",
02830 bu_ptbl_locate( &verts, (long *)vu->v_p ) );
02831 } else {
02832 bu_vls_strcat( &vls, " {" );
02833 for( BU_LIST_FOR( eu, edgeuse, &lu->down_hd ) ) {
02834 vu = eu->vu_p;
02835 bu_vls_printf( &vls, " %d",
02836 bu_ptbl_locate( &verts, (long *)vu->v_p ) );
02837 }
02838
02839 bu_vls_strcat( &vls, " }" );
02840 }
02841 }
02842
02843
02844 bu_vls_strcat( &vls, " }" );
02845 }
02846 }
02847 #if 0
02848
02849 if( BU_LIST_NON_EMPTY( &s->lu_hd ) ) {
02850 for( BU_LIST_FOR( lu, loopuse, &s->lu_hd ) ) {
02851 }
02852 }
02853
02854
02855 if( BU_LIST_NON_EMPTY( &s->eu_hd ) ) {
02856 for( BU_LIST_FOR( eu, edgeuse, &s->eu_hd ) ) {
02857 }
02858 }
02859
02860
02861 if( s->vu_p ) {
02862 bu_vls_printf( &vls, " VU %d", bu_ptbl_locate( &verts, (long *)s->vu_p->v_p ) );
02863 }
02864
02865
02866 bu_vls_strcat( &vls, " }" );
02867 #endif
02868 }
02869
02870
02871 }
02872 bu_ptbl_free( &verts );
02873 } else if( !strcmp( attr, "V" ) ) {
02874
02875
02876 bu_ptbl_init( &verts, 256, "nmg verts" );
02877 nmg_vertex_tabulate( &verts, &m->magic );
02878 for( i=0 ; i<BU_PTBL_LEN( &verts ) ; i++ ) {
02879 v = (struct vertex *) BU_PTBL_GET( &verts, i );
02880 NMG_CK_VERTEX( v );
02881 vg = v->vg_p;
02882 if( !vg ) {
02883 Tcl_SetResult( interp, "Vertex has no geometry\n", TCL_STATIC );
02884 bu_ptbl_free( &verts );
02885 bu_vls_free( &vls );
02886 return( TCL_ERROR );
02887 }
02888 bu_vls_printf( &vls, " { %.25g %.25g %.25g }", V3ARGS( vg->coord ) );
02889 }
02890 bu_ptbl_free( &verts );
02891 } else {
02892 Tcl_SetResult( interp, "Unrecognized parameter\n", TCL_STATIC );
02893 return( TCL_ERROR );
02894 }
02895
02896 Tcl_DStringAppend( &ds, bu_vls_addr( &vls ), -1 );
02897 Tcl_DStringResult( interp, &ds );
02898 Tcl_DStringFree( &ds );
02899 bu_vls_free( &vls );
02900
02901 return( TCL_OK );
02902 }
02903
02904 int
02905 rt_nmg_tcladjust( Tcl_Interp *interp, struct rt_db_internal *intern, int argc, char **argv, struct resource *resp)
02906 {
02907 struct model *m;
02908 struct nmgregion *r=NULL;
02909 struct shell *s=NULL;
02910 struct faceuse *fu=NULL;
02911 Tcl_Obj *obj, **obj_array;
02912 int len;
02913 int num_verts, num_loops;
02914 int *loop;
02915 int loop_len;
02916 int i, j;
02917 struct tmp_v *verts;
02918 fastf_t *tmp;
02919 struct bn_tol tol;
02920
02921 RT_CK_DB_INTERNAL( intern );
02922 m = (struct model *)intern->idb_ptr;
02923 NMG_CK_MODEL( m );
02924
02925 verts = (struct tmp_v *)NULL;
02926 for( i=0 ; i<argc ; i += 2 ) {
02927 if( !strcmp( argv[i], "V" ) ) {
02928 obj = Tcl_NewStringObj( argv[i+1], -1 );
02929 if( Tcl_ListObjGetElements( interp, obj, &num_verts,
02930 &obj_array) != TCL_OK) {
02931 Tcl_SetResult( interp,
02932 "ERROR: failed to parse vertex list\n",
02933 TCL_STATIC );
02934 Tcl_DecrRefCount( obj );
02935 return( TCL_ERROR );
02936 }
02937 verts = (struct tmp_v *)bu_calloc( num_verts,
02938 sizeof( struct tmp_v ),
02939 "verts" );
02940 for( j=0 ; j<num_verts ; j++ ) {
02941 len = 3;
02942 tmp = &verts[j].pt[0];
02943 if( tcl_obj_to_fastf_array( interp, obj_array[j],
02944 &tmp, &len ) != 3 ) {
02945 Tcl_SetResult( interp,
02946 "ERROR: incorrect number of coordinates for vertex\n",
02947 TCL_STATIC );
02948 return( TCL_ERROR );
02949 }
02950 }
02951
02952 }
02953 }
02954
02955 while( argc >= 2 ) {
02956 struct vertex ***face_verts;
02957 struct faceuse *fu;
02958
02959 if( !strcmp( argv[0], "V" ) ) {
02960
02961 goto cont;
02962 } else if( !strcmp( argv[0], "F" ) ) {
02963 if( !verts ) {
02964 Tcl_SetResult( interp,
02965 "ERROR: cannot set faces without vertices\n",
02966 TCL_STATIC );
02967 return( TCL_ERROR );
02968 }
02969 if( BU_LIST_IS_EMPTY( &m->r_hd ) ) {
02970 r = nmg_mrsv( m );
02971 s = BU_LIST_FIRST( shell, &r->s_hd );
02972 } else {
02973 r = BU_LIST_FIRST( nmgregion, &m->r_hd );
02974 s = BU_LIST_FIRST( shell, &r->s_hd );
02975 }
02976 obj = Tcl_NewStringObj( argv[1], -1 );
02977 if( Tcl_ListObjGetElements( interp, obj, &num_loops,
02978 &obj_array) != TCL_OK) {
02979 Tcl_SetResult( interp,
02980 "ERROR: failed to parse face list\n",
02981 TCL_STATIC );
02982 Tcl_DecrRefCount( obj );
02983 return( TCL_ERROR );
02984 }
02985 for( i=0, fu=NULL ; i<num_loops ; i++ ) {
02986 struct vertex **loop_verts;
02987
02988
02989 loop_len = 0;
02990 (void)tcl_obj_to_int_array( interp, obj_array[i],
02991 &loop, &loop_len);
02992 if( !loop_len ) {
02993 Tcl_SetResult( interp,
02994 "ERROR: unable to parse face list\n",
02995 TCL_STATIC );
02996 return( TCL_ERROR );
02997 }
02998 if( i ) {
02999 loop_verts = (struct vertex **)bu_calloc(
03000 loop_len,
03001 sizeof( struct vertex * ),
03002 "loop_verts" );
03003 for( i=0 ; i<loop_len ; i++ ) {
03004 loop_verts[i] = verts[loop[i]].v;
03005 }
03006 fu = nmg_add_loop_to_face( s, fu,
03007 loop_verts, loop_len,
03008 OT_OPPOSITE );
03009 for( i=0 ; i<loop_len ; i++ ) {
03010 verts[loop[i]].v = loop_verts[i];
03011 }
03012 } else {
03013 face_verts = (struct vertex ***)bu_calloc(
03014 loop_len,
03015 sizeof( struct vertex **),
03016 "face_verts" );
03017 for( j=0 ; j<loop_len ; j++ ) {
03018 face_verts[j] = &verts[loop[j]].v;
03019 }
03020 fu = nmg_cmface( s, face_verts, loop_len );
03021 bu_free((char *)face_verts, "face_verts" );
03022 }
03023 }
03024 } else {
03025 Tcl_SetResult( interp,
03026 "ERROR: Unrecognized parameter, must be V or F\n",
03027 TCL_STATIC );
03028 return( TCL_ERROR );
03029 }
03030 cont:
03031 argc -= 2;
03032 argv += 2;
03033 }
03034
03035
03036 for( i=0 ; i<num_verts ; i++ ) {
03037 if( verts[i].v )
03038 nmg_vertex_gv( verts[i].v, verts[i].pt );
03039 }
03040
03041
03042 for( BU_LIST_FOR( fu, faceuse, &s->fu_hd ) ) {
03043 if( fu->orientation != OT_SAME )
03044 continue;
03045 nmg_calc_face_g( fu );
03046 }
03047
03048 tol.magic = BN_TOL_MAGIC;
03049 tol.dist = 0.005;
03050 tol.dist_sq = tol.dist * tol.dist;
03051 tol.perp = 1e-6;
03052 tol.para = 1 - tol.perp;
03053
03054 nmg_rebound( m, &tol );
03055
03056 return( TCL_OK );
03057 }
03058
03059
03060 void
03061 rt_nmg_make( const struct rt_functab *ftp, struct rt_db_internal *intern, double d )
03062 {
03063 struct model *m;
03064
03065 m = nmg_mm();
03066 intern->idb_ptr = (genptr_t )m;
03067 intern->idb_major_type = DB5_MAJORTYPE_BRLCAD;
03068 intern->idb_type = ID_NMG;
03069 intern->idb_meth = ftp;
03070 }
03071
03072
03073
03074
03075
03076
03077
03078
03079
03080