BRL-CAD
nmg.c
Go to the documentation of this file.
1 /* N M G . C
2  * BRL-CAD
3  *
4  * Copyright (c) 2005-2014 United States Government as represented by
5  * the U.S. Army Research Laboratory.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public License
9  * version 2.1 as published by the Free Software Foundation.
10  *
11  * This library is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this file; see the file named COPYING for more
18  * information.
19  */
20 /** @addtogroup primitives */
21 /** @{ */
22 /** @file primitives/nmg/nmg.c
23  *
24  * Intersect a ray with an NMG solid.
25  *
26  */
27 /** @} */
28 
29 #include "common.h"
30 
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <math.h>
34 #include <string.h>
35 #include "bnetwork.h"
36 
37 #include "bu/cv.h"
38 #include "vmath.h"
39 #include "db.h"
40 #include "nmg.h"
41 #include "raytrace.h"
42 #include "nurb.h"
43 
44 
45 /* rt_nmg_internal is just "model", from nmg.h */
46 
/* Sentinel values bracketing struct nmg_specific; rt_nmg_shot() checks
 * both before use to detect memory corruption of stp->st_specific.
 */
#define NMG_SPEC_START_MAGIC 6014061
#define NMG_SPEC_END_MAGIC 7013061

/* This is the solid information specific to an nmg solid */
struct nmg_specific {
    uint32_t nmg_smagic;	/* STRUCT START magic number */
    struct model *nmg_model;	/* the NMG model being ray-traced */
    char *manifolds;		/* structure 1-3manifold table */
    vect_t nmg_invdir;		/* per-ray inverse direction cosines */
    uint32_t nmg_emagic;	/* STRUCT END magic number */
};
58 
59 
/* Scratch pairing of a point with a vertex pointer.
 * NOTE(review): not referenced in this portion of the file; presumably
 * used while building NMG geometry from point data — confirm against
 * the rest of the file.
 */
struct tmp_v {
    point_t pt;
    struct vertex *v;
};
64 
65 
66 /**
67  * Calculate the bounding box for an N-Manifold Geometry
68  */
69 int
70 rt_nmg_bbox(struct rt_db_internal *ip, point_t *min, point_t * max, const struct bn_tol *UNUSED(tol)) {
71  struct model *m;
72 
74  m = (struct model *)ip->idb_ptr;
75  NMG_CK_MODEL(m);
76 
77  nmg_model_bb(*min, *max, m);
78  return 0;
79 }
80 
81 
82 /**
83  * Given a pointer to a ged database record, and a transformation
84  * matrix, determine if this is a valid nmg, and if so, precompute
85  * various terms of the formula.
86  *
87  * returns 0 if nmg is ok and !0 on error in description
88  *
89  * implicit return - a struct nmg_specific is created, and its
90  * address is stored in stp->st_specific for use by nmg_shot().
91  */
92 int
93 rt_nmg_prep(struct soltab *stp, struct rt_db_internal *ip, struct rt_i *rtip)
94 {
95  struct model *m;
96  struct nmg_specific *nmg_s;
97  vect_t work;
98 
100  m = (struct model *)ip->idb_ptr;
101  NMG_CK_MODEL(m);
102 
103  if (stp->st_meth->ft_bbox(ip, &(stp->st_min), &(stp->st_max), &(rtip->rti_tol))) return 1;
104 
105  VADD2SCALE(stp->st_center, stp->st_min, stp->st_max, 0.5);
106  VSUB2SCALE(work, stp->st_max, stp->st_min, 0.5);
107  stp->st_aradius = stp->st_bradius = MAGNITUDE(work);
108 
109  BU_GET(nmg_s, struct nmg_specific);
110  stp->st_specific = (void *)nmg_s;
111  nmg_s->nmg_model = m;
112  ip->idb_ptr = (void *)NULL;
115 
116  /* build table indicating the manifold level of each sub-element
117  * of NMG solid
118  */
119  nmg_s->manifolds = nmg_manifolds(m);
120 
121  return 0;
122 }
123 
124 
125 void
126 rt_nmg_print(const struct soltab *stp)
127 {
128  struct model *m =
129  (struct model *)stp->st_specific;
130 
131  NMG_CK_MODEL(m);
132  nmg_pr_m(m);
133 }
134 
135 
/**
 * Intersect a ray with a nmg. If an intersection occurs, a struct
 * seg will be acquired and filled in.
 *
 * Side effect: components of rp->r_dir that fail the ZERO() test are
 * clamped to exactly 0.0, and the corresponding inverse direction is
 * set to INFINITY.
 *
 * Returns -
 * 0 MISS
 * >0 HIT
 */
int
rt_nmg_shot(struct soltab *stp, struct xray *rp, struct application *ap, struct seg *seghead)

/* info about the ray */

/* intersection w/ ray */
{
    struct ray_data rd;		/* per-ray state handed to the NMG intersector */
    int status;
    struct nmg_specific *nmg =
	(struct nmg_specific *)stp->st_specific;

    if (RTG.NMG_debug & DEBUG_NMGRT) {
	bu_log("rt_nmg_shot()\n\t");
	rt_pr_tol(&ap->a_rt_i->rti_tol);
    }

    /* check validity of nmg specific structure */
    if (nmg->nmg_smagic != NMG_SPEC_START_MAGIC)
	bu_bomb("start of NMG st_specific structure corrupted\n");

    if (nmg->nmg_emagic != NMG_SPEC_END_MAGIC)
	bu_bomb("end of NMG st_specific structure corrupted\n");

    /* Compute the inverse of the direction cosines; near-zero
     * components get INFINITY and the direction itself is snapped
     * to exactly zero so downstream math stays consistent.
     */
    if (!ZERO(rp->r_dir[X])) {
	nmg->nmg_invdir[X]=1.0/rp->r_dir[X];
    } else {
	nmg->nmg_invdir[X] = INFINITY;
	rp->r_dir[X] = 0.0;
    }
    if (!ZERO(rp->r_dir[Y])) {
	nmg->nmg_invdir[Y]=1.0/rp->r_dir[Y];
    } else {
	nmg->nmg_invdir[Y] = INFINITY;
	rp->r_dir[Y] = 0.0;
    }
    if (!ZERO(rp->r_dir[Z])) {
	nmg->nmg_invdir[Z]=1.0/rp->r_dir[Z];
    } else {
	nmg->nmg_invdir[Z] = INFINITY;
	rp->r_dir[Z] = 0.0;
    }

    /* build the NMG per-ray data structure */
    rd.rd_m = nmg->nmg_model;
    rd.manifolds = nmg->manifolds;
    VMOVE(rd.rd_invdir, nmg->nmg_invdir);
    rd.rp = rp;
    rd.tol = &ap->a_rt_i->rti_tol;
    rd.ap = ap;
    rd.stp = stp;
    rd.seghead = seghead;
    rd.classifying_ray = 0;

    /* create a table to keep track of which elements have been
     * processed before and which haven't. Elements in this table
     * will either be (NULL) if item not previously processed or a
     * hitmiss ptr if item was previously processed
     */
    rd.hitmiss = (struct hitmiss **)bu_calloc(rd.rd_m->maxindex,
					      sizeof(struct hitmiss *), "nmg geom hit list");

    /* initialize the lists of things that have been hit/missed */
    BU_LIST_INIT(&rd.rd_hit);
    BU_LIST_INIT(&rd.rd_miss);

    /* intersect the ray with the geometry (sets surfno) */
    nmg_isect_ray_model(&rd);

    /* build the segment lists */
    status = nmg_ray_segs(&rd);

    /* free the hitmiss table */
    bu_free((char *)rd.hitmiss, "free nmg geom hit list");

    return status;
}
223 
224 
225 /**
226  * Given ONE ray distance, return the normal and entry/exit point.
227  */
228 void
229 rt_nmg_norm(struct hit *hitp, struct soltab *stp, struct xray *rp)
230 {
231  if (!hitp || !rp)
232  return;
233 
234  if (stp) RT_CK_SOLTAB(stp);
235  RT_CK_RAY(rp);
236  RT_CK_HIT(hitp);
237 
238  VJOIN1(hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir);
239 }
240 
241 
242 /**
243  * Return the curvature of the nmg.
244  */
245 void
246 rt_nmg_curve(struct curvature *cvp, struct hit *hitp, struct soltab *stp)
247 {
248  if (!cvp || !hitp)
249  return;
250 
251  RT_CK_HIT(hitp);
252  if (stp) RT_CK_SOLTAB(stp);
253 
254  cvp->crv_c1 = cvp->crv_c2 = 0;
255 
256  /* any tangent direction */
257  bn_vec_ortho(cvp->crv_pdir, hitp->hit_normal);
258 }
259 
260 
261 /**
262  * For a hit on the surface of an nmg, return the (u, v) coordinates
263  * of the hit point, 0 <= u, v <= 1.
264  *
265  * u = azimuth
266  * v = elevation
267  */
268 void
269 rt_nmg_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp)
270 {
271  if (ap) RT_CK_APPLICATION(ap);
272  if (stp) RT_CK_SOLTAB(stp);
273  if (hitp) RT_CK_HIT(hitp);
274  if (!uvp) return;
275 }
276 
277 
278 void
279 rt_nmg_free(struct soltab *stp)
280 {
281  struct nmg_specific *nmg =
282  (struct nmg_specific *)stp->st_specific;
283 
284  nmg_km(nmg->nmg_model);
285  BU_PUT(nmg, struct nmg_specific);
286  stp->st_specific = NULL; /* sanity */
287 }
288 
289 
290 int
291 rt_nmg_plot(struct bu_list *vhead, struct rt_db_internal *ip, const struct rt_tess_tol *UNUSED(ttol), const struct bn_tol *UNUSED(tol), const struct rt_view_info *UNUSED(info))
292 {
293  struct model *m;
294 
295  BU_CK_LIST_HEAD(vhead);
296  RT_CK_DB_INTERNAL(ip);
297  m = (struct model *)ip->idb_ptr;
298  NMG_CK_MODEL(m);
299 
300  nmg_m_to_vlist(vhead, m, 0);
301 
302  return 0;
303 }
304 
305 
/**
 * XXX This routine "destroys" the internal nmg solid. This means
 * that once you tessellate an NMG solid, your in-memory copy becomes
 * invalid, and you can't do anything else with it until you get a new
 * copy from disk.
 *
 * All regions of the imported model are merged into a single region,
 * which is then merged into the caller's model 'm'.
 *
 * Returns -
 * -1 failure (imported geometry contains no regions)
 * 0 OK. *r points to nmgregion that holds this tessellation.
 */
int
rt_nmg_tess(struct nmgregion **r, struct model *m, struct rt_db_internal *ip, const struct rt_tess_tol *UNUSED(ttol), const struct bn_tol *tol)
{
    struct model *lm;	/* model imported from the database record */

    NMG_CK_MODEL(m);

    RT_CK_DB_INTERNAL(ip);
    lm = (struct model *)ip->idb_ptr;
    NMG_CK_MODEL(lm);

    if (BU_LIST_IS_EMPTY(&(lm->r_hd))) {
	/* No regions in imported geometry, can't give valid 'r' */
	*r = (struct nmgregion *)NULL;
	return -1;
    }

    /* XXX A big hack, just for testing ***/

    /* Keep the first region; fold every subsequent region into it.
     * The next pointer is saved before each merge because
     * nmg_merge_regions() consumes r2.
     */
    *r = BU_LIST_FIRST(nmgregion, &(lm->r_hd));
    NMG_CK_REGION(*r);
    if (BU_LIST_NEXT_NOT_HEAD(*r, &(lm->r_hd))) {
	struct nmgregion *r2;

	r2 = BU_LIST_PNEXT(nmgregion, &((*r)->l));
	while (BU_LIST_NOT_HEAD(&r2->l, &(lm->r_hd))) {
	    struct nmgregion *next_r;

	    next_r = BU_LIST_PNEXT(nmgregion, &r2->l);
	    nmg_merge_regions(*r, r2, tol);

	    r2 = next_r;
	}
    }


    /* XXX The next two lines "destroy" the internal nmg solid. This
     * means that once you tessellate an NMG solid, your in-memory copy
     * becomes invalid, and you can't do anything else with it until
     * you get a new copy from disk.
     */
    nmg_merge_models(m, lm);
    ip->idb_ptr = ((void *)0);

    return 0;
}
362 
363 
/* Verify that the big-endian uint32 at _cp equals _magic; on mismatch,
 * log a diagnostic with file/line and bomb.  Used when importing the
 * on-disk NMG format defined below.
 */
#define NMG_CK_DISKMAGIC(_cp, _magic) \
    if (ntohl(*(uint32_t*)_cp) != _magic) { \
	bu_log("NMG_CK_DISKMAGIC: magic mis-match, got x%x, s/b x%x, file %s, line %d\n", \
	       ntohl(*(uint32_t*)_cp), _magic, __FILE__, __LINE__); \
	bu_bomb("bad magic\n"); \
    }
370 
371 
372 /* ----------------------------------------------------------------------
373  *
374  * Definitions for the binary, machine-independent format of the NMG
375  * data structures.
376  *
377  * There are two special values that may be assigned to an
378  * disk_index_t to signal special processing when the structure is
379  * re-import4ed.
380  */
381 #define DISK_INDEX_NULL 0
382 #define DISK_INDEX_LISTHEAD -1
383 
384 #define DISK_MODEL_VERSION 1 /* V0 was Release 4.0 */
385 
/* A disk_index_t is a 32-bit structure subscript stored as raw bytes. */
typedef unsigned char disk_index_t[4]; /* uint32_t buffer */

/* On-disk image of a doubly-linked list head/node: the forw/back
 * pointers are replaced by structure subscripts.
 */
struct disk_rt_list {
    disk_index_t forw;
    disk_index_t back;
};
391 
392 
393 #define DISK_MODEL_MAGIC 0x4e6d6f64 /* Nmod */
394 struct disk_model {
395  unsigned char magic[4];
396  unsigned char version[4]; /* unused */
398 };
399 
400 
401 #define DISK_REGION_MAGIC 0x4e726567 /* Nreg */
403  unsigned char magic[4];
404  struct disk_rt_list l;
405  disk_index_t m_p;
406  disk_index_t ra_p;
408 };
409 
410 
411 #define DISK_REGION_A_MAGIC 0x4e725f61 /* Nr_a */
413  unsigned char magic[4];
414  unsigned char min_pt[3*8];
415  unsigned char max_pt[3*8];
416 };
417 
418 
419 #define DISK_SHELL_MAGIC 0x4e73686c /* Nshl */
420 struct disk_shell {
421  unsigned char magic[4];
422  struct disk_rt_list l;
423  disk_index_t r_p;
424  disk_index_t sa_p;
428  disk_index_t vu_p;
429 };
430 
431 
#define DISK_SHELL_A_MAGIC 0x4e735f61 /* Ns_a */
/* On-disk shell attributes: bounding box stored as 3 doubles each. */
struct disk_shell_a {
    unsigned char magic[4];
    unsigned char min_pt[3*8];
    unsigned char max_pt[3*8];
};
438 
439 
#define DISK_FACE_MAGIC 0x4e666163 /* Nfac */
/* On-disk face: list linkage, owning faceuse, geometry subscript,
 * and the normal-flip flag stored as a 32-bit int.
 */
struct disk_face {
    unsigned char magic[4];
    struct disk_rt_list l;
    disk_index_t fu_p;
    disk_index_t g;
    unsigned char flip[4];
};
448 
449 
450 #define DISK_FACE_G_PLANE_MAGIC 0x4e666770 /* Nfgp */
452  unsigned char magic[4];
454  unsigned char N[4*8];
455 };
456 
457 
458 #define DISK_FACE_G_SNURB_MAGIC 0x4e666773 /* Nfgs */
460  unsigned char magic[4];
462  unsigned char u_order[4];
463  unsigned char v_order[4];
464  unsigned char u_size[4]; /* u.k_size */
465  unsigned char v_size[4]; /* v.k_size */
466  disk_index_t u_knots; /* u.knots subscript */
467  disk_index_t v_knots; /* v.knots subscript */
468  unsigned char us_size[4];
469  unsigned char vs_size[4];
470  unsigned char pt_type[4];
471  disk_index_t ctl_points; /* subscript */
472 };
473 
474 
475 #define DISK_FACEUSE_MAGIC 0x4e667520 /* Nfu */
476 struct disk_faceuse {
477  unsigned char magic[4];
478  struct disk_rt_list l;
479  disk_index_t s_p;
480  disk_index_t fumate_p;
481  unsigned char orientation[4];
482  disk_index_t f_p;
483  disk_index_t fua_p;
485 };
486 
487 
#define DISK_LOOP_MAGIC 0x4e6c6f70 /* Nlop */
/* On-disk loop: owning loopuse and loop geometry subscripts. */
struct disk_loop {
    unsigned char magic[4];
    disk_index_t lu_p;
    disk_index_t lg_p;
};
494 
495 
#define DISK_LOOP_G_MAGIC 0x4e6c5f67 /* Nl_g */
/* On-disk loop geometry: bounding box stored as 3 doubles each. */
struct disk_loop_g {
    unsigned char magic[4];
    unsigned char min_pt[3*8];
    unsigned char max_pt[3*8];
};
502 
503 
504 #define DISK_LOOPUSE_MAGIC 0x4e6c7520 /* Nlu */
505 struct disk_loopuse {
506  unsigned char magic[4];
507  struct disk_rt_list l;
508  disk_index_t up;
509  disk_index_t lumate_p;
510  unsigned char orientation[4];
511  disk_index_t l_p;
512  disk_index_t lua_p;
514 };
515 
516 
#define DISK_EDGE_MAGIC 0x4e656467 /* Nedg */
/* On-disk edge: one representative edgeuse subscript plus the
 * is_real flag stored as a 32-bit int.
 */
struct disk_edge {
    unsigned char magic[4];
    disk_index_t eu_p;
    unsigned char is_real[4];
};
523 
524 
525 #define DISK_EDGE_G_LSEG_MAGIC 0x4e65676c /* Negl */
527  unsigned char magic[4];
529  unsigned char e_pt[3*8];
530  unsigned char e_dir[3*8];
531 };
532 
533 
534 #define DISK_EDGE_G_CNURB_MAGIC 0x4e656763 /* Negc */
536  unsigned char magic[4];
538  unsigned char order[4];
539  unsigned char k_size[4]; /* k.k_size */
540  disk_index_t knots; /* knot.knots subscript */
541  unsigned char c_size[4];
542  unsigned char pt_type[4];
543  disk_index_t ctl_points; /* subscript */
544 };
545 
546 
#define DISK_EDGEUSE_MAGIC 0x4e657520 /* Neu */
/* On-disk edgeuse.  l2 is the secondary (radial) list linkage; see
 * the note in rt_nmg_edisk() about how its pointers are reindexed.
 */
struct disk_edgeuse {
    unsigned char magic[4];
    struct disk_rt_list l;
    struct disk_rt_list l2;
    disk_index_t up;
    disk_index_t eumate_p;
    disk_index_t radial_p;
    disk_index_t e_p;
    disk_index_t eua_p;
    unsigned char orientation[4];
    disk_index_t vu_p;
    disk_index_t g;
};
561 
562 
563 #define DISK_VERTEX_MAGIC 0x4e767274 /* Nvrt */
564 struct disk_vertex {
565  unsigned char magic[4];
567  disk_index_t vg_p;
568 };
569 
570 
571 #define DISK_VERTEX_G_MAGIC 0x4e765f67 /* Nv_g */
573  unsigned char magic[4];
574  unsigned char coord[3*8];
575 };
576 
577 
578 #define DISK_VERTEXUSE_MAGIC 0x4e767520 /* Nvu */
580  unsigned char magic[4];
581  struct disk_rt_list l;
582  disk_index_t up;
583  disk_index_t v_p;
584  disk_index_t a;
585 };
586 
587 
588 #define DISK_VERTEXUSE_A_PLANE_MAGIC 0x4e767561 /* Nvua */
590  unsigned char magic[4];
591  unsigned char N[3*8];
592 };
593 
594 
595 #define DISK_VERTEXUSE_A_CNURB_MAGIC 0x4e766163 /* Nvac */
597  unsigned char magic[4];
598  unsigned char param[3*8];
599 };
600 
601 
602 #define DISK_DOUBLE_ARRAY_MAGIC 0x4e666172 /* Narr */
604  unsigned char magic[4];
605  unsigned char ndouble[4]; /* # of doubles to follow */
606  unsigned char vals[1*8]; /* actually [ndouble*8] */
607 };
608 
609 
610 /* ---------------------------------------------------------------------- */
611 /* All these arrays and defines have to use the same implicit index
612  * values. FIXME: this should probably be an enum.
613  */
614 #define NMG_KIND_MODEL 0
615 #define NMG_KIND_NMGREGION 1
616 #define NMG_KIND_NMGREGION_A 2
617 #define NMG_KIND_SHELL 3
618 #define NMG_KIND_SHELL_A 4
619 #define NMG_KIND_FACEUSE 5
620 #define NMG_KIND_FACE 6
621 #define NMG_KIND_FACE_G_PLANE 7
622 #define NMG_KIND_FACE_G_SNURB 8
623 #define NMG_KIND_LOOPUSE 9
624 #define NMG_KIND_LOOP 10
625 #define NMG_KIND_LOOP_G 11
626 #define NMG_KIND_EDGEUSE 12
627 #define NMG_KIND_EDGE 13
628 #define NMG_KIND_EDGE_G_LSEG 14
629 #define NMG_KIND_EDGE_G_CNURB 15
630 #define NMG_KIND_VERTEXUSE 16
631 #define NMG_KIND_VERTEXUSE_A_PLANE 17
632 #define NMG_KIND_VERTEXUSE_A_CNURB 18
633 #define NMG_KIND_VERTEX 19
634 #define NMG_KIND_VERTEX_G 20
635 /* 21 through 24 are unassigned, and reserved for future use */
636 
637 /** special, variable sized */
638 #define NMG_KIND_DOUBLE_ARRAY 25
639 
640 /* number of kinds. This number must have some extra space, for
641  * upwards compatibility.
642  */
643 #define NMG_N_KINDS 26
644 
645 
647  sizeof(struct disk_model), /* 0 */
648  sizeof(struct disk_nmgregion),
649  sizeof(struct disk_nmgregion_a),
650  sizeof(struct disk_shell),
651  sizeof(struct disk_shell_a),
652  sizeof(struct disk_faceuse),
653  sizeof(struct disk_face),
654  sizeof(struct disk_face_g_plane),
655  sizeof(struct disk_face_g_snurb),
656  sizeof(struct disk_loopuse),
657  sizeof(struct disk_loop), /* 10 */
658  sizeof(struct disk_loop_g),
659  sizeof(struct disk_edgeuse),
660  sizeof(struct disk_edge),
661  sizeof(struct disk_edge_g_lseg),
662  sizeof(struct disk_edge_g_cnurb),
663  sizeof(struct disk_vertexuse),
664  sizeof(struct disk_vertexuse_a_plane),
665  sizeof(struct disk_vertexuse_a_cnurb),
666  sizeof(struct disk_vertex),
667  sizeof(struct disk_vertex_g), /* 20 */
668  0,
669  0,
670  0,
671  0,
672  0 /* disk_double_array, MUST BE ZERO */ /* 25: MUST BE ZERO */
673 };
/* Human-readable name for each NMG_KIND_* value, indexed by kind.
 * Two extra "OFF_END" entries guard against off-by-one indexing.
 */
const char rt_nmg_kind_names[NMG_N_KINDS+2][18] = {
    "model",		/* 0 */
    "nmgregion",
    "nmgregion_a",
    "shell",
    "shell_a",
    "faceuse",
    "face",
    "face_g_plane",
    "face_g_snurb",
    "loopuse",
    "loop",		/* 10 */
    "loop_g",
    "edgeuse",
    "edge",
    "edge_g_lseg",
    "edge_g_cnurb",
    "vertexuse",
    "vertexuse_a_plane",
    "vertexuse_a_cnurb",
    "vertex",
    "vertex_g",		/* 20 */
    "k21",
    "k22",
    "k23",
    "k24",
    "double_array",	/* 25 */
    "k26-OFF_END",
    "k27-OFF_END"
};
704 
705 
706 /**
707  * Given the magic number for an NMG structure, return the
708  * manifest constant which identifies that structure kind.
709  */
710 int
712 {
713  switch (magic) {
714  case NMG_MODEL_MAGIC:
715  return NMG_KIND_MODEL;
716  case NMG_REGION_MAGIC:
717  return NMG_KIND_NMGREGION;
718  case NMG_REGION_A_MAGIC:
719  return NMG_KIND_NMGREGION_A;
720  case NMG_SHELL_MAGIC:
721  return NMG_KIND_SHELL;
722  case NMG_SHELL_A_MAGIC:
723  return NMG_KIND_SHELL_A;
724  case NMG_FACEUSE_MAGIC:
725  return NMG_KIND_FACEUSE;
726  case NMG_FACE_MAGIC:
727  return NMG_KIND_FACE;
729  return NMG_KIND_FACE_G_PLANE;
731  return NMG_KIND_FACE_G_SNURB;
732  case NMG_LOOPUSE_MAGIC:
733  return NMG_KIND_LOOPUSE;
734  case NMG_LOOP_G_MAGIC:
735  return NMG_KIND_LOOP_G;
736  case NMG_LOOP_MAGIC:
737  return NMG_KIND_LOOP;
738  case NMG_EDGEUSE_MAGIC:
739  return NMG_KIND_EDGEUSE;
740  case NMG_EDGE_MAGIC:
741  return NMG_KIND_EDGE;
743  return NMG_KIND_EDGE_G_LSEG;
745  return NMG_KIND_EDGE_G_CNURB;
746  case NMG_VERTEXUSE_MAGIC:
747  return NMG_KIND_VERTEXUSE;
752  case NMG_VERTEX_MAGIC:
753  return NMG_KIND_VERTEX;
754  case NMG_VERTEX_G_MAGIC:
755  return NMG_KIND_VERTEX_G;
756  }
757  /* default */
758  bu_log("magic = x%x\n", magic);
759  bu_bomb("rt_nmg_magic_to_kind: bad magic");
760  return -1;
761 }
762 
763 
764 /* ---------------------------------------------------------------------- */
765 
769  int kind;
770  long first_fastf_relpos; /* for snurb and cnurb. */
771  long byte_offset; /* for snurb and cnurb. */
772 };
773 
774 
775 /* XXX These are horribly non-PARALLEL, and they *must* be PARALLEL ! */
776 static unsigned char *rt_nmg_fastf_p;
777 static unsigned int rt_nmg_cur_fastf_subscript;
778 
779 
/**
 * Format a variable sized array of fastf_t's into external format
 * (IEEE big endian double precision) with a 2 element header.
 *
 * +-----------+
 * | magic |
 * +-----------+
 * | count |
 * +-----------+
 * | |
 * ~ doubles ~
 * ~ : ~
 * | |
 * +-----------+
 *
 * Increments the pointer to the next free byte in the external array,
 * and increments the subscript number of the next free array.
 *
 * Note that this subscript number is consistent with the rest of the
 * NMG external subscript numbering, so that the first
 * disk_double_array subscript will be one larger than the largest
 * disk_vertex_g subscript, and in the external record the array of
 * fastf_t arrays will follow the array of disk_vertex_g structures.
 *
 * Uses and advances the file-static rt_nmg_fastf_p /
 * rt_nmg_cur_fastf_subscript cursors (NOT parallel-safe; see note at
 * their declaration).
 *
 * Returns subscript number of this array, in the external form.
 */
int
rt_nmg_export4_fastf(const fastf_t *fp, int count, int pt_type, double scale)


/* If zero, means literal array of values */

{
    int i;
    unsigned char *cp;

    /* always write doubles to disk */
    double *scanp;

    /* For point tuples, 'count' is tuples; convert to scalar count. */
    if (pt_type)
	count *= RT_NURB_EXTRACT_COORDS(pt_type);

    /* write the 2-word header (magic, scalar count) in network order */
    cp = rt_nmg_fastf_p;
    *(uint32_t *)&cp[0] = htonl(DISK_DOUBLE_ARRAY_MAGIC);
    *(uint32_t *)&cp[4] = htonl(count);
    if (pt_type == 0 || ZERO(scale - 1.0)) {
	/* No scaling needed: copy values straight through. */
	scanp = (double *)bu_malloc(count * sizeof(double), "scanp");
	/* convert fastf_t to double */
	for (i=0; i<count; i++) {
	    scanp[i] = fp[i];
	}
	bu_cv_htond(cp + (4+4), (unsigned char *)scanp, count);
	bu_free(scanp, "scanp");
    } else {
	/* Need to scale data by 'scale' ! */
	scanp = (double *)bu_malloc(count*sizeof(double), "scanp");
	if (RT_NURB_IS_PT_RATIONAL(pt_type)) {
	    /* Don't scale the homogeneous (rational) coord */
	    int nelem;	/* # elements per tuple */

	    nelem = RT_NURB_EXTRACT_COORDS(pt_type);
	    for (i = 0; i < count; i += nelem) {
		VSCALEN(&scanp[i], &fp[i], scale, nelem-1);
		scanp[i+nelem-1] = fp[i+nelem-1];
	    }
	} else {
	    /* Scale everything as one long array */
	    VSCALEN(scanp, fp, scale, count);
	}
	bu_cv_htond(cp + (4+4), (unsigned char *)scanp, count);
	bu_free(scanp, "rt_nmg_export4_fastf");
    }
    /* advance the shared output cursor past header + payload */
    cp += (4+4) + count * 8;
    rt_nmg_fastf_p = cp;
    return rt_nmg_cur_fastf_subscript++;
}
856 
857 
/**
 * Import a variable-sized array of doubles from the external form
 * written by rt_nmg_export4_fastf(), returning a freshly-allocated
 * fastf_t array (caller frees).
 *
 * base       - start of the external record
 * ecnt       - subscript table; ecnt[subscript].byte_offset locates
 *              the disk_double_array within 'base'
 * subscript  - external subscript of the array to read
 * mat        - if non-NULL, 4x4 transform applied to each point tuple
 * len        - expected number of values (tuples when pt_type != 0)
 * pt_type    - NURB point type; 0 means a literal scalar array
 *
 * Bombs on table/magic/length mismatch or unsupported tuple width.
 */
fastf_t *
rt_nmg_import4_fastf(const unsigned char *base, struct nmg_exp_counts *ecnt, long int subscript, const matp_t mat, int len, int pt_type)
{
    const unsigned char *cp;

    int i;
    int count;
    fastf_t *ret;

    /* must be double for import and export */
    double *tmp;
    double *scanp;

    /* sanity-check the subscript against the bookkeeping table */
    if (ecnt[subscript].byte_offset <= 0 || ecnt[subscript].kind != NMG_KIND_DOUBLE_ARRAY) {
	bu_log("subscript=%ld, byte_offset=%ld, kind=%d (expected %d)\n",
	       subscript, ecnt[subscript].byte_offset,
	       ecnt[subscript].kind, NMG_KIND_DOUBLE_ARRAY);
	bu_bomb("rt_nmg_import4_fastf() bad ecnt table\n");
    }


    cp = base + ecnt[subscript].byte_offset;
    if (ntohl(*(uint32_t*)cp) != DISK_DOUBLE_ARRAY_MAGIC) {
	bu_log("magic mis-match, got x%x, s/b x%x, file %s, line %d\n",
	       ntohl(*(uint32_t*)cp), DISK_DOUBLE_ARRAY_MAGIC, __FILE__, __LINE__);
	bu_log("subscript=%ld, byte_offset=%ld\n",
	       subscript, ecnt[subscript].byte_offset);
	bu_bomb("rt_nmg_import4_fastf() bad magic\n");
    }

    /* for point tuples, convert tuple count to scalar count */
    if (pt_type)
	len *= RT_NURB_EXTRACT_COORDS(pt_type);

    count = ntohl(*(uint32_t*)(cp + 4));
    if (count != len || count < 0) {
	bu_log("rt_nmg_import4_fastf() subscript=%ld, expected len=%d, got=%d\n",
	       subscript, len, count);
	bu_bomb("rt_nmg_import4_fastf()\n");
    }
    ret = (fastf_t *)bu_malloc(count * sizeof(fastf_t), "rt_nmg_import4_fastf[]");
    if (!mat) {
	/* no transform: straight network-to-host conversion */
	scanp = (double *)bu_malloc(count * sizeof(double), "scanp");
	bu_cv_ntohd((unsigned char *)scanp, cp + (4+4), count);
	/* read as double, return as fastf_t */
	for (i=0; i<count; i++) {
	    ret[i] = scanp[i];
	}
	bu_free(scanp, "scanp");
	return ret;
    }

    /*
     * An amazing amount of work: transform all points by 4x4 mat.
     * Need to know width of data points, may be 3, or 4-tuples.
     * The vector times matrix calculation can't be done in place.
     */
    tmp = (double *)bu_malloc(count * sizeof(double), "rt_nmg_import4_fastf tmp[]");
    bu_cv_ntohd((unsigned char *)tmp, cp + (4+4), count);

    switch (RT_NURB_EXTRACT_COORDS(pt_type)) {
	case 3:
	    if (RT_NURB_IS_PT_RATIONAL(pt_type)) bu_bomb("rt_nmg_import4_fastf() Rational 3-tuple?\n");
	    /* walk tuples back-to-front; count reused as the index */
	    for (count -= 3; count >= 0; count -= 3) {
		MAT4X3PNT(&ret[count], mat, &tmp[count]);
	    }
	    break;
	case 4:
	    if (!RT_NURB_IS_PT_RATIONAL(pt_type)) bu_bomb("rt_nmg_import4_fastf() non-rational 4-tuple?\n");
	    for (count -= 4; count >= 0; count -= 4) {
		MAT4X4PNT(&ret[count], mat, &tmp[count]);
	    }
	    break;
	default:
	    bu_bomb("rt_nmg_import4_fastf() unsupported # of coords in ctl_point\n");
    }

    bu_free(tmp, "rt_nmg_import4_fastf tmp[]");

    return ret;
}
938 
939 
/**
 * Map an in-memory NMG structure pointer to its external (disk)
 * subscript via the ecnt[] table.
 *
 * Depends on ecnt[0].byte_offset having been set to maxindex.
 *
 * There are some special values for the disk index returned here:
 * >0 normal structure index.
 * 0 substitute a null pointer when imported.
 * -1 substitute pointer to within-struct list head when imported.
 *
 * Bombs if the pointer's index cannot be resolved or the resulting
 * subscript is out of range.
 */
HIDDEN int
reindex(void *p, struct nmg_exp_counts *ecnt)
{
    long idx;
    long ret=0;	/* zero is NOT the default value, this is just to satisfy cray compilers */

    /* If null pointer, return new subscript of zero */
    if (p == 0) {
	ret = 0;
	idx = 0;	/* sanity */
    } else {
	idx = nmg_index_of_struct((uint32_t *)(p));
	if (idx == -1) {
	    ret = DISK_INDEX_LISTHEAD; /* FLAG: special list head */
	} else if (idx < -1) {
	    bu_bomb("reindex(): unable to obtain struct index\n");
	} else {
	    ret = ecnt[idx].new_subscript;
	    if (ecnt[idx].kind < 0) {
		bu_log("reindex(p=%p), p->index=%ld, ret=%ld, kind=%d\n", p, idx, ret, ecnt[idx].kind);
		bu_bomb("reindex() This index not found in ecnt[]\n");
	    }
	    /* ret == 0 on suppressed loop_g ptrs, etc. */
	    if (ret < 0 || ret > ecnt[0].byte_offset) {
		bu_log("reindex(p=%p) %s, p->index=%ld, ret=%ld, maxindex=%ld\n",
		       p,
		       bu_identify_magic(*(uint32_t *)p),
		       idx, ret, ecnt[0].byte_offset);
		bu_bomb("reindex() subscript out of range\n");
	    }
	}
    }
/*bu_log("reindex(p=x%x), p->index=%d, ret=%d\n", p, idx, ret);*/
    return ret;
}
983 
984 
/* Helpers for rt_nmg_edisk(): write reindexed subscripts into the
 * on-disk structures in network byte order.
 * INDEX  - single pointer member 'elem'.
 * INDEXL - a disk_rt_list member 'elem' (both forw and back).
 * PUTMAGIC - stamp the disk struct's magic field.
 * forw may never be null; back may be null for loopuse (sigh).
 */
#define INDEX(o, i, elem) *(uint32_t *)(o)->elem = htonl(reindex((void *)((i)->elem), ecnt))
#define INDEXL(oo, ii, elem) { \
	uint32_t _f = reindex((void *)((ii)->elem.forw), ecnt); \
	if (_f == DISK_INDEX_NULL) bu_log("Warning rt_nmg_edisk: reindex forw to null?\n"); \
	*(uint32_t *)((oo)->elem.forw) = htonl(_f); \
	*(uint32_t *)((oo)->elem.back) = htonl(reindex((void *)((ii)->elem.back), ecnt)); }
#define PUTMAGIC(_magic) *(uint32_t *)d->magic = htonl(_magic)
993 
994 
995 /**
996  * Export a given structure from memory to disk format
997  *
998  * Scale geometry by 'local2mm'
999  */
1000 void
1001 rt_nmg_edisk(void *op, void *ip, struct nmg_exp_counts *ecnt, int idx, double local2mm)
1002 /* base of disk array */
1003 /* ptr to in-memory structure */
1004 
1005 
1006 {
1007  int oindex; /* index in op */
1008 
1009  oindex = ecnt[idx].per_struct_index;
1010  switch (ecnt[idx].kind) {
1011  case NMG_KIND_MODEL: {
1012  struct model *m = (struct model *)ip;
1013  struct disk_model *d;
1014  d = &((struct disk_model *)op)[oindex];
1015  NMG_CK_MODEL(m);
1017  *(uint32_t *)d->version = htonl(0);
1018  INDEXL(d, m, r_hd);
1019  }
1020  return;
1021  case NMG_KIND_NMGREGION: {
1022  struct nmgregion *r = (struct nmgregion *)ip;
1023  struct disk_nmgregion *d;
1024  d = &((struct disk_nmgregion *)op)[oindex];
1025  NMG_CK_REGION(r);
1027  INDEXL(d, r, l);
1028  INDEX(d, r, m_p);
1029  INDEX(d, r, ra_p);
1030  INDEXL(d, r, s_hd);
1031  }
1032  return;
1033  case NMG_KIND_NMGREGION_A: {
1034  struct nmgregion_a *r = (struct nmgregion_a *)ip;
1035  struct disk_nmgregion_a *d;
1036 
1037  /* must be double for import and export */
1038  double min[ELEMENTS_PER_POINT];
1039  double max[ELEMENTS_PER_POINT];
1040 
1041  d = &((struct disk_nmgregion_a *)op)[oindex];
1042  NMG_CK_REGION_A(r);
1044  VSCALE(min, r->min_pt, local2mm);
1045  VSCALE(max, r->max_pt, local2mm);
1046  bu_cv_htond(d->min_pt, (unsigned char *)min, ELEMENTS_PER_POINT);
1047  bu_cv_htond(d->max_pt, (unsigned char *)max, ELEMENTS_PER_POINT);
1048  }
1049  return;
1050  case NMG_KIND_SHELL: {
1051  struct shell *s = (struct shell *)ip;
1052  struct disk_shell *d;
1053  d = &((struct disk_shell *)op)[oindex];
1054  NMG_CK_SHELL(s);
1056  INDEXL(d, s, l);
1057  INDEX(d, s, r_p);
1058  INDEX(d, s, sa_p);
1059  INDEXL(d, s, fu_hd);
1060  INDEXL(d, s, lu_hd);
1061  INDEXL(d, s, eu_hd);
1062  INDEX(d, s, vu_p);
1063  }
1064  return;
1065  case NMG_KIND_SHELL_A: {
1066  struct shell_a *sa = (struct shell_a *)ip;
1067  struct disk_shell_a *d;
1068 
1069  /* must be double for import and export */
1070  double min[ELEMENTS_PER_POINT];
1071  double max[ELEMENTS_PER_POINT];
1072 
1073  d = &((struct disk_shell_a *)op)[oindex];
1074  NMG_CK_SHELL_A(sa);
1076  VSCALE(min, sa->min_pt, local2mm);
1077  VSCALE(max, sa->max_pt, local2mm);
1078  bu_cv_htond(d->min_pt, (unsigned char *)min, ELEMENTS_PER_POINT);
1079  bu_cv_htond(d->max_pt, (unsigned char *)max, ELEMENTS_PER_POINT);
1080  }
1081  return;
1082  case NMG_KIND_FACEUSE: {
1083  struct faceuse *fu = (struct faceuse *)ip;
1084  struct disk_faceuse *d;
1085  d = &((struct disk_faceuse *)op)[oindex];
1086  NMG_CK_FACEUSE(fu);
1087  NMG_CK_FACEUSE(fu->fumate_p);
1088  NMG_CK_FACE(fu->f_p);
1089  if (fu->f_p != fu->fumate_p->f_p) bu_log("faceuse export, differing faces\n");
1091  INDEXL(d, fu, l);
1092  INDEX(d, fu, s_p);
1093  INDEX(d, fu, fumate_p);
1094  *(uint32_t *)d->orientation = htonl(fu->orientation);
1095  INDEX(d, fu, f_p);
1096  INDEXL(d, fu, lu_hd);
1097  }
1098  return;
1099  case NMG_KIND_FACE: {
1100  struct face *f = (struct face *)ip;
1101  struct disk_face *d;
1102  d = &((struct disk_face *)op)[oindex];
1103  NMG_CK_FACE(f);
1105  INDEXL(d, f, l); /* face is member of fg list */
1106  INDEX(d, f, fu_p);
1107  *(uint32_t *)d->g = htonl(reindex((void *)(f->g.magic_p), ecnt));
1108  *(uint32_t *)d->flip = htonl(f->flip);
1109  }
1110  return;
1111  case NMG_KIND_FACE_G_PLANE: {
1112  struct face_g_plane *fg = (struct face_g_plane *)ip;
1113  struct disk_face_g_plane *d;
1114 
1115  /* must be double for import and export */
1116  double plane[ELEMENTS_PER_PLANE];
1117 
1118  d = &((struct disk_face_g_plane *)op)[oindex];
1119  NMG_CK_FACE_G_PLANE(fg);
1121  INDEXL(d, fg, f_hd);
1122 
1123  /* convert fastf_t to double */
1124  VMOVE(plane, fg->N);
1125  plane[W] = fg->N[W] * local2mm;
1126 
1127  bu_cv_htond(d->N, (unsigned char *)plane, ELEMENTS_PER_PLANE);
1128  }
1129  return;
1130  case NMG_KIND_FACE_G_SNURB: {
1131  struct face_g_snurb *fg = (struct face_g_snurb *)ip;
1132  struct disk_face_g_snurb *d;
1133 
1134  d = &((struct disk_face_g_snurb *)op)[oindex];
1135  NMG_CK_FACE_G_SNURB(fg);
1137  INDEXL(d, fg, f_hd);
1138  *(uint32_t *)d->u_order = htonl(fg->order[0]);
1139  *(uint32_t *)d->v_order = htonl(fg->order[1]);
1140  *(uint32_t *)d->u_size = htonl(fg->u.k_size);
1141  *(uint32_t *)d->v_size = htonl(fg->v.k_size);
1142  *(uint32_t *)d->u_knots = htonl(
1143  rt_nmg_export4_fastf(fg->u.knots,
1144  fg->u.k_size, 0, 1.0));
1145  *(uint32_t *)d->v_knots = htonl(
1146  rt_nmg_export4_fastf(fg->v.knots,
1147  fg->v.k_size, 0, 1.0));
1148  *(uint32_t *)d->us_size = htonl(fg->s_size[0]);
1149  *(uint32_t *)d->vs_size = htonl(fg->s_size[1]);
1150  *(uint32_t *)d->pt_type = htonl(fg->pt_type);
1151  /* scale XYZ ctl_points by local2mm */
1152  *(uint32_t *)d->ctl_points = htonl(
1153  rt_nmg_export4_fastf(fg->ctl_points,
1154  fg->s_size[0] * fg->s_size[1],
1155  fg->pt_type,
1156  local2mm));
1157  }
1158  return;
1159  case NMG_KIND_LOOPUSE: {
1160  struct loopuse *lu = (struct loopuse *)ip;
1161  struct disk_loopuse *d;
1162  d = &((struct disk_loopuse *)op)[oindex];
1163  NMG_CK_LOOPUSE(lu);
1165  INDEXL(d, lu, l);
1166  *(uint32_t *)d->up = htonl(reindex((void *)(lu->up.magic_p), ecnt));
1167  INDEX(d, lu, lumate_p);
1168  *(uint32_t *)d->orientation = htonl(lu->orientation);
1169  INDEX(d, lu, l_p);
1170  INDEXL(d, lu, down_hd);
1171  }
1172  return;
1173  case NMG_KIND_LOOP: {
1174  struct loop *loop = (struct loop *)ip;
1175  struct disk_loop *d;
1176  d = &((struct disk_loop *)op)[oindex];
1177  NMG_CK_LOOP(loop);
1179  INDEX(d, loop, lu_p);
1180  INDEX(d, loop, lg_p);
1181  }
1182  return;
1183  case NMG_KIND_LOOP_G: {
1184  struct loop_g *lg = (struct loop_g *)ip;
1185  struct disk_loop_g *d;
1186 
1187  /* must be double for import and export */
1188  double min[ELEMENTS_PER_POINT];
1189  double max[ELEMENTS_PER_POINT];
1190 
1191  d = &((struct disk_loop_g *)op)[oindex];
1192  NMG_CK_LOOP_G(lg);
1194 
1195  VSCALE(min, lg->min_pt, local2mm);
1196  VSCALE(max, lg->max_pt, local2mm);
1197 
1198  bu_cv_htond(d->min_pt, (unsigned char *)min, ELEMENTS_PER_POINT);
1199  bu_cv_htond(d->max_pt, (unsigned char *)max, ELEMENTS_PER_POINT);
1200  }
1201  return;
1202  case NMG_KIND_EDGEUSE: {
1203  struct edgeuse *eu = (struct edgeuse *)ip;
1204  struct disk_edgeuse *d;
1205  d = &((struct disk_edgeuse *)op)[oindex];
1206  NMG_CK_EDGEUSE(eu);
1208  INDEXL(d, eu, l);
1209  /* NOTE The pointers in l2 point at other l2's.
1210  * nmg_index_of_struct() will point 'em back
1211  * at the top of the edgeuse. Beware on import.
1212  */
1213  INDEXL(d, eu, l2);
1214  *(uint32_t *)d->up = htonl(reindex((void *)(eu->up.magic_p), ecnt));
1215  INDEX(d, eu, eumate_p);
1216  INDEX(d, eu, radial_p);
1217  INDEX(d, eu, e_p);
1218  *(uint32_t *)d->orientation = htonl(eu->orientation);
1219  INDEX(d, eu, vu_p);
1220  *(uint32_t *)d->g = htonl(reindex((void *)(eu->g.magic_p), ecnt));
1221  }
1222  return;
1223  case NMG_KIND_EDGE: {
1224  struct edge *e = (struct edge *)ip;
1225  struct disk_edge *d;
1226  d = &((struct disk_edge *)op)[oindex];
1227  NMG_CK_EDGE(e);
1229  *(uint32_t *)d->is_real = htonl(e->is_real);
1230  INDEX(d, e, eu_p);
1231  }
1232  return;
1233  case NMG_KIND_EDGE_G_LSEG: {
1234  struct edge_g_lseg *eg = (struct edge_g_lseg *)ip;
1235  struct disk_edge_g_lseg *d;
1236 
1237  /* must be double for import and export */
1238  double pt[ELEMENTS_PER_POINT];
1239  double dir[ELEMENTS_PER_VECT];
1240 
1241  d = &((struct disk_edge_g_lseg *)op)[oindex];
1242  NMG_CK_EDGE_G_LSEG(eg);
1244  INDEXL(d, eg, eu_hd2);
1245 
1246  /* convert fastf_t to double */
1247  VSCALE(pt, eg->e_pt, local2mm);
1248  VMOVE(dir, eg->e_dir);
1249 
1250  bu_cv_htond(d->e_pt, (unsigned char *)pt, ELEMENTS_PER_POINT);
1251  bu_cv_htond(d->e_dir, (unsigned char *)dir, ELEMENTS_PER_VECT);
1252  }
1253  return;
1254  case NMG_KIND_EDGE_G_CNURB: {
1255  struct edge_g_cnurb *eg = (struct edge_g_cnurb *)ip;
1256  struct disk_edge_g_cnurb *d;
1257  d = &((struct disk_edge_g_cnurb *)op)[oindex];
1258  NMG_CK_EDGE_G_CNURB(eg);
1260  INDEXL(d, eg, eu_hd2);
1261  *(uint32_t *)d->order = htonl(eg->order);
1262 
1263  /* If order is zero, everything else is NULL */
1264  if (eg->order == 0) return;
1265 
1266  *(uint32_t *)d->k_size = htonl(eg->k.k_size);
1267  *(uint32_t *)d->knots = htonl(
1268  rt_nmg_export4_fastf(eg->k.knots,
1269  eg->k.k_size, 0, 1.0));
1270  *(uint32_t *)d->c_size = htonl(eg->c_size);
1271  *(uint32_t *)d->pt_type = htonl(eg->pt_type);
1272  /*
1273  * The curve's control points are in parameter space
1274  * for cnurbs on snurbs, and in XYZ for cnurbs on planar faces.
1275  * UV values do NOT get transformed, XYZ values do!
1276  */
1277  *(uint32_t *)d->ctl_points = htonl(
1278  rt_nmg_export4_fastf(eg->ctl_points,
1279  eg->c_size,
1280  eg->pt_type,
1281  RT_NURB_EXTRACT_PT_TYPE(eg->pt_type) == RT_NURB_PT_UV ?
1282  1.0 : local2mm));
1283  }
1284  return;
1285  case NMG_KIND_VERTEXUSE: {
1286  struct vertexuse *vu = (struct vertexuse *)ip;
1287  struct disk_vertexuse *d;
1288  d = &((struct disk_vertexuse *)op)[oindex];
1289  NMG_CK_VERTEXUSE(vu);
1291  INDEXL(d, vu, l);
1292  *(uint32_t *)d->up = htonl(reindex((void *)(vu->up.magic_p), ecnt));
1293  INDEX(d, vu, v_p);
1294  if (vu->a.magic_p)NMG_CK_VERTEXUSE_A_EITHER(vu->a.magic_p);
1295  *(uint32_t *)d->a = htonl(reindex((void *)(vu->a.magic_p), ecnt));
1296  }
1297  return;
1299  struct vertexuse_a_plane *vua = (struct vertexuse_a_plane *)ip;
1300  struct disk_vertexuse_a_plane *d;
1301 
1302  /* must be double for import and export */
1303  double normal[ELEMENTS_PER_VECT];
1304 
1305  d = &((struct disk_vertexuse_a_plane *)op)[oindex];
1306  NMG_CK_VERTEXUSE_A_PLANE(vua);
1308 
1309  /* Normal vectors don't scale */
1310  /* This is not a plane equation here */
1311  VMOVE(normal, vua->N); /* convert fastf_t to double */
1312  bu_cv_htond(d->N, (unsigned char *)normal, ELEMENTS_PER_VECT);
1313  }
1314  return;
1316  struct vertexuse_a_cnurb *vua = (struct vertexuse_a_cnurb *)ip;
1317  struct disk_vertexuse_a_cnurb *d;
1318 
1319  /* must be double for import and export */
1320  double param[3];
1321 
1322  d = &((struct disk_vertexuse_a_cnurb *)op)[oindex];
1323  NMG_CK_VERTEXUSE_A_CNURB(vua);
1325 
1326  /* (u, v) parameters on curves don't scale */
1327  VMOVE(param, vua->param); /* convert fastf_t to double */
1328 
1329  bu_cv_htond(d->param, (unsigned char *)param, 3);
1330  }
1331  return;
1332  case NMG_KIND_VERTEX: {
1333  struct vertex *v = (struct vertex *)ip;
1334  struct disk_vertex *d;
1335  d = &((struct disk_vertex *)op)[oindex];
1336  NMG_CK_VERTEX(v);
1338  INDEXL(d, v, vu_hd);
1339  INDEX(d, v, vg_p);
1340  }
1341  return;
1342  case NMG_KIND_VERTEX_G: {
1343  struct vertex_g *vg = (struct vertex_g *)ip;
1344  struct disk_vertex_g *d;
1345 
1346  /* must be double for import and export */
1347  double pt[ELEMENTS_PER_POINT];
1348 
1349  d = &((struct disk_vertex_g *)op)[oindex];
1350  NMG_CK_VERTEX_G(vg);
1352  VSCALE(pt, vg->coord, local2mm);
1353 
1354  bu_cv_htond(d->coord, (unsigned char *)pt, ELEMENTS_PER_POINT);
1355  }
1356  return;
1357  }
1358  bu_log("rt_nmg_edisk kind=%d unknown\n", ecnt[idx].kind);
1359 }
1360 #undef INDEX
1361 #undef INDEXL
1362 
1363 /*
1364  * For symmetry with export, use same macro names and arg ordering,
1365  * but here take from "o" (outboard) variable and put in "i" (internal).
1366  *
1367  * NOTE that the "< 0" test here is a comparison with DISK_INDEX_LISTHEAD.
1368  */
1369 #define INDEX(o, i, ty, elem) (i)->elem = (struct ty *)ptrs[ntohl(*(uint32_t*)((o)->elem))]
1370 #define INDEXL_HD(oo, ii, elem, hd) { \
1371  int sub; \
1372  if ((sub = ntohl(*(uint32_t*)((oo)->elem.forw))) < 0) \
1373  (ii)->elem.forw = &(hd); \
1374  else (ii)->elem.forw = (struct bu_list *)ptrs[sub]; \
1375  if ((sub = ntohl(*(uint32_t*)((oo)->elem.back))) < 0) \
1376  (ii)->elem.back = &(hd); \
1377  else (ii)->elem.back = (struct bu_list *)ptrs[sub]; }
1378 
1379 /* For use with the edgeuse l2 / edge_g eu2_hd secondary list */
1380 /* The subscripts will point to the edgeuse, not the edgeuse's l2 rt_list */
1381 #define INDEXL_HD2(oo, ii, elem, hd) { \
1382  int sub; \
1383  struct edgeuse *eu2; \
1384  if ((sub = ntohl(*(uint32_t*)((oo)->elem.forw))) < 0) { \
1385  (ii)->elem.forw = &(hd); \
1386  } else { \
1387  eu2 = (struct edgeuse *)ptrs[sub]; \
1388  NMG_CK_EDGEUSE(eu2); \
1389  (ii)->elem.forw = &eu2->l2; \
1390  } \
1391  if ((sub = ntohl(*(uint32_t*)((oo)->elem.back))) < 0) { \
1392  (ii)->elem.back = &(hd); \
1393  } else { \
1394  eu2 = (struct edgeuse *)ptrs[sub]; \
1395  NMG_CK_EDGEUSE(eu2); \
1396  (ii)->elem.back = &eu2->l2; \
1397  } }
1398 
1399 
1400 /**
1401  * Import a given structure from disk to memory format.
1402  *
1403  * Transform geometry by given matrix.
1404  */
1405 int
1406 rt_nmg_idisk(void *op, void *ip, struct nmg_exp_counts *ecnt, int idx, uint32_t **ptrs, const fastf_t *mat, const unsigned char *basep)
1407 /* ptr to in-memory structure */
1408 /* base of disk array */
1409 
1410 
1411 /* base of whole import record */
1412 {
1413  int iindex; /* index in ip */
1414 
1415  iindex = 0;
1416  switch (ecnt[idx].kind) {
1417  case NMG_KIND_MODEL: {
1418  struct model *m = (struct model *)op;
1419  struct disk_model *d;
1420  d = &((struct disk_model *)ip)[iindex];
1421  NMG_CK_MODEL(m);
1423  INDEXL_HD(d, m, r_hd, m->r_hd);
1424  }
1425  return 0;
1426  case NMG_KIND_NMGREGION: {
1427  struct nmgregion *r = (struct nmgregion *)op;
1428  struct disk_nmgregion *d;
1429  d = &((struct disk_nmgregion *)ip)[iindex];
1430  NMG_CK_REGION(r);
1432  INDEX(d, r, model, m_p);
1433  INDEX(d, r, nmgregion_a, ra_p);
1434  INDEXL_HD(d, r, s_hd, r->s_hd);
1435  INDEXL_HD(d, r, l, r->m_p->r_hd); /* do after m_p */
1436  NMG_CK_MODEL(r->m_p);
1437  }
1438  return 0;
1439  case NMG_KIND_NMGREGION_A: {
1440  struct nmgregion_a *r = (struct nmgregion_a *)op;
1441  struct disk_nmgregion_a *d;
1442  point_t min;
1443  point_t max;
1444 
1445  /* must be double for import and export */
1446  double scanmin[ELEMENTS_PER_POINT];
1447  double scanmax[ELEMENTS_PER_POINT];
1448 
1449  d = &((struct disk_nmgregion_a *)ip)[iindex];
1450  NMG_CK_REGION_A(r);
1452  bu_cv_ntohd((unsigned char *)scanmin, d->min_pt, ELEMENTS_PER_POINT);
1453  VMOVE(min, scanmin); /* convert double to fastf_t */
1454  bu_cv_ntohd((unsigned char *)scanmax, d->max_pt, ELEMENTS_PER_POINT);
1455  VMOVE(max, scanmax); /* convert double to fastf_t */
1456  bn_rotate_bbox(r->min_pt, r->max_pt, mat, min, max);
1457  }
1458  return 0;
1459  case NMG_KIND_SHELL: {
1460  struct shell *s = (struct shell *)op;
1461  struct disk_shell *d;
1462  d = &((struct disk_shell *)ip)[iindex];
1463  NMG_CK_SHELL(s);
1465  INDEX(d, s, nmgregion, r_p);
1466  INDEX(d, s, shell_a, sa_p);
1467  INDEXL_HD(d, s, fu_hd, s->fu_hd);
1468  INDEXL_HD(d, s, lu_hd, s->lu_hd);
1469  INDEXL_HD(d, s, eu_hd, s->eu_hd);
1470  INDEX(d, s, vertexuse, vu_p);
1471  NMG_CK_REGION(s->r_p);
1472  INDEXL_HD(d, s, l, s->r_p->s_hd); /* after s->r_p */
1473  }
1474  return 0;
1475  case NMG_KIND_SHELL_A: {
1476  struct shell_a *sa = (struct shell_a *)op;
1477  struct disk_shell_a *d;
1478  point_t min;
1479  point_t max;
1480 
1481  /* must be double for import and export */
1482  double scanmin[ELEMENTS_PER_POINT];
1483  double scanmax[ELEMENTS_PER_POINT];
1484 
1485  d = &((struct disk_shell_a *)ip)[iindex];
1486  NMG_CK_SHELL_A(sa);
1488  bu_cv_ntohd((unsigned char *)scanmin, d->min_pt, ELEMENTS_PER_POINT);
1489  VMOVE(min, scanmin); /* convert double to fastf_t */
1490  bu_cv_ntohd((unsigned char *)scanmax, d->max_pt, ELEMENTS_PER_POINT);
1491  VMOVE(max, scanmax); /* convert double to fastf_t */
1492  bn_rotate_bbox(sa->min_pt, sa->max_pt, mat, min, max);
1493  }
1494  return 0;
1495  case NMG_KIND_FACEUSE: {
1496  struct faceuse *fu = (struct faceuse *)op;
1497  struct disk_faceuse *d;
1498  d = &((struct disk_faceuse *)ip)[iindex];
1499  NMG_CK_FACEUSE(fu);
1501  INDEX(d, fu, shell, s_p);
1502  INDEX(d, fu, faceuse, fumate_p);
1503  fu->orientation = ntohl(*(uint32_t*)(d->orientation));
1504  INDEX(d, fu, face, f_p);
1505  INDEXL_HD(d, fu, lu_hd, fu->lu_hd);
1506  INDEXL_HD(d, fu, l, fu->s_p->fu_hd); /* after fu->s_p */
1507  NMG_CK_FACE(fu->f_p);
1508  NMG_CK_FACEUSE(fu->fumate_p);
1509  }
1510  return 0;
1511  case NMG_KIND_FACE: {
1512  struct face *f = (struct face *)op;
1513  struct disk_face *d;
1514  int g_index;
1515 
1516  d = &((struct disk_face *)ip)[iindex];
1517  NMG_CK_FACE(f);
1519  INDEX(d, f, faceuse, fu_p);
1520  g_index = ntohl(*(uint32_t*)(d->g));
1521  f->g.magic_p = (uint32_t *)ptrs[g_index];
1522  f->flip = ntohl(*(uint32_t*)(d->flip));
1523  /* Enroll this face on fg's list of users */
1524  NMG_CK_FACE_G_EITHER(f->g.magic_p);
1525  INDEXL_HD(d, f, l, f->g.plane_p->f_hd); /* after fu->fg_p set */
1526  NMG_CK_FACEUSE(f->fu_p);
1527  }
1528  return 0;
1529  case NMG_KIND_FACE_G_PLANE: {
1530  struct face_g_plane *fg = (struct face_g_plane *)op;
1531  struct disk_face_g_plane *d;
1532  plane_t plane;
1533 
1534  /* must be double for import and export */
1535  double scan[ELEMENTS_PER_PLANE];
1536 
1537  d = &((struct disk_face_g_plane *)ip)[iindex];
1538  NMG_CK_FACE_G_PLANE(fg);
1540  INDEXL_HD(d, fg, f_hd, fg->f_hd);
1541  bu_cv_ntohd((unsigned char *)scan, d->N, ELEMENTS_PER_PLANE);
1542  HMOVE(plane, scan); /* convert double to fastf_t */
1543  bn_rotate_plane(fg->N, mat, plane);
1544  }
1545  return 0;
1546  case NMG_KIND_FACE_G_SNURB: {
1547  struct face_g_snurb *fg = (struct face_g_snurb *)op;
1548  struct disk_face_g_snurb *d;
1549  const matp_t matrix = (const matp_t)mat;
1550  d = &((struct disk_face_g_snurb *)ip)[iindex];
1551  NMG_CK_FACE_G_SNURB(fg);
1553  INDEXL_HD(d, fg, f_hd, fg->f_hd);
1554  fg->order[0] = ntohl(*(uint32_t*)(d->u_order));
1555  fg->order[1] = ntohl(*(uint32_t*)(d->v_order));
1556  fg->u.k_size = ntohl(*(uint32_t*)(d->u_size));
1557  fg->u.knots = rt_nmg_import4_fastf(basep,
1558  ecnt,
1559  ntohl(*(uint32_t*)(d->u_knots)),
1560  (const matp_t)NULL,
1561  fg->u.k_size,
1562  0);
1563  fg->v.k_size = ntohl(*(uint32_t*)(d->v_size));
1564  fg->v.knots = rt_nmg_import4_fastf(basep,
1565  ecnt,
1566  ntohl(*(uint32_t*)(d->v_knots)),
1567  (const matp_t)NULL,
1568  fg->v.k_size,
1569  0);
1570  fg->s_size[0] = ntohl(*(uint32_t*)(d->us_size));
1571  fg->s_size[1] = ntohl(*(uint32_t*)(d->vs_size));
1572  fg->pt_type = ntohl(*(uint32_t*)(d->pt_type));
1573  /* Transform ctl_points by 'mat' */
1574  fg->ctl_points = rt_nmg_import4_fastf(basep,
1575  ecnt,
1576  ntohl(*(uint32_t*)(d->ctl_points)),
1577  matrix,
1578  fg->s_size[0] * fg->s_size[1],
1579  fg->pt_type);
1580  }
1581  return 0;
1582  case NMG_KIND_LOOPUSE: {
1583  struct loopuse *lu = (struct loopuse *)op;
1584  struct disk_loopuse *d;
1585  int up_index;
1586  int up_kind;
1587 
1588  d = &((struct disk_loopuse *)ip)[iindex];
1589  NMG_CK_LOOPUSE(lu);
1591  up_index = ntohl(*(uint32_t*)(d->up));
1592  lu->up.magic_p = ptrs[up_index];
1593  INDEX(d, lu, loopuse, lumate_p);
1594  lu->orientation = ntohl(*(uint32_t*)(d->orientation));
1595  INDEX(d, lu, loop, l_p);
1596  up_kind = ecnt[up_index].kind;
1597  if (up_kind == NMG_KIND_FACEUSE) {
1598  INDEXL_HD(d, lu, l, lu->up.fu_p->lu_hd);
1599  } else if (up_kind == NMG_KIND_SHELL) {
1600  INDEXL_HD(d, lu, l, lu->up.s_p->lu_hd);
1601  } else bu_log("bad loopuse up, index=%d, kind=%d\n", up_index, up_kind);
1602  INDEXL_HD(d, lu, down_hd, lu->down_hd);
1603  if (lu->down_hd.forw == BU_LIST_NULL)
1604  bu_bomb("rt_nmg_idisk: null loopuse down_hd.forw\n");
1605  NMG_CK_LOOP(lu->l_p);
1606  }
1607  return 0;
1608  case NMG_KIND_LOOP: {
1609  struct loop *loop = (struct loop *)op;
1610  struct disk_loop *d;
1611  d = &((struct disk_loop *)ip)[iindex];
1612  NMG_CK_LOOP(loop);
1614  INDEX(d, loop, loopuse, lu_p);
1615  INDEX(d, loop, loop_g, lg_p);
1616  NMG_CK_LOOPUSE(loop->lu_p);
1617  }
1618  return 0;
1619  case NMG_KIND_LOOP_G: {
1620  struct loop_g *lg = (struct loop_g *)op;
1621  struct disk_loop_g *d;
1622  point_t min;
1623  point_t max;
1624 
1625  /* must be double for import and export */
1626  double scanmin[ELEMENTS_PER_POINT];
1627  double scanmax[ELEMENTS_PER_POINT];
1628 
1629  d = &((struct disk_loop_g *)ip)[iindex];
1630  NMG_CK_LOOP_G(lg);
1632  bu_cv_ntohd((unsigned char *)scanmin, d->min_pt, ELEMENTS_PER_POINT);
1633  VMOVE(min, scanmin); /* convert double to fastf_t */
1634  bu_cv_ntohd((unsigned char *)scanmax, d->max_pt, ELEMENTS_PER_POINT);
1635  VMOVE(max, scanmax); /* convert double to fastf_t */
1636  bn_rotate_bbox(lg->min_pt, lg->max_pt, mat, min, max);
1637  }
1638  return 0;
1639  case NMG_KIND_EDGEUSE: {
1640  struct edgeuse *eu = (struct edgeuse *)op;
1641  struct disk_edgeuse *d;
1642  int up_index;
1643  int up_kind;
1644 
1645  d = &((struct disk_edgeuse *)ip)[iindex];
1646  NMG_CK_EDGEUSE(eu);
1648  up_index = ntohl(*(uint32_t*)(d->up));
1649  eu->up.magic_p = ptrs[up_index];
1650  INDEX(d, eu, edgeuse, eumate_p);
1651  INDEX(d, eu, edgeuse, radial_p);
1652  INDEX(d, eu, edge, e_p);
1653  eu->orientation = ntohl(*(uint32_t*)(d->orientation));
1654  INDEX(d, eu, vertexuse, vu_p);
1655  up_kind = ecnt[up_index].kind;
1656  if (up_kind == NMG_KIND_LOOPUSE) {
1657  INDEXL_HD(d, eu, l, eu->up.lu_p->down_hd);
1658  } else if (up_kind == NMG_KIND_SHELL) {
1659  INDEXL_HD(d, eu, l, eu->up.s_p->eu_hd);
1660  } else bu_log("bad edgeuse up, index=%d, kind=%d\n", up_index, up_kind);
1661  eu->g.magic_p = ptrs[ntohl(*(uint32_t*)(d->g))];
1662  NMG_CK_EDGE(eu->e_p);
1663  NMG_CK_EDGEUSE(eu->eumate_p);
1664  NMG_CK_EDGEUSE(eu->radial_p);
1665  NMG_CK_VERTEXUSE(eu->vu_p);
1666  if (eu->g.magic_p != NULL) {
1667  NMG_CK_EDGE_G_EITHER(eu->g.magic_p);
1668 
1669  /* Note that l2 subscripts will be for edgeuse, not l2 */
1670  /* g.lseg_p->eu_hd2 is a pun for g.cnurb_p->eu_hd2 also */
1671  INDEXL_HD2(d, eu, l2, eu->g.lseg_p->eu_hd2);
1672  } else {
1673  eu->l2.forw = &eu->l2;
1674  eu->l2.back = &eu->l2;
1675  }
1676  }
1677  return 0;
1678  case NMG_KIND_EDGE: {
1679  struct edge *e = (struct edge *)op;
1680  struct disk_edge *d;
1681  d = &((struct disk_edge *)ip)[iindex];
1682  NMG_CK_EDGE(e);
1684  e->is_real = ntohl(*(uint32_t*)(d->is_real));
1685  INDEX(d, e, edgeuse, eu_p);
1686  NMG_CK_EDGEUSE(e->eu_p);
1687  }
1688  return 0;
1689  case NMG_KIND_EDGE_G_LSEG: {
1690  struct edge_g_lseg *eg = (struct edge_g_lseg *)op;
1691  struct disk_edge_g_lseg *d;
1692  point_t pt;
1693  vect_t dir;
1694 
1695  /* must be double for import and export */
1696  double scanpt[ELEMENTS_PER_POINT];
1697  double scandir[ELEMENTS_PER_VECT];
1698 
1699  d = &((struct disk_edge_g_lseg *)ip)[iindex];
1700  NMG_CK_EDGE_G_LSEG(eg);
1702  /* Forw subscript points to edgeuse, not edgeuse2 */
1703  INDEXL_HD2(d, eg, eu_hd2, eg->eu_hd2);
1704  bu_cv_ntohd((unsigned char *)scanpt, d->e_pt, ELEMENTS_PER_POINT);
1705  VMOVE(pt, scanpt); /* convert double to fastf_t */
1706  bu_cv_ntohd((unsigned char *)scandir, d->e_dir, ELEMENTS_PER_VECT);
1707  VMOVE(dir, scandir); /* convert double to fastf_t */
1708  MAT4X3PNT(eg->e_pt, mat, pt);
1709  MAT4X3VEC(eg->e_dir, mat, dir);
1710  }
1711  return 0;
1712  case NMG_KIND_EDGE_G_CNURB: {
1713  struct edge_g_cnurb *eg = (struct edge_g_cnurb *)op;
1714  struct disk_edge_g_cnurb *d;
1715  d = &((struct disk_edge_g_cnurb *)ip)[iindex];
1716  NMG_CK_EDGE_G_CNURB(eg);
1718  INDEXL_HD2(d, eg, eu_hd2, eg->eu_hd2);
1719  eg->order = ntohl(*(uint32_t*)(d->order));
1720 
1721  /* If order is zero, so is everything else */
1722  if (eg->order == 0) return 0;
1723 
1724  eg->k.k_size = ntohl(*(uint32_t*)(d->k_size));
1725  eg->k.knots = rt_nmg_import4_fastf(basep,
1726  ecnt,
1727  ntohl(*(uint32_t*)(d->knots)),
1728  (const matp_t)NULL,
1729  eg->k.k_size,
1730  0);
1731  eg->c_size = ntohl(*(uint32_t*)(d->c_size));
1732  eg->pt_type = ntohl(*(uint32_t*)(d->pt_type));
1733  /*
1734  * The curve's control points are in parameter space.
1735  * They do NOT get transformed!
1736  */
1737  if (RT_NURB_EXTRACT_PT_TYPE(eg->pt_type) == RT_NURB_PT_UV) {
1738  /* UV coords on snurb surface don't get xformed */
1739  eg->ctl_points = rt_nmg_import4_fastf(basep,
1740  ecnt,
1741  ntohl(*(uint32_t*)(d->ctl_points)),
1742  (const matp_t)NULL,
1743  eg->c_size,
1744  eg->pt_type);
1745  } else {
1746  const matp_t matrix = (const matp_t)mat;
1747 
1748  /* XYZ coords on planar face DO get xformed */
1749  eg->ctl_points = rt_nmg_import4_fastf(basep,
1750  ecnt,
1751  ntohl(*(uint32_t*)(d->ctl_points)),
1752  matrix,
1753  eg->c_size,
1754  eg->pt_type);
1755  }
1756  }
1757  return 0;
1758  case NMG_KIND_VERTEXUSE: {
1759  struct vertexuse *vu = (struct vertexuse *)op;
1760  struct disk_vertexuse *d;
1761  d = &((struct disk_vertexuse *)ip)[iindex];
1762  NMG_CK_VERTEXUSE(vu);
1764  vu->up.magic_p = ptrs[ntohl(*(uint32_t*)(d->up))];
1765  INDEX(d, vu, vertex, v_p);
1766  vu->a.magic_p = ptrs[ntohl(*(uint32_t*)(d->a))];
1767  NMG_CK_VERTEX(vu->v_p);
1768  if (vu->a.magic_p)NMG_CK_VERTEXUSE_A_EITHER(vu->a.magic_p);
1769  INDEXL_HD(d, vu, l, vu->v_p->vu_hd);
1770  }
1771  return 0;
1773  struct vertexuse_a_plane *vua = (struct vertexuse_a_plane *)op;
1774  struct disk_vertexuse_a_plane *d;
1775  /* must be double for import and export */
1776  double norm[ELEMENTS_PER_VECT];
1777 
1778  d = &((struct disk_vertexuse_a_plane *)ip)[iindex];
1779  NMG_CK_VERTEXUSE_A_PLANE(vua);
1781  bu_cv_ntohd((unsigned char *)norm, d->N, ELEMENTS_PER_VECT);
1782  MAT4X3VEC(vua->N, mat, norm);
1783  }
1784  return 0;
1786  struct vertexuse_a_cnurb *vua = (struct vertexuse_a_cnurb *)op;
1787  struct disk_vertexuse_a_cnurb *d;
1788 
1789  /* must be double for import and export */
1790  double scan[3];
1791 
1792  d = &((struct disk_vertexuse_a_cnurb *)ip)[iindex];
1793  NMG_CK_VERTEXUSE_A_CNURB(vua);
1795  /* These parameters are invariant w.r.t. 'mat' */
1796  bu_cv_ntohd((unsigned char *)scan, d->param, 3);
1797  VMOVE(vua->param, scan); /* convert double to fastf_t */
1798  }
1799  return 0;
1800  case NMG_KIND_VERTEX: {
1801  struct vertex *v = (struct vertex *)op;
1802  struct disk_vertex *d;
1803  d = &((struct disk_vertex *)ip)[iindex];
1804  NMG_CK_VERTEX(v);
1806  INDEXL_HD(d, v, vu_hd, v->vu_hd);
1807  INDEX(d, v, vertex_g, vg_p);
1808  }
1809  return 0;
1810  case NMG_KIND_VERTEX_G: {
1811  struct vertex_g *vg = (struct vertex_g *)op;
1812  struct disk_vertex_g *d;
1813  /* must be double for import and export */
1814  double pt[ELEMENTS_PER_POINT];
1815 
1816  d = &((struct disk_vertex_g *)ip)[iindex];
1817  NMG_CK_VERTEX_G(vg);
1819  bu_cv_ntohd((unsigned char *)pt, d->coord, ELEMENTS_PER_POINT);
1820  MAT4X3PNT(vg->coord, mat, pt);
1821  }
1822  return 0;
1823  }
1824  bu_log("rt_nmg_idisk kind=%d unknown\n", ecnt[idx].kind);
1825  return -1;
1826 }
1827 
1828 
1829 /**
1830  * Allocate storage for all the in-memory NMG structures, in
1831  * preparation for the importation operation, using the GET_xxx()
1832  * macros, so that m->maxindex, etc., are all appropriately handled.
1833  */
1834 HIDDEN struct model *
1835 rt_nmg_ialloc(uint32_t **ptrs, struct nmg_exp_counts *ecnt, int *kind_counts)
1836 {
1837  struct model *m = (struct model *)0;
1838  int subscript;
1839  int kind;
1840  int j;
1841 
1842  subscript = 1;
1843  for (kind = 0; kind < NMG_N_KINDS; kind++) {
1844  if (kind == NMG_KIND_DOUBLE_ARRAY) continue;
1845  for (j = 0; j < kind_counts[kind]; j++) {
1846  ecnt[subscript].kind = kind;
1847  ecnt[subscript].per_struct_index = 0; /* unused on import */
1848  switch (kind) {
1849  case NMG_KIND_MODEL:
1850  if (m) bu_bomb("multiple models?");
1851  m = nmg_mm();
1852  /* Keep disk indices & new indices equal... */
1853  m->maxindex++;
1854  ptrs[subscript] = (uint32_t *)m;
1855  break;
1856  case NMG_KIND_NMGREGION: {
1857  struct nmgregion *r;
1858  GET_REGION(r, m);
1859  r->l.magic = NMG_REGION_MAGIC;
1860  BU_LIST_INIT(&r->s_hd);
1861  ptrs[subscript] = (uint32_t *)r;
1862  }
1863  break;
1864  case NMG_KIND_NMGREGION_A: {
1865  struct nmgregion_a *ra;
1866  GET_REGION_A(ra, m);
1867  ra->magic = NMG_REGION_A_MAGIC;
1868  ptrs[subscript] = (uint32_t *)ra;
1869  }
1870  break;
1871  case NMG_KIND_SHELL: {
1872  struct shell *s;
1873  GET_SHELL(s, m);
1874  s->l.magic = NMG_SHELL_MAGIC;
1875  BU_LIST_INIT(&s->fu_hd);
1876  BU_LIST_INIT(&s->lu_hd);
1877  BU_LIST_INIT(&s->eu_hd);
1878  ptrs[subscript] = (uint32_t *)s;
1879  }
1880  break;
1881  case NMG_KIND_SHELL_A: {
1882  struct shell_a *sa;
1883  GET_SHELL_A(sa, m);
1884  sa->magic = NMG_SHELL_A_MAGIC;
1885  ptrs[subscript] = (uint32_t *)sa;
1886  }
1887  break;
1888  case NMG_KIND_FACEUSE: {
1889  struct faceuse *fu;
1890  GET_FACEUSE(fu, m);
1891  fu->l.magic = NMG_FACEUSE_MAGIC;
1892  BU_LIST_INIT(&fu->lu_hd);
1893  ptrs[subscript] = (uint32_t *)fu;
1894  }
1895  break;
1896  case NMG_KIND_FACE: {
1897  struct face *f;
1898  GET_FACE(f, m);
1899  f->l.magic = NMG_FACE_MAGIC;
1900  ptrs[subscript] = (uint32_t *)f;
1901  }
1902  break;
1903  case NMG_KIND_FACE_G_PLANE: {
1904  struct face_g_plane *fg;
1905  GET_FACE_G_PLANE(fg, m);
1906  fg->magic = NMG_FACE_G_PLANE_MAGIC;
1907  BU_LIST_INIT(&fg->f_hd);
1908  ptrs[subscript] = (uint32_t *)fg;
1909  }
1910  break;
1911  case NMG_KIND_FACE_G_SNURB: {
1912  struct face_g_snurb *fg;
1913  GET_FACE_G_SNURB(fg, m);
1914  fg->l.magic = NMG_FACE_G_SNURB_MAGIC;
1915  BU_LIST_INIT(&fg->f_hd);
1916  ptrs[subscript] = (uint32_t *)fg;
1917  }
1918  break;
1919  case NMG_KIND_LOOPUSE: {
1920  struct loopuse *lu;
1921  GET_LOOPUSE(lu, m);
1922  lu->l.magic = NMG_LOOPUSE_MAGIC;
1923  BU_LIST_INIT(&lu->down_hd);
1924  ptrs[subscript] = (uint32_t *)lu;
1925  }
1926  break;
1927  case NMG_KIND_LOOP: {
1928  struct loop *l;
1929  GET_LOOP(l, m);
1930  l->magic = NMG_LOOP_MAGIC;
1931  ptrs[subscript] = (uint32_t *)l;
1932  }
1933  break;
1934  case NMG_KIND_LOOP_G: {
1935  struct loop_g *lg;
1936  GET_LOOP_G(lg, m);
1937  lg->magic = NMG_LOOP_G_MAGIC;
1938  ptrs[subscript] = (uint32_t *)lg;
1939  }
1940  break;
1941  case NMG_KIND_EDGEUSE: {
1942  struct edgeuse *eu;
1943  GET_EDGEUSE(eu, m);
1944  eu->l.magic = NMG_EDGEUSE_MAGIC;
1945  eu->l2.magic = NMG_EDGEUSE2_MAGIC;
1946  ptrs[subscript] = (uint32_t *)eu;
1947  }
1948  break;
1949  case NMG_KIND_EDGE: {
1950  struct edge *e;
1951  GET_EDGE(e, m);
1952  e->magic = NMG_EDGE_MAGIC;
1953  ptrs[subscript] = (uint32_t *)e;
1954  }
1955  break;
1956  case NMG_KIND_EDGE_G_LSEG: {
1957  struct edge_g_lseg *eg;
1958  GET_EDGE_G_LSEG(eg, m);
1959  eg->l.magic = NMG_EDGE_G_LSEG_MAGIC;
1960  BU_LIST_INIT(&eg->eu_hd2);
1961  ptrs[subscript] = (uint32_t *)eg;
1962  }
1963  break;
1964  case NMG_KIND_EDGE_G_CNURB: {
1965  struct edge_g_cnurb *eg;
1966  GET_EDGE_G_CNURB(eg, m);
1967  eg->l.magic = NMG_EDGE_G_CNURB_MAGIC;
1968  BU_LIST_INIT(&eg->eu_hd2);
1969  ptrs[subscript] = (uint32_t *)eg;
1970  }
1971  break;
1972  case NMG_KIND_VERTEXUSE: {
1973  struct vertexuse *vu;
1974  GET_VERTEXUSE(vu, m);
1975  vu->l.magic = NMG_VERTEXUSE_MAGIC;
1976  ptrs[subscript] = (uint32_t *)vu;
1977  }
1978  break;
1980  struct vertexuse_a_plane *vua;
1981  GET_VERTEXUSE_A_PLANE(vua, m);
1982  vua->magic = NMG_VERTEXUSE_A_PLANE_MAGIC;
1983  ptrs[subscript] = (uint32_t *)vua;
1984  }
1985  break;
1987  struct vertexuse_a_cnurb *vua;
1988  GET_VERTEXUSE_A_CNURB(vua, m);
1989  vua->magic = NMG_VERTEXUSE_A_CNURB_MAGIC;
1990  ptrs[subscript] = (uint32_t *)vua;
1991  }
1992  break;
1993  case NMG_KIND_VERTEX: {
1994  struct vertex *v;
1995  GET_VERTEX(v, m);
1996  v->magic = NMG_VERTEX_MAGIC;
1997  BU_LIST_INIT(&v->vu_hd);
1998  ptrs[subscript] = (uint32_t *)v;
1999  }
2000  break;
2001  case NMG_KIND_VERTEX_G: {
2002  struct vertex_g *vg;
2003  GET_VERTEX_G(vg, m);
2004  vg->magic = NMG_VERTEX_G_MAGIC;
2005  ptrs[subscript] = (uint32_t *)vg;
2006  }
2007  break;
2008  default:
2009  bu_log("bad kind = %d\n", kind);
2010  ptrs[subscript] = (uint32_t *)0;
2011  break;
2012  }
2013 
2014  /* new_subscript unused on import except for printf()s */
2015  ecnt[subscript].new_subscript = nmg_index_of_struct(ptrs[subscript]);
2016  subscript++;
2017  }
2018  }
2019  return m;
2020 }
2021 
2022 
2023 /**
2024  * Find the locations of all the variable-sized fastf_t arrays in the
2025  * input record. Record that position as a byte offset from the very
2026  * front of the input record in ecnt[], indexed by subscript number.
2027  *
2028  * No storage is allocated here, that will be done by
2029  * rt_nmg_import4_fastf() on the fly. A separate call to bu_malloc()
2030  * will be used, so that nmg_keg(), etc., can kill each array as
2031  * appropriate.
2032  */
2033 void
2034 rt_nmg_i2alloc(struct nmg_exp_counts *ecnt, unsigned char *cp, int *kind_counts)
2035 {
2036  int kind;
2037  int nkind;
2038  int subscript;
2039  int offset;
2040  int i;
2041 
2042  nkind = kind_counts[NMG_KIND_DOUBLE_ARRAY];
2043  if (nkind <= 0) return;
2044 
2045  /* First, find the beginning of the fastf_t arrays */
2046  subscript = 1;
2047  offset = 0;
2048  for (kind = 0; kind < NMG_N_KINDS; kind++) {
2049  if (kind == NMG_KIND_DOUBLE_ARRAY) continue;
2050  offset += rt_nmg_disk_sizes[kind] * kind_counts[kind];
2051  subscript += kind_counts[kind];
2052  }
2053 
2054  /* Should have found the first one now */
2056  for (i=0; i < nkind; i++) {
2057  int ndouble;
2059  ndouble = ntohl(*(uint32_t*)(cp + offset + 4));
2060  ecnt[subscript].kind = NMG_KIND_DOUBLE_ARRAY;
2061  /* Stored byte offset is from beginning of disk record */
2062  ecnt[subscript].byte_offset = offset;
2063  offset += (4+4) + 8*ndouble;
2064  subscript++;
2065  }
2066 }
2067 
2068 
2069 /**
2070  * Import an NMG from the database format to the internal format.
2071  * Apply modeling transformations as well.
2072  *
2073  * Special subscripts are used in the disk file:
2074  * -1 indicates a pointer to the rt_list structure which
2075  * heads a linked list, and is not the first struct element.
2076  * 0 indicates that a null pointer should be used.
2077  */
2078 int
2079 rt_nmg_import4_internal(struct rt_db_internal *ip, const struct bu_external *ep, const fastf_t *mat, int rebound, const struct bn_tol *tol)
2080 {
2081  struct model *m;
2082  union record *rp;
2083  int kind_counts[NMG_N_KINDS];
2084  unsigned char *cp;
2085  uint32_t **real_ptrs;
2086  uint32_t **ptrs;
2087  struct nmg_exp_counts *ecnt;
2088  int i;
2089  int maxindex;
2090  int kind;
2091  static uint32_t bad_magic = 0x999;
2092 
2093  BU_CK_EXTERNAL(ep);
2094  BN_CK_TOL(tol);
2095  rp = (union record *)ep->ext_buf;
2096  /* Check record type */
2097  if (rp->u_id != DBID_NMG) {
2098  bu_log("rt_nmg_import4: defective record\n");
2099  return -1;
2100  }
2101 
2102  /*
2103  * Check for proper version.
2104  * In the future, this will be the backwards-compatibility hook.
2105  */
2106  if (rp->nmg.N_version != DISK_MODEL_VERSION) {
2107  bu_log("rt_nmg_import4: expected NMG '.g' format version %d, got version %d, aborting.\n",
2109  rp->nmg.N_version);
2110  return -1;
2111  }
2112 
2113  /* Obtain counts of each kind of structure */
2114  maxindex = 1;
2115  for (kind = 0; kind < NMG_N_KINDS; kind++) {
2116  kind_counts[kind] = ntohl(*(uint32_t*)(rp->nmg.N_structs+4*kind));
2117  maxindex += kind_counts[kind];
2118  }
2119 
2120  /* Collect overall new subscripts, and structure-specific indices */
2121  ecnt = (struct nmg_exp_counts *)bu_calloc(maxindex+3,
2122  sizeof(struct nmg_exp_counts), "ecnt[]");
2123  real_ptrs = (uint32_t **)bu_calloc(maxindex+3,
2124  sizeof(uint32_t *), "ptrs[]");
2125  /* So that indexing [-1] gives an appropriately bogus magic # */
2126  ptrs = real_ptrs+1;
2127  ptrs[-1] = &bad_magic; /* [-1] gives bad magic */
2128  ptrs[0] = NULL; /* [0] gives NULL */
2129  ptrs[maxindex] = &bad_magic; /* [maxindex] gives bad magic */
2130  ptrs[maxindex+1] = &bad_magic; /* [maxindex+1] gives bad magic */
2131 
2132  /* Allocate storage for all the NMG structs, in ptrs[] */
2133  m = rt_nmg_ialloc(ptrs, ecnt, kind_counts);
2134 
2135  /* Locate the variably sized fastf_t arrays. ecnt[] has room. */
2136  cp = (unsigned char *)(rp+1); /* start at first granule in */
2137  rt_nmg_i2alloc(ecnt, cp, kind_counts);
2138 
2139  /* Import each structure, in turn */
2140  for (i=1; i < maxindex; i++) {
2141  /* If we made it to the last kind, stop. Nothing follows */
2142  if (ecnt[i].kind == NMG_KIND_DOUBLE_ARRAY) break;
2143  if (rt_nmg_idisk((void *)(ptrs[i]), (void *)cp,
2144  ecnt, i, ptrs, mat, (unsigned char *)(rp+1)) < 0)
2145  return -1; /* FAIL */
2146  cp += rt_nmg_disk_sizes[ecnt[i].kind];
2147  }
2148 
2149  if (rebound) {
2150  /* Recompute all bounding boxes in model */
2151  nmg_rebound(m, tol);
2152  } else {
2153  /*
2154  * Need to recompute bounding boxes for the faces here.
2155  * Other bounding boxes will exist and be intact if NMG
2156  * exporter wrote the _a structures.
2157  */
2158  for (i=1; i < maxindex; i++) {
2159  if (ecnt[i].kind != NMG_KIND_FACE) continue;
2160  nmg_face_bb((struct face *)ptrs[i], tol);
2161  }
2162  }
2163 
2164  RT_CK_DB_INTERNAL(ip);
2165  ip->idb_major_type = DB5_MAJORTYPE_BRLCAD;
2166  ip->idb_type = ID_NMG;
2167  ip->idb_meth = &OBJ[ID_NMG];
2168  ip->idb_ptr = (void *)m;
2169 
2170  bu_free((char *)ecnt, "ecnt[]");
2171  bu_free((char *)real_ptrs, "ptrs[]");
2172 
2173  return 0; /* OK */
2174 }
2175 
2176 
2177 /**
2178  * The name is added by the caller, in the usual place.
2179  *
2180  * When the "compact" flag is set, bounding boxes from (at present)
2181  * nmgregion_a
2182  * shell_a
2183  * loop_g
2184  * are not converted for storage in the database.
2185  * They should be re-generated at import time.
2186  *
2187  * If the "compact" flag is not set, then the NMG model is saved,
2188  * verbatim.
2189  *
2190  * The overall layout of the on-disk NMG is like this:
2191  *
2192  * +---------------------------+
2193  * | NMG header granule |
2194  * | solid name |
2195  * | # additional granules |
2196  * | format version |
2197  * | kind_count[] array |
2198  * +---------------------------+
2199  * | |
2200  * | |
2201  * ~ N_count granules ~
2202  * ~ : ~
2203  * | : |
2204  * | |
2205  * +---------------------------+
2206  *
2207  * In the additional granules, all structures of "kind" 0 (model) go
2208  * first, followed by all structures of kind 1 (nmgregion), etc. As
2209  * each structure is output, it is assigned a subscript number,
2210  * starting with #1 for the model structure. All pointers are
2211  * converted to the matching subscript numbers. An on-disk subscript
2212  * of zero indicates a corresponding NULL pointer in memory. All
2213  * integers are converted to network (Big-Endian) byte order. All
2214  * floating point values are stored in network (Big-Endian IEEE)
2215  * format.
2216  */
int
rt_nmg_export4_internal(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, int compact)
{
    struct model *m;
    union record *rp;
    struct nmg_struct_counts cntbuf;
    uint32_t **ptrs;               /* per-struct pointer table, indexed by m->maxindex subscript */
    struct nmg_exp_counts *ecnt;   /* per-struct export bookkeeping (kind, new subscript, offsets) */
    int i;
    int subscript;
    int kind_counts[NMG_N_KINDS];  /* how many structs of each kind get written */
    void *disk_arrays[NMG_N_KINDS];/* output cursor for each kind's contiguous region */
    int additional_grans;
    int tot_size;
    int kind;
    char *cp;
    int double_count;              /* total # of doubles in all variable-size NURB arrays */
    int fastf_byte_count;          /* total on-disk bytes for those arrays (headers + data) */

    RT_CK_DB_INTERNAL(ip);
    if (ip->idb_type != ID_NMG) return -1;
    m = (struct model *)ip->idb_ptr;
    NMG_CK_MODEL(m);

    /* As a by-product, this fills in the ptrs[] array! */
    memset((char *)&cntbuf, 0, sizeof(cntbuf));
    ptrs = nmg_m_struct_count(&cntbuf, m);

    /* Collect overall new subscripts, and structure-specific indices */
    ecnt = (struct nmg_exp_counts *)bu_calloc(m->maxindex+1,
                                              sizeof(struct nmg_exp_counts), "ecnt[]");
    for (i = 0; i < NMG_N_KINDS; i++)
        kind_counts[i] = 0;
    subscript = 1;       /* must be larger than DISK_INDEX_NULL */
    double_count = 0;
    fastf_byte_count = 0;
    /* First pass: classify every struct by kind and pre-size the
     * variable-length snurb/cnurb double arrays that will be appended
     * after the fixed-size records. */
    for (i=0; i < m->maxindex; i++) {
        if (ptrs[i] == NULL) {
            ecnt[i].kind = -1;      /* -1 marks an unused slot */
            continue;
        }
        kind = rt_nmg_magic_to_kind(*(ptrs[i]));
        ecnt[i].per_struct_index = kind_counts[kind]++;
        ecnt[i].kind = kind;
        /* Handle the variable sized kinds */
        switch (kind) {
            case NMG_KIND_FACE_G_SNURB: {
                struct face_g_snurb *fg;
                int ndouble;
                fg = (struct face_g_snurb *)ptrs[i];
                ecnt[i].first_fastf_relpos = kind_counts[NMG_KIND_DOUBLE_ARRAY];
                /* a snurb contributes 3 double arrays: u knots, v knots, ctl pts */
                kind_counts[NMG_KIND_DOUBLE_ARRAY] += 3;
                ndouble = fg->u.k_size +
                    fg->v.k_size +
                    fg->s_size[0] * fg->s_size[1] *
                    RT_NURB_EXTRACT_COORDS(fg->pt_type);
                double_count += ndouble;
                ecnt[i].byte_offset = fastf_byte_count;
                /* each array has a (4+4)-byte header (magic + count), doubles are 8 bytes */
                fastf_byte_count += 3*(4+4) + 8*ndouble;
            }
                break;
            case NMG_KIND_EDGE_G_CNURB: {
                struct edge_g_cnurb *eg;
                int ndouble;
                eg = (struct edge_g_cnurb *)ptrs[i];
                ecnt[i].first_fastf_relpos = kind_counts[NMG_KIND_DOUBLE_ARRAY];
                /* If order is zero, no knots or ctl_points */
                if (eg->order == 0) break;
                /* a cnurb contributes 2 double arrays: knots, ctl pts */
                kind_counts[NMG_KIND_DOUBLE_ARRAY] += 2;
                ndouble = eg->k.k_size + eg->c_size *
                    RT_NURB_EXTRACT_COORDS(eg->pt_type);
                double_count += ndouble;
                ecnt[i].byte_offset = fastf_byte_count;
                fastf_byte_count += 2*(4+4) + 8*ndouble;
            }
                break;
        }
    }
    /* When compacting, the bounding-box-only structs are not written;
     * they are regenerated at import time. */
    if (compact) {
        kind_counts[NMG_KIND_NMGREGION_A] = 0;
        kind_counts[NMG_KIND_SHELL_A] = 0;
        kind_counts[NMG_KIND_LOOP_G] = 0;
    }

    /* Assign new subscripts to ascending guys of same kind */
    for (kind = 0; kind < NMG_N_KINDS; kind++) {
        if (compact && (kind == NMG_KIND_NMGREGION_A ||
                        kind == NMG_KIND_SHELL_A ||
                        kind == NMG_KIND_LOOP_G)) {
            /*
             * Don't assign any new subscripts for them.
             * Instead, use DISK_INDEX_NULL, yielding null ptrs.
             */
            for (i=0; i < m->maxindex; i++) {
                if (ptrs[i] == NULL) continue;
                if (ecnt[i].kind != kind) continue;
                ecnt[i].new_subscript = DISK_INDEX_NULL;
            }
            continue;
        }
        for (i=0; i < m->maxindex; i++) {
            if (ptrs[i] == NULL) continue;
            if (ecnt[i].kind != kind) continue;
            ecnt[i].new_subscript = subscript++;
        }
    }
    /* Tack on the variable sized fastf_t arrays at the end */
    rt_nmg_cur_fastf_subscript = subscript;
    subscript += kind_counts[NMG_KIND_DOUBLE_ARRAY];

    /* Sanity checking: every struct's index must be self-consistent */
    for (i=0; i < m->maxindex; i++) {
        if (ptrs[i] == NULL) continue;
        if (nmg_index_of_struct(ptrs[i]) != i) {
            bu_log("***ERROR, ptrs[%d]->index = %d\n",
                   i, nmg_index_of_struct((uint32_t *)ptrs[i]));
        }
        if (rt_nmg_magic_to_kind(*ptrs[i]) != ecnt[i].kind) {
            bu_log("@@@ERROR, ptrs[%d] kind(%d) != %d\n",
                   i, rt_nmg_magic_to_kind(*ptrs[i]),
                   ecnt[i].kind);
        }
    }

    /* Total the fixed-size record bytes */
    tot_size = 0;
    for (i = 0; i < NMG_N_KINDS; i++) {
        if (kind_counts[i] <= 0) {
            disk_arrays[i] = ((void *)0);
            continue;
        }
        tot_size += kind_counts[i] * rt_nmg_disk_sizes[i];
    }
    /* Account for variable sized double arrays, at the end */
    tot_size += kind_counts[NMG_KIND_DOUBLE_ARRAY] * (4+4) +
        double_count * 8;

    ecnt[0].byte_offset = subscript; /* implicit arg to reindex() */

    /* Round up to whole database granules, header granule extra */
    additional_grans = (tot_size + sizeof(union record)-1) / sizeof(union record);
    BU_CK_EXTERNAL(ep);
    ep->ext_nbytes = (1 + additional_grans) * sizeof(union record);
    ep->ext_buf = (uint8_t *)bu_calloc(1, ep->ext_nbytes, "nmg external");
    rp = (union record *)ep->ext_buf;
    rp->nmg.N_id = DBID_NMG;
    rp->nmg.N_version = DISK_MODEL_VERSION;
    *(uint32_t *)rp->nmg.N_count = htonl((uint32_t)additional_grans);

    /* Record counts of each kind of structure */
    for (kind = 0; kind < NMG_N_KINDS; kind++) {
        *(uint32_t *)(rp->nmg.N_structs+4*kind) = htonl(kind_counts[kind]);
    }

    /* Lay out one contiguous output region per kind, in kind order */
    cp = (char *)(rp+1);        /* advance one granule */
    for (i=0; i < NMG_N_KINDS; i++) {
        disk_arrays[i] = (void *)cp;
        cp += kind_counts[i] * rt_nmg_disk_sizes[i];
    }
    /* disk_arrays[NMG_KIND_DOUBLE_ARRAY] is set properly because it is last */
    rt_nmg_fastf_p = (unsigned char *)disk_arrays[NMG_KIND_DOUBLE_ARRAY];

    /* Convert all the structures to their disk versions.
     * NOTE(review): iterated in descending index order — presumably so
     * dependent structs are emitted before their owners; preserve order. */
    for (i = m->maxindex-1; i >= 0; i--) {
        if (ptrs[i] == NULL) continue;
        kind = ecnt[i].kind;
        if (kind_counts[kind] <= 0) continue;
        rt_nmg_edisk((void *)(disk_arrays[kind]),
                     (void *)(ptrs[i]), ecnt, i, local2mm);
    }

    bu_free((void *)ptrs, "ptrs[]");
    bu_free((void *)ecnt, "ecnt[]");

    return 0;
}
2391 
2392 
2393 /**
2394  * Import an NMG from the database format to the internal format.
2395  * Apply modeling transformations as well.
2396  */
2397 int
2398 rt_nmg_import4(struct rt_db_internal *ip, const struct bu_external *ep, const fastf_t *mat, const struct db_i *dbip)
2399 {
2400  struct model *m;
2401  union record *rp;
2402  struct bn_tol tol;
2403 
2404  BU_CK_EXTERNAL(ep);
2405  if (dbip) RT_CK_DBI(dbip);
2406 
2407  rp = (union record *)ep->ext_buf;
2408  /* Check record type */
2409  if (rp->u_id != DBID_NMG) {
2410  bu_log("rt_nmg_import4: defective record\n");
2411  return -1;
2412  }
2413 
2414  /* XXX The bounding box routines need a tolerance.
2415  * XXX This is sheer guesswork here.
2416  * As long as this NMG is going to be turned into vlist, or
2417  * handed off to the boolean evaluator, any non-zero numbers are fine.
2418  */
2419  tol.magic = BN_TOL_MAGIC;
2420  tol.dist = 0.0005;
2421  tol.dist_sq = tol.dist * tol.dist;
2422  tol.perp = 1e-6;
2423  tol.para = 1 - tol.perp;
2424 
2425  if (rt_nmg_import4_internal(ip, ep, mat, 1, &tol) < 0)
2426  return -1;
2427 
2428  m = (struct model *)ip->idb_ptr;
2429  NMG_CK_MODEL(m);
2430 
2431  if (RT_G_DEBUG || RTG.NMG_debug)
2432  nmg_vmodel(m);
2433 
2434  return 0; /* OK */
2435 }
2436 
2437 
2438 int
2440  struct bu_external *ep,
2441  const mat_t mat,
2442  const struct db_i *dbip)
2443 {
2444  struct model *m;
2445  struct bn_tol tol;
2446  int maxindex;
2447  int kind;
2448  int kind_counts[NMG_N_KINDS];
2449  unsigned char *dp; /* data pointer */
2450  void *startdata; /* data pointer */
2451  uint32_t **real_ptrs;
2452  uint32_t **ptrs;
2453  struct nmg_exp_counts *ecnt;
2454  int i;
2455  static uint32_t bad_magic = 0x999;
2456 
2457  if (dbip) RT_CK_DBI(dbip);
2458 
2459  BU_CK_EXTERNAL(ep);
2460  dp = (unsigned char *)ep->ext_buf;
2461 
2462  tol.magic = BN_TOL_MAGIC;
2463  tol.dist = 0.0005;
2464  tol.dist_sq = tol.dist * tol.dist;
2465  tol.perp = 1e-6;
2466  tol.para = 1 - tol.perp;
2467 
2468  {
2469  int version;
2470  version = ntohl(*(uint32_t*)dp);
2471  dp+= SIZEOF_NETWORK_LONG;
2472  if (version != DISK_MODEL_VERSION) {
2473  bu_log("rt_nmg_import4: expected NMG '.g' format version %d, got %d, aborting nmg solid import\n",
2474  DISK_MODEL_VERSION, version);
2475  return -1;
2476  }
2477  }
2478  maxindex = 1;
2479  for (kind =0; kind < NMG_N_KINDS; kind++) {
2480  kind_counts[kind] = ntohl(*(uint32_t*)dp);
2481  dp+= SIZEOF_NETWORK_LONG;
2482  maxindex += kind_counts[kind];
2483  }
2484 
2485  startdata = dp;
2486 
2487  /* Collect overall new subscripts, and structure-specific indices */
2488  ecnt = (struct nmg_exp_counts *) bu_calloc(maxindex+3,
2489  sizeof(struct nmg_exp_counts), "ecnt[]");
2490  real_ptrs = (uint32_t **)bu_calloc(maxindex+3, sizeof(uint32_t *), "ptrs[]");
2491  /* some safety checking. Indexing by, -1, 0, n+1, N+2 give interesting results */
2492  ptrs = real_ptrs+1;
2493  ptrs[-1] = &bad_magic;
2494  ptrs[0] = NULL;
2495  ptrs[maxindex] = &bad_magic;
2496  ptrs[maxindex+1] = &bad_magic;
2497 
2498  m = rt_nmg_ialloc(ptrs, ecnt, kind_counts);
2499 
2500  rt_nmg_i2alloc(ecnt, dp, kind_counts);
2501 
2502  /* Now import each structure, in turn */
2503  for (i=1; i < maxindex; i++) {
2504  /* We know that the DOUBLE_ARRAY is the last thing to process */
2505  if (ecnt[i].kind == NMG_KIND_DOUBLE_ARRAY) break;
2506  if (rt_nmg_idisk((void *)(ptrs[i]), (void *)dp, ecnt,
2507  i, ptrs, mat, (unsigned char *)startdata) < 0) {
2508  return -1;
2509  }
2510  dp += rt_nmg_disk_sizes[ecnt[i].kind];
2511  }
2512 
2513  /* Face min_pt and max_pt are not stored, so this is mandatory. */
2514  nmg_rebound(m, &tol);
2515 
2516  RT_CK_DB_INTERNAL(ip);
2517  ip->idb_major_type = DB5_MAJORTYPE_BRLCAD;
2518  ip->idb_type = ID_NMG;
2519  ip->idb_meth = &OBJ[ ID_NMG ];
2520  ip->idb_ptr = (void *)m;
2521  NMG_CK_MODEL(m);
2522  bu_free((char *)ecnt, "ecnt[]");
2523  bu_free((char *)real_ptrs, "ptrs[]");
2524 
2525  if (RT_G_DEBUG || RTG.NMG_debug) {
2526  nmg_vmodel(m);
2527  }
2528  return 0; /* OK */
2529 }
2530 
2531 
2532 /**
2533  * The name is added by the caller, in the usual place.
2534  */
2535 int
2536 rt_nmg_export4(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, const struct db_i *dbip)
2537 {
2538  struct model *m;
2539 
2540  if (dbip) RT_CK_DBI(dbip);
2541 
2542  RT_CK_DB_INTERNAL(ip);
2543  if (ip->idb_type != ID_NMG) return -1;
2544  m = (struct model *)ip->idb_ptr;
2545  NMG_CK_MODEL(m);
2546 
2547  /* To ensure that a decent database is written, verify source first */
2548  nmg_vmodel(m);
2549 
2550  /* The "compact" flag is used to save space in the database */
2551  return rt_nmg_export4_internal(ep, ip, local2mm, 1);
2552 }
2553 
2554 
2555 int
2557  struct bu_external *ep,
2558  const struct rt_db_internal *ip,
2559  double local2mm,
2560  const struct db_i *dbip)
2561 {
2562  struct model *m;
2563  unsigned char *dp;
2564  uint32_t **ptrs;
2565  struct nmg_struct_counts cntbuf;
2566  struct nmg_exp_counts *ecnt;
2567  int kind_counts[NMG_N_KINDS];
2568  void *disk_arrays[NMG_N_KINDS];
2569  int tot_size;
2570  int kind;
2571  int double_count;
2572  int i;
2573  int subscript, fastf_byte_count;
2574 
2575  if (dbip) RT_CK_DBI(dbip);
2576 
2577  RT_CK_DB_INTERNAL(ip);
2578  if (ip->idb_type != ID_NMG) return -1;
2579  m = (struct model *)ip->idb_ptr;
2580  NMG_CK_MODEL(m);
2581 
2582  memset((char *)&cntbuf, 0, sizeof(cntbuf));
2583  ptrs = nmg_m_struct_count(&cntbuf, m);
2584 
2585  ecnt = (struct nmg_exp_counts *)bu_calloc(m->maxindex+1,
2586  sizeof(struct nmg_exp_counts), "ecnt[]");
2587  for (i=0; i<NMG_N_KINDS; i++) {
2588  kind_counts[i] = 0;
2589  }
2590  subscript = 1;
2591  double_count = 0;
2592  fastf_byte_count = 0;
2593  for (i=0; i< m->maxindex; i++) {
2594  if (ptrs[i] == NULL) {
2595  ecnt[i].kind = -1;
2596  continue;
2597  }
2598 
2599  kind = rt_nmg_magic_to_kind(*(ptrs[i]));
2600  ecnt[i].per_struct_index = kind_counts[kind]++;
2601  ecnt[i].kind = kind;
2602 
2603  /*
2604  * SNURB and CNURBS are variable sized and as such need
2605  * special handling
2606  */
2607  if (kind == NMG_KIND_FACE_G_SNURB) {
2608  struct face_g_snurb *fg;
2609  int ndouble;
2610  fg = (struct face_g_snurb *)ptrs[i];
2611  ecnt[i].first_fastf_relpos = kind_counts[NMG_KIND_DOUBLE_ARRAY];
2612  kind_counts[NMG_KIND_DOUBLE_ARRAY] += 3;
2613  ndouble = fg->u.k_size +
2614  fg->v.k_size +
2615  fg->s_size[0] * fg->s_size[1] *
2616  RT_NURB_EXTRACT_COORDS(fg->pt_type);
2617  double_count += ndouble;
2618  ecnt[i].byte_offset = fastf_byte_count;
2619  fastf_byte_count += 3*(4*4) + 89*ndouble;
2620  } else if (kind == NMG_KIND_EDGE_G_CNURB) {
2621  struct edge_g_cnurb *eg;
2622  int ndouble;
2623  eg = (struct edge_g_cnurb *)ptrs[i];
2624  ecnt[i].first_fastf_relpos =
2625  kind_counts[NMG_KIND_DOUBLE_ARRAY];
2626  if (eg->order != 0) {
2627  kind_counts[NMG_KIND_DOUBLE_ARRAY] += 2;
2628  ndouble = eg->k.k_size +eg->c_size *
2629  RT_NURB_EXTRACT_COORDS(eg->pt_type);
2630  double_count += ndouble;
2631  ecnt[i].byte_offset = fastf_byte_count;
2632  fastf_byte_count += 2*(4+4) + 8*ndouble;
2633  }
2634  }
2635  }
2636  /* Compacting wanted */
2637  kind_counts[NMG_KIND_NMGREGION_A] = 0;
2638  kind_counts[NMG_KIND_SHELL_A] = 0;
2639  kind_counts[NMG_KIND_LOOP_G] = 0;
2640 
2641  /* Assign new subscripts to ascending struts of the same kind */
2642  for (kind=0; kind < NMG_N_KINDS; kind++) {
2643  /* Compacting */
2644  if (kind == NMG_KIND_NMGREGION_A ||
2645  kind == NMG_KIND_SHELL_A ||
2646  kind == NMG_KIND_LOOP_G) {
2647  for (i=0; i<m->maxindex; i++) {
2648  if (ptrs[i] == NULL) continue;
2649  if (ecnt[i].kind != kind) continue;
2650  ecnt[i].new_subscript = DISK_INDEX_NULL;
2651  }
2652  continue;
2653  }
2654 
2655  for (i=0; i< m->maxindex;i++) {
2656  if (ptrs[i] == NULL) continue;
2657  if (ecnt[i].kind != kind) continue;
2658  ecnt[i].new_subscript = subscript++;
2659  }
2660  }
2661  /* Tack on the variable sized fastf_t arrays at the end */
2662  rt_nmg_cur_fastf_subscript = subscript;
2663  subscript += kind_counts[NMG_KIND_DOUBLE_ARRAY];
2664 
2665  /* Now do some checking to make sure the world is not totally mad */
2666  for (i=0; i<m->maxindex; i++) {
2667  if (ptrs[i] == NULL) continue;
2668 
2669  if (nmg_index_of_struct(ptrs[i]) != i) {
2670  bu_log("***ERROR, ptrs[%d]->index = %d\n",
2671  i, nmg_index_of_struct(ptrs[i]));
2672  }
2673  if (rt_nmg_magic_to_kind(*ptrs[i]) != ecnt[i].kind) {
2674  bu_log("***ERROR, ptrs[%d] kind(%d) != %d\n",
2675  i, rt_nmg_magic_to_kind(*ptrs[i]),
2676  ecnt[i].kind);
2677  }
2678 
2679  }
2680 
2681  tot_size = 0;
2682  for (i=0; i< NMG_N_KINDS; i++) {
2683  if (kind_counts[i] <= 0) {
2684  disk_arrays[i] = ((void *)0);
2685  continue;
2686  }
2687  tot_size += kind_counts[i] * rt_nmg_disk_sizes[i];
2688  }
2689 
2690  /* Account for variable sized double arrays, at the end */
2691  tot_size += kind_counts[NMG_KIND_DOUBLE_ARRAY] * (4+4) +
2692  double_count*8;
2693 
2694  ecnt[0].byte_offset = subscript; /* implicit arg to reindex() */
2695  tot_size += SIZEOF_NETWORK_LONG*(NMG_N_KINDS + 1); /* one for magic */
2696  BU_CK_EXTERNAL(ep);
2697  ep->ext_nbytes = tot_size;
2698  ep->ext_buf = (uint8_t *)bu_calloc(1, ep->ext_nbytes, "nmg external5");
2699  dp = ep->ext_buf;
2700  *(uint32_t *)dp = htonl(DISK_MODEL_VERSION);
2701  dp+=SIZEOF_NETWORK_LONG;
2702 
2703  for (kind=0; kind <NMG_N_KINDS; kind++) {
2704  *(uint32_t *)dp = htonl(kind_counts[kind]);
2705  dp+=SIZEOF_NETWORK_LONG;
2706  }
2707  for (i=0; i< NMG_N_KINDS; i++) {
2708  disk_arrays[i] = dp;
2709  dp += kind_counts[i] * rt_nmg_disk_sizes[i];
2710  }
2711  rt_nmg_fastf_p = (unsigned char*)disk_arrays[NMG_KIND_DOUBLE_ARRAY];
2712 
2713  for (i = m->maxindex-1;i >=0; i--) {
2714  if (ptrs[i] == NULL) continue;
2715  kind = ecnt[i].kind;
2716  if (kind_counts[kind] <= 0) continue;
2717  rt_nmg_edisk((void *)(disk_arrays[kind]),
2718  (void *)(ptrs[i]), ecnt, i, local2mm);
2719  }
2720 
2721  bu_free((char *)ptrs, "ptrs[]");
2722  bu_free((char *)ecnt, "ecnt[]");
2723  return 0; /* OK */
2724 }
2725 
2726 
2727 /**
2728  * Make human-readable formatted presentation of this solid. First
2729  * line describes type of solid. Additional lines are indented one
2730  * tab, and give parameter values.
2731  */
2732 int
2733 rt_nmg_describe(struct bu_vls *str, const struct rt_db_internal *ip, int verbose, double UNUSED(mm2local))
2734 {
2735  struct model *m =
2736  (struct model *)ip->idb_ptr;
2737 
2738  NMG_CK_MODEL(m);
2739  bu_vls_printf(str, "n-Manifold Geometry solid (NMG) maxindex=%ld\n",
2740  (long)m->maxindex);
2741 
2742  if (!verbose) return 0;
2743 
2744  return 0;
2745 }
2746 
2747 
2748 /**
2749  * Free the storage associated with the rt_db_internal version of this
2750  * solid.
2751  */
2752 void
2754 {
2755  struct model *m;
2756 
2757  RT_CK_DB_INTERNAL(ip);
2758  if (ip->idb_ptr) {
2759  m = (struct model *)ip->idb_ptr;
2760  NMG_CK_MODEL(m);
2761  nmg_km(m);
2762  }
2763 
2764  ip->idb_ptr = ((void *)0); /* sanity */
2765 }
2766 
2767 
/**
 * Report a Tcl-style description of the NMG.  With attr==NULL, emits
 * the full "nmg V {...} F {...} ..." form; with attr=="V", only the
 * vertex list.  Returns BRLCAD_OK / BRLCAD_ERROR.
 */
int
rt_nmg_get(struct bu_vls *logstr, const struct rt_db_internal *intern, const char *attr)
{
    struct model *m = (struct model *)intern->idb_ptr;
    struct bu_ptbl verts;       /* table of all vertices; faces refer to them by index */
    struct nmgregion *r;
    struct shell *s;
    struct faceuse *fu;
    struct loopuse *lu;
    struct edgeuse *eu;
    struct vertexuse *vu;
    struct vertex *v;
    struct vertex_g *vg;
    size_t i;

    NMG_CK_MODEL(m);

    if (attr == (char *)NULL) {
        bu_vls_strcpy(logstr, "nmg");
        bu_ptbl_init(&verts, 256, "nmg verts");
        nmg_vertex_tabulate(&verts, &m->magic);

        /* first list all the vertices */
        bu_vls_strcat(logstr, " V {");
        for (i=0; i<BU_PTBL_LEN(&verts); i++) {
            v = (struct vertex *) BU_PTBL_GET(&verts, i);
            NMG_CK_VERTEX(v);
            vg = v->vg_p;
            if (!vg) {
                /* vertex without geometry: cannot describe the model */
                bu_vls_printf(logstr, "Vertex has no geometry\n");
                bu_ptbl_free(&verts);
                return BRLCAD_ERROR;
            }
            bu_vls_printf(logstr, " { %.25g %.25g %.25g }", V3ARGS(vg->coord));
        }
        bu_vls_strcat(logstr, " }");

        /* use the backwards macros here so that "asc2g" will build the same structures */
        /* now all the nmgregions */
        for (BU_LIST_FOR_BACKWARDS(r, nmgregion, &m->r_hd)) {
            /* bu_vls_strcat(logstr, " R {"); */

            /* and all the shells */
            for (BU_LIST_FOR_BACKWARDS(s, shell, &r->s_hd)) {
                /* bu_vls_strcat(logstr, " S {"); */

                /* all the faces */
                if (BU_LIST_NON_EMPTY(&s->fu_hd)) {
                    for (BU_LIST_FOR_BACKWARDS(fu, faceuse, &s->fu_hd)) {
                        /* each face has two uses; emit only the OT_SAME side */
                        if (fu->orientation != OT_SAME)
                            continue;

                        bu_vls_strcat(logstr, " F {");

                        /* all the loops in this face */
                        for (BU_LIST_FOR_BACKWARDS(lu, loopuse, &fu->lu_hd)) {

                            if (BU_LIST_FIRST_MAGIC(&lu->down_hd) == NMG_VERTEXUSE_MAGIC) {
                                /* degenerate loop: a single vertex */
                                vu = BU_LIST_FIRST(vertexuse, &lu->down_hd);
                                bu_vls_printf(logstr, " %d",
                                              bu_ptbl_locate(&verts, (long *)vu->v_p));
                            } else {
                                /* normal loop: list vertex indices in edge order */
                                bu_vls_strcat(logstr, " {");
                                for (BU_LIST_FOR (eu, edgeuse, &lu->down_hd)) {
                                    vu = eu->vu_p;
                                    bu_vls_printf(logstr, " %d",
                                                  bu_ptbl_locate(&verts, (long *)vu->v_p));
                                }
                                /* end of this loop */
                                bu_vls_strcat(logstr, " }");
                            }
                        }

                        /* end of this face */
                        bu_vls_strcat(logstr, " }");
                    }
                }
            }
            /* end of this nmgregion */
            /* bu_vls_strcat(logstr, " }"); */
        }
        bu_ptbl_free(&verts);
    } else if (BU_STR_EQUAL(attr, "V")) {
        /* list of vertices */

        bu_ptbl_init(&verts, 256, "nmg verts");
        nmg_vertex_tabulate(&verts, &m->magic);
        for (i=0; i<BU_PTBL_LEN(&verts); i++) {
            v = (struct vertex *) BU_PTBL_GET(&verts, i);
            NMG_CK_VERTEX(v);
            vg = v->vg_p;
            if (!vg) {
                bu_vls_printf(logstr, "Vertex has no geometry\n");
                bu_ptbl_free(&verts);
                return BRLCAD_ERROR;
            }
            bu_vls_printf(logstr, " { %.25g %.25g %.25g }", V3ARGS(vg->coord));
        }
        bu_ptbl_free(&verts);
    } else {
        bu_vls_printf(logstr, "Unrecognized parameter\n");
        return BRLCAD_ERROR;
    }

    return BRLCAD_OK;
}
2874 
2875 
2876 int
2877 rt_nmg_adjust(struct bu_vls *logstr, struct rt_db_internal *intern, int argc, const char **argv)
2878 {
2879  struct model *m;
2880  struct nmgregion *r=NULL;
2881  struct shell *s=NULL;
2882  struct faceuse *fu=NULL;
2883  Tcl_Obj *obj, **obj_array;
2884  int len;
2885  int num_verts = 0;
2886  int num_loops = 0;
2887  int *loop;
2888  int loop_len;
2889  int i, j;
2890  struct tmp_v *verts;
2891  fastf_t *tmp;
2892  struct bn_tol tol;
2893 
2894  RT_CK_DB_INTERNAL(intern);
2895  m = (struct model *)intern->idb_ptr;
2896  NMG_CK_MODEL(m);
2897 
2898  verts = (struct tmp_v *)NULL;
2899  for (i=0; i<argc; i += 2) {
2900  if (BU_STR_EQUAL(argv[i], "V")) {
2901  obj = Tcl_NewStringObj(argv[i+1], -1);
2902  if (Tcl_ListObjGetElements(brlcad_interp, obj, &num_verts,
2903  &obj_array) != TCL_OK) {
2904  bu_vls_printf(logstr,
2905  "ERROR: failed to parse vertex list\n");
2906  Tcl_DecrRefCount(obj);
2907  return BRLCAD_ERROR;
2908  }
2909  verts = (struct tmp_v *)bu_calloc(num_verts,
2910  sizeof(struct tmp_v),
2911  "verts");
2912  for (j=0; j<num_verts; j++) {
2913  len = 3;
2914  tmp = &verts[j].pt[0];
2915  if (tcl_obj_to_fastf_array(brlcad_interp, obj_array[j],
2916  &tmp, &len) != 3) {
2917  bu_vls_printf(logstr,
2918  "ERROR: incorrect number of coordinates for vertex\n");
2919  return BRLCAD_ERROR;
2920  }
2921  }
2922 
2923  }
2924  }
2925 
2926  while (argc >= 2) {
2927  struct vertex ***face_verts;
2928 
2929  if (BU_STR_EQUAL(argv[0], "V")) {
2930  /* vertex list handled above */
2931  goto cont;
2932  } else if (BU_STR_EQUAL(argv[0], "F")) {
2933  if (!verts) {
2934  bu_vls_printf(logstr,
2935  "ERROR: cannot set faces without vertices\n");
2936  return BRLCAD_ERROR;
2937  }
2938  if (BU_LIST_IS_EMPTY(&m->r_hd)) {
2939  r = nmg_mrsv(m);
2940  s = BU_LIST_FIRST(shell, &r->s_hd);
2941  } else {
2942  r = BU_LIST_FIRST(nmgregion, &m->r_hd);
2943  s = BU_LIST_FIRST(shell, &r->s_hd);
2944  }
2945  obj = Tcl_NewStringObj(argv[1], -1);
2946  if (Tcl_ListObjGetElements(brlcad_interp, obj, &num_loops,
2947  &obj_array) != TCL_OK) {
2948  bu_vls_printf(logstr,
2949  "ERROR: failed to parse face list\n");
2950  Tcl_DecrRefCount(obj);
2951  return BRLCAD_ERROR;
2952  }
2953  for (i=0, fu=NULL; i<num_loops; i++) {
2954  struct vertex **loop_verts;
2955  /* struct faceuse fu is initialized in earlier scope */
2956 
2957  loop_len = 0;
2958  (void)tcl_obj_to_int_array(brlcad_interp, obj_array[i],
2959  &loop, &loop_len);
2960  if (!loop_len) {
2961  bu_vls_printf(logstr,
2962  "ERROR: unable to parse face list\n");
2963  return BRLCAD_ERROR;
2964  }
2965  if (i) {
2966  loop_verts = (struct vertex **)bu_calloc(
2967  loop_len,
2968  sizeof(struct vertex *),
2969  "loop_verts");
2970  for (i=0; i<loop_len; i++) {
2971  loop_verts[i] = verts[loop[i]].v;
2972  }
2973  fu = nmg_add_loop_to_face(s, fu,
2974  loop_verts, loop_len,
2975  OT_OPPOSITE);
2976  for (i=0; i<loop_len; i++) {
2977  verts[loop[i]].v = loop_verts[i];
2978  }
2979  } else {
2980  face_verts = (struct vertex ***)bu_calloc(
2981  loop_len,
2982  sizeof(struct vertex **),
2983  "face_verts");
2984  for (j=0; j<loop_len; j++) {
2985  face_verts[j] = &verts[loop[j]].v;
2986  }
2987  fu = nmg_cmface(s, face_verts, loop_len);
2988  bu_free((char *)face_verts, "face_verts");
2989  }
2990  }
2991  } else {
2992  bu_vls_printf(logstr,
2993  "ERROR: Unrecognized parameter, must be V or F\n");
2994  return BRLCAD_ERROR;
2995  }
2996  cont:
2997  argc -= 2;
2998  argv += 2;
2999  }
3000 
3001  /* assign geometry for entire vertex list (if we have one) */
3002  for (i=0; i<num_verts; i++) {
3003  if (verts[i].v)
3004  nmg_vertex_gv(verts[i].v, verts[i].pt);
3005  }
3006 
3007  /* assign face geometry */
3008  if (s) {
3009  for (BU_LIST_FOR (fu, faceuse, &s->fu_hd)) {
3010  if (fu->orientation != OT_SAME)
3011  continue;
3012  nmg_calc_face_g(fu);
3013  }
3014  }
3015 
3016  tol.magic = BN_TOL_MAGIC;
3017  tol.dist = 0.0005;
3018  tol.dist_sq = tol.dist * tol.dist;
3019  tol.perp = 1e-6;
3020  tol.para = 1 - tol.perp;
3021 
3022  nmg_rebound(m, &tol);
3023 
3024  return BRLCAD_OK;
3025 }
3026 
3027 
3028 void
3029 rt_nmg_make(const struct rt_functab *ftp, struct rt_db_internal *intern)
3030 {
3031  struct model *m;
3032 
3033  m = nmg_mm();
3034  intern->idb_ptr = (void *)m;
3035  intern->idb_major_type = DB5_MAJORTYPE_BRLCAD;
3036  intern->idb_type = ID_NMG;
3037  intern->idb_meth = ftp;
3038 }
3039 
3040 
/**
 * Parametric-constraint hook; NMG defines no parameters, so this is a
 * validity-check-only no-op.  Always returns 0.
 */
int
rt_nmg_params(struct pc_pc_set *UNUSED(ps), const struct rt_db_internal *ip)
{
    if (ip)
        RT_CK_DB_INTERNAL(ip);
    return 0;
}
3048 
3049 
3050 /* contains information used to analyze a polygonal face */
3051 struct poly_face
3052 {
3053  char label[5];
3054  size_t npts;
3055  point_t *pts;
3056  plane_t plane_eqn;
3057  fastf_t area;
3059  point_t cent_pyramid;
3060  point_t cent;
3061 };
3062 
3063 
3064 static void
3065 rt_nmg_faces_area(struct poly_face* faces, struct shell* s)
3066 {
3067  struct bu_ptbl nmg_faces;
3068  unsigned int num_faces, i;
3069  size_t *npts;
3070  point_t **tmp_pts;
3071  plane_t *eqs;
3072  nmg_face_tabulate(&nmg_faces, &s->l.magic);
3073  num_faces = BU_PTBL_LEN(&nmg_faces);
3074  tmp_pts = (point_t **)bu_calloc(num_faces, sizeof(point_t *), "rt_nmg_faces_area: tmp_pts");
3075  npts = (size_t *)bu_calloc(num_faces, sizeof(size_t), "rt_nmg_faces_area: npts");
3076  eqs = (plane_t *)bu_calloc(num_faces, sizeof(plane_t), "rt_nmg_faces_area: eqs");
3077 
3078  for (i = 0; i < num_faces; i++) {
3079  struct face *f;
3080  f = (struct face *)BU_PTBL_GET(&nmg_faces, i);
3081  HMOVE(faces[i].plane_eqn, f->g.plane_p->N);
3082  VUNITIZE(faces[i].plane_eqn);
3083  tmp_pts[i] = faces[i].pts;
3084  HMOVE(eqs[i], faces[i].plane_eqn);
3085  }
3086  bn_polygon_mk_pts_planes(npts, tmp_pts, num_faces, (const plane_t *)eqs);
3087  for (i = 0; i < num_faces; i++) {
3088  faces[i].npts = npts[i];
3089  bn_polygon_sort_ccw(faces[i].npts, faces[i].pts, faces[i].plane_eqn);
3090  bn_polygon_area(&faces[i].area, faces[i].npts, (const point_t *)faces[i].pts);
3091  }
3092  bu_free((char *)tmp_pts, "rt_nmg_faces_area: tmp_pts");
3093  bu_free((char *)npts, "rt_nmg_faces_area: npts");
3094  bu_free((char *)eqs, "rt_nmg_faces_area: eqs");
3095 }
3096 
3097 
3098 void
3099 rt_nmg_surf_area(fastf_t *area, const struct rt_db_internal *ip)
3100 {
3101  struct model *m;
3102  struct nmgregion* r;
3103 
3104  /*Iterate through all regions and shells */
3105  m = (struct model *)ip->idb_ptr;
3106  for (BU_LIST_FOR(r, nmgregion, &m->r_hd)) {
3107  struct shell* s;
3108 
3109  for (BU_LIST_FOR(s, shell, &r->s_hd)) {
3110  struct bu_ptbl nmg_faces;
3111  unsigned int num_faces, i;
3112  struct poly_face *faces;
3113 
3114  /*get faces of this shell*/
3115  nmg_face_tabulate(&nmg_faces, &s->l.magic);
3116  num_faces = BU_PTBL_LEN(&nmg_faces);
3117  faces = (struct poly_face *)bu_calloc(num_faces, sizeof(struct poly_face), "rt_nmg_surf_area: faces");
3118 
3119  for (i = 0; i < num_faces; i++) {
3120  /* allocate array of pt structs, max number of verts per faces = (# of faces) - 1 */
3121  faces[i].pts = (point_t *)bu_calloc(num_faces - 1, sizeof(point_t), "rt_nmg_surf_area: pts");
3122  }
3123  rt_nmg_faces_area(faces, s);
3124  for (i = 0; i < num_faces; i++) {
3125  *area += faces[i].area;
3126  }
3127  for (i = 0; i < num_faces; i++) {
3128  bu_free((char *)faces[i].pts, "rt_nmg_surf_area: pts");
3129  }
3130  bu_free((char *)faces, "rt_nmg_surf_area: faces");
3131  }
3132  }
3133 }
3134 
3135 
3136 void
3137 rt_nmg_centroid(point_t *cent, const struct rt_db_internal *ip)
3138 {
3139  struct model *m;
3140  struct nmgregion* r;
3141  struct shell* s;
3142  struct poly_face *faces;
3143  struct bu_ptbl nmg_faces;
3144  fastf_t volume = 0.0;
3145  point_t arbit_point = VINIT_ZERO;
3146  size_t num_faces, i;
3147 
3148  *cent[0] = 0.0;
3149  *cent[1] = 0.0;
3150  *cent[2] = 0.0;
3151  m = (struct model *)ip->idb_ptr;
3152  r = BU_LIST_FIRST(nmgregion, &m->r_hd);
3153  s = BU_LIST_FIRST(shell, &r->s_hd);
3154 
3155  /*get faces*/
3156  nmg_face_tabulate(&nmg_faces, &s->l.magic);
3157  num_faces = BU_PTBL_LEN(&nmg_faces);
3158  faces = (struct poly_face *)bu_calloc(num_faces, sizeof(struct poly_face), "rt_nmg_centroid: faces");
3159 
3160  for (i = 0; i < num_faces; i++) {
3161  /* allocate array of pt structs, max number of verts per faces = (# of faces) - 1 */
3162  faces[i].pts = (point_t *)bu_calloc(num_faces - 1, sizeof(point_t), "rt_nmg_centroid: pts");
3163  }
3164  rt_nmg_faces_area(faces, s);
3165  for (i = 0; i < num_faces; i++) {
3166  bn_polygon_centroid(&faces[i].cent, faces[i].npts, (const point_t *) faces[i].pts);
3167  VADD2(arbit_point, arbit_point, faces[i].cent);
3168  }
3169  VSCALE(arbit_point, arbit_point, (1/num_faces));
3170 
3171  for (i = 0; i < num_faces; i++) {
3172  vect_t tmp = VINIT_ZERO;
3173 
3174  /* calculate volume */
3175  volume = 0.0;
3176  VSCALE(tmp, faces[i].plane_eqn, faces[i].area);
3177  faces[i].vol_pyramid = (VDOT(faces[i].pts[0], tmp)/3);
3178  volume += faces[i].vol_pyramid;
3179  /*Vector from arbit_point to centroid of face, results in h of pyramid */
3180  VSUB2(faces[i].cent_pyramid, faces[i].cent, arbit_point);
3181  /*centroid of pyramid is 1/4 up from the bottom */
3182  VSCALE(faces[i].cent_pyramid, faces[i].cent_pyramid, 0.75f);
3183  /* now cent_pyramid is back in the polyhedron */
3184  VADD2(faces[i].cent_pyramid, faces[i].cent_pyramid, arbit_point);
3185  /* weight centroid of pyramid by pyramid's volume */
3186  VSCALE(faces[i].cent_pyramid, faces[i].cent_pyramid, faces[i].vol_pyramid);
3187  /* add cent_pyramid to the centroid of the polyhedron */
3188  VADD2(*cent, *cent, faces[i].cent_pyramid);
3189  }
3190  /* reverse the weighting */
3191  VSCALE(*cent, *cent, (1/volume));
3192  for (i = 0; i < num_faces; i++) {
3193  bu_free((char *)faces[i].pts, "rt_nmg_centroid: pts");
3194  }
3195  bu_free((char *)faces, "rt_nmg_centroid: faces");
3196 }
3197 
3198 
3199 void
3200 rt_nmg_volume(fastf_t *volume, const struct rt_db_internal *ip)
3201 {
3202  struct model *m;
3203  struct nmgregion* r;
3204 
3205  /*Iterate through all regions and shells */
3206  m = (struct model *)ip->idb_ptr;
3207  for (BU_LIST_FOR(r, nmgregion, &m->r_hd)) {
3208  struct shell* s;
3209 
3210  for (BU_LIST_FOR(s, shell, &r->s_hd)) {
3211  struct bu_ptbl nmg_faces;
3212  unsigned int num_faces, i;
3213  struct poly_face *faces;
3214 
3215  /*get faces of this shell*/
3216  nmg_face_tabulate(&nmg_faces, &s->l.magic);
3217  num_faces = BU_PTBL_LEN(&nmg_faces);
3218  faces = (struct poly_face *)bu_calloc(num_faces, sizeof(struct poly_face), "rt_nmg_volume: faces");
3219 
3220  for (i = 0; i < num_faces; i++) {
3221  /* allocate array of pt structs, max number of verts per faces = (# of faces) - 1 */
3222  faces[i].pts = (point_t *)bu_calloc(num_faces - 1, sizeof(point_t), "rt_nmg_volume: pts");
3223  }
3224  rt_nmg_faces_area(faces, s);
3225  for (i = 0; i < num_faces; i++) {
3226  vect_t tmp = VINIT_ZERO;
3227 
3228  /* calculate volume of pyramid*/
3229  VSCALE(tmp, faces[i].plane_eqn, faces[i].area);
3230  *volume = (VDOT(faces[i].pts[0], tmp)/3);
3231  }
3232  for (i = 0; i < num_faces; i++) {
3233  bu_free((char *)faces[i].pts, "rt_nmg_volume: pts");
3234  }
3235  bu_free((char *)faces, "rt_nmg_volume: faces");
3236  }
3237  }
3238 }
3239 
3240 
3241 /*
3242  * Local Variables:
3243  * mode: C
3244  * tab-width: 8
3245  * indent-tabs-mode: t
3246  * c-file-style: "stroustrup"
3247  * End:
3248  * ex: shiftwidth=4 tabstop=8
3249  */
int rt_nmg_bbox(struct rt_db_internal *ip, point_t *min, point_t *max, const struct bn_tol *tol)
Definition: nmg.c:70
#define NMG_MODEL_MAGIC
Definition: magic.h:133
#define BU_LIST_FOR(p, structure, hp)
Definition: list.h:365
Definition: raytrace.h:800
const struct bn_tol * tol
Definition: raytrace.h:2421
int rt_nmg_export4_fastf(const fastf_t *fp, int count, int pt_type, double scale)
Definition: nmg.c:807
#define NMG_EDGEUSE_MAGIC
Definition: magic.h:120
unsigned char magic[4]
Definition: nmg.c:506
struct disk_rt_list l
Definition: nmg.c:422
void bu_log(const char *,...) _BU_ATTR_PRINTF12
Definition: log.c:176
struct disk_rt_list l
Definition: nmg.c:404
unsigned char max_pt[3 *8]
Definition: nmg.c:436
Definition: nmg.c:489
#define INDEX(o, i, elem)
Definition: nmg.c:1369
disk_index_t u_knots
Definition: nmg.c:466
#define DISK_VERTEX_MAGIC
Definition: nmg.c:563
#define DISK_REGION_MAGIC
Definition: nmg.c:401
struct disk_rt_list lu_hd
Definition: nmg.c:426
struct disk_rt_list l
Definition: nmg.c:550
#define NMG_CK_DISKMAGIC(_cp, _magic)
Definition: nmg.c:364
#define DISK_MODEL_VERSION
Definition: nmg.c:384
unsigned char pt_type[4]
Definition: nmg.c:542
void rt_nmg_curve(struct curvature *cvp, struct hit *hitp, struct soltab *stp)
Definition: nmg.c:246
disk_index_t radial_p
Definition: nmg.c:554
int(* ft_bbox)(struct rt_db_internal *, point_t *, point_t *, const struct bn_tol *)
Definition: raytrace.h:2196
void rt_nmg_volume(fastf_t *volume, const struct rt_db_internal *ip)
Definition: nmg.c:3200
Definition: list.h:118
#define DISK_FACE_MAGIC
Definition: nmg.c:440
int rt_nmg_import4(struct rt_db_internal *ip, const struct bu_external *ep, const fastf_t *mat, const struct db_i *dbip)
Definition: nmg.c:2398
#define NMG_EDGE_MAGIC
Definition: magic.h:123
unsigned char orientation[4]
Definition: nmg.c:557
int kind
Definition: nmg.c:769
vect_t nmg_invdir
Definition: nmg.c:55
struct disk_rt_list down_hd
Definition: nmg.c:513
#define NMG_SHELL_MAGIC
Definition: magic.h:142
#define RT_CK_APPLICATION(_p)
Definition: raytrace.h:1675
#define DISK_FACEUSE_MAGIC
Definition: nmg.c:475
disk_index_t v_p
Definition: nmg.c:583
int rt_nmg_magic_to_kind(uint32_t magic)
Definition: nmg.c:711
#define NMG_LOOP_G_MAGIC
Definition: magic.h:131
double dist
>= 0
Definition: tol.h:73
#define NMG_N_KINDS
Definition: nmg.c:643
unsigned char u_order[4]
Definition: nmg.c:462
vect_t crv_pdir
Principle direction.
Definition: raytrace.h:307
#define NMG_VERTEX_MAGIC
Definition: magic.h:147
#define NMG_FACE_G_SNURB_MAGIC
Definition: magic.h:126
unsigned char magic[4]
Definition: nmg.c:549
if lu s
Definition: nmg_mod.c:3860
unsigned char coord[3 *8]
Definition: nmg.c:574
void bn_rotate_bbox(point_t omin, point_t omax, const mat_t mat, const point_t imin, const point_t imax)
Transform a bounding box (RPP) by the given 4x4 matrix. There are 8 corners to the bounding RPP...
Definition: nmg.c:60
void bu_ptbl_init(struct bu_ptbl *b, size_t len, const char *str)
Definition: ptbl.c:32
struct soltab * stp
Definition: raytrace.h:2420
point_t cent
Definition: arbn.c:1290
#define NMG_KIND_MODEL
Definition: nmg.c:614
void bu_vls_strcat(struct bu_vls *vp, const char *s)
Definition: vls.c:368
long first_fastf_relpos
Definition: nmg.c:770
lu
Definition: nmg_mod.c:3855
Definition: raytrace.h:215
struct disk_rt_list l
Definition: nmg.c:581
#define DISK_INDEX_NULL
Definition: nmg.c:381
unsigned char magic[4]
Definition: nmg.c:536
#define BU_LIST_IS_EMPTY(hp)
Definition: list.h:295
disk_index_t f_p
Definition: nmg.c:482
#define SIZEOF_NETWORK_LONG
Definition: cv.h:46
Definition: pc.h:108
double dist_sq
dist * dist
Definition: tol.h:74
Definition: raytrace.h:368
struct disk_rt_list lu_hd
Definition: nmg.c:484
void nmg_pr_m(const struct model *m)
Definition: nmg_pr.c:86
Definition: raytrace.h:248
#define DISK_VERTEXUSE_A_CNURB_MAGIC
Definition: nmg.c:595
void nmg_vertex_gv(struct vertex *v, const fastf_t *pt)
Definition: nmg_mk.c:1668
unsigned char version[4]
Definition: nmg.c:396
#define NMG_KIND_FACE
Definition: nmg.c:620
int rt_nmg_adjust(struct bu_vls *logstr, struct rt_db_internal *intern, int argc, const char **argv)
Definition: nmg.c:2877
#define BN_TOL_MAGIC
Definition: magic.h:74
unsigned char max_pt[3 *8]
Definition: nmg.c:415
fastf_t st_aradius
Radius of APPROXIMATING sphere.
Definition: raytrace.h:433
void rt_nmg_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp)
Definition: nmg.c:269
void nmg_isect_ray_model(struct ray_data *rd)
void nmg_model_bb(fastf_t *min_pt, fastf_t *max_pt, const struct model *m)
Definition: nmg_info.c:166
Definition: nmg.c:441
uint32_t magic
Definition: raytrace.h:2413
void nmg_rebound(struct model *m, const struct bn_tol *tol)
Definition: nmg_misc.c:2072
struct bu_list rd_hit
list of hit elements
Definition: raytrace.h:2423
int nmg_index_of_struct(register const uint32_t *p)
Definition: nmg_index.c:49
#define DISK_MODEL_MAGIC
Definition: nmg.c:393
Header file for the BRL-CAD common definitions.
#define NMG_KIND_EDGE
Definition: nmg.c:627
#define DISK_VERTEXUSE_MAGIC
Definition: nmg.c:578
#define RT_CK_RAY(_p)
Definition: raytrace.h:224
disk_index_t e_p
Definition: nmg.c:555
disk_index_t eumate_p
Definition: nmg.c:553
struct vertex * v
Definition: nmg.c:62
void rt_nmg_ifree(struct rt_db_internal *ip)
Definition: nmg.c:2753
void bu_cv_htond(unsigned char *out, const unsigned char *in, size_t count)
int rt_nmg_get(struct bu_vls *logstr, const struct rt_db_internal *intern, const char *attr)
Definition: nmg.c:2769
#define BU_LIST_NON_EMPTY(hp)
Definition: list.h:296
disk_index_t g
Definition: nmg.c:559
#define NMG_KIND_EDGEUSE
Definition: nmg.c:626
#define NMG_LOOPUSE_MAGIC
Definition: magic.h:130
char * manifolds
structure 1-3manifold table
Definition: raytrace.h:2415
struct disk_rt_list f_hd
Definition: nmg.c:461
#define HIDDEN
Definition: common.h:86
int nmg_calc_face_g(struct faceuse *fu)
Definition: nmg_misc.c:1786
void rt_pr_tol(const struct bn_tol *tol)
Definition: pr.c:714
unsigned char disk_index_t[4]
Definition: nmg.c:386
void nmg_merge_regions(struct nmgregion *r1, struct nmgregion *r2, const struct bn_tol *tol)
Definition: nmg_mod.c:42
NMG_CK_LOOPUSE(lu)
#define DISK_LOOP_MAGIC
Definition: nmg.c:488
unsigned char magic[4]
Definition: nmg.c:452
#define BU_LIST_NEXT_NOT_HEAD(p, hp)
Definition: list.h:340
disk_index_t vu_p
Definition: nmg.c:428
void * bu_malloc(size_t siz, const char *str)
Definition: malloc.c:314
unsigned char magic[4]
Definition: nmg.c:527
#define NMG_KIND_SHELL_A
Definition: nmg.c:618
Definition: ptbl.h:62
uint32_t nmg_smagic
Definition: nmg.c:52
#define DISK_DOUBLE_ARRAY_MAGIC
Definition: nmg.c:602
#define DISK_EDGEUSE_MAGIC
Definition: nmg.c:547
if(share_geom)
Definition: nmg_mod.c:3829
int idb_major_type
Definition: raytrace.h:192
char * nmg_manifolds(struct model *m)
Definition: nmg_manif.c:413
#define DISK_EDGE_G_CNURB_MAGIC
Definition: nmg.c:534
unsigned char u_size[4]
Definition: nmg.c:464
Definition: color.c:49
int bn_polygon_mk_pts_planes(size_t *npts, point_t **pts, size_t neqs, const plane_t *eqs)
Calculate for an array of plane_eqs, which build a polyhedron, the point_t's for each face...
Definition: polygon.c:137
#define NMG_EDGEUSE2_MAGIC
Definition: magic.h:119
disk_index_t l_p
Definition: nmg.c:511
struct rt_i * a_rt_i
this librt instance
Definition: raytrace.h:1588
unsigned char order[4]
Definition: nmg.c:538
#define NMG_KIND_NMGREGION_A
Definition: nmg.c:616
void * memset(void *s, int c, size_t n)
int rt_nmg_tess(struct nmgregion **r, struct model *m, struct rt_db_internal *ip, const struct rt_tess_tol *ttol, const struct bn_tol *tol)
Definition: nmg.c:317
struct disk_rt_list vu_hd
Definition: nmg.c:566
#define RT_G_DEBUG
Definition: raytrace.h:1718
uint32_t ** nmg_m_struct_count(register struct nmg_struct_counts *ctr, const struct model *m)
Definition: nmg_index.c:523
struct hitmiss ** hitmiss
1 struct hitmiss ptr per elem.
Definition: raytrace.h:2422
#define NMG_LOOP_MAGIC
Definition: magic.h:132
unsigned char is_real[4]
Definition: nmg.c:521
int rt_nmg_params(struct pc_pc_set *ps, const struct rt_db_internal *ip)
Definition: nmg.c:3042
uint32_t NMG_debug
debug bits for NMG's see nmg.h
Definition: raytrace.h:1699
int rt_nmg_plot(struct bu_list *vhead, struct rt_db_internal *ip, const struct rt_tess_tol *ttol, const struct bn_tol *tol, const struct rt_view_info *info)
Definition: nmg.c:291
#define RT_CK_DB_INTERNAL(_p)
Definition: raytrace.h:207
#define NMG_FACE_MAGIC
Definition: magic.h:127
point_t pt
Definition: nmg.c:61
void * bu_calloc(size_t nelem, size_t elsize, const char *str)
Definition: malloc.c:321
fastf_t st_bradius
Radius of BOUNDING sphere.
Definition: raytrace.h:434
struct model * rd_m
Definition: raytrace.h:2414
fastf_t crv_c2
curvature in other direction
Definition: raytrace.h:309
disk_index_t ctl_points
Definition: nmg.c:543
#define BU_LIST_FOR_BACKWARDS(p, structure, hp)
Definition: list.h:370
#define RT_CK_HIT(_p)
Definition: raytrace.h:259
unsigned char us_size[4]
Definition: nmg.c:468
point_t cent_pyramid
Definition: arbn.c:1289
#define BU_PTBL_GET(ptbl, i)
Definition: ptbl.h:108
disk_index_t up
Definition: nmg.c:582
#define DISK_EDGE_MAGIC
Definition: nmg.c:517
struct disk_rt_list l2
Definition: nmg.c:551
int rt_nmg_describe(struct bu_vls *str, const struct rt_db_internal *ip, int verbose, double mm2local)
Definition: nmg.c:2733
unsigned char magic[4]
Definition: nmg.c:421
const struct rt_functab * idb_meth
for ft_ifree(), etc.
Definition: raytrace.h:194
unsigned char c_size[4]
Definition: nmg.c:541
#define NMG_RAY_DATA_MAGIC
Definition: magic.h:135
#define ID_NMG
n-Manifold Geometry solid
Definition: raytrace.h:469
#define V3ARGS(a)
Definition: color.c:56
#define BRLCAD_OK
Definition: defines.h:71
uint8_t * ext_buf
Definition: parse.h:216
void nmg_km(struct model *m)
Definition: nmg_mk.c:1634
unsigned char orientation[4]
Definition: nmg.c:481
#define BU_GET(_ptr, _type)
Definition: malloc.h:201
point_t hit_point
DEPRECATED: Intersection point, use VJOIN1 hit_dist.
Definition: raytrace.h:251
int rt_nmg_export4_internal(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, int compact)
Definition: nmg.c:2218
point_t st_max
max X, Y, Z of bounding RPP
Definition: raytrace.h:438
struct faceuse * nmg_add_loop_to_face(struct shell *s, struct faceuse *fu, struct vertex **verts, int n, int dir)
Definition: nmg_mod.c:1211
vect_t rd_invdir
Definition: raytrace.h:2416
disk_index_t back
Definition: nmg.c:389
disk_index_t knots
Definition: nmg.c:540
#define NMG_KIND_FACE_G_PLANE
Definition: nmg.c:621
void rt_nmg_surf_area(fastf_t *area, const struct rt_db_internal *ip)
Definition: nmg.c:3099
struct disk_rt_list l
Definition: nmg.c:443
#define NMG_SHELL_A_MAGIC
Definition: magic.h:141
unsigned char magic[4]
Definition: nmg.c:403
int rt_nmg_prep(struct soltab *stp, struct rt_db_internal *ip, struct rt_i *rtip)
Definition: nmg.c:93
void nmg_face_bb(struct face *f, const struct bn_tol *tol)
Definition: nmg_mk.c:2423
disk_index_t lumate_p
Definition: nmg.c:509
disk_index_t lg_p
Definition: nmg.c:492
struct model * nmg_model
Definition: nmg.c:53
#define BU_LIST_PNEXT(structure, p)
Definition: list.h:422
unsigned char magic[4]
Definition: nmg.c:565
struct bu_list l
Definition: ptbl.h:63
oldeumate l2 magic
Definition: nmg_mod.c:3843
point_t * pts
Definition: arbn.c:1285
disk_index_t m_p
Definition: nmg.c:405
unsigned char magic[4]
Definition: nmg.c:434
struct disk_rt_list f_hd
Definition: nmg.c:453
#define UNUSED(parameter)
Definition: common.h:239
int bn_polygon_area(fastf_t *area, size_t npts, const point_t *pts)
Functions for working with polygons.
Definition: polygon.c:28
void rt_nmg_print(const struct soltab *stp)
Definition: nmg.c:126
#define BU_PUT(_ptr, _type)
Definition: malloc.h:215
disk_index_t vg_p
Definition: nmg.c:567
void rt_nmg_centroid(point_t *cent, const struct rt_db_internal *ip)
Definition: nmg.c:3137
int rt_nmg_import4_internal(struct rt_db_internal *ip, const struct bu_external *ep, const fastf_t *mat, int rebound, const struct bn_tol *tol)
Definition: nmg.c:2079
#define NMG_REGION_MAGIC
Definition: magic.h:137
long new_subscript
Definition: nmg.c:767
long per_struct_index
Definition: nmg.c:768
Support for uniform tolerances.
Definition: tol.h:71
void rt_nmg_free(struct soltab *stp)
Definition: nmg.c:279
#define NMG_KIND_EDGE_G_CNURB
Definition: nmg.c:629
#define BN_CK_TOL(_p)
Definition: tol.h:82
#define NMG_KIND_VERTEX_G
Definition: nmg.c:634
#define BU_LIST_FIRST_MAGIC(hp)
Definition: list.h:416
int rt_nmg_idisk(void *op, void *ip, struct nmg_exp_counts *ecnt, int idx, uint32_t **ptrs, const fastf_t *mat, const unsigned char *basep)
Definition: nmg.c:1406
oldeumate e_p
Definition: nmg_mod.c:3936
Definition: nmg.c:518
void bn_rotate_plane(plane_t oplane, const mat_t mat, const plane_t iplane)
Transform a plane equation by the given 4x4 matrix.
int bn_polygon_centroid(point_t *cent, size_t npts, const point_t *pts)
Calculate the centroid of a non self-intersecting polygon.
Definition: polygon.c:72
#define NMG_VERTEXUSE_MAGIC
Definition: magic.h:145
uint32_t magic
Magic # for mem id/check.
Definition: list.h:119
disk_index_t vu_p
Definition: nmg.c:558
size_t npts
Definition: arbn.c:1284
#define NMG_VERTEXUSE_A_CNURB_MAGIC
Definition: magic.h:143
#define DISK_LOOP_G_MAGIC
Definition: nmg.c:496
struct nmgregion * nmg_mrsv(struct model *m)
Definition: nmg_mk.c:306
#define INDEXL(oo, ii, elem)
Definition: nmg.c:987
unsigned char magic[4]
Definition: nmg.c:395
unsigned char k_size[4]
Definition: nmg.c:539
void nmg_vmodel(const struct model *m)
Definition: nmg_ck.c:635
struct fg_node fg
Definition: chull3d.cpp:80
#define NMG_FACE_G_PLANE_MAGIC
Definition: magic.h:125
unsigned char min_pt[3 *8]
Definition: nmg.c:499
vect_t r_dir
Direction of ray (UNIT Length)
Definition: raytrace.h:219
unsigned char magic[4]
Definition: nmg.c:590
int bu_ptbl_locate(const struct bu_ptbl *b, const long *p)
#define BU_PTBL_LEN(ptbl)
Definition: ptbl.h:107
void bu_ptbl_free(struct bu_ptbl *b)
Definition: ptbl.c:226
#define PUTMAGIC(_magic)
Definition: nmg.c:992
struct bn_tol rti_tol
Math tolerances for this model.
Definition: raytrace.h:1765
disk_index_t lua_p
Definition: nmg.c:512
#define DISK_EDGE_G_LSEG_MAGIC
Definition: nmg.c:525
int classifying_ray
Definition: raytrace.h:2455
#define NMG_SPEC_START_MAGIC
Definition: nmg.c:47
disk_index_t fua_p
Definition: nmg.c:483
#define RT_CK_DBI(_p)
Definition: raytrace.h:829
oldeumate e_p eu_p
Definition: nmg_mod.c:3940
HIDDEN int reindex(void *p, struct nmg_exp_counts *ecnt)
Definition: nmg.c:949
struct model * nmg_mm(void)
Definition: nmg_mk.c:235
int tcl_obj_to_fastf_array(Tcl_Interp *interp, Tcl_Obj *list, fastf_t **array, int *array_len)
Definition: tcl.c:802
unsigned char magic[4]
Definition: nmg.c:597
disk_index_t eu_p
Definition: nmg.c:520
double perp
nearly 0
Definition: tol.h:75
void nmg_m_to_vlist(struct bu_list *vhead, struct model *m, int poly_markers)
Definition: nmg_plot.c:417
unsigned char vs_size[4]
Definition: nmg.c:469
void rt_nmg_norm(struct hit *hitp, struct soltab *stp, struct xray *rp)
Definition: nmg.c:229
unsigned char magic[4]
Definition: nmg.c:604
#define DISK_VERTEX_G_MAGIC
Definition: nmg.c:571
#define NMG_EDGE_G_LSEG_MAGIC
Definition: magic.h:122
char label[5]
Definition: arbn.c:1283
#define ZERO(val)
Definition: units.c:38
disk_index_t lu_p
Definition: nmg.c:491
#define BU_LIST_INIT(_hp)
Definition: list.h:148
disk_index_t eua_p
Definition: nmg.c:556
void * idb_ptr
Definition: raytrace.h:195
const int rt_nmg_disk_sizes[NMG_N_KINDS]
Definition: nmg.c:646
struct disk_rt_list r_hd
Definition: nmg.c:397
int rt_nmg_shot(struct soltab *stp, struct xray *rp, struct application *ap, struct seg *seghead)
Definition: nmg.c:145
point_t r_pt
Point at which ray starts.
Definition: raytrace.h:218
point_t st_min
min X, Y, Z of bounding RPP
Definition: raytrace.h:437
#define INDEXL_HD2(oo, ii, elem, hd)
Definition: nmg.c:1381
plane_t plane_eqn
Definition: arbn.c:1286
unsigned char N[3 *8]
Definition: nmg.c:591
#define NMG_KIND_LOOP
Definition: nmg.c:624
int nmg_ray_segs(struct ray_data *rd)
Definition: nmg_rt_segs.c:1156
unsigned char magic[4]
Definition: nmg.c:477
void bu_cv_ntohd(unsigned char *out, const unsigned char *in, size_t count)
uint32_t magic
Definition: tol.h:72
#define NMG_KIND_SHELL
Definition: nmg.c:617
const struct rt_functab OBJ[]
Definition: table.c:159
eu1 radial_p
Definition: nmg_mod.c:3930
disk_index_t s_p
Definition: nmg.c:479
#define NMG_EDGE_G_CNURB_MAGIC
Definition: magic.h:121
disk_index_t ctl_points
Definition: nmg.c:471
#define NMG_KIND_FACE_G_SNURB
Definition: nmg.c:622
#define DISK_SHELL_MAGIC
Definition: nmg.c:419
void bn_vec_ortho(vect_t out, const vect_t in)
struct disk_rt_list eu_hd
Definition: nmg.c:427
unsigned char max_pt[3 *8]
Definition: nmg.c:500
disk_index_t fu_p
Definition: nmg.c:444
#define RT_CK_SOLTAB(_p)
Definition: raytrace.h:453
#define DISK_FACE_G_PLANE_MAGIC
Definition: nmg.c:450
#define DISK_INDEX_LISTHEAD
Definition: nmg.c:382
void bu_vls_printf(struct bu_vls *vls, const char *fmt,...) _BU_ATTR_PRINTF23
Definition: vls.c:694
#define NMG_VERTEXUSE_A_PLANE_MAGIC
Definition: magic.h:144
#define NMG_KIND_VERTEX
Definition: nmg.c:633
void * st_specific
-> ID-specific (private) struct
Definition: raytrace.h:435
int rt_nmg_export5(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, const struct db_i *dbip)
Definition: nmg.c:2556
unsigned char ndouble[4]
Definition: nmg.c:605
unsigned char flip[4]
Definition: nmg.c:446
unsigned char e_pt[3 *8]
Definition: nmg.c:529
disk_index_t up
Definition: nmg.c:508
#define NMG_KIND_VERTEXUSE
Definition: nmg.c:630
struct disk_rt_list l
Definition: nmg.c:507
unsigned char orientation[4]
Definition: nmg.c:510
unsigned char magic[4]
Definition: nmg.c:498
disk_index_t v_knots
Definition: nmg.c:467
unsigned char v_size[4]
Definition: nmg.c:465
fastf_t crv_c1
curvature in principle dir
Definition: raytrace.h:308
unsigned char min_pt[3 *8]
Definition: nmg.c:435
#define DISK_SHELL_A_MAGIC
Definition: nmg.c:432
Definition: color.c:51
#define BU_LIST_NULL
Definition: list.h:124
disk_index_t fumate_p
Definition: nmg.c:480
void bu_vls_strcpy(struct bu_vls *vp, const char *s)
Definition: vls.c:310
unsigned char v_order[4]
Definition: nmg.c:463
#define NMG_KIND_NMGREGION
Definition: nmg.c:615
#define NMG_REGION_A_MAGIC
Definition: magic.h:136
void bu_free(void *ptr, const char *str)
Definition: malloc.c:328
vect_t hit_normal
DEPRECATED: Surface Normal at hit_point, use RT_HIT_NORMAL.
Definition: raytrace.h:252
unsigned char magic[4]
Definition: nmg.c:519
struct xray * rp
Definition: raytrace.h:2417
#define BU_CK_LIST_HEAD(_p)
Definition: list.h:142
unsigned char magic[4]
Definition: nmg.c:580
disk_index_t r_p
Definition: nmg.c:423
#define NMG_VERTEX_G_MAGIC
Definition: magic.h:146
void rt_nmg_i2alloc(struct nmg_exp_counts *ecnt, unsigned char *cp, int *kind_counts)
Definition: nmg.c:2034
char * manifolds
Definition: nmg.c:54
long byte_offset
Definition: nmg.c:771
#define NMG_SPEC_END_MAGIC
Definition: nmg.c:48
struct application * ap
Definition: raytrace.h:2418
#define BU_CK_EXTERNAL(_p)
Definition: parse.h:224
uint32_t nmg_emagic
Definition: nmg.c:56
NMG_CK_SHELL(s)
unsigned char pt_type[4]
Definition: nmg.c:470
void nmg_face_tabulate(struct bu_ptbl *tab, const uint32_t *magic_p)
Definition: nmg_info.c:2247
struct faceuse * nmg_cmface(struct shell *s, struct vertex ***verts, int n)
Definition: nmg_mod.c:979
struct disk_rt_list eu_hd2
Definition: nmg.c:528
const char rt_nmg_kind_names[NMG_N_KINDS+2][18]
Definition: nmg.c:674
void rt_nmg_make(const struct rt_functab *ftp, struct rt_db_internal *intern)
Definition: nmg.c:3029
#define DISK_FACE_G_SNURB_MAGIC
Definition: nmg.c:458
const struct rt_functab * st_meth
pointer to per-solid methods
Definition: raytrace.h:428
#define NMG_KIND_DOUBLE_ARRAY
Definition: nmg.c:638
struct bu_list rd_miss
list of missed/sub-hit elements
Definition: raytrace.h:2424
unsigned char param[3 *8]
Definition: nmg.c:598
fastf_t * rt_nmg_import4_fastf(const unsigned char *base, struct nmg_exp_counts *ecnt, long int subscript, const matp_t mat, int len, int pt_type)
Definition: nmg.c:859
#define DISK_REGION_A_MAGIC
Definition: nmg.c:411
disk_index_t forw
Definition: nmg.c:388
#define NMG_KIND_EDGE_G_LSEG
Definition: nmg.c:628
unsigned char vals[1 *8]
Definition: nmg.c:606
size_t ext_nbytes
Definition: parse.h:210
struct disk_rt_list s_hd
Definition: nmg.c:407
disk_index_t a
Definition: nmg.c:584
#define NMG_FACEUSE_MAGIC
Definition: magic.h:124
#define NMG_KIND_FACEUSE
Definition: nmg.c:619
fastf_t hit_dist
dist from r_pt to hit_point
Definition: raytrace.h:250
HIDDEN void verbose(struct human_data_t *dude)
Definition: human.c:2008
unsigned char magic[4]
Definition: nmg.c:442
void rt_nmg_edisk(void *op, void *ip, struct nmg_exp_counts *ecnt, int idx, double local2mm)
Definition: nmg.c:1001
disk_index_t sa_p
Definition: nmg.c:424
#define INDEXL_HD(oo, ii, elem, hd)
Definition: nmg.c:1370
disk_index_t up
Definition: nmg.c:552
Definition: vls.h:56
#define BRLCAD_ERROR
Definition: defines.h:72
void bu_bomb(const char *str) _BU_ATTR_NORETURN
Definition: bomb.c:91
disk_index_t g
Definition: nmg.c:445
unsigned char magic[4]
Definition: nmg.c:490
#define NMG_KIND_VERTEXUSE_A_CNURB
Definition: nmg.c:632
fastf_t area
Definition: arbn.c:1287
double fastf_t
Definition: defines.h:300
#define NMG_KIND_LOOPUSE
Definition: nmg.c:623
unsigned char min_pt[3 *8]
Definition: nmg.c:414
#define NMG_KIND_VERTEXUSE_A_PLANE
Definition: nmg.c:631
const char * bu_identify_magic(uint32_t magic)
int rt_nmg_import5(struct rt_db_internal *ip, struct bu_external *ep, const mat_t mat, const struct db_i *dbip)
Definition: nmg.c:2439
struct seg * seghead
Definition: raytrace.h:2419
#define DISK_VERTEXUSE_A_PLANE_MAGIC
Definition: nmg.c:588
#define BU_LIST_NOT_HEAD(p, hp)
Definition: list.h:324
unsigned char magic[4]
Definition: nmg.c:460
Tcl_Interp * brlcad_interp
Definition: tcl.c:41
int tcl_obj_to_int_array(Tcl_Interp *interp, Tcl_Obj *list, int **array, int *array_len)
Definition: tcl.c:762
double para
nearly 1
Definition: tol.h:76
#define DISK_LOOPUSE_MAGIC
Definition: nmg.c:504
Definition: color.c:50
struct disk_rt_list fu_hd
Definition: nmg.c:425
eu1 eumate_p
Definition: nmg_mod.c:3922
int rt_nmg_export4(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, const struct db_i *dbip)
Definition: nmg.c:2536
#define BU_LIST_FIRST(structure, hp)
Definition: list.h:312
unsigned char magic[4]
Definition: nmg.c:413
fastf_t vol_pyramid
Definition: arbn.c:1288
unsigned char N[4 *8]
Definition: nmg.c:454
#define NMG_KIND_LOOP_G
Definition: nmg.c:625
unsigned char magic[4]
Definition: nmg.c:573
HIDDEN struct model * rt_nmg_ialloc(uint32_t **ptrs, struct nmg_exp_counts *ecnt, int *kind_counts)
Definition: nmg.c:1835
void nmg_merge_models(struct model *m1, struct model *m2)
Definition: nmg_index.c:738
void nmg_vertex_tabulate(struct bu_ptbl *tab, const uint32_t *magic_p)
Definition: nmg_info.c:1985
struct disk_rt_list l
Definition: nmg.c:478
point_t st_center
Centroid of solid.
Definition: raytrace.h:432
unsigned char e_dir[3 *8]
Definition: nmg.c:530
int bn_polygon_sort_ccw(size_t npts, point_t *pts, plane_t cmp)
Sort an array of point_ts, building a convex polygon, counter-clockwise.
Definition: polygon.c:182
#define BU_STR_EQUAL(s1, s2)
Definition: str.h:126
disk_index_t ra_p
Definition: nmg.c:406
struct disk_rt_list eu_hd2
Definition: nmg.c:537
struct rt_g RTG
Definition: globals.c:39