#ifndef LAY_INCLUDE_HEADER
#define LAY_INCLUDE_HEADER

// #define LAY_IMPLEMENTATION
// in exactly one C or C++ file in your project before you include layout.h.
// Your includes should look like this:
//
// #define LAY_IMPLEMENTATION
// #include "layout.h"
//
// All other files in your project should not define LAY_IMPLEMENTATION.
#define LAY_EXPORT extern

// Users of this library can define LAY_ASSERT if they would like to use an
// assert other than the one from assert.h.
#ifndef LAY_ASSERT
#include <assert.h>
#define LAY_ASSERT assert
#endif
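// For example, to route the library's assertions through a custom handler, a
// user could (sketch; my_assert is a hypothetical macro of their own) write
// this before including layout.h:
//
//   #define LAY_ASSERT my_assert
//   #include "layout.h"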
\r
// 'static inline' for things we always want inlined -- the compiler should not
// even have to consider not inlining these.
#if defined(__GNUC__) || defined(__clang__)
#define LAY_STATIC_INLINE __attribute__((always_inline)) static inline
#elif defined(_MSC_VER)
#define LAY_STATIC_INLINE __forceinline static
#else
#define LAY_STATIC_INLINE inline static
#endif
typedef uint32_t lay_id;
#ifdef LAY_FLOAT
typedef float lay_scalar;
#else
typedef int16_t lay_scalar;
#endif

#define LAY_INVALID_ID UINT32_MAX
// GCC and Clang allow us to create vectors based on a type with the
// vector_size extension. This will allow us to access individual components of
// the vector via indexing operations.
#if defined(__GNUC__) || defined(__clang__)

#ifdef LAY_FLOAT
// Using floats for coordinates takes up more space than using int16. 128 bits
// for a four-component vector.
typedef float lay_vec4 __attribute__ ((__vector_size__ (16), aligned(4)));
typedef float lay_vec2 __attribute__ ((__vector_size__ (8), aligned(4)));
#else
// Integer version uses 64 bits for a four-component vector.
typedef int16_t lay_vec4 __attribute__ ((__vector_size__ (8), aligned(2)));
typedef int16_t lay_vec2 __attribute__ ((__vector_size__ (4), aligned(2)));
#endif

// Note that we're not actually going to make any explicit use of any
// platform's SIMD instructions -- we're just using the vector extension for
// more convenient syntax. Therefore, we can specify more relaxed alignment
// requirements. See the end of this file for some notes about this.
// MSVC doesn't have the vector_size attribute, but we want convenient indexing
// operators for our layout logic code. Therefore, we force C++ compilation in
// MSVC, and use C++ operator overloading.
#elif defined(_MSC_VER)
typedef struct lay_vec4 {
    lay_scalar xyzw[4];
    const lay_scalar& operator[](int index) const
    { return xyzw[index]; }
    lay_scalar& operator[](int index)
    { return xyzw[index]; }
} lay_vec4;
typedef struct lay_vec2 {
    lay_scalar xy[2];
    const lay_scalar& operator[](int index) const
    { return xy[index]; }
    lay_scalar& operator[](int index)
    { return xy[index]; }
} lay_vec2;
#endif // __GNUC__/__clang__ or _MSC_VER
typedef struct lay_item_t {
    uint32_t flags;
    lay_id first_child;
    lay_id next_sibling;
    lay_vec4 margins;
    lay_vec2 size;
} lay_item_t;

typedef struct lay_context {
    lay_item_t *items;
    lay_vec4 *rects;
    lay_id capacity;
    lay_id count;
} lay_context;
// Container flags to pass to lay_set_contain()
typedef enum lay_box_flags {
    // flex-direction (bit 0+1)
    LAY_ROW = 0x002,
    LAY_COLUMN = 0x003,

    LAY_LAYOUT = 0x000,
    LAY_FLEX = 0x002,

    // flex-wrap (bit 2)
    LAY_NOWRAP = 0x000,
    // multi-line, wrap left to right
    LAY_WRAP = 0x004,

    // justify-content (start, end, center, space-between)
    // at start of row/column
    LAY_START = 0x008,
    // at center of row/column
    LAY_MIDDLE = 0x000,
    // at end of row/column
    LAY_END = 0x010,
    // insert spacing to stretch across whole row/column
    LAY_JUSTIFY = 0x018,

    // align-items
    // can be implemented by putting a flex container in a layout container,
    // then using LAY_TOP, LAY_BOTTOM, LAY_VFILL, LAY_VCENTER, etc.
    // FILL is equivalent to stretch/grow

    // align-content (start, end, center, stretch)
    // can be implemented by putting a flex container in a layout container,
    // then using LAY_TOP, LAY_BOTTOM, LAY_VFILL, LAY_VCENTER, etc.
    // FILL is equivalent to stretch; space-between is not supported.
} lay_box_flags;
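// For example, a toolbar row that spreads its children across its full width
// might be configured like this (sketch; `toolbar` is a hypothetical item id):
//
//   lay_set_contain(&ctx, toolbar, LAY_ROW | LAY_JUSTIFY);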
\r
// child layout flags to pass to lay_set_behave()
typedef enum lay_layout_flags {
    // attachments (bit 5-8)
    // fully valid when parent uses LAY_LAYOUT model
    // partially valid when in LAY_FLEX model

    // anchor to left item or left side of parent
    LAY_LEFT = 0x020,
    // anchor to top item or top side of parent
    LAY_TOP = 0x040,
    // anchor to right item or right side of parent
    LAY_RIGHT = 0x080,
    // anchor to bottom item or bottom side of parent
    LAY_BOTTOM = 0x100,
    // anchor to both left and right item or parent borders
    LAY_HFILL = 0x0a0,
    // anchor to both top and bottom item or parent borders
    LAY_VFILL = 0x140,
    // center horizontally, with left margin as offset
    LAY_HCENTER = 0x000,
    // center vertically, with top margin as offset
    LAY_VCENTER = 0x000,
    // center in both directions, with left/top margin as offset
    LAY_CENTER = 0x000,
    // anchor to all four directions
    LAY_FILL = 0x1e0,
    // When in a wrapping container, put this element on a new line. Wrapping
    // layout code auto-inserts LAY_BREAK flags as needed. See GitHub issues for
    // TODO related to this.
    //
    // Drawing routines can read this via item pointers as needed after
    // performing layout calculations.
    LAY_BREAK = 0x200
} lay_layout_flags;
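// For example, a child that should stick to the top of its parent and stretch
// across the parent's full width could be flagged like this (sketch; `child`
// is a hypothetical item id):
//
//   lay_set_behave(&ctx, child, LAY_TOP | LAY_HFILL);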
\r
enum {
    // these bits, starting at bit 16, can be safely assigned by the
    // application, e.g. as item types, other event types, drop targets, etc.
    // this is not yet exposed via API functions, you'll need to get/set these
    // by directly accessing item pointers.
    //
    // (In reality we have more free bits than this, TODO)
    //
    // TODO fix int/unsigned size mismatch (clang issues warning for this),
    // should be all bits as 1 instead of INT_MAX
    LAY_USERMASK = 0x7fff0000,

    // a special mask passed to lay_find_item() (currently does not exist, was
    // not ported from oui)
    LAY_ANY = 0x7fffffff
};

// extra item flags
enum {
    LAY_ITEM_BOX_MODEL_MASK = 0x000007,
    LAY_ITEM_BOX_MASK = 0x00001F,
    LAY_ITEM_LAYOUT_MASK = 0x0003E0,
    // item has been inserted (bit 10)
    LAY_ITEM_INSERTED = 0x400,
    // horizontal size has been explicitly set (bit 11)
    LAY_ITEM_HFIXED = 0x800,
    // vertical size has been explicitly set (bit 12)
    LAY_ITEM_VFIXED = 0x1000,

    LAY_ITEM_FIXED_MASK = LAY_ITEM_HFIXED | LAY_ITEM_VFIXED,

    // which flag bits will be compared
    LAY_ITEM_COMPARE_MASK = LAY_ITEM_BOX_MODEL_MASK
        | (LAY_ITEM_LAYOUT_MASK & ~LAY_BREAK)
};
LAY_STATIC_INLINE lay_vec4 lay_vec4_xyzw(lay_scalar x, lay_scalar y, lay_scalar z, lay_scalar w)
{
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__cplusplus)
    return (lay_vec4){x, y, z, w};
#else
    lay_vec4 result;
    result[0] = x; result[1] = y; result[2] = z; result[3] = w;
    return result;
#endif
}
// Call this on a context before using it. You must also call this on a context
// if you would like to use it again after calling lay_destroy_context() on it.
LAY_EXPORT void lay_init_context(lay_context *ctx);

// Reserve enough heap memory to contain `count` items without needing to
// reallocate. The initial lay_init_context() call does not allocate any heap
// memory, so if you init a context and then call this once with a large enough
// number for the number of items you'll create, there will not be any further
// reallocations.
LAY_EXPORT void lay_reserve_items_capacity(lay_context *ctx, lay_id count);
// Frees any heap allocated memory used by a context. Don't call this on a
// context that did not have lay_init_context() called on it. To reuse a context
// after destroying it, you will need to call lay_init_context() on it again.
LAY_EXPORT void lay_destroy_context(lay_context *ctx);

// Clears all of the items in a context, setting its count to 0. Use this when
// you want to re-declare your layout starting from the root item. This does not
// free any memory or perform allocations. It's safe to use the context again
// after calling this. You should probably use this instead of init/destroy if
// you are recalculating your layouts in a loop.
LAY_EXPORT void lay_reset_context(lay_context *ctx);
// Performs the layout calculations, starting at the root item (id 0). After
// calling this, you can use lay_get_rect() to query for an item's calculated
// rectangle. If you use procedures such as lay_append() or lay_insert() after
// calling this, your calculated data may become invalid if a reallocation
// occurs.
//
// You should prefer to recreate your items starting from the root instead of
// doing fine-grained updates to the existing context.
//
// However, it's safe to use lay_set_size on an item, and then re-run
// lay_run_context. This might be useful if you are doing a resizing animation
// on items in a layout without any contents changing.
LAY_EXPORT void lay_run_context(lay_context *ctx);
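// A minimal end-to-end sketch of the intended usage (the identifiers `root`
// and `child` are just example names):
//
//   lay_context ctx;
//   lay_init_context(&ctx);
//
//   lay_id root = lay_item(&ctx);           // the first item, id 0, is the root
//   lay_set_size_xy(&ctx, root, 1280, 720);
//   lay_set_contain(&ctx, root, LAY_ROW);
//
//   lay_id child = lay_item(&ctx);
//   lay_insert(&ctx, root, child);
//   lay_set_behave(&ctx, child, LAY_VFILL);
//   lay_set_size_xy(&ctx, child, 200, 0);   // fixed width, height from layout
//
//   lay_run_context(&ctx);
//   lay_vec4 r = lay_get_rect(&ctx, child); // r[0], r[1] = x, y; r[2], r[3] = w, h
//
//   lay_destroy_context(&ctx);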
\r
// Like lay_run_context(), this procedure will run layout calculations --
// however, it lets you specify which item you want to start from.
// lay_run_context() always starts with item 0, the first item, as the root.
// Running the layout calculations from a specific item is useful if you need
// to iteratively re-run parts of your layout hierarchy, or if you are only
// interested in updating certain subsets of it. Be careful when using this --
// it's easy to generate bad output if the parent items haven't yet had their
// output rectangles calculated, or if they've been invalidated (e.g. due to
// reallocation).
LAY_EXPORT void lay_run_item(lay_context *ctx, lay_id item);
// Performing a layout on items where wrapping is enabled in the parent
// container can cause flags to be modified during the calculations. If you plan
// to call lay_run_context or lay_run_item multiple times without calling
// lay_reset_context, and if you have a container that uses wrapping, and if the
// width or height of the container may have changed, you should call
// lay_clear_item_break on all of the children of a container before calling
// lay_run_context or lay_run_item again. If you don't, the layout calculations
// may perform unnecessary wrapping.
//
// This requirement may be changed in the future.
//
// Calling this will also reset any manually-specified breaking. You will need
// to set the manual breaking again, or simply not call this on any items that
// you know you wanted to break manually.
//
// If you clear your context every time you calculate your layout, or if you
// don't use wrapping, you don't need to call this.
LAY_EXPORT void lay_clear_item_break(lay_context *ctx, lay_id item);
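// For example, if a wrapping row is resized every frame without rebuilding the
// context, one way to handle it is the following sketch (`wrapping_row` is a
// hypothetical item id):
//
//   lay_id child = lay_first_child(&ctx, wrapping_row);
//   while (child != LAY_INVALID_ID) {
//       lay_clear_item_break(&ctx, child);
//       child = lay_next_sibling(&ctx, child);
//   }
//   lay_run_context(&ctx);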
\r
// Returns the number of items that have been created in a context.
LAY_EXPORT lay_id lay_items_count(lay_context *ctx);

// Returns the number of items the context can hold without performing a
// reallocation.
LAY_EXPORT lay_id lay_items_capacity(lay_context *ctx);

// Create a new item, which can just be thought of as a rectangle. Returns the
// id (handle) used to identify the item.
LAY_EXPORT lay_id lay_item(lay_context *ctx);

// Inserts an item into another item, forming a parent-child relationship. An
// item can contain any number of child items. Items inserted into a parent are
// put at the end of the ordering, after any existing siblings.
LAY_EXPORT void lay_insert(lay_context *ctx, lay_id parent, lay_id child);

// lay_append inserts an item as a sibling after another item. This allows
// inserting an item into the middle of an existing list of items within a
// parent. It's also more efficient than repeatedly using lay_insert(ctx,
// parent, new_child) in a loop to create a list of items in a parent, because
// it does not need to traverse the parent's children each time. So if you're
// creating a long list of children inside of a parent, you might prefer to use
// this after using lay_insert to insert the first child.
LAY_EXPORT void lay_append(lay_context *ctx, lay_id earlier, lay_id later);

// Like lay_insert, but puts the new item as the first child in a parent instead
// of as the last.
LAY_EXPORT void lay_push(lay_context *ctx, lay_id parent, lay_id child);
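// For example, to build a long list of children efficiently, one might insert
// the first child and then append the rest (sketch; `list` is a hypothetical
// parent id):
//
//   lay_id first = lay_item(&ctx);
//   lay_insert(&ctx, list, first);
//   lay_id prev = first;
//   for (int i = 1; i < 100; ++i) {
//       lay_id next = lay_item(&ctx);
//       lay_append(&ctx, prev, next);
//       prev = next;
//   }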
\r
// Gets the size that was set with lay_set_size or lay_set_size_xy. The _xy
// version writes the output values to the specified addresses instead of
// returning the values in a lay_vec2.
LAY_EXPORT lay_vec2 lay_get_size(lay_context *ctx, lay_id item);
LAY_EXPORT void lay_get_size_xy(lay_context *ctx, lay_id item, lay_scalar *x, lay_scalar *y);

// Sets the size of an item. The _xy version passes the width and height as
// separate arguments, but functions the same.
LAY_EXPORT void lay_set_size(lay_context *ctx, lay_id item, lay_vec2 size);
LAY_EXPORT void lay_set_size_xy(lay_context *ctx, lay_id item, lay_scalar width, lay_scalar height);
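// A size component of 0 is treated as "not explicitly sized" in that dimension
// (see the size[dim] != 0 check in lay_calc_size and the LAY_ITEM_HFIXED /
// LAY_ITEM_VFIXED handling in lay_set_size below), so the layout computes it
// from the item's flags and children. For example (sketch; `sidebar` is a
// hypothetical item id):
//
//   lay_set_size_xy(&ctx, sidebar, 250, 0); // fixed width, flexible height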
\r
// Set the flags on an item which determine how it behaves as a parent. For
// example, setting LAY_COLUMN will make an item behave as if it were a column
// -- it will lay out its children vertically.
LAY_EXPORT void lay_set_contain(lay_context *ctx, lay_id item, uint32_t flags);

// Set the flags on an item which determine how it behaves as a child inside of
// a parent item. For example, setting LAY_VFILL will make an item try to fill
// up all available vertical space inside of its parent.
LAY_EXPORT void lay_set_behave(lay_context *ctx, lay_id item, uint32_t flags);

// Get the margins that were set by lay_set_margins. The _ltrb version writes
// the output values to the specified addresses instead of returning the values
// in a lay_vec4.
// l: left, t: top, r: right, b: bottom
LAY_EXPORT lay_vec4 lay_get_margins(lay_context *ctx, lay_id item);
LAY_EXPORT void lay_get_margins_ltrb(lay_context *ctx, lay_id item, lay_scalar *l, lay_scalar *t, lay_scalar *r, lay_scalar *b);

// Set the margins on an item. The components of the vector are:
// 0: left, 1: top, 2: right, 3: bottom.
LAY_EXPORT void lay_set_margins(lay_context *ctx, lay_id item, lay_vec4 ltrb);

// Same as lay_set_margins, but the components are passed as separate arguments
// (left, top, right, bottom).
LAY_EXPORT void lay_set_margins_ltrb(lay_context *ctx, lay_id item, lay_scalar l, lay_scalar t, lay_scalar r, lay_scalar b);
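// For example, to give an item a uniform 8-unit margin on every side (sketch;
// `item_id` is a hypothetical item id):
//
//   lay_set_margins_ltrb(&ctx, item_id, 8, 8, 8, 8);
//
//   // or, equivalently, using the vector form:
//   lay_set_margins(&ctx, item_id, lay_vec4_xyzw(8, 8, 8, 8));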
\r
// Get the pointer to an item in the buffer by its id. Don't keep this around --
// it will become invalid as soon as any reallocation occurs. Just store the id
// instead (it's smaller, anyway, and the lookup cost will be nothing.)
LAY_STATIC_INLINE lay_item_t *lay_get_item(const lay_context *ctx, lay_id id)
{
    LAY_ASSERT(id != LAY_INVALID_ID && id < ctx->count);
    return ctx->items + id;
}

// Get the id of first child of an item, if any. Returns LAY_INVALID_ID if there
// is no child.
LAY_STATIC_INLINE lay_id lay_first_child(const lay_context *ctx, lay_id id)
{
    const lay_item_t *pitem = lay_get_item(ctx, id);
    return pitem->first_child;
}

// Get the id of the next sibling of an item, if any. Returns LAY_INVALID_ID if
// there is no next sibling.
LAY_STATIC_INLINE lay_id lay_next_sibling(const lay_context *ctx, lay_id id)
{
    const lay_item_t *pitem = lay_get_item(ctx, id);
    return pitem->next_sibling;
}

// Returns the calculated rectangle of an item. This is only valid after calling
// lay_run_context and before any other reallocation occurs. Otherwise, the
// result will be undefined. The vector components are:
// 0: x starting position, 1: y starting position
// 2: width, 3: height
LAY_STATIC_INLINE lay_vec4 lay_get_rect(const lay_context *ctx, lay_id id)
{
    LAY_ASSERT(id != LAY_INVALID_ID && id < ctx->count);
    return ctx->rects[id];
}

// The same as lay_get_rect, but writes the x,y positions and width,height
// values to the specified addresses instead of returning them in a lay_vec4.
LAY_STATIC_INLINE void lay_get_rect_xywh(
        const lay_context *ctx, lay_id id,
        lay_scalar *x, lay_scalar *y, lay_scalar *width, lay_scalar *height)
{
    LAY_ASSERT(id != LAY_INVALID_ID && id < ctx->count);
    lay_vec4 rect = ctx->rects[id];
    *x = rect[0];
    *y = rect[1];
    *width = rect[2];
    *height = rect[3];
}

#undef LAY_STATIC_INLINE

#endif // LAY_INCLUDE_HEADER
// Notes about the use of vector_size merely for syntax convenience:
//
// The current layout calculation procedures are not written in a way that
// would benefit from SIMD instruction usage.
//
// (Passing 128-bit float4 vectors using __vectorcall *might* get you some
// small benefit in very specific situations, but is unlikely to be worth the
// hassle. And I believe this would only be needed if you compiled the library
// in a way where the compiler was prevented from using inlining when copying
// rectangle/size data.)
//
// I might go back in the future and just use regular struct-wrapped arrays.
// I'm not sure if relying on the vector thing in GCC/clang and then using C++
// operator overloading in MSVC is worth the annoyance of saving a couple of
// extra characters on each array access in the implementation code.
#ifdef LAY_IMPLEMENTATION

#include <stddef.h>
#include <stdbool.h>

// Users of this library can define LAY_REALLOC to use a custom (re)allocator
// instead of stdlib's realloc. It should have the same behavior as realloc --
// first parameter type is a void pointer, and its value is either a null
// pointer or an existing pointer. The second parameter is a size_t of the new
// desired size. The buffer contents should be preserved across reallocations.
//
// And, if you define LAY_REALLOC, you will also need to define LAY_FREE, which
// should have the same behavior as free.
#ifndef LAY_REALLOC
#include <stdlib.h>
#define LAY_REALLOC(_block, _size) realloc(_block, _size)
#define LAY_FREE(_block) free(_block)
#endif
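// For example, a project that routes all allocations through its own allocator
// might do something like the following before including the implementation
// (sketch; my_realloc and my_free are hypothetical functions):
//
//   #define LAY_REALLOC(_block, _size) my_realloc(_block, _size)
//   #define LAY_FREE(_block) my_free(_block)
//   #define LAY_IMPLEMENTATION
//   #include "layout.h"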
\r
// Like the LAY_REALLOC define, LAY_MEMSET can be used for a custom memset.
// Otherwise, the memset from string.h will be used.
#ifndef LAY_MEMSET
#include <string.h>
#define LAY_MEMSET(_dst, _val, _size) memset(_dst, _val, _size)
#endif

#if defined(__GNUC__) || defined(__clang__)
#define LAY_FORCE_INLINE __attribute__((always_inline)) inline
#ifdef __cplusplus
#define LAY_RESTRICT __restrict
#else
#define LAY_RESTRICT restrict
#endif // __cplusplus
#elif defined(_MSC_VER)
#define LAY_FORCE_INLINE __forceinline
#define LAY_RESTRICT __restrict
#else
#define LAY_FORCE_INLINE inline
#ifdef __cplusplus
#define LAY_RESTRICT
#else
#define LAY_RESTRICT restrict
#endif // __cplusplus
#endif
// Useful math utilities
static LAY_FORCE_INLINE lay_scalar lay_scalar_max(lay_scalar a, lay_scalar b)
{ return a > b ? a : b; }
static LAY_FORCE_INLINE lay_scalar lay_scalar_min(lay_scalar a, lay_scalar b)
{ return a < b ? a : b; }
static LAY_FORCE_INLINE float lay_float_max(float a, float b)
{ return a > b ? a : b; }
static LAY_FORCE_INLINE float lay_float_min(float a, float b)
{ return a < b ? a : b; }
void lay_init_context(lay_context *ctx)
{
    ctx->capacity = 0;
    ctx->count = 0;
    ctx->items = NULL;
    ctx->rects = NULL;
}

void lay_reserve_items_capacity(lay_context *ctx, lay_id count)
{
    if (count >= ctx->capacity) {
        ctx->capacity = count;
        const size_t item_size = sizeof(lay_item_t) + sizeof(lay_vec4);
        ctx->items = (lay_item_t*)LAY_REALLOC(ctx->items, ctx->capacity * item_size);
        const lay_item_t *past_last = ctx->items + ctx->capacity;
        ctx->rects = (lay_vec4*)past_last;
    }
}
void lay_destroy_context(lay_context *ctx)
{
    if (ctx->items != NULL) {
        LAY_FREE(ctx->items);
        ctx->items = NULL;
        ctx->rects = NULL;
    }
}

void lay_reset_context(lay_context *ctx)
{ ctx->count = 0; }

static void lay_calc_size(lay_context *ctx, lay_id item, int dim);
static void lay_arrange(lay_context *ctx, lay_id item, int dim);
void lay_run_context(lay_context *ctx)
{
    LAY_ASSERT(ctx != NULL);

    if (ctx->count > 0) {
        lay_run_item(ctx, 0);
    }
}

void lay_run_item(lay_context *ctx, lay_id item)
{
    LAY_ASSERT(ctx != NULL);

    lay_calc_size(ctx, item, 0);
    lay_arrange(ctx, item, 0);
    lay_calc_size(ctx, item, 1);
    lay_arrange(ctx, item, 1);
}
// Alternatively, we could use a flag bit to indicate whether an item's children
// have already been wrapped and may need re-wrapping. If we do that, in the
// future, this would become deprecated and we could make it a no-op.
void lay_clear_item_break(lay_context *ctx, lay_id item)
{
    LAY_ASSERT(ctx != NULL);
    lay_item_t *pitem = lay_get_item(ctx, item);
    pitem->flags = pitem->flags & ~(uint32_t)LAY_BREAK;
}

lay_id lay_items_count(lay_context *ctx)
{
    LAY_ASSERT(ctx != NULL);
    return ctx->count;
}

lay_id lay_items_capacity(lay_context *ctx)
{
    LAY_ASSERT(ctx != NULL);
    return ctx->capacity;
}
lay_id lay_item(lay_context *ctx)
{
    lay_id idx = ctx->count++;

    if (idx >= ctx->capacity) {
        ctx->capacity = ctx->capacity < 1 ? 32 : (ctx->capacity * 4);
        const size_t item_size = sizeof(lay_item_t) + sizeof(lay_vec4);
        ctx->items = (lay_item_t*)LAY_REALLOC(ctx->items, ctx->capacity * item_size);
        const lay_item_t *past_last = ctx->items + ctx->capacity;
        ctx->rects = (lay_vec4*)past_last;
    }

    lay_item_t *item = lay_get_item(ctx, idx);
    // We can either do this here, or when creating/resetting buffer
    LAY_MEMSET(item, 0, sizeof(lay_item_t));
    item->first_child = LAY_INVALID_ID;
    item->next_sibling = LAY_INVALID_ID;

    LAY_MEMSET(&ctx->rects[idx], 0, sizeof(lay_vec4));

    return idx;
}
static LAY_FORCE_INLINE
void lay_append_by_ptr(
        lay_item_t *LAY_RESTRICT pearlier,
        lay_id later, lay_item_t *LAY_RESTRICT plater)
{
    plater->next_sibling = pearlier->next_sibling;
    plater->flags |= LAY_ITEM_INSERTED;
    pearlier->next_sibling = later;
}

lay_id lay_last_child(const lay_context *ctx, lay_id parent)
{
    lay_item_t *pparent = lay_get_item(ctx, parent);
    lay_id child = pparent->first_child;
    if (child == LAY_INVALID_ID) return LAY_INVALID_ID;
    lay_item_t *pchild = lay_get_item(ctx, child);
    lay_id result = child;
    for (;;) {
        lay_id next = pchild->next_sibling;
        if (next == LAY_INVALID_ID) break;
        result = next;
        pchild = lay_get_item(ctx, next);
    }
    return result;
}
void lay_append(lay_context *ctx, lay_id earlier, lay_id later)
{
    LAY_ASSERT(later != 0); // Must not be root item
    LAY_ASSERT(earlier != later); // Must not be same item id
    lay_item_t *LAY_RESTRICT pearlier = lay_get_item(ctx, earlier);
    lay_item_t *LAY_RESTRICT plater = lay_get_item(ctx, later);
    lay_append_by_ptr(pearlier, later, plater);
}
void lay_insert(lay_context *ctx, lay_id parent, lay_id child)
{
    LAY_ASSERT(child != 0); // Must not be root item
    LAY_ASSERT(parent != child); // Must not be same item id
    lay_item_t *LAY_RESTRICT pparent = lay_get_item(ctx, parent);
    lay_item_t *LAY_RESTRICT pchild = lay_get_item(ctx, child);
    LAY_ASSERT(!(pchild->flags & LAY_ITEM_INSERTED));
    // Parent has no existing children, make inserted item the first child.
    if (pparent->first_child == LAY_INVALID_ID) {
        pparent->first_child = child;
        pchild->flags |= LAY_ITEM_INSERTED;
    // Parent has existing items, iterate to find the last child and append the
    // inserted item after it.
    } else {
        lay_id next = pparent->first_child;
        lay_item_t *LAY_RESTRICT pnext = lay_get_item(ctx, next);
        for (;;) {
            next = pnext->next_sibling;
            if (next == LAY_INVALID_ID) break;
            pnext = lay_get_item(ctx, next);
        }
        lay_append_by_ptr(pnext, child, pchild);
    }
}
void lay_push(lay_context *ctx, lay_id parent, lay_id new_child)
{
    LAY_ASSERT(new_child != 0); // Must not be root item
    LAY_ASSERT(parent != new_child); // Must not be same item id
    lay_item_t *LAY_RESTRICT pparent = lay_get_item(ctx, parent);
    lay_id old_child = pparent->first_child;
    lay_item_t *LAY_RESTRICT pchild = lay_get_item(ctx, new_child);
    LAY_ASSERT(!(pchild->flags & LAY_ITEM_INSERTED));
    pparent->first_child = new_child;
    pchild->flags |= LAY_ITEM_INSERTED;
    pchild->next_sibling = old_child;
}
lay_vec2 lay_get_size(lay_context *ctx, lay_id item)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    return pitem->size;
}

void lay_get_size_xy(
        lay_context *ctx, lay_id item,
        lay_scalar *x, lay_scalar *y)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    lay_vec2 size = pitem->size;
    *x = size[0];
    *y = size[1];
}
void lay_set_size(lay_context *ctx, lay_id item, lay_vec2 size)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    pitem->size = size;
    uint32_t flags = pitem->flags;
    if (size[0] == 0)
        flags &= ~(uint32_t)LAY_ITEM_HFIXED;
    else
        flags |= LAY_ITEM_HFIXED;
    if (size[1] == 0)
        flags &= ~(uint32_t)LAY_ITEM_VFIXED;
    else
        flags |= LAY_ITEM_VFIXED;
    pitem->flags = flags;
}

void lay_set_size_xy(
        lay_context *ctx, lay_id item,
        lay_scalar width, lay_scalar height)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    pitem->size[0] = width;
    pitem->size[1] = height;
    // Kinda redundant, whatever
    uint32_t flags = pitem->flags;
    if (width == 0)
        flags &= ~(uint32_t)LAY_ITEM_HFIXED;
    else
        flags |= LAY_ITEM_HFIXED;
    if (height == 0)
        flags &= ~(uint32_t)LAY_ITEM_VFIXED;
    else
        flags |= LAY_ITEM_VFIXED;
    pitem->flags = flags;
}
void lay_set_behave(lay_context *ctx, lay_id item, uint32_t flags)
{
    LAY_ASSERT((flags & LAY_ITEM_LAYOUT_MASK) == flags);
    lay_item_t *pitem = lay_get_item(ctx, item);
    pitem->flags = (pitem->flags & ~(uint32_t)LAY_ITEM_LAYOUT_MASK) | flags;
}

void lay_set_contain(lay_context *ctx, lay_id item, uint32_t flags)
{
    LAY_ASSERT((flags & LAY_ITEM_BOX_MASK) == flags);
    lay_item_t *pitem = lay_get_item(ctx, item);
    pitem->flags = (pitem->flags & ~(uint32_t)LAY_ITEM_BOX_MASK) | flags;
}

void lay_set_margins(lay_context *ctx, lay_id item, lay_vec4 ltrb)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    pitem->margins = ltrb;
}
void lay_set_margins_ltrb(
        lay_context *ctx, lay_id item,
        lay_scalar l, lay_scalar t, lay_scalar r, lay_scalar b)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    // Alternative, uses stack and addressed writes
    //pitem->margins = lay_vec4_xyzw(l, t, r, b);
    // Alternative, uses rax and left-shift
    //pitem->margins = (lay_vec4){l, t, r, b};
    // Fewest instructions, but uses more addressed writes?
    pitem->margins[0] = l;
    pitem->margins[1] = t;
    pitem->margins[2] = r;
    pitem->margins[3] = b;
}

lay_vec4 lay_get_margins(lay_context *ctx, lay_id item)
{ return lay_get_item(ctx, item)->margins; }

void lay_get_margins_ltrb(
        lay_context *ctx, lay_id item,
        lay_scalar *l, lay_scalar *t, lay_scalar *r, lay_scalar *b)
{
    lay_item_t *pitem = lay_get_item(ctx, item);
    lay_vec4 margins = pitem->margins;
    *l = margins[0];
    *t = margins[1];
    *r = margins[2];
    *b = margins[3];
}
// TODO restrict item ptrs correctly
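// A note on the dim/wdim convention used by the procedures below: `dim`
// selects the axis -- 0 for x/width, 1 for y/height. Rect components are
// [x, y, width, height], so rect[dim] is the position along the axis and
// rect[2 + dim] is the extent. Margins are stored as [left, top, right,
// bottom], so margins[dim] is the leading margin and margins[wdim]
// (wdim = dim + 2) is the trailing margin on that axis. For example, with
// dim == 1: rect[1] is y, rect[3] is height, margins[1] is the top margin,
// and margins[3] is the bottom margin.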
\r
static LAY_FORCE_INLINE
lay_scalar lay_calc_overlayed_size(
        lay_context *ctx, lay_id item, int dim)
{
    const int wdim = dim + 2;
    lay_item_t *LAY_RESTRICT pitem = lay_get_item(ctx, item);
    lay_scalar need_size = 0;
    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        lay_item_t *pchild = lay_get_item(ctx, child);
        lay_vec4 rect = ctx->rects[child];
        // width = start margin + calculated width + end margin
        lay_scalar child_size = rect[dim] + rect[2 + dim] + pchild->margins[wdim];
        need_size = lay_scalar_max(need_size, child_size);
        child = pchild->next_sibling;
    }
    return need_size;
}
static LAY_FORCE_INLINE
lay_scalar lay_calc_stacked_size(
        lay_context *ctx, lay_id item, int dim)
{
    const int wdim = dim + 2;
    lay_item_t *LAY_RESTRICT pitem = lay_get_item(ctx, item);
    lay_scalar need_size = 0;
    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        lay_item_t *pchild = lay_get_item(ctx, child);
        lay_vec4 rect = ctx->rects[child];
        need_size += rect[dim] + rect[2 + dim] + pchild->margins[wdim];
        child = pchild->next_sibling;
    }
    return need_size;
}
static LAY_FORCE_INLINE
lay_scalar lay_calc_wrapped_overlayed_size(
        lay_context *ctx, lay_id item, int dim)
{
    const int wdim = dim + 2;
    lay_item_t *LAY_RESTRICT pitem = lay_get_item(ctx, item);
    lay_scalar need_size = 0;
    lay_scalar need_size2 = 0;
    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        lay_item_t *pchild = lay_get_item(ctx, child);
        lay_vec4 rect = ctx->rects[child];
        if (pchild->flags & LAY_BREAK) {
            need_size2 += need_size;
            need_size = 0;
        }
        lay_scalar child_size = rect[dim] + rect[2 + dim] + pchild->margins[wdim];
        need_size = lay_scalar_max(need_size, child_size);
        child = pchild->next_sibling;
    }
    return need_size2 + need_size;
}
// Equivalent to uiComputeWrappedStackedSize
static LAY_FORCE_INLINE
lay_scalar lay_calc_wrapped_stacked_size(
        lay_context *ctx, lay_id item, int dim)
{
    const int wdim = dim + 2;
    lay_item_t *LAY_RESTRICT pitem = lay_get_item(ctx, item);
    lay_scalar need_size = 0;
    lay_scalar need_size2 = 0;
    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        lay_item_t *pchild = lay_get_item(ctx, child);
        lay_vec4 rect = ctx->rects[child];
        if (pchild->flags & LAY_BREAK) {
            need_size2 = lay_scalar_max(need_size2, need_size);
            need_size = 0;
        }
        need_size += rect[dim] + rect[2 + dim] + pchild->margins[wdim];
        child = pchild->next_sibling;
    }
    return lay_scalar_max(need_size2, need_size);
}
static void lay_calc_size(lay_context *ctx, lay_id item, int dim)
{
    lay_item_t *pitem = lay_get_item(ctx, item);

    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        // NOTE: this is recursive and will run out of stack space if items are
        // nested too deeply.
        lay_calc_size(ctx, child, dim);
        lay_item_t *pchild = lay_get_item(ctx, child);
        child = pchild->next_sibling;
    }

    // Set the mutable rect output data to the starting input data
    ctx->rects[item][dim] = pitem->margins[dim];

    // If we have an explicit input size, just set our output size (which other
    // calc_size and arrange procedures will use) to it.
    if (pitem->size[dim] != 0) {
        ctx->rects[item][2 + dim] = pitem->size[dim];
        return;
    }

    // Calculate our size based on children items. Note that we've already
    // called lay_calc_size on our children at this point.
    lay_scalar cal_size;
    switch (pitem->flags & LAY_ITEM_BOX_MODEL_MASK) {
    case LAY_COLUMN|LAY_WRAP:
        if (dim) // direction
            cal_size = lay_calc_stacked_size(ctx, item, 1);
        else
            cal_size = lay_calc_overlayed_size(ctx, item, 0);
        break;
    case LAY_ROW|LAY_WRAP:
        if (!dim) // direction
            cal_size = lay_calc_wrapped_stacked_size(ctx, item, 0);
        else
            cal_size = lay_calc_wrapped_overlayed_size(ctx, item, 1);
        break;
    case LAY_COLUMN:
    case LAY_ROW:
        if ((pitem->flags & 1) == (uint32_t)dim) // direction
            cal_size = lay_calc_stacked_size(ctx, item, dim);
        else
            cal_size = lay_calc_overlayed_size(ctx, item, dim);
        break;
    default:
        cal_size = lay_calc_overlayed_size(ctx, item, dim);
        break;
    }

    // Set our output data size. Will be used by parent calc_size procedures,
    // and by arrange procedures.
    ctx->rects[item][2 + dim] = cal_size;
}
static LAY_FORCE_INLINE
void lay_arrange_stacked(
        lay_context *ctx, lay_id item, int dim, bool wrap)
{
    const int wdim = dim + 2;
    lay_item_t *pitem = lay_get_item(ctx, item);

    const uint32_t item_flags = pitem->flags;
    lay_vec4 rect = ctx->rects[item];
    lay_scalar space = rect[2 + dim];

    float max_x2 = (float)(rect[dim] + space);

    lay_id start_child = pitem->first_child;
    while (start_child != LAY_INVALID_ID) {
        lay_scalar used = 0;
        uint32_t count = 0; // count of fillers
        uint32_t squeezed_count = 0; // count of squeezable elements
        uint32_t total = 0;
        bool hardbreak = false;
        // first pass: count items that need to be expanded,
        // and the space that is used
        lay_id child = start_child;
        lay_id end_child = LAY_INVALID_ID;
        while (child != LAY_INVALID_ID) {
            lay_item_t *pchild = lay_get_item(ctx, child);
            const uint32_t child_flags = pchild->flags;
            const uint32_t flags = (child_flags & LAY_ITEM_LAYOUT_MASK) >> dim;
            const uint32_t fflags = (child_flags & LAY_ITEM_FIXED_MASK) >> dim;
            const lay_vec4 child_margins = pchild->margins;
            lay_vec4 child_rect = ctx->rects[child];
            lay_scalar extend = used;
            if ((flags & LAY_HFILL) == LAY_HFILL) {
                ++count;
                extend += child_rect[dim] + child_margins[wdim];
            } else {
                if ((fflags & LAY_ITEM_HFIXED) != LAY_ITEM_HFIXED)
                    ++squeezed_count;
                extend += child_rect[dim] + child_rect[2 + dim] + child_margins[wdim];
            }
            // wrap on end of line or manual flag
            if (wrap && (
                    total && ((extend > space) ||
                        (child_flags & LAY_BREAK)))) {
                end_child = child;
                hardbreak = (child_flags & LAY_BREAK) == LAY_BREAK;
                // add marker for subsequent queries
                pchild->flags = child_flags | LAY_BREAK;
                break;
            } else {
                used = extend;
                child = pchild->next_sibling;
            }
            ++total;
        }

        lay_scalar extra_space = space - used;
        float filler = 0.0f;
        float spacer = 0.0f;
        float extra_margin = 0.0f;
        float eater = 0.0f;

        if (extra_space > 0) {
            if (count > 0)
                filler = (float)extra_space / (float)count;
            else if (total > 0) {
                switch (item_flags & LAY_JUSTIFY) {
                case LAY_JUSTIFY:
                    // justify when not wrapping or not in last line,
                    // or not manually breaking
                    if (!wrap || ((end_child != LAY_INVALID_ID) && !hardbreak))
                        spacer = (float)extra_space / (float)(total - 1);
                    break;
                case LAY_START:
                    break;
                case LAY_END:
                    extra_margin = extra_space;
                    break;
                default:
                    extra_margin = extra_space / 2.0f;
                    break;
                }
            }
        }
#ifdef LAY_FLOAT
        // In floating point, it's possible to end up with some small negative
        // value for extra_space, while also having a 0.0 squeezed_count. This
        // would cause divide by zero. Instead, we'll check to see if
        // squeezed_count is > 0. I believe this produces the same results as
        // the original oui int-only code. However, I don't have any tests for
        // it, so I'll leave it if-def'd for now.
        else if (!wrap && (squeezed_count > 0))
#else
        // This is the original oui code
        else if (!wrap && (extra_space < 0))
#endif
            eater = (float)extra_space / (float)squeezed_count;
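        // Worked example (added for intuition, not from the original comments):
        // with space = 100, three children using a total of used = 70, and
        // count = 2 children flagged to fill, extra_space = 30 and each filler
        // child is grown by 15. If instead count = 0 and total = 3, then with
        // LAY_JUSTIFY the spacer between neighbors is 30 / 2 = 15, with LAY_END
        // the whole 30 becomes the leading extra_margin, and with centering
        // (the default case) the extra_margin is 15.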
\r
        // distribute width among items
        float x = (float)rect[dim];
        float x1;
        // second pass: distribute and rescale
        child = start_child;
        while (child != end_child) {
            lay_scalar ix0, ix1;
            lay_item_t *pchild = lay_get_item(ctx, child);
            const uint32_t child_flags = pchild->flags;
            const uint32_t flags = (child_flags & LAY_ITEM_LAYOUT_MASK) >> dim;
            const uint32_t fflags = (child_flags & LAY_ITEM_FIXED_MASK) >> dim;
            const lay_vec4 child_margins = pchild->margins;
            lay_vec4 child_rect = ctx->rects[child];

            x += (float)child_rect[dim] + extra_margin;
            if ((flags & LAY_HFILL) == LAY_HFILL) // grow
                x1 = x + filler;
            else if ((fflags & LAY_ITEM_HFIXED) == LAY_ITEM_HFIXED)
                x1 = x + (float)child_rect[2 + dim];
            else // squeeze
                x1 = x + lay_float_max(0.0f, (float)child_rect[2 + dim] + eater);

            ix0 = (lay_scalar)x;
            if (wrap)
                ix1 = (lay_scalar)lay_float_min(max_x2 - (float)child_margins[wdim], x1);
            else
                ix1 = (lay_scalar)x1;
            child_rect[dim] = ix0; // pos
            child_rect[dim + 2] = ix1 - ix0; // size
            ctx->rects[child] = child_rect;
            x = x1 + (float)child_margins[wdim];
            child = pchild->next_sibling;
            extra_margin = spacer;
        }

        start_child = end_child;
    }
}
static LAY_FORCE_INLINE
void lay_arrange_overlay(lay_context *ctx, lay_id item, int dim)
{
    const int wdim = dim + 2;
    lay_item_t *pitem = lay_get_item(ctx, item);
    const lay_vec4 rect = ctx->rects[item];
    const lay_scalar offset = rect[dim];
    const lay_scalar space = rect[2 + dim];

    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        lay_item_t *pchild = lay_get_item(ctx, child);
        const uint32_t b_flags = (pchild->flags & LAY_ITEM_LAYOUT_MASK) >> dim;
        const lay_vec4 child_margins = pchild->margins;
        lay_vec4 child_rect = ctx->rects[child];

        switch (b_flags & LAY_HFILL) {
        case LAY_HCENTER:
            child_rect[dim] += (space - child_rect[2 + dim]) / 2 - child_margins[wdim];
            break;
        case LAY_RIGHT:
            child_rect[dim] += space - child_rect[2 + dim] - child_margins[dim] - child_margins[wdim];
            break;
        case LAY_HFILL:
            child_rect[2 + dim] = lay_scalar_max(0, space - child_rect[dim] - child_margins[wdim]);
            break;
        default:
            break;
        }

        child_rect[dim] += offset;
        ctx->rects[child] = child_rect;
        child = pchild->next_sibling;
    }
}
static LAY_FORCE_INLINE
void lay_arrange_overlay_squeezed_range(
        lay_context *ctx, int dim,
        lay_id start_item, lay_id end_item,
        lay_scalar offset, lay_scalar space)
{
    int wdim = dim + 2;
    lay_id item = start_item;
    while (item != end_item) {
        lay_item_t *pitem = lay_get_item(ctx, item);
        const uint32_t b_flags = (pitem->flags & LAY_ITEM_LAYOUT_MASK) >> dim;
        const lay_vec4 margins = pitem->margins;
        lay_vec4 rect = ctx->rects[item];
        lay_scalar min_size = lay_scalar_max(0, space - rect[dim] - margins[wdim]);
        switch (b_flags & LAY_HFILL) {
        case LAY_HCENTER:
            rect[2 + dim] = lay_scalar_min(rect[2 + dim], min_size);
            rect[dim] += (space - rect[2 + dim]) / 2 - margins[wdim];
            break;
        case LAY_RIGHT:
            rect[2 + dim] = lay_scalar_min(rect[2 + dim], min_size);
            rect[dim] = space - rect[2 + dim] - margins[wdim];
            break;
        case LAY_HFILL:
            rect[2 + dim] = min_size;
            break;
        default:
            rect[2 + dim] = lay_scalar_min(rect[2 + dim], min_size);
            break;
        }
        rect[dim] += offset;
        ctx->rects[item] = rect;
        item = pitem->next_sibling;
    }
}
static LAY_FORCE_INLINE
lay_scalar lay_arrange_wrapped_overlay_squeezed(
        lay_context *ctx, lay_id item, int dim)
{
    const int wdim = dim + 2;
    lay_item_t *pitem = lay_get_item(ctx, item);
    lay_scalar offset = ctx->rects[item][dim];
    lay_scalar need_size = 0;
    lay_id child = pitem->first_child;
    lay_id start_child = child;
    while (child != LAY_INVALID_ID) {
        lay_item_t *pchild = lay_get_item(ctx, child);
        if (pchild->flags & LAY_BREAK) {
            lay_arrange_overlay_squeezed_range(ctx, dim, start_child, child, offset, need_size);
            offset += need_size;
            start_child = child;
            need_size = 0;
        }
        const lay_vec4 rect = ctx->rects[child];
        lay_scalar child_size = rect[dim] + rect[2 + dim] + pchild->margins[wdim];
        need_size = lay_scalar_max(need_size, child_size);
        child = pchild->next_sibling;
    }
    lay_arrange_overlay_squeezed_range(ctx, dim, start_child, LAY_INVALID_ID, offset, need_size);
    offset += need_size;
    return offset;
}
static void lay_arrange(lay_context *ctx, lay_id item, int dim)
{
    lay_item_t *pitem = lay_get_item(ctx, item);

    const uint32_t flags = pitem->flags;
    switch (flags & LAY_ITEM_BOX_MODEL_MASK) {
    case LAY_COLUMN | LAY_WRAP:
        if (dim) {
            lay_arrange_stacked(ctx, item, 1, true);
            lay_scalar offset = lay_arrange_wrapped_overlay_squeezed(ctx, item, 0);
            ctx->rects[item][2 + 0] = offset - ctx->rects[item][0];
        }
        break;
    case LAY_ROW | LAY_WRAP:
        if (!dim)
            lay_arrange_stacked(ctx, item, 0, true);
        else
            // discard return value
            lay_arrange_wrapped_overlay_squeezed(ctx, item, 1);
        break;
    case LAY_COLUMN:
    case LAY_ROW:
        if ((flags & 1) == (uint32_t)dim) {
            lay_arrange_stacked(ctx, item, dim, false);
        } else {
            const lay_vec4 rect = ctx->rects[item];
            lay_arrange_overlay_squeezed_range(
                ctx, dim, pitem->first_child, LAY_INVALID_ID,
                rect[dim], rect[2 + dim]);
        }
        break;
    default:
        lay_arrange_overlay(ctx, item, dim);
        break;
    }

    lay_id child = pitem->first_child;
    while (child != LAY_INVALID_ID) {
        // NOTE: this is recursive and will run out of stack space if items are
        // nested too deeply.
        lay_arrange(ctx, child, dim);
        lay_item_t *pchild = lay_get_item(ctx, child);
        child = pchild->next_sibling;
    }
}

#endif // LAY_IMPLEMENTATION