diff --git a/src/browser/webapi/Element.zig b/src/browser/webapi/Element.zig
index f38d21a0..4c9287d5 100644
--- a/src/browser/webapi/Element.zig
+++ b/src/browser/webapi/Element.zig
@@ -765,8 +765,8 @@ fn getElementDimensions(self: *Element, page: *Page) !struct { width: f64, heigh
     const tag = self.getTag();

     // Root containers get large default size to contain descendant positions.
-    // With calculateDocumentPosition using 10x multipliers per level, deep trees
-    // can position elements at y=millions, so we need a large container height.
+    // With calculateDocumentPosition using linear depth scaling (100px per level),
+    // even very deep trees (100 levels) stay within 10,000px.
     // 100M pixels is plausible for very long documents.
     if (tag == .html or tag == .body) {
         if (width == 5.0) width = 1920.0;
@@ -843,51 +843,51 @@ pub fn getClientRects(self: *Element, page: *Page) ![]DOMRect {
     return ptr[0..1];
 }

-// Calculates a pseudo-position in the document using an efficient heuristic.
+// Calculates a pseudo-position in the document using linear depth scaling.
 //
-// Instead of walking the entire DOM tree (which would be O(total_nodes)), this
-// function walks UP the tree counting previous siblings at each level. Each level
-// uses exponential weighting (10x per depth level) to preserve document order.
-//
-// This gives O(depth * avg_siblings) complexity while maintaining relative positioning
-// that's useful for scraping and understanding element flow in the document.
+// This approach uses a fixed pixel offset per depth level (100px) plus sibling
+// position within that level. This keeps positions reasonable even for very deep
+// DOM trees (e.g., Amazon product pages can be 36+ levels deep).
 //
 // Example:
-// <body>          → position 0
-//