├── .gitignore
├── COPYING
├── Makefile
├── README.md
├── bblocks.go
├── blockity.go
├── blockmapgen.go
├── cmdparser.go
├── config.go
├── convexity.go
├── diffgeometry.go
├── diffgeometry_test.go
├── filecontrol.go
├── fixedwriter.go
├── gamespec.go
├── gen
└── codegen.go
├── go.mod
├── intgeometry.go
├── level.go
├── lumpwrite.go
├── main_test.go
├── multiformat_tree.go
├── multitree_plain.go
├── mylogger.go
├── node_intro.go
├── node_outro.go
├── node_rearrange.go
├── node_vmap.go
├── nodegen.go
├── nodegen_test.go
├── other
├── changelog.txt
├── rejectfixtest_20220621.wad
├── rmb-manual.html
├── vigilantbsp.cfg
└── vigilantbsp.txt
├── picknode.go
├── picknode_test.go
├── reject.go
├── rejectDFS.go
├── rejectFAST.go
├── rejectLOS.go
├── rejectRMB.go
├── rejectSYMM.go
├── rejectSymmDefs.go
├── rejectdefs.go
├── ring.go
├── rmbparse.go
├── rmbparse_test.go
├── rmbunit.go
├── segalias.go
├── selfref.go
├── solidblocks.go
├── sorthelpers.go
├── stknode.go
├── superblocks.go
├── superblocks_test.go
├── trollgen.go
├── udmf_level.go
├── universal_interfaces.go
├── vigilantbsp.go
├── wad.go
├── writebus.go
├── zdefs.go
├── zenscore.go
├── zensideness.go
├── znodegen.go
└── zstream.go
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything
2 | *
3 |
4 | # But not these files...
5 | !/.gitignore
6 |
7 | !*.go
8 | !go.sum
9 | !go.mod
10 |
11 | !README.md
12 | !LICENSE
13 | !COPYING
14 | !other/*.*
15 | !Makefile
16 |
17 | # ...even if they are in subdirectories
18 | !*/
19 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | PLATFORMS := linux/amd64// windows/amd64/.exe/ /linux/386/32/ /windows/386/32.exe/
2 |
3 | temp = $(subst /, ,$@)
4 | os = $(word 1, $(temp))
5 | arch = $(word 2, $(temp))
6 | suffix = $(word 3, $(temp))
7 |
8 | release: $(PLATFORMS)
9 |
10 | clean:
11 | go clean
12 | rm -f vigilantbsp
13 | rm -f vigilantbsp.exe
14 | rm -f vigilantbsp32
15 | rm -f vigilantbsp32.exe
16 | rm -f gen/gen
17 | rm -f gen/gen.exe
18 | rm -f vigilantbsp_avx2
19 | rm -f vigilantbsp_avx2.exe
20 |
21 | generate:
22 | go generate
23 |
24 | vet:
25 | go vet
26 |
27 | $(PLATFORMS): generate vet
28 | CGO_ENABLED=0 GOOS=$(os) GOARCH=$(arch) go build -ldflags="-s -w -buildid=" -trimpath -o 'vigilantbsp$(suffix)'
29 |
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # VigilantBSP
2 |
3 | ## Author
4 | (c) 2022-2025 VigilantDoomer
5 |
6 | ## Description
7 |
8 | VigilantBSP is a multi-threaded polyobject-aware
9 | node / blockmap / reject builder (also called "nodebuilder" for
10 | brevity) for Doom / Doom 2 / Heretic / Hexen.
11 |
12 | It is being developed for Windows and GNU/Linux operating systems on
13 | both x86 (32-bit) and x86-64 (64-bit) platforms.
14 |
15 | ## Project goals
16 |
17 | VigilantBSP intends to fulfill several ambitious goals:
18 |
19 | 1. Fast build times for maps without sacrificing support for
20 | special effects like self-referencing sectors. Use parallelism
21 | where possible to achieve faster builds on multi-core computers.
22 | 2. Support for Heretic and Hexen games being as robust as support
23 | for Doom / Doom 2.
24 | 3. Features that help get maps within vanilla limits are researched
25 | and implemented, without getting in the way of support for maps
26 | targeting advanced engines (Boom, etc.)
27 | 4. Make self-referencing sector effects easy to use for mappers.
28 | Default settings should already support self-referencing sector
29 | effects very well. Monsters in self-referencing sectors shall
30 | be able to see, hear and attack the player.
31 | 5. Special effects provided by other nodebuilders (horizon effect
32 | and "precious" linedefs from BSP v5.2, faster scrollers from
33 | ZokumBSP) are also implemented.
34 |
35 | In future, support for GL nodes / UDMF format is indeed planned,
36 | just not there yet.
37 |
38 | Partial support for RMB effects is already implemented since
39 | version 0.74a. Eventually, most RMB effects are going to be
40 | supported.
41 |
42 | Since 0.75a, VigilantBSP can output non-GL nodes in Zdoom
43 | extended and compressed nodes format besides vanilla and
44 | DeeP format.
45 |
46 | ## Debt of gratitude
47 |
48 | VigilantBSP is indebted to other free/libre software nodebuilders
49 | for their ideas and implementations, and intends to give back its
50 | own ideas as well. This is achieved by using a free software license.
51 |
52 | The list of nodebuilders and people whose work make it all possible
53 | includes, but is not limited to:
54 | DEU by Raphael Quinet,
55 | BSP v5.2 by Colin Reed, Lee Killough and other contributors to BSP (program),
56 | ZDBSP by Marisa Heit,
57 | Zennode by Marc Rousseau,
58 | Zokumbsp by Kim Roar Foldøy Hauge, et al
59 | AJ-BSP by Andrew Apted, et al
60 |
61 | ## License
62 | VigilantBSP is free software: you can redistribute it
63 | and/or modify it under the terms of GNU General Public License
64 | as published by the Free Software Foundation, either version 2 of
65 | the License, or (at your option) any later version.
66 |
67 | VigilantBSP is distributed in the hope that it will be useful,
68 | but WITHOUT ANY WARRANTY; without even the implied warranty of
69 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70 | GNU General Public License for more details.
71 |
72 | You should have received a copy of the GNU General Public License
73 | along with VigilantBSP (see file COPYING.txt). If not, see
74 | <https://www.gnu.org/licenses/>.
75 |
76 | ## Usage
77 |
78 | ```
79 | Usage: vigilantbsp {-options} filename.wad {-o output.wad}
80 |
81 | See the documentation file README.txt for more information.
82 |
83 | ```
84 |
85 | ## Notice about the pulled down v0.91 release
86 |
87 | 2025 Feb, 05
88 | I had to pull down the v0.91 release because I've found a serious bug in it that will prevent building levels when they don't have NODES, REJECT, or BLOCKMAP lumps yet. It was introduced precisely in v0.91, and I will upload a v0.92 soon, after the necessary round of testing is done. The impact is severe because it blocks building from an editor as well (insofar as editors make a copy of the wad with only source lumps for the specific level, and none of the other lumps that they expect the nodebuilder to build). I am sorry; I was not feeling well at the end of January, and should not have pushed for the v0.91 release in such physical condition as I had.
89 |
90 | ## Building from source
91 |
92 | Use tagged releases of VigilantBSP to build from source,
93 | as main branch may break things from time to time.
94 |
95 | First you need to clone VigilantBSP into some directory:
96 | git clone https://github.com/vigilantdoomer/vigilantbsp.git
97 | creates vigilantbsp directory inside directory where you
98 | ran that command. Cd into that directory.
99 |
100 | To see tagged releases, use:
101 | git tag --list
102 |
103 | To switch to say, v0.91 tagged release, use:
104 | git checkout v0.91
105 |
106 | To switch back to main branch and track yet unreleased changes
107 | to VigilantBSP:
108 | git checkout main
109 | git pull
110 |
111 | You need to also install the programming environment (obviously)
112 | for Go (also frequently called "golang" to make web search work).
113 | On Debian GNU/Linux, this is done via the following command
114 | (run as root):
115 | apt install golang-go
116 |
117 | Running "go build" in root directory of VigilantBSP then should
118 | just work.
119 |
120 | If you modify source code, you'll need to use "go generate" first,
121 | followed by "go build", as some parts of code are generated from
122 | others. Such autogenerated *.go files are distinguished by
123 | a special comment:
124 | "// Code generated from other source files. DO NOT EDIT."
125 | as the very top line.
126 | If you are not modifying VigilantBSP, the "go generate" step is
127 | not required.
128 |
129 | Using Makefile (and thus having make installed) is likewise not
130 | required, it is used in preparing release and contains instructions
131 | to build stripped VigilantBSP for multiple platforms at once (Go
132 | is this good at cross-compilation, yes).
133 |
134 | VigilantBSP v0.85a is the last version to build with Go 1.15.
135 | All versions that'll succeed it will require Go 1.19 at the minimum,
136 | which is the current version of Go for Debian Bookworm and
137 | the last Go version to support Windows 7. I do not intend
138 | to drop Windows 7 support, so this Go version will remain
139 | supported for a long time.
140 |
141 | It can be built with later versions of Go, you don't have to
142 | downgrade. The change in how loops are compiled that was introduced
143 | Go 1.22, doesn't affect VigilantBSP in so far as its go.mod says
144 | it supports "go 1.19" version, as Go checks that version declaration
145 | when it evaluates whether to keep old behavior. Go cares a lot about
146 | compatibility, lookup "Go 1 compatibility promise" through any web
147 | search engine of your choice.
148 |
149 | Thus if you don't modify version statement in go.mod file, then later
150 | versions of Go should not break your custom build of VigilantBSP.
151 | Such limitation currently would present an issue if you want to
152 | fork VigilantBSP in order to use libraries and features which are
153 | only supported by newer version of Go. In future versions of
154 | VigilantBSP, I will also address loops that would be affected by
155 | this change, and the limitation not to change version requirement
156 | in go.mod will be lifted.
157 |
158 | VigilantBSP v0.91 is the last tagged version that can be built
159 | without external dependencies (third-party libraries not distributed
160 | with Go). Immediately after its release, the work on UDMF parser and
161 | reject builder for UDMF will begin, which will introduce these
162 | dependencies. The effect of that will be that air-gapped setups for
163 | building VigilantBSP will become complicated, requiring the use of
164 | "go mod vendor" from Internet-connected machine.
165 |
166 | ## Website / contact information
167 |
168 | Currently the only way to contact me is on github. You can file
169 | feature requests and bug reports via "Issues" tab of this repository.
170 |
171 | I was away from development in the latter half of 2023 and for (almost) the entire
172 | 2024 basically, but have resumed development of VigilantBSP in year 2025.
173 |
174 | I'll try to setup accounts on Doomworld and Zdoom forums, but I'm not exactly
175 | rushing until I've something new to show (must finish reject builder for UDMF
176 | at least). I don't plan to engage with other Doom communities, definitely not
177 | with those which have abysmal communication standards.
178 |
179 | If I make Doom accounts, I will update this README. If you encountered someone
180 | pretending to be me but their account is not listed here, they're likely not me.
181 |
--------------------------------------------------------------------------------
/blockity.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see <http://www.gnu.org/licenses/>.
17 | package main
18 |
19 | // The name Blockity means "Block iterator blablabla"
20 |
21 | const (
22 | BLOCK_FOLLOW_AXIS = iota
23 | BLOCK_INARRAY
24 | )
25 |
26 | // Note: in regards to replacing BlockityLines.cur field type with non-pointer
27 | // uint16 (by using the fact that 0xFFFF linedef is unlikely to be used in
28 | // proper maps):
29 | // 1. The performance gain for reject, if it exists, is not noticeable with graphs.
30 | // 2. The (possible?) performance gain for reject without graphs is negligible
31 | // 3. 0xFFFF can still be used as valid linedef if not included in blockmap, I
32 | // guess, so you can't really use it as a special value and remain within 0xFFFF
33 | // So, in the end I ditched using blockity in reject code, which saved a lot of
34 | // time indeed.
35 |
36 | // BlockityLines makes it easy to get all lines that are in same block
37 | // as the line specified by SetContext method. Important feature: no line
38 | // is ever returned twice since the last call to SetContext
39 | type BlockityLines struct {
40 | bm *Blockmap
41 | xmin, ymin int
42 | xblocks int
43 | // seenLines - don't return same line twice
44 | seenLines [65536 / 8]uint8 // bool array compressed as bit array (performs better than map)
45 | // cur stands for "cursor"
46 | cur *uint16
47 | curBlock int
48 | curLine int
49 | x1, y1 int
50 | dx, dy int
51 | bx, by, bx2, by2 int
52 | wbeg, wend int
53 | bstep int
54 | mode int
55 | xchange, ychange int
56 | xadd, yadd int
57 | ymove int
58 | done bool
59 | blocks []int // for diagonal lines, we have to store blocks
60 | curInBlocks int // position of cursor within blocks array field above
61 | }
62 |
63 | func GetBlockityLines(bm *Blockmap) *BlockityLines {
64 | return &BlockityLines{
65 | bm: bm,
66 | xmin: int(bm.header.XMin),
67 | ymin: int(bm.header.YMin),
68 | xblocks: int(bm.header.XBlocks),
69 | cur: nil,
70 | done: false,
71 | }
72 | }
73 |
74 | func (it *BlockityLines) ClearSeenLines() {
75 | for i, _ := range it.seenLines {
76 | it.seenLines[i] = 0
77 | }
78 | }
79 |
80 | // This is for rejectLOS subroutines. Expected to precede a loop calling
81 | // SetSubcontextFromRow
82 | func (it *BlockityLines) SetContextForReject() {
83 | it.ClearSeenLines() // start with a clean map
84 | it.bstep = 1
85 | it.mode = BLOCK_FOLLOW_AXIS
86 | }
87 |
88 | // This is for rejectLOS subroutines. Caller must ensure colBeg < colEnd, we
89 | // don't do checks here.
90 | func (it *BlockityLines) SetSubcontextFromRow(row, colBeg, colEnd int) {
91 | // You are supposed to call SetContextForReject() first before series of
92 | // calls to SetSubcontextFromRow
93 | // Horizontal line
94 | it.wbeg = colBeg + row*it.xblocks
95 | it.wend = colEnd + row*it.xblocks
96 | it.done = false
97 | it.cur = nil
98 | }
99 |
100 | // SetContext initializes iterator to follow blocks intersected by specific line
101 | // It computes which blocks the line intersects, and thus in which
102 | // blocks all the lines it could intersect reside. The algorithm is basically
103 | // the same one Marisa Heit used for computing blockmap
104 | func (it *BlockityLines) SetContext(x1, y1, x2, y2 int) {
105 | it.ClearSeenLines() // start with a clean map
106 | if it.blocks == nil {
107 | it.blocks = make([]int, 0)
108 | } else {
109 | it.blocks = it.blocks[:0]
110 | }
111 | it.x1 = x1
112 | it.y1 = y1
113 | it.dx = x2 - x1
114 | it.dy = y2 - y1
115 | it.bx = (x1 - it.xmin) >> BLOCK_BITS
116 | it.by = (y1 - it.ymin) >> BLOCK_BITS
117 | it.bx2 = (x2 - it.xmin) >> BLOCK_BITS
118 | it.by2 = (y2 - it.ymin) >> BLOCK_BITS
119 | it.done = false
120 | it.cur = nil
121 | // pointers to blocklist of the blocks that host starting and
122 | // ending vertices
123 | it.wbeg = it.bx + it.by*it.xblocks
124 | it.wend = it.bx2 + it.by2*it.xblocks
125 |
126 | if it.wbeg == it.wend { // Single block
127 | it.bstep = 1
128 | it.mode = BLOCK_FOLLOW_AXIS
129 | } else if it.by == it.by2 { // Horizontal line
130 | it.bstep = 1
131 | it.mode = BLOCK_FOLLOW_AXIS
132 | if it.bx > it.bx2 {
133 | // swap beginning and end
134 | it.wbeg, it.wend = it.wend, it.wbeg
135 | }
136 | } else if it.bx == it.bx2 { // Vertical line
137 | it.bstep = it.xblocks
138 | it.mode = BLOCK_FOLLOW_AXIS
139 | if it.by > it.by2 {
140 | // swap beginning and end
141 | it.wbeg, it.wend = it.wend, it.wbeg
142 | }
143 | } else { // Diagonal line
144 | // Toughest case, yeah
145 | it.xchange = Sign(it.dx)
146 | it.ychange = Sign(it.dy)
147 | it.ymove = it.ychange * it.xblocks
148 | adx := Abs(it.dx)
149 | ady := Abs(it.dy)
150 | if adx == ady { // 45 degrees
151 | xb := (x1 - it.xmin) & (BLOCK_WIDTH - 1)
152 | yb := (y1 - it.ymin) & (BLOCK_WIDTH - 1)
153 | if it.dx < 0 {
154 | xb = BLOCK_WIDTH - xb
155 | }
156 | if it.dy < 0 {
157 | yb = BLOCK_WIDTH - yb
158 | }
159 | if xb < yb {
160 | adx--
161 | }
162 | }
163 | if adx >= ady { // X major
164 | it.mode = BLOCK_INARRAY
165 | var yadd int
166 | if it.dy < 0 {
167 | yadd = -1
168 | } else {
169 | yadd = BLOCK_WIDTH
170 | }
171 | it.yadd = yadd
172 | // Now there would be a loop in bblocks.go!CreateBlockmap... ah,
173 | // fuck trying implementing Python generators in Go, now we simply
174 | // construct a slice with all the block numbers
175 | it.curInBlocks = 0
176 | it.GetBlocksMajorX(&it.blocks)
177 | } else { // Y major
178 | it.mode = BLOCK_INARRAY
179 | var xadd int
180 | if it.dx < 0 {
181 | xadd = -1
182 | } else {
183 | xadd = BLOCK_WIDTH
184 | }
185 | it.xadd = xadd
186 | // Now there would be a loop in bblocks.go!CreateBlockmap... ah,
187 | // fuck trying implementing Python generators in Go, now we simply
188 | // construct a slice with all the block numbers
189 | it.curInBlocks = 0
190 | it.GetBlocksMajorY(&it.blocks)
191 | }
192 | }
193 | }
194 |
195 | func (it *BlockityLines) GetLine() uint16 {
196 | if it.cur == nil {
197 | Log.Printf("Incorrect use of BlockityLines - you should have called NextLine() first.")
198 | return 0
199 | }
200 | return *it.cur
201 | }
202 |
203 | // nextBlock() is for internal use within the BlockityLines class
204 | func (it *BlockityLines) nextBlock() bool {
205 | switch it.mode {
206 | case BLOCK_FOLLOW_AXIS:
207 | {
208 | if it.cur != nil {
209 | it.wbeg += it.bstep
210 | }
211 | // This skips zero length blocks _IF_ we hit one prior to this
212 | for it.wbeg <= it.wend && len(it.bm.blocklist[it.wbeg]) == 0 {
213 | it.wbeg += it.bstep
214 | }
215 | if it.wbeg > it.wend {
216 | return false
217 | }
218 | it.curBlock = it.wbeg
219 | return len(it.bm.blocklist[it.wbeg]) != 0
220 | }
221 | case BLOCK_INARRAY:
222 | {
223 | if it.cur == nil {
224 | if len(it.blocks) == 0 { // no blocks at all
225 | return false
226 | }
227 | } else {
228 | it.curInBlocks++
229 | }
230 | // This skips zero length blocks _IF_ we hit one prior to this
231 | for it.curInBlocks < len(it.blocks) && len(it.bm.blocklist[it.blocks[it.curInBlocks]]) == 0 {
232 | it.curInBlocks++
233 | }
234 | if it.curInBlocks >= len(it.blocks) {
235 | return false
236 | }
237 | it.wbeg = it.blocks[it.curInBlocks]
238 | it.curBlock = it.wbeg
239 | }
240 | }
241 | return len(it.bm.blocklist[it.wbeg]) != 0
242 | }
243 |
244 | // NextLine returns false when there is nothing left to iterate. After this
245 | // happens, there is no use for iterator anymore until next call to SetContext()
246 | // Note: it automatically skips lines that were already iterated, even if
247 | // from different block, since the last call to SetContext()
248 | func (it *BlockityLines) NextLine() bool {
249 | if it.done { // Not reusable. Iterator works one direction only.
250 | return false
251 | }
252 |
253 | repeat := true
254 | for repeat {
255 | if it.cur == nil {
256 | b := it.nextBlock()
257 | if b {
258 | it.curLine = 0
259 | it.cur = &(it.bm.blocklist[it.curBlock][it.curLine])
260 | } // else it.cur remains nil, we are done
261 | } else {
262 | it.curLine++
263 | if it.curLine >= len(it.bm.blocklist[it.curBlock]) {
264 | b := it.nextBlock()
265 | if b {
266 | it.curLine = 0
267 | it.cur = &(it.bm.blocklist[it.curBlock][it.curLine])
268 | } else {
269 | it.cur = nil
270 | }
271 | } else {
272 | it.cur = &(it.bm.blocklist[it.curBlock][it.curLine])
273 | }
274 | }
275 |
276 | // Skip all lines the user has seen already - repeat this loop until
277 | // an unseen one found or ran out of lines
278 | repeat = it.cur != nil && it.markAndRecall(*it.cur)
279 | }
280 |
281 | it.done = it.cur == nil // done?
282 | return !it.done
283 | }
284 |
285 | // markAndRecall - if line #cur was not marked as seen yet, mark it
286 | // Return true if was already marked (we already gave user this line some time
287 | // before)
288 | func (it *BlockityLines) markAndRecall(cur uint16) bool {
289 | bte := cur >> 3 // #byte = cur / 8
290 | bit := uint8(1 << (cur % 8)) // #bit
291 | retA := it.seenLines[bte] & bit
292 | if retA == bit {
293 | return true
294 | } else {
295 | it.seenLines[bte] = it.seenLines[bte] | bit
296 | return false
297 | }
298 | }
299 |
300 | func (it *BlockityLines) GetBlocksMajorX(cum *[]int) {
301 | 	for {
302 | 		stop := (Scale(it.by<<BLOCK_BITS+it.yadd-(it.y1-it.ymin), it.dx, it.dy) + (it.x1 - it.xmin)) >> BLOCK_BITS // NOTE(review): reconstructed — original line was mangled by tag-stripping ("it.by<> BLOCK_BITS"); confirm against the same traversal in bblocks.go
303 | 		for it.bx != stop {
304 | 			*cum = append(*cum, it.wbeg)
305 | 			it.wbeg += it.xchange
306 | 			it.bx += it.xchange
307 | 		}
308 | 		*cum = append(*cum, it.wbeg)
309 | 		it.wbeg += it.ymove
310 | 		it.by += it.ychange
311 | 		if it.by == it.by2 {
312 | 			break
313 | 		}
314 | 	}
315 | 	for it.wbeg != it.wend {
316 | 		*cum = append(*cum, it.wbeg)
317 | 		it.wbeg += it.xchange
318 | 	}
319 | 	*cum = append(*cum, it.wbeg)
320 | }
321 |
322 | func (it *BlockityLines) GetBlocksMajorY(cum *[]int) {
323 | 	for {
324 | 		stop := (Scale(it.bx<<BLOCK_BITS+it.xadd-(it.x1-it.xmin), it.dy, it.dx) + (it.y1 - it.ymin)) >> BLOCK_BITS // NOTE(review): reconstructed — original line was mangled by tag-stripping ("it.bx<> BLOCK_BITS"); confirm against the same traversal in bblocks.go
325 | 		for it.by != stop {
326 | 			*cum = append(*cum, it.wbeg)
327 | 			it.wbeg += it.ymove
328 | 			it.by += it.ychange
329 | 		}
330 | 		*cum = append(*cum, it.wbeg)
331 | 		it.wbeg += it.xchange
332 | 		it.bx += it.xchange
333 | 		if it.bx == it.bx2 {
334 | 			break
335 | 		}
336 | 	}
337 | 	for it.wbeg != it.wend {
338 | 		*cum = append(*cum, it.wbeg)
339 | 		it.wbeg += it.ymove
340 | 	}
341 | 	*cum = append(*cum, it.wbeg)
342 | }
343 |
--------------------------------------------------------------------------------
/convexity.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2023, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see <http://www.gnu.org/licenses/>.
17 |
18 | // convexity
19 | package main
20 |
21 | // Special partitioning of non-convex nodes comprised of single sector
22 | // Such "leaf" clusters usually are small, allowing to use more expensive
23 | // metrics, while also cutting the clutter supposed to handle multiple sectors
24 | // Another reason this special treatment is made available is because any choice
25 | // of partitioning from here on can't affect visplanes, thus it makes sense to
26 | // do seg split minimization to mitigate possible extra splits made by advanced
27 | // visplanes minimization algo (advanced seems to create more seg splits than
28 | // Killough's vanilla method)
29 | // Also, penalizing diagonal lines is no longer necessary (nor implemented) here
30 | // TODO I attempted to:
31 | // 1. Pick some partitions from arbitrary pair of vertices rather than segs
32 | // 2. Evaluate whether either of sides will be convex and penalize the pick if
33 | // neither are
34 | // - maybe when both sides are convex, the pick should be selected, ignoring
35 | // other parameters. Worth a revisit.
36 | // Both approaches separate and combined failed to make a positive difference,
37 | // while sometimes increasing running time and make the seg count worse not
38 | // better.
39 | // Thus this now stands by true and tried approach of increased seg split cost.
40 | // There are plans for revisiting this, but it would only be justified by
41 | // complete redesign of the picker, no less. It would probably need to do rounds
42 | // of evaluation in depth (to evaluate several consecutive partition choices at
43 | // once), cost function for a single partition pick is as good as it can be it
44 | // seems
45 |
46 | // CreateNodeForSingleSector processes a list of segs all formed from linedefs
47 | // of one sector only; the list is NOT a convex polygon, else we would have a
48 | // subsector already
49 | // This also has a twin in stknode.go
50 | func CreateNodeForSingleSector(w *NodesWork, ts *NodeSeg, bbox *NodeBounds,
51 | super *Superblock) *NodeInProcess {
52 | res := new(NodeInProcess)
53 | var rights *NodeSeg
54 | var lefts *NodeSeg
55 | var rightsSuper *Superblock
56 | var leftsSuper *Superblock
57 | // Divide node in two
58 | w.totals.numNodes++
59 | w.DivideSegsForSingleSector(ts, &rights, &lefts, bbox, super, &rightsSuper,
60 | &leftsSuper, nil)
61 | super = nil // NOTE after DivideSegs return, super may no longer be valid
62 | res.X = int16(w.nodeX)
63 | res.Y = int16(w.nodeY)
64 | res.Dx = int16(w.nodeDx)
65 | res.Dy = int16(w.nodeDy)
66 |
67 | // These will form the left box
68 | leftBox := FindLimits(lefts)
69 | res.Lbox[BB_TOP] = int16(leftBox.Ymax)
70 | res.Lbox[BB_BOTTOM] = int16(leftBox.Ymin)
71 | res.Lbox[BB_LEFT] = int16(leftBox.Xmin)
72 | res.Lbox[BB_RIGHT] = int16(leftBox.Xmax)
73 | if w.isItConvex(lefts) == CONVEX_SUBSECTOR {
74 | res.nextL = nil
75 | res.LChild = w.CreateSSector(lefts) | SSECTOR_DEEP_MASK
76 | w.returnSuperblockToPool(leftsSuper)
77 | } else { // only NONCONVEX_ONESECTOR can be here
78 | res.nextL = CreateNodeForSingleSector(w, lefts, leftBox, leftsSuper)
79 | res.LChild = 0
80 | }
81 |
82 | // These will form the right box
83 | rightBox := FindLimits(rights)
84 | res.Rbox[BB_TOP] = int16(rightBox.Ymax)
85 | res.Rbox[BB_BOTTOM] = int16(rightBox.Ymin)
86 | res.Rbox[BB_LEFT] = int16(rightBox.Xmin)
87 | res.Rbox[BB_RIGHT] = int16(rightBox.Xmax)
88 | if w.isItConvex(rights) == CONVEX_SUBSECTOR {
89 | res.nextR = nil
90 | res.RChild = w.CreateSSector(rights) | SSECTOR_DEEP_MASK
91 | w.returnSuperblockToPool(rightsSuper)
92 | } else { // only NONCONVEX_ONESECTOR can be here
93 | res.nextR = CreateNodeForSingleSector(w, rights, rightBox, rightsSuper)
94 | res.RChild = 0
95 | }
96 |
97 | // CheckNodeBounds(bbox, leftBox, rightBox)
98 |
99 | return res
100 | }
101 |
102 | // DivideSegsForSingleSector is like DivideSegs, but nodepicker is different
103 | func (w *NodesWork) DivideSegsForSingleSector(ts *NodeSeg, rs **NodeSeg,
104 | ls **NodeSeg, bbox *NodeBounds, super *Superblock, rightsSuper,
105 | leftsSuper **Superblock, partsegs *[]PartSeg) {
106 | // Pick best node to use
107 | best := PickNode_SingleSector(w, ts, bbox, super)
108 |
109 | if best == nil { // To programmers: write PickNode so it never happens
110 | panic("Couldn't pick nodeline!")
111 | }
112 |
113 | if partsegs != nil {
114 | w.GetPartSegs(ts, best, partsegs)
115 | }
116 |
117 | c := &IntersectionContext{
118 | psx: best.StartVertex.X,
119 | psy: best.StartVertex.Y,
120 | pex: best.EndVertex.X,
121 | pey: best.EndVertex.Y,
122 | }
123 | c.pdx = c.psx - c.pex
124 | c.pdy = c.psy - c.pey
125 |
126 | // Node line coords
127 | w.SetNodeCoords(best, bbox, c)
128 |
129 | w.DivideSegsActual(ts, rs, ls, bbox, best, c, super, rightsSuper, leftsSuper)
130 | }
131 |
132 | // A modification of PickNode_Traditional, which uses more expensive (but more
133 | // precise) doLinesIntersect for evaluating a partition + split cost is doubled,
134 | // as we are specifically looking to eradicate seg splits, everything else be
135 | // damned.
136 | func PickNode_SingleSector(w *NodesWork, ts *NodeSeg, bbox *NodeBounds,
137 | super *Superblock) *NodeSeg {
138 | best := ts // make sure always got something to return
139 | bestcost := int(INITIAL_BIG_COST) //
140 | cnt := 0
141 | if w.parts != nil { // hard multi-tree support
142 | w.parts = w.parts[:0]
143 | }
144 |
145 | for part := ts; part != nil; part = part.next { // Count once and for all
146 | cnt++
147 | }
148 |
149 | var previousPart *NodeSeg // keep track of previous partition - test only one seg per partner pair
150 |
151 | w.segAliasObj.UnvisitAll() // remove marks from previous PickNode calls
152 | for part := ts; part != nil; part = part.next { // Use each Seg as partition
153 | if part.partner != nil && part.partner == previousPart {
154 | // Partner segs are kept next to each other, they would result in
155 | // same nodeline - so skip second partner
156 | continue
157 | }
158 | if part.alias != 0 {
159 | if w.segAliasObj.MarkAndRecall(part.alias) {
160 | // More advanced way to skip all colinear segs (which would also
161 | // create the exact same nodeline). This check is more
162 | // expensive than partnership check (verified on big maps)
163 | continue
164 | }
165 | } else { // = 0 means alias was not assigned (or was intentionally dropped when segs were split)
166 | // Generate and assign new alias
167 | // Note we don't assign anything to partner HERE, partners are skipped
168 | // as part of big loop but get covered by inner loop anyway
169 | part.alias = w.segAliasObj.Generate()
170 | // Aliases get copied in the inner loop: when a line we are checking
171 | // is colinear to partition, it "inherits" alias from partition
172 | }
173 | previousPart = part // used for check above
174 | cost := 0
175 | tot := 0
176 | diff := cnt
177 |
178 | //progress(); // Something for the user to look at.
179 |
180 | c := &IntersectionContext{
181 | psx: part.StartVertex.X,
182 | psy: part.StartVertex.Y,
183 | pex: part.EndVertex.X,
184 | pey: part.EndVertex.Y,
185 | }
186 | c.pdx = c.psx - c.pex
187 | c.pdy = c.psy - c.pey
188 | leftcnt := 0
189 | rightcnt := 0
190 | prune := false
191 |
192 | for check := ts; check != nil; check = check.next { // Check partition against all Segs
193 | // get state of lines' relation to each other
194 | leftside := false
195 | c.lsx = check.StartVertex.X
196 | c.lsy = check.StartVertex.Y
197 | c.lex = check.EndVertex.X
198 | c.ley = check.EndVertex.Y
199 | val := w.doLinesIntersect(c)
200 | if ((val&2 != 0) && (val&64 != 0)) || ((val&4 != 0) && (val&32 != 0)) {
201 | // splits are now double as bad as before
202 | cost += PICKNODE_FACTOR << 1
203 | if cost >= bestcost {
204 | prune = true // Lee Killough's master speed-up
205 | break
206 | }
207 | tot++
208 | leftcnt++
209 | rightcnt++
210 | } else {
211 | if check == part || check == part.partner {
212 | leftside = check == part.partner
213 | if leftside {
214 | check.alias = part.alias
215 | leftcnt++
216 | } else {
217 | rightcnt++
218 | }
219 | } else {
220 | if val&34 != 0 {
221 | // to the left
222 | leftside = true
223 | leftcnt++
224 | }
225 | if val&68 != 0 {
226 | // to the right
227 | rightcnt++
228 | }
229 | if (val&1 != 0) && (val&16 != 0) {
230 | if check.alias != part.alias && vetAliasTransfer(c) {
231 | check.alias = part.alias
232 | }
233 | if check.pdx*part.pdx+check.pdy*part.pdy < 0 {
234 | leftside = true
235 | leftcnt++
236 | } else {
237 | rightcnt++
238 | }
239 | }
240 | }
241 | }
242 | if leftside {
243 | diff -= 2
244 | }
245 | }
246 | if prune {
247 | continue
248 | }
249 |
250 | if rightcnt == 0 || (rightcnt == 1 && leftcnt == 0) { // in this case nothing can be done to salvage the situation
251 | continue
252 | }
253 |
254 | if leftcnt == 0 {
255 | // penalize a little bit, but not as much as split
256 | cost += PICKNODE_FACTOR
257 | }
258 |
259 | // Take absolute value. diff is being used to obtain the
260 | // min/max values by way of: min(a,b)=(a+b-abs(a-b))/2
261 |
262 | diff -= tot
263 | if diff < 0 {
264 | diff = -diff
265 | }
266 |
267 | cost += diff
268 | if cost < bestcost {
269 | // We have a new better choice
270 | bestcost = cost
271 | best = part // Remember which Seg
272 | if w.parts != nil {
273 | w.parts = w.parts[:0]
274 | w.parts = append(w.parts, part)
275 | }
276 | } else if cost == bestcost && w.parts != nil {
277 | w.parts = append(w.parts, part)
278 | }
279 | }
280 |
281 | return best // All finished, return best Seg
282 | }
283 |
--------------------------------------------------------------------------------
/diffgeometry_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // diffgeometry_test.go
19 | package main
20 |
21 | import (
22 | "fmt"
23 | "testing"
24 | )
25 |
// TestPartitionInBoundary exercises IntPartitionInBoundary for a short
// partition seg against a node bounding box: both returned intersection
// points must be non-nil and must lie inside the box.
func TestPartitionInBoundary(t *testing.T) {
	/* TODO Make sure all these cases always yield 2 intersection points now.
	They already should, but this needs to be in tests
	Couldn't determine point of intersection between partition line and solid internal blockmap bounding box (1, 4). Falling back to legacy way of measuring length.
	part from linedef 4720!0+0: (2781 7702) - (2800 7721) bbox: (1679 8676) - (2904 7328)
	Intersection#0: RIGHT(2904,7825)
	This cannot be! Got so far but now failing (got all the segments of line on the map to see when it goes through the void and when it does not, but failed to determine the edges of line touching the current node's bounding box)
	Couldn't determine point of intersection between partition line and solid internal blockmap bounding box (1, 4). Falling back to legacy way of measuring length.
	part from linedef 6563!0+0: (2614 7506) - (2645 7525) bbox: (1679 8676) - (2904 7328)
	Intersection#0: RIGHT(2904,7683)
	This cannot be! Got so far but now failing (got all the segments of line on the map to see when it goes through the void and when it does not, but failed to determine the edges of line touching the current node's bounding box)
	Couldn't determine point of intersection between partition line and solid internal blockmap bounding box (1, 4). Falling back to legacy way of measuring length.
	part from linedef 2129!0+0: (2455 7446) - (2510 7459) bbox: (1679 8676) - (2904 7328)
	Intersection#0: RIGHT(2904,7552)
	This cannot be! Got so far but now failing (got all the segments of line on the map to see when it goes through the void and when it does not, but failed to determine the edges of line touching the current node's bounding box)
	Couldn't determine point of intersection between partition line and solid internal blockmap bounding box (1, 4). Falling back to legacy way of measuring length.
	part from linedef 4720!0+0: (2781 7702) - (2800 7721) bbox: (1679 8676) - (2904 7328)
	Intersection#0: RIGHT(2904,7825)
	This cannot be! Got so far but now failing (got all the segments of line on the map to see when it goes through the void and when it does not, but failed to determine the edges of line touching the current node's bounding box)
	Couldn't determine point of intersection between partition line and solid internal blockmap bounding box (1, 4). Falling back to legacy way of measuring length.
	part from linedef 6563!0+0: (2614 7506) - (2645 7525) bbox: (1679 8676) - (2904 7328)
	Intersection#0: RIGHT(2904,7683)
	*/

	// An earlier data set, kept for reference:
	//blXMin := 1696
	/*blXMin := 1679
	blXMax := 2904
	blYMin := 7328
	blYMax := 8676
	part := &NodeSeg{
		StartVertex: &NodeVertex{
			X: 2781,
			Y: 7702,
		},
		EndVertex: &NodeVertex{
			X: 2800,
			Y: 7721,
		},
	}*/

	// Current case: a very short partition seg, near the bottom edge of the box.
	blXMin := 5936
	blXMax := 6544
	blYMin := 7476
	blYMax := 7952
	part := &NodeSeg{
		StartVertex: &NodeVertex{
			X: 6388,
			Y: 7488,
		},
		EndVertex: &NodeVertex{
			X: 6384,
			Y: 7476,
		},
	}
	// Derive the cached partition-line fields the production code expects
	// to be populated on a seg (deltas and the perp-dot constant).
	s := part
	s.partner = nil
	s.pex = s.EndVertex.X
	s.psx = s.StartVertex.X
	s.pdx = s.pex - s.psx
	s.pey = s.EndVertex.Y
	s.psy = s.StartVertex.Y
	s.pdy = s.pey - s.psy
	s.perp = s.pdx*s.psy - s.psx*s.pdy
	partSegCoords := part.toIntVertexPairC()
	var c IntersectionContext
	c.psx = s.psx
	c.psy = s.psy
	c.pex = s.pex
	c.pey = s.pey
	c.pdx = c.pex - c.psx
	c.pdy = c.pey - c.psy
	ov1, ov2 := IntPartitionInBoundary(part, &c, blXMax, blYMax, blXMin, blYMin, partSegCoords)
	if ov1 == nil || ov2 == nil {
		// NOTE(review): if ov1 or ov2 is nil here, calling toString() on it
		// may panic unless toString handles a nil receiver — confirm.
		t.Errorf("ov1 = %s, ov2 = %s wanted non-nil both", ov1.toString(), ov2.toString())
	} else {
		if !intTskCheckBounds(ov1.v, blXMax, blYMax, blXMin, blYMin) ||
			!intTskCheckBounds(ov2.v, blXMax, blYMax, blXMin, blYMin) {
			t.Errorf("ov1 or ov2 are out of bounds")
			fmt.Printf("Info: ov1 = %s, ov2 = %s\n", ov1.toString(), ov2.toString())
		} else {
			fmt.Printf("Good ov1 = %s, ov2 = %s\n", ov1.toString(), ov2.toString())
		}
	}
}
111 |
112 | func TestGetIntersectionOrIndicence(t *testing.T) {
113 | // Partition (4128,4880)-(4272,5024) against segment (3872,4912)-(4128,4912)
114 | // => no intersection
115 | c := &IntersectionContext{
116 | psx: 4128,
117 | psy: 4880,
118 | pex: 4272,
119 | pey: 5024,
120 | lsx: 3872,
121 | lsy: 4912,
122 | lex: 4128,
123 | ley: 4912,
124 | }
125 | c.pdx = c.pex - c.psx
126 | c.pdy = c.pey - c.psy
127 | v1, v2 := c.intGetIntersectionOrIndicence()
128 | if v1 != nil || v2 != nil {
129 | t.Errorf("Got %s, expected v1: nil; v2: nil\n", printV1V2(v1, v2))
130 | }
131 |
132 | // Partition (5796,6836)-(5792,6832) against segment (5808,6816)-(6048,7056)
133 | // => no intersection
134 | c = &IntersectionContext{
135 | psx: 5796,
136 | psy: 6836,
137 | pex: 5792,
138 | pey: 6832,
139 | lsx: 5808,
140 | lsy: 6816,
141 | lex: 6048,
142 | ley: 7056,
143 | }
144 | c.pdx = c.pex - c.psx
145 | c.pdy = c.pey - c.psy
146 | v1, v2 = c.intGetIntersectionOrIndicence()
147 | if v1 != nil || v2 != nil {
148 | t.Errorf("Got %s, expected v1: nil; v2: nil\n", printV1V2(v1, v2))
149 | }
150 |
151 | // Partition (5796,6836)-(5792,6832) against segment (5808,6808)-(5808,6816)
152 | // => no intersection
153 | c = &IntersectionContext{
154 | psx: 5796,
155 | psy: 6836,
156 | pex: 5792,
157 | pey: 6832,
158 | lsx: 5808,
159 | lsy: 6808,
160 | lex: 5808,
161 | ley: 6816,
162 | }
163 | c.pdx = c.pex - c.psx
164 | c.pdy = c.pey - c.psy
165 | v1, v2 = c.intGetIntersectionOrIndicence()
166 | if v1 != nil || v2 != nil {
167 | t.Errorf("Got %s, expected v1: nil; v2: nil\n", printV1V2(v1, v2))
168 | }
169 | }
170 |
171 | func printV1V2(v1, v2 *NodeVertex) string {
172 | strV1 := "v1: nil; "
173 | strV2 := "v2: nil"
174 | if v1 != nil {
175 | strV1 = fmt.Sprintf("v1: (%d,%d); ", v1.X, v1.Y)
176 | }
177 | if v2 != nil {
178 | strV2 = fmt.Sprintf("v2: (%d,%d)", v2.X, v2.Y)
179 | }
180 | return strV1 + strV2
181 | }
182 |
183 | func TestCoalesce1(t *testing.T) {
184 | // [,LEFT(-768,-3497),RIGHT(-768,-3497),LEFT(64,-3648),LEFT(64,-3648),RIGHT(2105,-4019),LEFT(2406,-4074),RIGHT(2704,-4128),LEFT(2849,-4154),RIGHT(2880,-4160),LEFT(3328,-4241),RIGHT(3808,-4329)]
185 | // More dropouts! -1 -1 ; [(2105;-4019)-(2406;-4074)]; [(2704;-4128)-(2849;-4154)]; [(2880;-4160)-(3328;-4241)] [RIGHT(-640,-3520)-RIGHT(64,-3648)]
186 | pts := CollinearOrientedVertices(make([]OrientedVertex, 0))
187 | pts = append(pts, OrientedVertex{
188 | v: &FloatVertex{
189 | X: -768,
190 | Y: -3497,
191 | },
192 | left: true,
193 | },
194 | OrientedVertex{
195 | v: &FloatVertex{
196 | X: -768,
197 | Y: -3497,
198 | },
199 | left: false,
200 | },
201 | OrientedVertex{
202 | v: &FloatVertex{
203 | X: 64,
204 | Y: -3648,
205 | },
206 | left: true,
207 | },
208 | OrientedVertex{
209 | v: &FloatVertex{
210 | X: 64,
211 | Y: -3648,
212 | },
213 | left: true,
214 | },
215 | OrientedVertex{
216 | v: &FloatVertex{
217 | X: 2105,
218 | Y: -4019,
219 | },
220 | left: false,
221 | },
222 | OrientedVertex{
223 | v: &FloatVertex{
224 | X: 2406,
225 | Y: -4704,
226 | },
227 | left: true,
228 | },
229 | OrientedVertex{
230 | v: &FloatVertex{
231 | X: 2704,
232 | Y: -4128,
233 | },
234 | left: false,
235 | },
236 | OrientedVertex{
237 | v: &FloatVertex{
238 | X: 2849,
239 | Y: -4154,
240 | },
241 | left: true,
242 | },
243 | OrientedVertex{
244 | v: &FloatVertex{
245 | X: 2880,
246 | Y: -4160,
247 | },
248 | left: false,
249 | },
250 | OrientedVertex{
251 | v: &FloatVertex{
252 | X: 3328,
253 | Y: -4241,
254 | },
255 | left: true,
256 | },
257 | OrientedVertex{
258 | v: &FloatVertex{
259 | X: 3808,
260 | Y: -4329,
261 | },
262 | left: false,
263 | })
264 | pts.Coalesce()
265 | fmt.Println(pts.toString())
266 | if pts[1].left != false || pts[0].v.X != -768 || pts[0].v.Y != -3497 {
267 | t.Errorf("Wrong #1 point\n")
268 | }
269 | }
270 |
271 | func TestCoalesce2(t *testing.T) {
272 | // [RIGHT(-1536.,5120.),LEFT(-1360.,5120.),RIGHT(-1344.,5120.),LEFT(-1168.,5120.),RIGHT(-1168.,5120.),LEFT(-1152.,5120.),RIGHT(-1152.,5120.),LEFT(-784.,5120.),LEFT(-784.,5120.),RIGHT(-784.,5120.),RIGHT(-784.,5120.),LEFT(-736.,5120.),RIGHT(-736.,5120.),LEFT(-528.,5120.),LEFT(-528.,5120.),RIGHT(-384.,5120.),RIGHT(-384.,5120.),LEFT(-128.,5120.),LEFT(-128.,5120.),RIGHT(0.,5120.),RIGHT(0.,5120.),LEFT(256.,5120.),LEFT(256.,5120.),RIGHT(384.,5120.),RIGHT(384.,5120.),LEFT(650.666667,5120.),RIGHT(756.444444,5120.),LEFT(1024.,5120.),RIGHT(1207.157895,5120.),LEFT(1416.421053,5120.),RIGHT(1488.,5120.),RIGHT(1488.,5120.),LEFT(2609.560976,5120.)]
273 | // More dropouts! -1 -1 ; [LEFT(-1536.,5120.),LEFT(-1360.,5120.),RIGHT(-1344.,5120.),RIGHT(-784.,5120.),LEFT(-528.,5120.),RIGHT(-384.,5120.),LEFT(-128.,5120.),RIGHT(0.,5120.),LEFT(256.,5120.),RIGHT(384.,5120.),LEFT(650.666667,5120.),RIGHT(756.444444,5120.),LEFT(1024.,5120.),RIGHT(1207.157895,5120.),LEFT(1416.421053,5120.),RIGHT(1488.,5120.),LEFT(2609.560976,5120.),RIGHT(4800.,5120.)]
274 | dgVertexMap := CreateVertexMap(&NodesWork{}, -1536, -688, 4800, 6272)
275 | pts := CollinearOrientedVertices(make([]OrientedVertex, 0))
276 | pts = append(pts, OrientedVertex{
277 | v: dgVertexMap.SelectVertexClose(-1536, 5120),
278 | left: false,
279 | },
280 | OrientedVertex{
281 | v: dgVertexMap.SelectVertexClose(-1360, 5120),
282 | left: true,
283 | },
284 | OrientedVertex{
285 | v: dgVertexMap.SelectVertexClose(-1344, 5120),
286 | left: false,
287 | },
288 | OrientedVertex{
289 | v: dgVertexMap.SelectVertexClose(-1168, 5120),
290 | left: true,
291 | },
292 | OrientedVertex{
293 | v: dgVertexMap.SelectVertexClose(-1168, 5120),
294 | left: false,
295 | },
296 | OrientedVertex{
297 | v: dgVertexMap.SelectVertexClose(-1152, 5120),
298 | left: true,
299 | },
300 | OrientedVertex{
301 | v: dgVertexMap.SelectVertexClose(-1152, 5120),
302 |
303 | left: false,
304 | },
305 | OrientedVertex{
306 | v: dgVertexMap.SelectVertexClose(-784, 5120),
307 | left: true,
308 | },
309 | OrientedVertex{
310 | v: dgVertexMap.SelectVertexClose(-784, 5120),
311 | left: true,
312 | },
313 | OrientedVertex{
314 | v: dgVertexMap.SelectVertexClose(-784, 5120),
315 | left: false,
316 | },
317 | OrientedVertex{
318 | v: dgVertexMap.SelectVertexClose(-784, 5120),
319 | left: false,
320 | },
321 | OrientedVertex{
322 | v: dgVertexMap.SelectVertexClose(-736, 5120),
323 | left: true,
324 | },
325 | OrientedVertex{
326 | v: dgVertexMap.SelectVertexClose(-736, 5120),
327 | left: false,
328 | },
329 | OrientedVertex{
330 | v: dgVertexMap.SelectVertexClose(-528, 5120),
331 | left: true,
332 | },
333 | OrientedVertex{
334 | v: dgVertexMap.SelectVertexClose(-528, 5120),
335 | left: true,
336 | },
337 | OrientedVertex{
338 | v: dgVertexMap.SelectVertexClose(-384, 5120),
339 | left: false,
340 | },
341 | OrientedVertex{
342 | v: dgVertexMap.SelectVertexClose(-384, 5120),
343 | left: false,
344 | },
345 | OrientedVertex{
346 | v: dgVertexMap.SelectVertexClose(-128, 5120),
347 | left: true,
348 | },
349 | OrientedVertex{
350 | v: dgVertexMap.SelectVertexClose(-128, 5120),
351 | left: true,
352 | },
353 | OrientedVertex{
354 | v: dgVertexMap.SelectVertexClose(0, 5120),
355 | left: false,
356 | },
357 | OrientedVertex{
358 | v: dgVertexMap.SelectVertexClose(0, 5120),
359 | left: false,
360 | }, // TODO ...
361 | OrientedVertex{
362 | v: dgVertexMap.SelectVertexClose(2609.560976, 5120),
363 | left: false,
364 | })
365 |
366 | pts.Coalesce()
367 | fmt.Println(pts.toString())
368 | if pts[3].v.X == -784 {
369 | t.Errorf("Wrong #3 point\n")
370 | }
371 | }
372 |
373 | func TestCoalesce3(t *testing.T) {
374 | // left and right vertex missing in "old" sample
375 | // [RIGHT(909.391304,-3776.),LEFT(2423.226891,-110.924370),RIGHT(2427.446945,-100.707395),LEFT(2816.,840.),RIGHT(2820.956522,852.),LEFT(3262.086957,1920.)]
376 | // More dropouts! -1 -1 ; [(2427.446945,-100.707395)-(2816.,840.)]; [(2820.956522,852.)-(3262.086957,1920.)]
377 | // (3712,-6848) to (-3776,3840)
378 | dgVertexMap := CreateVertexMap(&NodesWork{}, -6848, -3776, 3840, 3712)
379 | pts := CollinearOrientedVertices(make([]OrientedVertex, 0))
380 | pts = append(pts, OrientedVertex{
381 | v: dgVertexMap.SelectVertexClose(909.391304, -3776),
382 | left: false,
383 | },
384 | OrientedVertex{
385 | v: dgVertexMap.SelectVertexClose(909.391304, -3776),
386 | left: true,
387 | },
388 | OrientedVertex{
389 | v: dgVertexMap.SelectVertexClose(2423.22689, -110.924370),
390 | left: false,
391 | },
392 | OrientedVertex{
393 | v: dgVertexMap.SelectVertexClose(2427.446945, -100.707395),
394 | left: true,
395 | },
396 | OrientedVertex{
397 | v: dgVertexMap.SelectVertexClose(2816., 840.),
398 | left: false,
399 | },
400 | OrientedVertex{
401 | v: dgVertexMap.SelectVertexClose(2820.956522, 852.),
402 | left: true,
403 | },
404 | OrientedVertex{
405 | v: dgVertexMap.SelectVertexClose(3262.086957, 1920.),
406 |
407 | left: false,
408 | },
409 | // missing last vertex
410 | )
411 |
412 | pts.Coalesce()
413 | fmt.Println(pts.toString())
414 | if pts[1].left != true || pts[1].v.X != 909.391304 ||
415 | pts[1].v.Y != -3776 {
416 | t.Errorf("Wrong #1 point\n")
417 | }
418 | }
419 |
420 | func TestCoalesce4(t *testing.T) {
421 | // left and right vertex missing in "old" sample
422 | // [RIGHT(3456.,4352.),LEFT(3456.,-1408.),RIGHT(3456.,-1536.),RIGHT(3456.,-1536.),LEFT(3456.,-1600.),RIGHT(3456.,-1600.),LEFT(3456.,-1664.),LEFT(3456.,-1664.)]
423 | // Sanity check failed! Evaluated partition line 13993 (3456,3308)-(3456,3318.4) doesn't consistently go in/out of the void when crossing solid lines (incidence count: 2). [LEFT(3456.,5256.),RIGHT(3456.,4352.),LEFT(3456.,-1408.),RIGHT(3456.,-1536.),RIGHT(3456.,-1536.),LEFT(3456.,-1664.),RIGHT(3456.,-2048.)]
424 | // (5256,-3392) to (-2048,9664)
425 | dgVertexMap := CreateVertexMap(&NodesWork{}, -2048, -3392, 9664, 5256)
426 | pts := CollinearOrientedVertices(make([]OrientedVertex, 0))
427 | pts = append(pts, OrientedVertex{
428 | v: dgVertexMap.SelectVertexClose(3456, 5256),
429 | left: false,
430 | },
431 | OrientedVertex{
432 | v: dgVertexMap.SelectVertexClose(3456, 4352),
433 | left: true,
434 | },
435 | OrientedVertex{
436 | v: dgVertexMap.SelectVertexClose(3456, -1408),
437 | left: false,
438 | },
439 | OrientedVertex{
440 | v: dgVertexMap.SelectVertexClose(3456, -1536),
441 | left: true,
442 | },
443 | OrientedVertex{
444 | v: dgVertexMap.SelectVertexClose(3456, -1536),
445 | left: true,
446 | },
447 | OrientedVertex{
448 | v: dgVertexMap.SelectVertexClose(3456, -1600.),
449 | left: false,
450 | },
451 | OrientedVertex{
452 | v: dgVertexMap.SelectVertexClose(3456, -1600.),
453 |
454 | left: true,
455 | },
456 | OrientedVertex{
457 | v: dgVertexMap.SelectVertexClose(3456, -1664),
458 |
459 | left: false,
460 | },
461 | OrientedVertex{
462 | v: dgVertexMap.SelectVertexClose(3456, -1664),
463 |
464 | left: false,
465 | },
466 | OrientedVertex{
467 | v: dgVertexMap.SelectVertexClose(3456, -2048),
468 |
469 | left: true,
470 | },
471 | // missing last vertex
472 | )
473 |
474 | pts.Coalesce()
475 | fmt.Println(pts.toString())
476 | if len(pts) != 6 || pts[3].v == pts[4].v {
477 | t.Errorf("Test failed %t %t\n", len(pts) == 6, pts[3].v != pts[4].v)
478 | }
479 | }
480 |
--------------------------------------------------------------------------------
/filecontrol.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see <http://www.gnu.org/licenses/>.
17 | package main
18 |
19 | import (
20 | "errors"
21 | "fmt"
22 | "io"
23 | "os"
24 | "path/filepath"
25 | "runtime/pprof"
26 | )
27 |
// Controls lifetime of both input and output wads - ensures they are properly
// closed by the end of program, regardless of success and failure, and that
// a temporary file, if such was created because output file was not specified
// and we were to replace the input, either replaces that input file (on success)
// or is deleted (on failure)
type FileControl struct {
	success           bool       // set by Success(); tells Shutdown() there is nothing left to clean up
	tmp               bool       // output goes to a temporary file (no explicit output name was given)
	fin               *os.File   // input wad descriptor
	fout              *os.File   // output wad descriptor (the temp file when tmp == true)
	frmb              *os.File   // RMB options file (.rej / .REJ), if any
	freport           *os.File   // report file (.rpt); opened lazily via OpenReportFile
	fpin              *os.File   // descriptor backing pinnedExternalWad
	inputFileName     string
	outputFileName    string
	rmbFileName       string
	reportFileName    string
	pinnedFileName    string // for pinnedExternalWad
	inputWad          *PinnedWad
	pinnedExternalWad *PinnedWad // for RMB option NOPROCESS
}
49 |
// UsingTmp reports whether output is being written to a temporary file
// (which happens when no explicit output file was specified - see
// OpenOutputFile).
func (fc *FileControl) UsingTmp() bool {
	return fc.tmp
}
53 |
54 | func (fc *FileControl) OpenInputFile(inputFileName string) (*os.File, error) {
55 | fc.inputFileName = inputFileName
56 | var err error
57 | fc.fin, err = os.Open(inputFileName)
58 | // because I'm stupid? Should have specified EXCLUSIVE
59 | //fc.fin, err = os.OpenFile(inputFileName, os.O_RDONLY, 0) // wouldn't protect from opening same file as output with read-write access!
60 | return fc.fin, err
61 | }
62 |
63 | func (fc *FileControl) OpenOutputFile(outputFileName string) (*os.File, string, error) {
64 | fc.tmp = outputFileName == ""
65 | var err error
66 | if fc.tmp {
67 | // Need a temporary file
68 | fc.fout, err = os.CreateTemp(filepath.Dir(fc.inputFileName), "tmp")
69 | if err == nil {
70 | outputFileName = fc.fout.Name()
71 | }
72 | } else {
73 | fc.fout, err = os.OpenFile(outputFileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC,
74 | os.ModeExclusive|os.ModePerm)
75 | }
76 | fc.outputFileName = outputFileName
77 | return fc.fout, outputFileName, err
78 | }
79 |
80 | func (fc *FileControl) OpenRMBOptionsFile(rmbFileName string) (*os.File, error) {
81 | fc.rmbFileName = rmbFileName
82 | var err error
83 | fc.frmb, err = os.Open(rmbFileName)
84 | if err != nil { // just in case it is not set so
85 | fc.frmb = nil
86 | }
87 | return fc.frmb, err
88 | }
89 |
90 | func (fc *FileControl) CloseRMBOptionsFile() {
91 | if fc.frmb == nil {
92 | return
93 | }
94 | err := fc.frmb.Close()
95 | if err != nil {
96 | Log.Error("Couldn't close RMB file '%s': %s\n", fc.rmbFileName, err.Error())
97 | }
98 | }
99 |
100 | func (fc *FileControl) OpenReportFile() (*os.File, error) {
101 | var err error
102 | fc.freport, err = os.OpenFile(fc.reportFileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC,
103 | os.ModeExclusive|os.ModePerm)
104 | if err != nil {
105 | fc.freport = nil
106 | return nil, err
107 | }
108 | return fc.freport, nil
109 | }
110 |
111 | func (fc *FileControl) CloseReportFile(suc bool) bool {
112 | if fc.freport == nil {
113 | return true
114 | }
115 | if suc {
116 | fc.freport.Write([]byte("# Report written successfully - no entries lost\n"))
117 | } else {
118 | fc.freport.Write([]byte("# Program aborted, report might be missing entries\n"))
119 | }
120 | err := fc.freport.Close()
121 | if err != nil {
122 | Log.Error("Couldn't close report file '%s': %s\n", fc.reportFileName, err.Error())
123 | } else {
124 | if suc {
125 | Log.Printf("Written report file %s\n", fc.reportFileName)
126 | } else {
127 | Log.Printf("Written incomplete report file %s\n", fc.reportFileName)
128 | }
129 | }
130 | fc.freport = nil
131 | return err == nil
132 | }
133 |
134 | func (fc *FileControl) Success() bool {
135 | if fc.fin == nil || fc.fout == nil {
136 | Log.Panic("Sanity check failed: descriptor invalid.\n")
137 | }
138 | errFin := fc.fin.Close()
139 | errFout := fc.fout.Close()
140 | sucReport := fc.CloseReportFile(true)
141 | hasError := errFin != nil || errFout != nil || !sucReport
142 | if hasError {
143 | if errFin != nil {
144 | Log.Error("Closing input file (after wad was almost ready) returned error: %s.\n",
145 | errFin.Error())
146 | }
147 | if errFout != nil {
148 | Log.Error("Closing output file (after wad was almost ready) returned error: %s.\n",
149 | errFout.Error())
150 | }
151 | return false
152 | }
153 | success2 := true
154 | if fc.tmp {
155 | success2 = fc.tempFileReplacesInput()
156 | }
157 | fc.success = true // nothing to clean up on program exit anyway (original file descriptors closed)
158 | return success2 // but the criteria to report success to user is different - no errors should have happened
159 | }
160 |
161 | func (fc *FileControl) tempFileReplacesInput() bool {
162 | success := true
163 | // now former output - temp file - is where we read from,
164 | // where as former input is the destination file we will overwrite
165 | fin, errFin := os.Open(fc.outputFileName)
166 | if errFin != nil {
167 | Log.Error("Couldn't reopen the temporarily file to read from it: %s.\n",
168 | errFin.Error())
169 | return false
170 | }
171 | fout, errFout := os.OpenFile(fc.inputFileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC,
172 | os.ModeExclusive|os.ModePerm)
173 | if errFout != nil {
174 | success = false
175 | Log.Error("Couldn't reopen the input file to overwrite it: %s.\n",
176 | errFout.Error())
177 | } else {
178 | _, err := io.Copy(fout, fin)
179 | if err != nil {
180 | success = false
181 | // FUCK Hope the user has backup for this
182 | // TODO may be make backup oneself, I dunno
183 | Log.Error("Error when overwriting the original file: %s.\n",
184 | err.Error())
185 | }
186 | fout.Close()
187 | }
188 | fin.Close()
189 | // Now delete the temporary file
190 | err := os.Remove(fc.outputFileName)
191 | if err != nil {
192 | success = false
193 | Log.Error("Couldn't delete temporary file after overwriting the original one: %s.\n",
194 | err.Error())
195 | }
196 | return success
197 | }
198 |
// Ensures we close all files when program exits. Temporary file is getting
// deleted at this moment
func (fc *FileControl) Shutdown() {
	// If Success() already ran, every descriptor was closed (and the temp
	// file, if any, already replaced the input) - nothing left to do.
	if fc.success {
		return
	}

	// Close everything first, remember the errors, report them afterwards -
	// one failed close must not prevent the remaining files from closing.
	var errFrmb error
	if fc.frmb != nil {
		errFrmb = fc.frmb.Close()
	}

	var errFin error
	if fc.fin != nil {
		errFin = fc.fin.Close()
	}

	var errFout error
	if fc.fout != nil {
		errFout = fc.fout.Close()
	}

	var errFpin error
	if fc.fpin != nil {
		errFpin = fc.fpin.Close()
	}

	if errFrmb != nil {
		Log.Error("Couldn't close RMB file '%s': %s\n", fc.rmbFileName, errFrmb.Error())
	}

	if errFin != nil {
		Log.Error("Couldn't close input file '%s': %s\n", fc.inputFileName, errFin.Error())
	}

	if errFout != nil {
		Log.Error("Couldn't close output file '%s': %s\n", fc.outputFileName, errFout.Error())
	}

	if errFpin != nil {
		Log.Error("Couldn't close sourced file '%s': %s\n", fc.pinnedFileName, errFpin.Error())
	}

	// false => report is marked as possibly incomplete
	fc.CloseReportFile(false)

	if fc.tmp { // Aborting unsuccessful operation when a temp file has been created
		// Deleting a file we failed to close would be unreliable (on Windows
		// it would outright fail), so only attempt removal after clean close.
		if errFout != nil {
			Log.Error("Couldn't delete temporary file '%s' because failed to close it already.\n",
				fc.outputFileName)
			return
		}
		err := os.Remove(fc.outputFileName)
		if err != nil {
			Log.Error("Got error when trying to delete a temporary file '%s': %s\n", fc.outputFileName, err.Error())
		}
	}
}
256 |
257 | // Loads RMB if it exists. ALLOWED to panic (and thus bring down the program)
258 | // if:
259 | // 1. File exists but couldn't be read, because, for example, permissions
260 | // 2. Syntax error in RMB file encountered while parsing it
261 | // If file doesn't exists, no panic occurs, but a message is printed to
262 | // the output that file simply doesn't exist since this functions is only
263 | // supposed to be called if user requested that RMB may be used whenever
264 | // available. This may be useful indicator that there is a typo in RMB file name
265 | // that user wanted to use alongside the file
266 | func LoadAssociatedRMB(wadFullFileName string, fileControl *FileControl) *LoadedRMB {
267 | fext := filepath.Ext(wadFullFileName)
268 | cas := wadFullFileName
269 | if len(fext) > 0 {
270 | cas = cas[:(len(cas) - len(fext))]
271 | }
272 | rmbFullName := cas + ".rej"
273 | reportFullName := cas + ".rpt"
274 |
275 | RMBFile, err := fileControl.OpenRMBOptionsFile(rmbFullName)
276 | retry := false
277 | if err != nil {
278 | if errors.Is(err, os.ErrNotExist) {
279 | retry = true
280 | }
281 | }
282 | if retry {
283 | // Depending on OS and file system, names can be case-sensitive,
284 | // so we try both
285 | rmbFullName = cas + ".REJ"
286 | RMBFile, err = fileControl.OpenRMBOptionsFile(rmbFullName)
287 | }
288 | if err != nil {
289 | if errors.Is(err, os.ErrNotExist) {
290 | Log.Printf("Ignoring RMB option because there is no file in the same directory as the input wad file, which has same name as wad file but an extension of '.rej' or '.REJ'\n")
291 | } else {
292 | Log.Panic("Found RMB options file '%s' but opening it yielded an error: %s\n", rmbFullName, err.Error())
293 | }
294 | return nil
295 | }
296 |
297 | // if RMB options file exists, be ready to create reportFileName just in
298 | // case
299 | fileControl.reportFileName = reportFullName
300 |
301 | shortName := filepath.Base(rmbFullName)
302 | fileInfo, err := os.Stat(rmbFullName)
303 | if err != nil {
304 | Log.Panic("Couldn't obtain file size of '%s', aborting: %s\n", rmbFullName, err.Error())
305 | }
306 | rawSz := fileInfo.Size()
307 | sz := int(rawSz)
308 | if int64(sz) != rawSz {
309 | Log.Panic("RMB file is too large: %d\n", rawSz)
310 | }
311 |
312 | buf := make([]byte, sz)
313 | rsz, err := RMBFile.Read(buf)
314 | if err != nil && !errors.Is(err, io.EOF) {
315 | Log.Panic("Couldn't read RMB options file '%s': %s\n", rmbFullName, err.Error())
316 | }
317 |
318 | if rsz != sz {
319 | Log.Panic("Incomplete read of RMB options file '%s': number bytes read %d is different from byte size %d\n",
320 | rmbFullName, rsz, sz)
321 | }
322 | fileControl.CloseRMBOptionsFile()
323 |
324 | b, res := LoadRMB(buf, shortName)
325 | if !b {
326 | Log.Panic("Fatal error: RMB options file contains syntax errors.\n")
327 | }
328 | return res
329 | }
330 |
331 | // This was supposed to be used to generate test data to develop a partitioning
332 | // scheme from "split concave polygon into convex polygons" algorithms (such as
333 | // Hertel-Mehlhorn or Keil/Snoeyink optimal partition), before I decided that it
334 | // might be not exactly useful for SEG minimization (or minimization of anything
335 | // for that matter) after these convex partitions need to be redone the BSP way.
336 | func DebugSaveDumpedSegs(where string) {
337 | fout, ferr := os.OpenFile(where, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.ModeExclusive|os.ModePerm)
338 | if ferr != nil {
339 | Log.Printf("An error has occured while trying to create/modify %s: %s\n", where, ferr)
340 | os.Exit(1)
341 | }
342 | defer fout.Close()
343 | n, _ := fout.WriteString(Log.GetDumpedSegs())
344 | Log.Printf("Wrote seg dump (%d bytes) to '%s'.\n", n, where)
345 | }
346 |
347 | func DumpMemoryProfile(where string) {
348 | fout, ferr := os.OpenFile(where, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.ModeExclusive|os.ModePerm)
349 | if ferr != nil {
350 | Log.Printf("An error has occured while trying to create/modify %s: %s\n", where, ferr)
351 | os.Exit(1)
352 | }
353 | defer fout.Close()
354 | pprof.Lookup("allocs").WriteTo(fout, 0)
355 | }
356 |
357 | // Print with platform-specific linebreaks indicated by CRLF argument
358 | func WriterPrintfln(w io.Writer, CRLF bool, format string, a ...interface{}) {
359 | if len(format) > 0 && format[len(format)-1] == '\n' {
360 | format = string([]byte(format)[:len(format)-1])
361 | }
362 | w.Write([]byte(appendCRLF(CRLF, fmt.Sprintf(format, a...))))
363 | }
364 |
// appendCRLF terminates s with the requested line break flavor: "\r\n" when
// CRLF is set (DOS/Windows-style output), plain "\n" otherwise.
func appendCRLF(CRLF bool, s string) string {
	eol := "\n"
	if CRLF {
		eol = "\r\n"
	}
	return s + eol
}
372 |
373 | func (fc *FileControl) OpenExternalFile(externalFileName string) (*os.File, error) {
374 | if fc.fpin != nil {
375 | fc.fpin.Close()
376 | fc.fpin = nil
377 | fc.pinnedFileName = ""
378 | }
379 |
380 | fin, err := os.Open(externalFileName)
381 | if err == nil {
382 | fc.fpin = fin
383 | fc.pinnedFileName = externalFileName
384 | return fin, nil
385 | }
386 | return nil, err
387 | }
388 |
389 | // PinExternalWad is a non-thread-safe func that loads and pins wad directory in
390 | // global variable
391 | func (fc *FileControl) PinExternalWad(wadFileName string) *PinnedWad {
392 | // 1. if the same file is already pinned, return it
393 | // 2. if input file, return OLD schedule (need to keep copy of it
394 | // before UpdateDirectoryAndSchedule). It should not require pinning nor should
395 | // it overwrite/be overwritten by any other pinned file
396 | // 3. check it is not the same as output file -- don't allow output file
397 | if wadFileName == fc.inputFileName {
398 | return fc.inputWad
399 | }
400 | if len(wadFileName) == 0 {
401 | return nil
402 | }
403 | f1, err1 := os.Stat(fc.inputFileName)
404 | f2, err2 := os.Stat(fc.outputFileName)
405 | f3, err3 := os.Stat(wadFileName)
406 | if err1 != nil || err2 != nil || err3 != nil {
407 | return nil
408 | }
409 | if os.SameFile(f2, f3) {
410 | Log.Error("Cannot load file %s for sourcing lump from it -- it is the output file and is still being written.\n",
411 | wadFileName)
412 | return nil
413 | }
414 | if os.SameFile(f1, f3) {
415 | return fc.inputWad
416 | }
417 |
418 | // Above either returned input wad or aborted, here check if it is the same wad
419 | // as already pinned
420 | if fc.pinnedFileName != "" && fc.pinnedExternalWad != nil {
421 | if fc.pinnedFileName == wadFileName { // shortcut
422 | return fc.pinnedExternalWad
423 | }
424 | f4, err4 := os.Stat(fc.pinnedFileName)
425 | if err4 != nil {
426 | return nil
427 | }
428 | if os.SameFile(f4, f3) {
429 | return fc.pinnedExternalWad
430 | }
431 | }
432 |
433 | // no, it is a new one
434 | f, err := fc.OpenExternalFile(wadFileName)
435 | if err != nil {
436 | Log.Error("An error has occured while trying to open %s: %s\n",
437 | wadFileName, err)
438 | os.Exit(1)
439 | }
440 |
441 | wh := new(WadHeader)
442 | le, err5 := TryReadWadDirectory(false, f, wh)
443 | if err5 != nil {
444 | Log.Error("Couldn't read wad directory from %s: %s\n",
445 | wadFileName, err5.Error())
446 | }
447 | wadDir := LoadWadDirectory(false, le, nil, nil, false, nil)
448 | if wadDir == nil {
449 | Log.Error("Couldn't read lump list from %s: no lumps?\n", wadFileName)
450 | }
451 | fc.pinnedExternalWad = &PinnedWad{
452 | le: le,
453 | scheduleRoot: wadDir.scheduleRoot,
454 | readerAt: f,
455 | }
456 |
457 | return fc.pinnedExternalWad
458 | }
459 |
460 | // ResolveFilepath resolves fname relative to inputFileName. inputFileName since
461 | // guaranteed to be a file and not a directory, so its directory is used for
462 | // resolution
463 | func (fc *FileControl) ResolveFilepath(fname string) string {
464 | if !filepath.IsAbs(fname) {
465 | return filepath.Join(filepath.Dir(fc.inputFileName), fname)
466 | }
467 | return fname
468 | }
469 |
--------------------------------------------------------------------------------
/fixedwriter.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // fixedwriter
19 | package main
20 |
21 | import (
22 | "encoding/binary"
23 | "errors"
24 | "fmt"
25 | )
26 |
// FixedWriterWrapper is a writer over a fixed-size byte array/slice which
// can't be grown whatsoever.
// Trying to write past array capacity will fail with an error.
type FixedWriterWrapper struct {
	data   []byte
	offset int // offset at which data gets written by call to Write. Can be changed with Seek
}

// CreateFixedWriterWrapper wraps an existing slice (its full capacity is
// available for writing) and places the write cursor at offset.
func CreateFixedWriterWrapper(data []byte, offset int) *FixedWriterWrapper {
	result := new(FixedWriterWrapper)
	result.data = data
	result.offset = offset
	return result
}

// Seek changes offset (relative to the beginning of backing storage) at which
// next Write call will write bytes.
// If requested to move beyond length but within capacity, will automatically
// increase length. Offsets at or beyond capacity are rejected.
func (w *FixedWriterWrapper) Seek(offset int) error {
	if offset >= cap(w.data) {
		return errors.New("out of range")
	}
	w.offset = offset
	if offset > len(w.data) {
		w.data = w.data[:offset]
	}
	return nil
}

// GetBytes returns the backing slice, sized to cover everything written so far.
func (w *FixedWriterWrapper) GetBytes() []byte {
	return w.data
}

// Write writes bytes at current offset, and moves offset past them, extending
// the slice length (within capacity) as needed.
// TODO code using FixedWriterWrapper should fucking check err (including
// indirect uses via binary.Write as in blockmap generation code).
func (w *FixedWriterWrapper) Write(p []byte) (n int, err error) {
	towrite := len(p)
	if towrite == 0 {
		return 0, nil
	}
	nlen := len(w.data)
	sl := w.data[w.offset:]
	if cap(sl) < towrite {
		// don't bother writing incomplete data!
		// BUGFIX: "want" is what the caller asked to write, "have" is the
		// room remaining -- these used to be reported swapped
		return 0, fmt.Errorf("Insufficient buffer size: want %d bytes have %d bytes", towrite, cap(sl))
	}
	if len(sl) < towrite {
		// we know already there is enough capacity
		// need to extend slice just enough to write data
		oldlen := len(sl)
		sl = sl[:towrite]
		nlen = nlen + len(sl) - oldlen
	}
	copy(sl, p) // was a manual byte loop; copy lowers to memmove

	w.data = w.data[:nlen]
	w.offset = w.offset + towrite
	return towrite, nil
}

// WriteWithOrder writes uint16 slice with respect to endianness set by order.
// Purpose: avoid intermediary byte slice allocation in binary.Write for our
// poor GC-pressured program.
// Note: returns the number of uint16 words consumed, not bytes written.
func (w *FixedWriterWrapper) WriteWithOrder(order binary.ByteOrder, p []uint16) (n int, err error) {
	words := len(p)
	towrite := words << 1 // two bytes per word
	if towrite == 0 {
		return 0, nil
	}
	nlen := len(w.data)
	sl := w.data[w.offset:]
	if cap(sl) < towrite {
		// don't bother writing incomplete data!
		// BUGFIX: "want"/"have" arguments were swapped here as well
		return 0, fmt.Errorf("Insufficient buffer size: want %d bytes have %d bytes", towrite, cap(sl))
	}
	if len(sl) < towrite {
		// we know already there is enough capacity
		// need to extend slice just enough to write data
		oldlen := len(sl)
		sl = sl[:towrite]
		nlen = nlen + len(sl) - oldlen
	}

	a := 0
	for i := 0; i < words; i++ {
		order.PutUint16(sl[a:], p[i])
		a += 2
	}

	w.data = w.data[:nlen]
	w.offset = w.offset + towrite
	return len(p), nil
}
123 |
--------------------------------------------------------------------------------
/gamespec.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // Wad specifications for Doom-engine family of games
19 | // (including Heretic, Hexen, etc.)
20 | package main
21 |
22 | import (
23 | "regexp"
24 | )
25 |
// Both brought in accordance with Prboom-Plus 2.6.1um map name ranges, except
// that E1M0x is possible (when it is probably shouldn't be) since I don't
// want to complicate these regexp's (and E9M97 is perfectly legal, for example)
// RMB parser now uses these too, rather than its own forks. Hence the subgroups
// capturing the individual digits.
var MAP_SEQUEL *regexp.Regexp = regexp.MustCompile(`^MAP([0-9])([0-9])$`)
var MAP_ExMx *regexp.Regexp = regexp.MustCompile(`^E([1-9])M([0-9])([0-9]?)$`)

// This constant group is for internal program usage only.
// It enumerates the map data formats the program distinguishes between.
const (
	FORMAT_DOOM = iota // Doom/Heretic/Strife binary map format
	FORMAT_HEXEN // Hexen binary map format (different THINGS/LINEDEFS layout)
	FORMAT_UDMF // textual UDMF map format
)

// Blockmap blocks are 128x128 map units.
const BLOCK_WIDTH = 128
const BLOCK_BITS = uint16(7) // replaces division by BLOCK_WIDTH with right shift

// Starting signature "xNd4\0\0\0\0" of NODES produced by DeePBSP, some ports
// do support this nodes format (PrBoom-plus v2.5.1.5 confirmed, Risen3D also
// per zdoom.org wiki)
var DEEPNODES_SIG = [8]byte{0x78, 0x4E, 0x64, 0x34, 0x00, 0x00, 0x00, 0x00}

// Starting signature "XNOD" of NODES for Zdoom extended non-GL nodes format.
// Unlike Deep NODES, this signature, together with some bytes following it, may
// accidentally occur in a valid vanilla nodes format nodes data
var ZNODES_PLAIN_SIG = [4]byte{0x58, 0x4E, 0x4F, 0x44}

// Starting signature "ZNOD" of NODES for Zdoom extended COMPRESSED non-GL
// nodes format
var ZNODES_COMPRESSED_SIG = [4]byte{0x5A, 0x4E, 0x4F, 0x44}

// Magic numbers opening every wad file (first 4 bytes of WadHeader).
const IWAD_MAGIC_SIG = uint32(0x44415749) // ASCII - 'IWAD'
const PWAD_MAGIC_SIG = uint32(0x44415750) // ASCII - 'PWAD'
59 |
// Doom & Heretic thing flag constants
const TF_ROOKIE = int16(0x0001)
const TF_NORMAL = int16(0x0002)
const TF_HARD = int16(0x0004)
const TF_AMBUSH = int16(0x0008)
const TF_MULTIPLAYER_ONLY = int16(0x0010)

// Boom/MBF additions to Doom flag constants
const TF_BOOM_NOTINDEATHMATCH = int16(0x0020)
const TF_BOOM_NOTCOOP = int16(0x0040)
const TF_MFB_FRIENDLY = int16(0x0080)

// Hexen thing flag constants
const HTF_ROOKIE = int16(0x0001)
const HTF_NORMAL = int16(0x0002)
const HTF_HARD = int16(0x0004)
const HTF_AMBUSH = int16(0x0008)
const HTF_DORMANT = int16(0x0010)
const HTF_FIGHTER = int16(0x0020)
const HTF_CLERIC = int16(0x0040)
const HTF_MAGE = int16(0x0080)
const HTF_SINGLEPLAYER = int16(0x0100)
const HTF_COOP = int16(0x0200)
const HTF_DEATHMATCH = int16(0x0400)

// Strife thing flag constants (note STF_AMBUSH != TF_AMBUSH)
const STF_ROOKIE = int16(0x0001)
const STF_NORMAL = int16(0x0002)
const STF_HARD = int16(0x0004)
const STF_STANDSTILL = int16(0x0008)
const STF_NOTINSINGLEPLAYER = int16(0x0010)
const STF_AMBUSH = int16(0x0020)
const STF_FRIENDLY = int16(0x0040)
const STF_UNUSED = int16(0x0080)
const STF_TRANSLUCENT = int16(0x0100)
const STF_INVISIBLE = int16(0x0200)

// COMMON linedef flags: for Doom & derivatives
const LF_IMPASSABLE = uint16(0x0001)
const LF_BLOCK_MONSTER = uint16(0x0002)
const LF_TWOSIDED = uint16(0x0004)
const LF_UPPER_UNPEGGED = uint16(0x0008)
const LF_LOWER_UNPEGGED = uint16(0x0010)
const LF_SECRET = uint16(0x0020) // shown as 1-sided on automap
const LF_BLOCK_SOUND = uint16(0x0040)
const LF_NEVER_ON_AUTOMAP = uint16(0x0080)
const LF_ALWAYS_ON_AUTOMAP = uint16(0x0100)
const LF_MBF21_BLOCK_LAND_MONSTERS = uint16(4096) // 0x1000
const LF_MBF21_BLOCK_PLAYERS = uint16(8192) // 0x2000

// Linedef flags: Strife additions to COMMON linedef flags
const SLF_JUMPABLE = uint16(0x200)
const SLF_BLOCK_FLOATING = uint16(0x0400)
const SLF_TRANSLUCENCY1 = uint16(0x0800)
// NOTE(review): 0x0100 coincides with LF_ALWAYS_ON_AUTOMAP above -- verify
// this value against Strife specs
const SLF_TRANSLUCENCY2 = uint16(0x0100)

// Linedef flags: Boom additions to COMMON linedef flags
const BLF_PASSTHRU = uint16(0x200)

// Sentinel sidedef reference meaning "no sidedef on this side".
const SIDEDEF_NONE = uint16(0xFFFF)

const DOOM_LINEDEF_SIZE = 14 // Size of "Linedef" struct
const DOOM64_LINEDEF_SIZE = 16 // Size of "Doom64Linedef" struct
const HEXEN_LINEDEF_SIZE = 16 // Size of "HexenLinedef" struct

const DOOM_SIDEDEF_SIZE = 30 // Size of "Sidedef" struct
const DOOM_SECTOR_SIZE = 26 // Size of "Sector" struct

// Hexen linedef action specials relevant to polyobject detection.
const HEXEN_ACTION_POLY_START = 1
const HEXEN_ACTION_POLY_EXPLICIT = 5

// Hexen polyobject thing types.
const PO_ANCHOR_TYPE = 3000
const PO_SPAWN_TYPE = 3001
const PO_SPAWNCRUSH_TYPE = 3002

// ZDoom's alternative polyobject thing types.
const ZDOOM_PO_ANCHOR_TYPE = 9300
const ZDOOM_PO_SPAWN_TYPE = 9301
const ZDOOM_PO_SPAWNCRUSH_TYPE = 9302
138 |
// TODO Linedef flags: Hexen additions to COMMON linedef flags
// (There is huge mess there)

// Wad header, 12 bytes.
type WadHeader struct {
	MagicSig uint32 // IWAD_MAGIC_SIG or PWAD_MAGIC_SIG
	LumpCount uint32 // vanilla treats this as signed int32
	DirectoryStart uint32 // vanilla treats this as signed int32
}

// Lump entries listed one after another comprise the directory,
// the first such lump entry is found at WadHeader.DirectoryStart offset into
// the wad file.
// Each lump entry is 16 bytes long
type LumpEntry struct {
	FilePos uint32 // vanilla treats this as signed int32
	Size uint32 // vanilla treats this as signed int32
	Name [8]byte // NUL-padded, not necessarily NUL-terminated
}

// This is Doom/Heretic/Strife thing. Not Hexen thing
type Thing struct {
	XPos int16
	YPos int16
	Angle int16
	Type int16
	Flags int16 // TF_* / STF_* constants
}

// Hexen Thing
type HexenThing struct {
	TID int16
	XPos int16
	YPos int16
	StartingHeight int16
	Angle int16
	Type int16
	Flags int16 // HTF_* constants
	Action uint8
	Args [5]byte
}

// Doom64 Thing
type Doom64Thing struct {
	XPos int16
	YPos int16
	ZPos int16
	Angle int16
	Type int16
	Flags int16
	ID int16
}
190 |
// Doom/Heretic linedef format
type Linedef struct {
	// Vanilla treats ALL fields as signed int16
	StartVertex uint16
	EndVertex uint16
	Flags uint16 // LF_* / BLF_* / SLF_* constants
	Action uint16
	Tag uint16
	FrontSdef uint16 // Front Sidedef number
	BackSdef uint16 // Back Sidedef number (0xFFFF special value for one-sided line)
}

// Doom64 linedef format: different Flags size compared to Doom (32-bit vs 16-bit)
type Linedef64 struct {
	// TODO Check whether vanilla treats ALL fields as signed
	StartVertex uint16
	EndVertex uint16
	Flags uint32
	Action uint16
	Tag uint16
	FrontSdef uint16 // Front Sidedef number (front is to the right)
	BackSdef uint16 // Back Sidedef number (0xFFFF special value for one-sided line)
}

// Hexen linedef format
type HexenLinedef struct {
	// Vanilla treats ALL fields as signed
	StartVertex uint16
	EndVertex uint16
	Flags uint16
	Action uint8
	Arg1 uint8 // this acts as corresponding to sector tag, but sector has uint16-size tag while linedef has uint8-size tag lol
	Arg2 uint8
	Arg3 uint8
	Arg4 uint8
	Arg5 uint8
	FrontSdef uint16
	BackSdef uint16
}

// Sidedef format - common to all? except doom64 which is not considered for support yet
type Sidedef struct {
	XOffset int16
	YOffset int16
	UpName [8]byte // name of upper texture
	LoName [8]byte // name of lower texture
	MidName [8]byte // name of middle texture
	Sector uint16 // sector number; vanilla treats this as signed int16
}

// Doom64 Sidedef format. Indices instead of texture names
type Sidedef64 struct {
	XOffset int16
	YOffset int16
	UpIndex uint16 // index of upper texture, likely signed in original
	LoIndex uint16 // index of lower texture, likely signed in original
	MidIndex uint16 // index of middle texture, likely signed in original
	Sector uint16 // sector number; vanilla treats this as signed int16
}
250 |
// A Vertex is a coordinate on the map, and can be used in both linedefs and segs
// as starting(ending) point
// Note that map editing utilities display only those vertices that were
// referenced in linedefs (which is what human user expects to see)
// As the result of building nodes and thus constructing SEGS, VERTEXES lump is
// modified to also have vertices used in SEGS introduced by splitting
type Vertex struct {
	XPos int16
	YPos int16
}

type Doom64Vertex struct {
	// These are really fixed_t: 16 bits store integral part and 16 bits store
	// fractional part
	// That is, Doom64 used fractional coordinates. The maximum map boundaries
	// are same as Doom
	XPos int32
	YPos int32
}

// Seg is the vanilla SEGS lump entry.
type Seg struct {
	// Vanilla treats ALL fields as signed int16
	StartVertex uint16
	EndVertex uint16
	Angle int16
	Linedef uint16
	Flip int16 // 0 - seg follows same direction as linedef, 1 - the opposite
	Offset uint16 // distance along linedef to start of seg
}

// DeePBSP "standard V4" seg format (32-bit vertex indices)
type DeepSeg struct {
	StartVertex uint32
	EndVertex uint32
	Angle int16
	Linedef uint16
	Flip int16
	Offset uint16
}

// Each subsector has only these two fields, yes. And the segs in SEGS lump
// follow the order so that consecutive segs in FirstSeg...FirstSeg+SegCount-1
// all belong to this subsector. So each seg is a part of one and only one subsector
type SubSector struct {
	// Vanilla treats ALL fields as signed int16
	SegCount uint16 // number of Segs in this SubSector
	FirstSeg uint16 // first Seg number
}

// DeePBSP "standard V4" subsector format
// SegCount is same size as regular subsector (DeePSea author states 64K is
// enough for one's subsector seg count), but FirstSeg is bigger to allow
// indexing into larger total number of segs
type DeepSubSector struct {
	SegCount uint16
	FirstSeg uint32
}
308 |
// Node is the vanilla NODES lump entry: a BSP splitter line plus bounding
// boxes and child references for both sides.
type Node struct {
	X int16
	Y int16
	Dx int16
	Dy int16
	Rbox [4]int16 // right bounding box
	Lbox [4]int16 // left bounding box
	RChild int16 // -| if sign bit = 0 then this is a subnode number
	LChild int16 // -> else 0-14 bits are subsector number
}

// DeePBSP "standard V4" node format. Also used by Zdoom extended non-GL nodes,
// as it is the same
type DeepNode struct {
	X int16
	Y int16
	Dx int16
	Dy int16
	Rbox [4]int16 // right bounding box
	Lbox [4]int16 // left bounding box
	RChild int32 // -| if sign bit = 0 then this is a subnode number
	LChild int32 // -> else 0-30 bits are subsector number
}

// Indexes into the Rbox/Lbox bounding box arrays above.
const BB_TOP = 0
const BB_BOTTOM = 1
const BB_LEFT = 2
const BB_RIGHT = 3

// Sector is the vanilla SECTORS lump entry.
type Sector struct {
	FloorHeight int16
	CeilHeight int16
	FloorName [8]byte
	CeilName [8]byte
	LightLevel uint16
	Special uint16
	Tag uint16
}

// Sector64 is the Doom64 sector format: flat indices instead of names.
type Sector64 struct {
	FloorHeight int16
	CeilHeight int16
	FloorIndex uint16 // index of floor flat, likely signed in vanilla
	CeilIndex uint16 // index of ceiling flat, likely signed in vanilla
	ColorIndexes [5]uint16 // Color indexes (floor, ceiling, thing, wall top, wall bottom)
	Special uint16
	Tag uint16
	Flags uint16
}
358 |
// NOTE There is no type for reject - it is a stream of bits packed into bytes

// Blockmap consists of: header, followed by XBlocks*YBlocks offsets,
// followed by blocklist (arbitrary size)
type BlockMapHeader struct {
	XMin int16
	YMin int16
	XBlocks uint16 // vanilla treats this as signed int16
	YBlocks uint16 // vanilla treats this as signed int16
}

// Header of the vertex section in Zdoom extended nodes.
type ZdoomNode_VertexHeader struct {
	ReusedOriginalVertices uint32 // number of vertices reused from VERTEXES
	NumExtendedVertices uint32 // how many vertices follow this
}

// Zdoom nodes format offers increased precision for vertices
type ZdoomNode_Vertex struct {
	X int32 // fixed-point 16.16 signed int
	Y int32 // fixed-point 16.16 signed int
}

// Zdoom subsector information - nothing to define here. Just a count of
// subsectors, and each subsector defines only number of segs in current sector

// Zdoom seg information - number of segs, followed by repetition of the below
// struct. Unlike DeeP segs, this seg extension omits angle and offset information,
// which means some special effects (like horizon) that can be supported in vanilla
// and DeeP nodes format are not supported by Zdoom non-GL extended/compressed nodes
// format.
type ZdoomNode_Seg struct {
	StartVertex uint32
	EndVertex uint32
	Linedef uint16
	Flip byte // 0 - same direction as linedef, 1 - opposite
}

// ZdoomNode_node is not defined - it is same as DeepNode

// NOTE also no type definitions for blocks and blocklists, they are offsets
// and (each blocklist is) array of linedef indexes correspondingly
400 |
401 | // Returns whether the string in lumpName represents Doom level marker,
402 | // i.e. MAP02, E3M1
403 | func IsALevel(lumpName []byte) bool {
404 | return MAP_SEQUEL.Match(lumpName) || MAP_ExMx.Match(lumpName)
405 | }
406 |
// IsEmptyTexture reports whether lumpName holds the "-" pseudo-texture name:
// a '-' in the first byte immediately terminated by NUL in the second.
func IsEmptyTexture(lumpName []byte) bool {
	if lumpName[0] != '-' {
		return false
	}
	return lumpName[1] == 0
}
410 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/vigilantdoomer/vigilantbsp
2 |
3 | go 1.19
4 |
--------------------------------------------------------------------------------
/lumpwrite.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // lumpwrite
19 | package main
20 |
21 | import (
22 | "crypto/sha256"
23 | "encoding/base64"
24 | "encoding/binary"
25 | "fmt"
26 | "os"
27 | )
28 |
29 | // Write byte array without conversion. It is assumed that lump was received
30 | // as a valid byte array (also already converted to file endianness) ready to be
31 | // written
32 | func WriteSliceLump(data []byte, curPos *uint32, fout *os.File, le []LumpEntry,
33 | lumpIdx int, s string, postfix string) {
34 | if data != nil {
35 | fout.Write(data)
36 | }
37 | le[lumpIdx].FilePos = *curPos
38 | le[lumpIdx].Size = uint32(len(data))
39 | if len(s) > 0 {
40 | hashSuffix := ""
41 | if config.HashLumps {
42 | hashSuffix = fmt.Sprintf(" (HASH: %s)", Hash(data))
43 | }
44 | Log.Printf("Lump #%d (%s)%s has its size set to %d bytes.%s\n", lumpIdx,
45 | s, postfix, le[lumpIdx].Size, hashSuffix)
46 | }
47 | *curPos = *curPos + uint32(len(data))
48 | }
49 |
50 | // Write lump from a typed array of structures that represent game data but need
51 | // conversion before going to file
52 | func ConvertAndWriteGenericLump(data interface{}, curPos *uint32, fout *os.File,
53 | le []LumpEntry, lumpIdx int, s string, postfix string) {
54 | hashSuffix := ""
55 | binary.Write(fout, binary.LittleEndian, data)
56 | if config.HashLumps {
57 | hashSuffix = fmt.Sprintf(" (HASH: %s)", HashIntf(data))
58 | }
59 | dataLen := binary.Size(data)
60 | le[lumpIdx].FilePos = *curPos
61 | le[lumpIdx].Size = uint32(dataLen)
62 | Log.Printf("Lump #%d (%s)%s has its size set to %d bytes.%s\n", lumpIdx, s,
63 | postfix, le[lumpIdx].Size, hashSuffix)
64 | *curPos = *curPos + uint32(dataLen)
65 | }
66 |
67 | func ConvertAndWriteDeepNodes(data []DeepNode, curPos *uint32, fout *os.File,
68 | le []LumpEntry, lumpIdx int, s string, postfix string) {
69 | hashSuffix := ""
70 | sigCnt, _ := fout.Write(DEEPNODES_SIG[:])
71 | binary.Write(fout, binary.LittleEndian, data)
72 | if config.HashLumps {
73 | hashSuffix = fmt.Sprintf(" (HASH: %s)", HashIntf(data))
74 | }
75 | dataLen := binary.Size(data)
76 | le[lumpIdx].FilePos = *curPos
77 | le[lumpIdx].Size = uint32(dataLen + sigCnt)
78 | Log.Printf("Lump #%d (%s)%s has its size set to %d bytes.%s\n", lumpIdx, s,
79 | postfix, le[lumpIdx].Size, hashSuffix)
80 | *curPos = *curPos + uint32(dataLen+sigCnt)
81 | }
82 |
// Hash returns the base64 (standard encoding) of the SHA-256 digest of data.
// Non-thread-safe encode of cryptographic hash, I presume.
func Hash(data []byte) string {
	sum := sha256.Sum256(data)
	return base64.StdEncoding.EncodeToString(sum[:])
}
90 |
// HashIntf is the same as Hash, but for arbitrary data instead of a slice of
// bytes: the value is serialized little-endian via binary.Write, then its
// SHA-256 digest is base64-encoded.
func HashIntf(data interface{}) string {
	hasher := sha256.New()
	binary.Write(hasher, binary.LittleEndian, data)
	return base64.StdEncoding.EncodeToString(hasher.Sum(nil))
}
98 |
--------------------------------------------------------------------------------
/main_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2024, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | import (
20 | "math/rand"
21 | "testing"
22 | "time"
23 | )
24 |
// hitsCrunchBenchmark bundles the fixture data and counters shared by the
// sector-hits crunching benchmarks (initialized once in TestMain).
type hitsCrunchBenchmark struct {
	sectorHits []uint8 // pseudo-randomly populated with 3-bit values
	diff int
	flat int
	unmerged int
	SectorsSplit int
}
32 |
// hitsCrunchData is the shared benchmark fixture, set up once in TestMain.
var hitsCrunchData *hitsCrunchBenchmark
34 |
35 | func TestMain(m *testing.M) {
36 | benchmarkRandom := rand.NewSource(time.Now().UnixNano())
37 | hitsCrunchData = setupHitsCrunchBenchmark(benchmarkRandom)
38 | m.Run()
39 | }
40 |
// setupHitsCrunchBenchmark builds the shared sectorHits fixture: ~9400
// pseudo-randomly chosen slots get 3-bit values; all counters start at zero.
func setupHitsCrunchBenchmark(benchmarkRandom rand.Source) *hitsCrunchBenchmark {
	limit := uint16(20000)
	sectorHits := make([]uint8, limit)
	for j := 1; j < 9400; j++ {
		v := benchmarkRandom.Int63()
		// Scramble the 63-bit random value down to 16 bits.
		// NOTE(review): Go precedence makes `v&(1<<48) - 1` parse as
		// `(v & (1<<48)) - 1`, not `v & ((1<<48)-1)` -- possibly not what was
		// intended, but any deterministic scrambling suffices for a fixture.
		v = (((v&(1<<48) - 1) >> 48) | ((v&(1<<32) - 1) >> 32)) & (((v&(1<<16) - 1) >> 16) | v)
		uv := uint16(v)
		if uv >= limit {
			uv = limit - 1 // clamp into sectorHits index range
		}
		sectorHits[uv] = uint8(v & 7)
	}
	return &hitsCrunchBenchmark{
		diff: 0,
		flat: 0,
		unmerged: 0,
		SectorsSplit: 0,
		sectorHits: sectorHits,
	}
}
61 |
--------------------------------------------------------------------------------
/multiformat_tree.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | import (
20 | "time"
21 | )
22 |
// CreateNodeFunc is the signature shared by BSP-tree-building entry points so
// that VanillaOrZdoomFormat_Create can invoke either interchangeably.
type CreateNodeFunc func(w *NodesWork, ts *NodeSeg, bbox *NodeBounds, super *Superblock) *NodeInProcess

// MultiformatPassthrough carries everything VanillaOrZdoomFormat_Create needs
// to launch a Zdoom-format fallback run alongside the vanilla one.
type MultiformatPassthrough struct {
	linesForZdoom WriteableLines
	start time.Time
	input *NodesInput
	solidMap *Blockmap
	createNode CreateNodeFunc
}
32 |
// VanillaOrZdoomFormat_Create builds nodes in vanilla format while keeping a
// Zdoom-format run as fallback. With config.NodeThreads == 1 the vanilla run
// goes first and the Zdoom generator is launched only on overflow; otherwise
// both run concurrently and the vanilla result is discarded on overflow.
// Returns the vanilla root node, or nil when the Zdoom result was forwarded to
// input.nodesChan instead (the caller then must not use the vanilla tree).
func VanillaOrZdoomFormat_Create(w *NodesWork, ts *NodeSeg, bbox *NodeBounds,
	super *Superblock, oldNodeType int,
	passthrough *MultiformatPassthrough) *NodeInProcess {

	// Clone the input for the Zdoom generator, pointed at its own line data
	// and its own result channel.
	input2 := &NodesInput{}
	*input2 = *passthrough.input
	input2.lines = passthrough.linesForZdoom
	input2.nodeType = oldNodeType
	input2.bcontrol = nil // catch unexpected calls to it
	input2.bgenerator = nil // catch unexpected calls to it
	nodesChan := make(chan NodesResult)
	input2.nodesChan = nodesChan
	input2.solidMap = passthrough.solidMap

	start := passthrough.start
	input := passthrough.input

	var rootNode *NodeInProcess
	if config.NodeThreads == 1 { // reference to global: config
		// sequential mode
		Log.Printf("Running vanilla nodes format first (sequential run)\n")
		rootNode = passthrough.createNode(w, ts, bbox, super)
		if w.isUnsignedOverflow() {
			// Limits exceeded, must upgrade
			Log.Printf("Vanilla nodes format overflowed, switching to Zdoom nodes format (sequential run)\n")
			input.lines.AssignFrom(input2.lines)
			go ZNodesGenerator(input2)
			// this interception is because timer is only output from here
			nodeResult := <-nodesChan
			input.nodesChan <- nodeResult
			return nilAndPrintTimer(start, oldNodeType)
		}
	} else {
		// concurrent mode

		Log.Printf("Running vanilla and zdoom generators in parallel to each other (concurrent run)")
		go ZNodesGenerator(input2)
		rootNode = passthrough.createNode(w, ts, bbox, super)
		useZdoom := false
		if w.isUnsignedOverflow() {
			// Limits exceeded, must upgrade
			Log.Printf("Vanilla nodes format overflowed, switching to Zdoom nodes format (concurrent run)\n")
			useZdoom = true
		}
		// must wait for completion regardless
		// TODO implement early abort
		nodeResult := <-nodesChan
		if useZdoom {
			input.lines.AssignFrom(input2.lines)
			input.nodesChan <- nodeResult
			return nilAndPrintTimer(start, oldNodeType)
		}
	}

	Log.Printf("I have kept nodes in vanilla format.\n")
	return rootNode
}
90 |
91 | func nilAndPrintTimer(start time.Time, oldNodeType int) *NodeInProcess {
92 | switch promoteNodeType(oldNodeType) {
93 | case NODETYPE_ZDOOM_EXTENDED:
94 | Log.Printf("I have switched to ZDoom extended nodes format to avoid overflow.\n")
95 | case NODETYPE_ZDOOM_COMPRESSED:
96 | Log.Printf("I have switched to ZDoom compressed nodes format to avoid overflow.\n")
97 | }
98 | Log.Printf("Nodes took %s\n", time.Since(start))
99 | return nil
100 | }
101 |
102 | // whether nodes data overflows limits for vanilla unsigned (limit-removing) format
103 | func (w *NodesWork) isUnsignedOverflow() bool {
104 | return w.nodeType != NODETYPE_VANILLA ||
105 | uint32(w.totals.numSSectors)&w.SsectorMask == w.SsectorMask ||
106 | len(w.vertices) > 65536 ||
107 | w.tooManySegsCantFix(true)
108 | }
109 |
110 | // whether nodes data overflows limits for strict (signed) vanilla format
111 | func (w *NodesWork) isVanillaSignedOverflow() bool {
112 | b := w.nodeType != NODETYPE_VANILLA ||
113 | uint32(w.totals.numSSectors)&w.SsectorMask == w.SsectorMask ||
114 | len(w.vertices) > 32768
115 | if b {
116 | return true
117 | }
118 |
119 | if w.lastSubsectorOverflows(VANILLA_MAXSEGINDEX) {
120 | couldFix, _ := w.fitSegsToTarget(VANILLA_MAXSEGINDEX, true)
121 | return !couldFix
122 | }
123 | return false
124 | }
125 |
--------------------------------------------------------------------------------
/mylogger.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2023, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // Central log (stdout/stderr) of the program
19 | package main
20 |
21 | import (
22 | "bytes"
23 | "fmt"
24 | "log"
25 | "os"
26 | "sync"
27 | )
28 |
// MyLogger is the central, thread-safe log of the program; its output goes
// straight to stdout/stderr via the syslog/errlog writers.
type MyLogger struct {
	// Writing to the same slot allows to clobber stuff so that we don't see the
	// same thing written over and over again
	slots []string
	// segs accumulates seg dumps made via DumpSegs; read back with GetDumpedSegs
	segs bytes.Buffer
	// Mutex is used to order writes to stdout and stderr, as well as Sync call
	mu sync.Mutex
}
37 |
// Logs specific to thread, or even just one task (one task is always worked on
// by a single thread and never shared with other threads until complete, but a
// single thread may work on many tasks - each of them will have their own
// logger). Their output is not forwarded to stdout or stderr, but is instead
// buffered until (usually just one of them) is merged into main log of MyLogger
// type. Most of such logs are instead getting discarded altogether
type MiniLogger struct {
	buf   bytes.Buffer // buffered Printf/Verbose output, emitted on MyLogger.Merge
	slots []string     // see MyLogger.slots
	segs  bytes.Buffer // see MyLogger.segs
}
49 |
50 | func CreateLogger() *MyLogger {
51 | var b bytes.Buffer
52 | log := new(MyLogger)
53 | log.segs = b
54 | return log
55 | }
56 |
// Log is the program-wide central logger instance.
var Log = CreateLogger()

// Underlying writers: regular messages go to stdout, errors to stderr,
// both without prefixes or timestamps.
var syslog = log.New(os.Stdout, "", 0)
var errlog = log.New(os.Stderr, "", 0)
61 |
62 | // Your generic printf to let user see things
63 | func (log *MyLogger) Printf(s string, a ...interface{}) {
64 | log.mu.Lock()
65 | defer log.mu.Unlock()
66 | syslog.Printf(s, a...)
67 | return
68 | }
69 |
70 | // As generic as printf, but writes to stderr instead of stdout
71 | // Does NOT interrupt execution of the program
72 | func (log *MyLogger) Error(s string, a ...interface{}) {
73 | log.mu.Lock()
74 | defer log.mu.Unlock()
75 | errlog.Printf(s, a...)
76 | return
77 | }
78 |
79 | // For advanced users or users that are curious, or programmers, there is
80 | // stuff they might want to see but only when they can really bother to spend
81 | // time reading it
82 | func (log *MyLogger) Verbose(verbosityLevel int, s string, a ...interface{}) {
83 | if verbosityLevel <= config.VerbosityLevel {
84 | log.mu.Lock()
85 | defer log.mu.Unlock()
86 | syslog.Printf(s, a...)
87 | return
88 | }
89 | }
90 |
// Panicking is not a good thing, but at least we can now use formatted printing
// for it. The deferred unlock releases the mutex while the panic unwinds, so
// other writers are not left blocked.
func (log *MyLogger) Panic(s string, a ...interface{}) {
	log.mu.Lock()
	defer log.mu.Unlock()
	panic(fmt.Sprintf(s, a...))
}
98 |
99 | // Writes to the slot, clobbering whatever was there before us in that same slot
100 | // Used when need to debug something in nodes builder but it's worthless to
101 | // repeat if it concerns the same thing
102 | func (log *MyLogger) Push(slotNumber int, s string, a ...interface{}) {
103 | log.mu.Lock()
104 | defer log.mu.Unlock()
105 | for slotNumber >= len(log.slots) {
106 | log.slots = append(log.slots, "")
107 | }
108 | log.slots[slotNumber] = fmt.Sprintf(s, a...)
109 | }
110 |
111 | // Now that slots have been written over multiple times, time to see what was
112 | // written to begin with. If you don't call it, you might as well never write
113 | // anything to slots (it's usually not the stuff to go into release, mind it)
114 | func (log *MyLogger) Flush() {
115 | log.mu.Lock()
116 | defer log.mu.Unlock()
117 | for _, slot := range log.slots {
118 | syslog.Printf(slot)
119 | }
120 | log.slots = nil
121 | }
122 |
// DumpSegs appends a human-readable dump of the seg chain starting at ts to
// the internal segs buffer (read back via GetDumpedSegs). No-op unless
// config.DumpSegsFlag is set or ts is nil. All segs are expected to belong to
// the same sector as ts; offenders are marked "BAD!" in the output.
// NOTE(review): unlike Printf and friends this does not take the mutex -
// presumably only called from a single goroutine; confirm against callers.
func (log *MyLogger) DumpSegs(ts *NodeSeg) {
	if !config.DumpSegsFlag || ts == nil { // reference to global: config
		return
	}
	// Assume all come from same sector
	allSector := ts.sector
	log.segs.WriteString(fmt.Sprintf("Sector #%d:\n", allSector))
	for tmps := ts; tmps != nil; tmps = tmps.next {
		log.segs.WriteString(fmt.Sprintf(
			" Linedef: %d Flip: %d (%v,%v) - (%v, %v)",
			tmps.Linedef, tmps.getFlip(), tmps.StartVertex.X, tmps.StartVertex.Y,
			tmps.EndVertex.X, tmps.EndVertex.Y))
		if tmps.sector != allSector {
			// Is not supposed to write stuff from multiple sectors. You'll have
			// to rewrite code in this function to adjust it to your use case
			log.segs.WriteString(fmt.Sprintf(" BAD! Sector = %d\n", tmps.sector))
		} else {
			log.segs.WriteString("\n")
		}
	}
}
144 |
// GetDumpedSegs returns everything accumulated by DumpSegs so far.
// NOTE(review): reads the buffer without taking the mutex; assumed to be
// called after all dumping is done - confirm against callers.
func (log *MyLogger) GetDumpedSegs() string {
	return log.segs.String()
}
148 |
// Sync is used to wait until all messages are written to the output:
// acquiring and immediately releasing the mutex blocks until any in-flight
// write that holds it has completed.
func (log *MyLogger) Sync() {
	log.mu.Lock()
	log.mu.Unlock()
}
154 |
155 | func (log *MyLogger) Merge(mlog *MiniLogger, preface string) {
156 | if mlog == nil {
157 | return
158 | }
159 | log.mu.Lock()
160 | defer log.mu.Unlock()
161 | if len(preface) > 0 {
162 | syslog.Printf(preface)
163 | }
164 | content := mlog.buf.String()
165 | if len(content) > 0 {
166 | syslog.Printf(content)
167 | }
168 | segs := mlog.segs.String()
169 | if len(segs) > 0 {
170 | log.segs.WriteString(segs)
171 | }
172 | if len(mlog.slots) > 0 {
173 | log.slots = append(log.slots, mlog.slots...)
174 | }
175 |
176 | }
177 |
178 | func (mlog *MiniLogger) Printf(s string, a ...interface{}) {
179 | if mlog == nil {
180 | Log.Printf(s, a...)
181 | return
182 | }
183 | mlog.buf.WriteString(fmt.Sprintf(s, a...))
184 | }
185 |
186 | func (mlog *MiniLogger) Verbose(verbosityLevel int, s string, a ...interface{}) {
187 | if mlog == nil {
188 | Log.Verbose(verbosityLevel, s, a...)
189 | return
190 | }
191 | if verbosityLevel <= config.VerbosityLevel {
192 | mlog.buf.WriteString(fmt.Sprintf(s, a...))
193 | }
194 | }
195 |
196 | func (mlog *MiniLogger) Push(slotNumber int, s string, a ...interface{}) {
197 | if mlog == nil {
198 | Log.Push(slotNumber, s, a...)
199 | return
200 | }
201 | for slotNumber >= len(mlog.slots) {
202 | mlog.slots = append(mlog.slots, "")
203 | }
204 | mlog.slots[slotNumber] = fmt.Sprintf(s, a...)
205 | }
206 |
207 | func (mlog *MiniLogger) DumpSegs(ts *NodeSeg) {
208 | if mlog == nil {
209 | Log.DumpSegs(ts)
210 | }
211 | if !config.DumpSegsFlag || ts == nil { // reference to global: config
212 | return
213 | }
214 | // Assume all come from same sector
215 | allSector := ts.sector
216 | mlog.segs.WriteString(fmt.Sprintf("Sector #%d:\n", allSector))
217 | for tmps := ts; tmps != nil; tmps = tmps.next {
218 | mlog.segs.WriteString(fmt.Sprintf(
219 | " Linedef: %d Flip: %d (%v,%v) - (%v, %v)",
220 | tmps.Linedef, tmps.getFlip(), tmps.StartVertex.X, tmps.StartVertex.Y,
221 | tmps.EndVertex.X, tmps.EndVertex.Y))
222 | if tmps.sector != allSector {
223 | // Is not supposed to write stuff from multiple sectors. You'll have
224 | // to rewrite code in this function to adjust it to your use case
225 | mlog.segs.WriteString(fmt.Sprintf(" BAD! Sector = %d\n", tmps.sector))
226 | } else {
227 | mlog.segs.WriteString("\n")
228 | }
229 | }
230 | }
231 |
232 | func CreateMiniLogger() *MiniLogger {
233 | var b bytes.Buffer
234 | var b2 bytes.Buffer
235 | mlog := new(MiniLogger)
236 | mlog.segs = b
237 | mlog.buf = b2
238 | return mlog
239 | }
240 |
--------------------------------------------------------------------------------
/node_outro.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | // node_outro.go contains functions called after BSP tree is built (and in
20 | // case of multi-tree modes, the best one is chosen), but needs to be written
21 | // in the way lump format specifies, and perhaps fit under limits
22 |
// Largest seg index storable in the FirstSeg field of SSECTORS when the
// 16-bit field is treated as unsigned (limit-removing ports)...
const UNSIGNED_MAXSEGINDEX = uint16(65535)

// ...and when it is treated as signed, as vanilla does
const VANILLA_MAXSEGINDEX = uint16(32767)
25 |
// emptyNodesLumps discards all generated nodes/segs/subsectors data, both the
// regular and the DeeP variants, so it can be garbage collected.
func (w *NodesWork) emptyNodesLumps() {
	w.deepNodes = nil
	w.deepSegs = nil
	w.deepSubsectors = nil
	w.nodes = nil
	w.segs = nil
	w.subsectors = nil
}
34 |
// tooManySegsCantFix checks if there is overflow in SSECTORS when referencing
// segs. In some cases, it can (and then will) be fixed, so this function can
// mutate data
// If dryRun == true, doesn't print anything, doesn't modify anything. Dry run
// is used to check if conversion to DeeP nodes needs to happen when it is
// permitted to build either vanilla or DeeP nodes
// Returns true only for a fatal overflow (the data can't be represented in
// this format at all); overflows that merely lose strict vanilla
// compatibility are logged but return false.
func (w *NodesWork) tooManySegsCantFix(dryRun bool) bool {
	// Currently only normal nodes are checked, not deep nodes or extended nodes
	// (w.segs expected to be nil for both of those)
	if w.segs == nil { // assume no overflow is possible for advanced node formats
		return false
	}
	l := len(w.subsectors)
	if l == 0 { // wtf number of subsectors should never be zero
		return true
	}

	if dryRun {
		// Only assess feasibility; fitSegsToTarget is called with
		// dryRun=true so nothing is mutated or printed
		if w.lastSubsectorOverflows(UNSIGNED_MAXSEGINDEX) {
			couldFix, _ := w.fitSegsToTarget(UNSIGNED_MAXSEGINDEX, true)
			if !couldFix {
				return true
			}
		}
		return false
	}

	// Real run: first try to stay under the unsigned (limit-removing) cap,
	// then - if that wasn't even needed - under the stricter vanilla cap
	if w.lastSubsectorOverflows(UNSIGNED_MAXSEGINDEX) {
		couldFix, pivotSsector := w.fitSegsToTarget(UNSIGNED_MAXSEGINDEX, false)
		if !couldFix {
			return true
		}
		Log.Printf("You almost exceeded UNSIGNED addressable seg limit in SSECTORS (that would have made it invalid for all ports) - managed to reorder segs to avoid that. Changed subsector: %d\n",
			pivotSsector)
		Log.Printf("Too many segs to run in vanilla, need ports that treat FirstSeg field in SUBSECTORS as unsigned.\n")
	} else if w.lastSubsectorOverflows(VANILLA_MAXSEGINDEX) {
		couldFix, pivotSsector := w.fitSegsToTarget(VANILLA_MAXSEGINDEX, false)
		if !couldFix {
			Log.Printf("Too many segs to run in vanilla, need ports that treat FirstSeg field in SUBSECTORS as unsigned.\n")
			// But it can run in some ports, so don't return true (failure)
		} else {
			Log.Printf("You almost exceeded vanilla addressable seg limit in SSECTORS - managed to reorder segs to avoid that. Changed subsector: %d\n",
				pivotSsector)
		}
	}

	return false
}
83 |
// lastSubsectorOverflows returns true, if at least one of the following
// condition regarding last subsector is true:
// 1. FirstSeg field is not correct because original value overflowed the
// 16-bit (uint16) field (was >= 65536). This is detected by adding FirstSeg
// and SegCount and comparing to total seg count
// 2. FirstSeg field value exceeds maxSegIndex
// NOTE that while (the converted version of) this might end up in znodegen.go,
// it won't be actually called there
func (w *NodesWork) lastSubsectorOverflows(maxSegIndex uint16) bool {
	l := len(w.subsectors)
	firstSeg := int(w.subsectors[l-1].FirstSeg)
	segCnt := int(w.subsectors[l-1].SegCount)
	if len(w.segs) > (firstSeg + segCnt) {
		// seg count is not valid
		return true
	}
	if firstSeg > int(maxSegIndex) {
		return true
	}
	return false
}
105 |
// fitSegsToTarget reorders seg array (for SEG lump) so that all subsectors
// can have their FirstSeg value <= maxSegIndex, if possible, and updates
// subsectors array to point to correct segs. It is assumed this value overflows
// (must have been tested before call)
// If this couldn't be done, return false
// Assumes segs are not shared between different subsectors
// Assumes sequential order of segs matches sequential order of subsectors
// before call
// NOTE that while (the converted version of) this might end up in znodegen.go,
// it won't be actually called there
func (w *NodesWork) fitSegsToTarget(maxSegIndex uint16, dryRun bool) (bool, int) {
	// Moving the biggest subsector's segs to the very end makes the largest
	// FirstSeg any remaining subsector can have equal to
	// len(segs) - maxSegCountInSubsector
	newMaxSegIndex := uint32(len(w.segs)) - w.totals.maxSegCountInSubsector
	if newMaxSegIndex > uint32(maxSegIndex) {
		// Nothing can be done
		return false, 0
	}

	if dryRun {
		// Yes, can do it, but we were not requested to do it yet
		// (used to assess whether switch to DeeP nodes is justified)
		return true, -1
	}

	// not necessary the only one, btw. We'll fetch the last one
	// NOTE(review): the downward loop below has no break, so it actually ends
	// on the LOWEST-indexed matching subsector, not the last one as the
	// comment above says. Any match yields a valid reorder; confirm which
	// was intended.
	biggestSubsector := -1
	for i := len(w.subsectors) - 1; i >= 0; i-- {
		if uint32(w.subsectors[i].SegCount) == w.totals.maxSegCountInSubsector {
			biggestSubsector = i
		}
	}
	if biggestSubsector == -1 {
		Log.Panic("Couldn't find subsector whose seg count matches computed maximum. (programmer error)\n")
	}

	// if maxSegIndex was 65535, all FirstSeg values following biggestSubsector
	// were overflowed, so one has to recompute the value from scratch
	newAddr := uint16(0)
	if biggestSubsector > 0 {
		newAddr = w.subsectors[biggestSubsector-1].FirstSeg +
			w.subsectors[biggestSubsector-1].SegCount
	}

	pivot := w.subsectors[biggestSubsector].FirstSeg
	pivot2 := pivot + w.subsectors[biggestSubsector].SegCount
	// Move pivot:pivot2 subslice to the end of w.segs slice
	w.segs = append(w.segs[:pivot], append(w.segs[pivot2:], w.segs[pivot:pivot2]...)...)
	// Renumber everything after the extracted run; their segs all shifted
	// down by the run's length
	for i := biggestSubsector + 1; i < len(w.subsectors); i++ {
		w.subsectors[i].FirstSeg = newAddr
		newAddr += w.subsectors[i].SegCount
	}
	w.subsectors[biggestSubsector].FirstSeg = uint16(newMaxSegIndex)
	return true, biggestSubsector
}
159 |
160 | func HeightOfNodes(node *NodeInProcess) int {
161 | lHeight := 1
162 | rHeight := 1
163 | if node.nextL != nil {
164 | lHeight = HeightOfNodes(node.nextL) + 1
165 | }
166 | if node.nextR != nil {
167 | rHeight = HeightOfNodes(node.nextR) + 1
168 | }
169 | if lHeight < rHeight {
170 | return rHeight
171 | }
172 | return lHeight
173 | }
174 |
// Node reversal for standard nodes
// reverseNodes writes the in-process tree into w.nodes in post-order, so each
// node lands after its children and the root ends up last, and returns the
// index the caller should reference this node by. Allocates w.nodes on the
// first (root) call. With config.StraightNodes set, delegates to
// convertNodesStraight instead.
func (w *NodesWork) reverseNodes(node *NodeInProcess) uint32 {
	if w.nodes == nil {
		w.nodes = make([]Node, w.totals.numNodes)
		w.nreverse = 0
	}
	if config.StraightNodes { // reference to global: config
		// this line shall be executed for root node only
		// root node still needs to be placed last, even as if the rest of tree
		// is written "unreversed"
		return w.convertNodesStraight(node, uint32(w.totals.numNodes-1))
	}
	if node.nextR != nil {
		node.RChild = w.reverseNodes(node.nextR)
	}
	if node.nextL != nil {
		node.LChild = w.reverseNodes(node.nextL)
	}

	w.nodes[w.nreverse] = Node{
		X:      node.X,
		Y:      node.Y,
		Dx:     node.Dx,
		Dy:     node.Dy,
		Rbox:   node.Rbox,
		Lbox:   node.Lbox,
		LChild: convertToSsectorMask(node.LChild, w.SsectorMask),
		RChild: convertToSsectorMask(node.RChild, w.SsectorMask),
	}

	w.nreverse++
	return w.nreverse - 1
}
208 |
209 | // NodeInProcess now always uses SSECTOR_DEEP_MASK by default, so conversion
210 | // involves this
211 | func convertToSsectorMask(childIdx uint32, ssectorMask uint32) int16 {
212 | masked := (childIdx & SSECTOR_DEEP_MASK) != 0
213 | if masked {
214 | childIdx = childIdx & ^SSECTOR_DEEP_MASK | ssectorMask
215 | }
216 | return int16(childIdx)
217 | }
218 |
// Node reversal for deep/extended nodes
// Same post-order layout as reverseNodes, but targets w.deepNodes whose
// DeepNode records keep 32-bit children (no ssector-mask conversion needed).
// Returns the index the caller should reference this node by; root lands last.
func (w *NodesWork) reverseDeepNodes(node *NodeInProcess) uint32 {
	if w.deepNodes == nil {
		w.deepNodes = make([]DeepNode, w.totals.numNodes)
		w.nreverse = 0
	}
	if config.StraightNodes { // reference to global: config
		// this line shall be executed for root node only
		// root node still needs to be placed last, even as if the rest of tree
		// is written "unreversed"
		return w.convertDeepNodesStraight(node, uint32(w.totals.numNodes-1))
	}
	if node.nextR != nil {
		node.RChild = w.reverseDeepNodes(node.nextR)
	}
	if node.nextL != nil {
		node.LChild = w.reverseDeepNodes(node.nextL)
	}

	w.deepNodes[w.nreverse] = DeepNode{
		X:      node.X,
		Y:      node.Y,
		Dx:     node.Dx,
		Dy:     node.Dy,
		Rbox:   node.Rbox,
		Lbox:   node.Lbox,
		LChild: int32(node.LChild),
		RChild: int32(node.RChild),
	}

	w.nreverse++
	return w.nreverse - 1
}
252 |
// Writes nodes without reversal, except the root node is still last
// Standard nodes version
// convertNodesStraight stores node at index idx; child slots are reserved
// (w.nreverse incremented) before recursing - right child first - so sibling
// subtrees get disjoint index ranges.
func (w *NodesWork) convertNodesStraight(node *NodeInProcess, idx uint32) uint32 {
	// Reserve indices for the children before descending into either
	rnode := w.nreverse
	lnode := w.nreverse
	if node.nextR != nil {
		w.nreverse++
	}
	if node.nextL != nil {
		w.nreverse++
	}
	if node.nextR != nil {
		node.RChild = w.convertNodesStraight(node.nextR, rnode)
		lnode++ // left child goes into the slot right after the right child
	}
	if node.nextL != nil {
		node.LChild = w.convertNodesStraight(node.nextL, lnode)
	}
	w.nodes[idx] = Node{
		X:      node.X,
		Y:      node.Y,
		Dx:     node.Dx,
		Dy:     node.Dy,
		Rbox:   node.Rbox,
		Lbox:   node.Lbox,
		LChild: convertToSsectorMask(node.LChild, w.SsectorMask),
		RChild: convertToSsectorMask(node.RChild, w.SsectorMask),
	}
	return idx
}
283 |
// Writes nodes without reversal, except the root node is still last
// Deep/extended nodes version
// Mirrors convertNodesStraight, but emits DeepNode records (32-bit children,
// no ssector-mask conversion) into w.deepNodes.
func (w *NodesWork) convertDeepNodesStraight(node *NodeInProcess, idx uint32) uint32 {
	// Reserve indices for the children before descending into either
	rnode := w.nreverse
	lnode := w.nreverse
	if node.nextR != nil {
		w.nreverse++
	}
	if node.nextL != nil {
		w.nreverse++
	}
	if node.nextR != nil {
		node.RChild = w.convertDeepNodesStraight(node.nextR, rnode)
		lnode++ // left child goes into the slot right after the right child
	}
	if node.nextL != nil {
		node.LChild = w.convertDeepNodesStraight(node.nextL, lnode)
	}
	w.deepNodes[idx] = DeepNode{
		X:      node.X,
		Y:      node.Y,
		Dx:     node.Dx,
		Dy:     node.Dy,
		Rbox:   node.Rbox,
		Lbox:   node.Lbox,
		LChild: int32(node.LChild),
		RChild: int32(node.RChild),
	}
	return idx
}
314 |
// getZdoomNodesBytes returns serialized ZDoom-format nodes data.
// stub, replaced in zdefs.go with ZgetZdoomNodesBytes_Proto
// This is done to reduce compiled executable size
func (w *NodesWork) getZdoomNodesBytes() []byte {
	return nil
}
320 |
--------------------------------------------------------------------------------
/node_vmap.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2023, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | import (
20 | "math"
21 | )
22 |
// VertexMap utils go here. Also contains some stuff for VertexCache

// Block granularity of the vertex grid, in 16.16 fixed-point units
// (presumably 2^8 = 256 map units per block, assuming FRACBITS is the
// fixed-point fraction width - confirm against FRACBITS definition)
const VMAP_BLOCK_SHIFT = 8 + FRACBITS

const VMAP_BLOCK_SIZE = 1 << VMAP_BLOCK_SHIFT

// Extra map units of padding added around the bounding box in CreateVertexMap
const VMAP_SAFE_MARGIN = 2.0 // Floating point garbage
30 |
// VertexMap is a ZDBSP thingy. Allows to lookup close-enough vertices among
// existing ones to the one created as the result of intersection. I am not sure
// as to what the exact consequences of choosing a very slightly different
// vertex instead of exact match are. More investigation needed
// VertexMap is used in VigilantBSP when:
// 1) nodes are built for Zdoom extended/compressed format
// 2) in code for advanced visplane reduction that traces line through void and
// non-void
type VertexMap struct {
	w          *NodesWork       // back reference; used for logging in GetBlock
	Grid       [][]*FloatVertex // vertices bucketed per block (see GetBlock)
	Snapshot   []int            // per-block lengths saved by RestoreOrBeginSnapshot
	BlocksWide int              // grid width, in blocks
	BlocksTall int              // grid height, in blocks
	MinX, MinY float64          // world bounds covered by the grid (margin included)
	MaxX, MaxY float64
}
48 |
// CreateVertexMap builds an empty vertex map covering the rectangle
// (minx,miny)-(maxx,maxy), padded by VMAP_SAFE_MARGIN units on every side.
func CreateVertexMap(w *NodesWork, minx, miny, maxx, maxy int) *VertexMap {
	// Mitigation for a possible crash when producing line traces against solid
	// lines (void and non-void differentiation) in advanced visplane reduction
	// for, apparently, bent segs (previously split segs whose angle is
	// different from angle of the original linedef - in node format that lacks
	// precision) can produce intersection vertice slightly outside the bounds
	minx = minx - VMAP_SAFE_MARGIN
	miny = miny - VMAP_SAFE_MARGIN
	maxx = maxx + VMAP_SAFE_MARGIN
	maxy = maxy + VMAP_SAFE_MARGIN

	vm := &VertexMap{
		w:    w,
		MinX: float64(minx),
		MinY: float64(miny),
		// Dimensions are computed in 16.16 fixed point, rounded up to a
		// whole number of blocks
		BlocksWide: int((float64(maxx-minx+1)*
			FIXED16DOT16_MULTIPLIER + float64(VMAP_BLOCK_SIZE-1)) /
			float64(VMAP_BLOCK_SIZE)),
		BlocksTall: int((float64(maxy-miny+1)*
			FIXED16DOT16_MULTIPLIER + float64(VMAP_BLOCK_SIZE-1)) /
			float64(VMAP_BLOCK_SIZE)),
	}
	// Recompute the far corner from the rounded-up block counts, so that
	// Max, unlike the caller's maxx/maxy, is exactly on a block boundary
	vm.MaxX = vm.MinX + float64(vm.BlocksWide*VMAP_BLOCK_SIZE-1)/FIXED16DOT16_MULTIPLIER
	vm.MaxY = vm.MinY + float64(vm.BlocksTall*VMAP_BLOCK_SIZE-1)/FIXED16DOT16_MULTIPLIER
	vm.Grid = make([][]*FloatVertex, vm.BlocksWide*vm.BlocksTall)
	return vm
}
76 |
77 | func (vm *VertexMap) Clone() *VertexMap {
78 | if vm == nil {
79 | return nil
80 | }
81 | newVm := &VertexMap{}
82 | *newVm = *vm
83 | newVm.w = nil
84 | newVm.Grid = make([][]*FloatVertex, 0, len(vm.Grid))
85 | for _, it := range vm.Grid {
86 | cpit := make([]*FloatVertex, 0, len(it))
87 | for _, it2 := range it {
88 | nv := &FloatVertex{}
89 | *nv = *it2
90 | cpit = append(cpit, nv)
91 | }
92 | newVm.Grid = append(newVm.Grid, cpit)
93 | }
94 | if vm.Snapshot != nil {
95 | newVm.Snapshot = make([]int, len(vm.Grid))
96 | for i, it := range vm.Snapshot {
97 | newVm.Snapshot[i] = it
98 | }
99 | }
100 | return newVm
101 | }
102 |
// GetBlock maps world coordinates to the index of the containing block in
// vm.Grid, via 16.16 fixed-point arithmetic. Out-of-range results are logged
// (verbosely) and coerced to block 0 instead of being allowed to panic.
func (vm *VertexMap) GetBlock(x, y float64) int {
	// assert x >= MinX
	// assert y >= MinY
	// assert x <= MaxX
	// assert y <= MaxY
	// The above constraints are actually violated sometimes by some epsilon
	// because floating point is used and not fixed point like in ZDBSP. Such
	// cases don't produce out of bounds index, though, because of being really
	// close to the border values. For some runaway cases, see a different
	// mitigation below
	ret := int(uint((x-vm.MinX)*FIXED16DOT16_MULTIPLIER)>>VMAP_BLOCK_SHIFT +
		(uint((y-vm.MinY)*FIXED16DOT16_MULTIPLIER)>>VMAP_BLOCK_SHIFT)*
			uint(vm.BlocksWide))
	if ret < 0 || ret >= len(vm.Grid) {
		vm.w.mlog.Verbose(1, "Vertex map index out of range, source values: x=%f, y=%f xmin,ymin=(%f,%f) xmax,ymax=(%f,%f)\n",
			x, y, vm.MinX, vm.MinY, vm.MaxX, vm.MaxY)
		// Allow vertex map to function without panic in such cases, should they
		// happen
		// Accumulating such errors (if wrong borders are specified) can cause
		// slowdown, but not crash, and should not result in malfunction
		return 0
	}
	return ret
}
127 |
128 | func (vm *VertexMap) SelectVertexExact(x, y float64, id int) *FloatVertex {
129 | block := &(vm.Grid[vm.GetBlock(x, y)])
130 | for _, it := range *block {
131 | if it.X == x && it.Y == y {
132 | return it
133 | }
134 | }
135 | return vm.insertVertex(x, y, id)
136 | }
137 |
138 | func (vm *VertexMap) SelectVertexClose(x, y float64) *FloatVertex {
139 | block := &(vm.Grid[vm.GetBlock(x, y)])
140 | for _, it := range *block {
141 | if math.Abs(it.X-x) < VERTEX_EPSILON &&
142 | math.Abs(it.Y-y) < VERTEX_EPSILON {
143 | return it
144 | }
145 | }
146 | return vm.insertVertex(x, y, -1)
147 | }
148 |
// insertVertex creates a FloatVertex at (x, y) with the given id and registers
// it in the grid. If a vertex is near a block boundary, then it will be
// inserted on both sides of the boundary so that SelectVertexClose can find
// it by checking in only one block.
func (vm *VertexMap) insertVertex(x, y float64, id int) *FloatVertex {
	ret := &FloatVertex{
		X:  x,
		Y:  y,
		Id: id,
	}
	// Clamp the epsilon neighborhood of (x, y) to the map borders
	minx := vm.MinX
	if minx < (x - VERTEX_EPSILON) {
		minx = x - VERTEX_EPSILON
	}
	maxx := vm.MaxX
	if maxx > (x + VERTEX_EPSILON) {
		maxx = x + VERTEX_EPSILON
	}
	miny := vm.MinY
	if miny < (y - VERTEX_EPSILON) {
		miny = y - VERTEX_EPSILON
	}
	maxy := vm.MaxY
	if maxy > (y + VERTEX_EPSILON) {
		maxy = y + VERTEX_EPSILON
	}
	// The four corners of the clamped neighborhood together touch every
	// block the neighborhood can overlap
	blk := [4]int{vm.GetBlock(minx, miny),
		vm.GetBlock(maxx, miny),
		vm.GetBlock(minx, maxy),
		vm.GetBlock(maxx, maxy)}
	// blcount records each block's length BEFORE any insertion; duplicate
	// indices in blk alias the same block, so the length check in the loop
	// below ensures the vertex is appended at most once per distinct block
	blcount := [4]int{
		len(vm.Grid[blk[0]]),
		len(vm.Grid[blk[1]]),
		len(vm.Grid[blk[2]]),
		len(vm.Grid[blk[3]])}
	for i := 0; i < 4; i++ {
		if len(vm.Grid[blk[i]]) == blcount[i] {
			vm.Grid[blk[i]] = append(vm.Grid[blk[i]], ret)
		}
	}
	return ret
}
190 |
191 | // RestoreOrBeginSnapshot() removes all vertices from map that were added
192 | // since previous RestoreOrBeginSnapshot() call. Snapshots are useful to create
193 | // distinct vertex spaces for line traces in diffgeometry operations (volatile
194 | // vertices computed there might not end up being vertices actually placed on
195 | // the map, and have additional restriction of being sortable alongside the
196 | // line)
197 | // As with the rest of VertexMap methods, snapshots do NOT provide for
198 | // concurrent access
199 | func (vm *VertexMap) RestoreOrBeginSnapshot() {
200 | if vm.Snapshot == nil {
201 | // begin
202 | vm.Snapshot = make([]int, len(vm.Grid))
203 | for i, it := range vm.Grid {
204 | vm.Snapshot[i] = len(it)
205 | }
206 | } else {
207 | // restore
208 | for i, it := range vm.Grid {
209 | vm.Grid[i] = it[:vm.Snapshot[i]]
210 | }
211 | }
212 | }
213 |
214 | func PopulateVertexMap(vm *VertexMap, allSegs []*NodeSeg) {
215 | for _, seg := range allSegs {
216 | vm.SelectVertexExact(float64(seg.psx), float64(seg.psy),
217 | int(seg.StartVertex.idx))
218 | vm.SelectVertexExact(float64(seg.pex), float64(seg.pey),
219 | int(seg.EndVertex.idx))
220 | }
221 | }
222 |
223 | func PopulateVertexMapFromLines(vm *VertexMap, lines AbstractLines) {
224 | l := int(lines.Len())
225 | for i := 0; i < l; i++ {
226 | x1, x2, y1, y2 := lines.GetAllXY(uint16(i))
227 | vm.SelectVertexExact(float64(x1), float64(y1), i)
228 | vm.SelectVertexExact(float64(x2), float64(y2), i)
229 | }
230 | }
231 |
232 | func PopulateVertexCache(cache map[SimpleVertex]int, allSegs []*NodeSeg) {
233 | for _, it := range allSegs {
234 | rec := SimpleVertex{int(it.StartVertex.X), int(it.StartVertex.Y)}
235 | cache[rec] = int(it.StartVertex.idx)
236 | rec = SimpleVertex{int(it.EndVertex.X), int(it.EndVertex.Y)}
237 | cache[rec] = int(it.EndVertex.idx)
238 | }
239 | }
240 |
--------------------------------------------------------------------------------
/nodegen_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2023, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // diffgeometry_test.go
19 | package main
20 |
21 | import (
22 | //"fmt"
23 | "testing"
24 | )
25 |
26 | // Some functions declared here aren't proper tests, they never fail
27 |
28 | // No problem here - they aren't collinear indeed
// TestForAliasCorrectness exercises the three doLinesIntersect variants on a
// known partition/check line pair from watrsp map 03 and logs intermediate
// values (it only logs, it never fails - an exploratory "test").
func TestForAliasCorrectness(t *testing.T) {
	// part linedef number 6396 watrsp map 03
	// check linedef number 12908 watrsp map 03
	c := &IntersectionContext{
		psx: 3520,
		psy: 768,
		pex: 3520,
		pey: 832,
		lsx: 6274,
		lsy: -821,
		lex: 6218,
		ley: -790,
	}
	c.pdx = c.pex - c.psx
	c.pdy = c.pey - c.psy
	val := doLinesIntersectStandard(c)
	t.Logf("val = %d\n", val)
	val = doLinesIntersectDetail(c)
	t.Logf("val = %d\n", val)
	val = ZdoLinesIntersect_Proto(c)
	t.Logf("val = %d\n", val)

	dx2 := c.psx - c.lsx // Checking line -> partition
	dy2 := c.psy - c.lsy
	dx3 := c.psx - c.lex
	dy3 := c.psy - c.ley

	a := c.pdy*dx2 - c.pdx*dy2
	b := c.pdy*dx3 - c.pdx*dy3
	/*a := WideNumber(c.pdy)*WideNumber(dx2) - WideNumber(c.pdx)*WideNumber(dy2)
	b := WideNumber(c.pdy)*WideNumber(dx3) - WideNumber(c.pdx)*WideNumber(dy3)*/
	t.Logf("dx2 = %d dy2 = %d dx3 = %d dy3 = %d\n", dx2, dy2, dx3, dy3)
	t.Logf("a = %d b = %d\n", a, b)

	x, y := c.computeIntersection()
	dx2 = c.lsx - x // Find distance from line start
	dy2 = c.lsy - y // to split point
	l := WideNumber(dx2)*WideNumber(dx2) + WideNumber(dy2)*WideNumber(dy2)
	t.Logf("dx2 = %d dy2 = %d l = %d\n", dx2, dy2, l)
}
69 |
70 | // This can be remade into a proper test - it contains the failing example
71 | // for vanilla doLinesIntersect and variations
// This can be remade into a proper test - it contains the failing example
// for vanilla doLinesIntersect and variations: per the inline comments, the
// standard and detail variants give the wrong answer here while the Zdoom
// prototype gets it right. Currently it only logs and never fails.
func TestForCulpritAliasCorrectness(t *testing.T) {
	// part linedef number 12908 watrsp map 03
	// check linedef number 447 watrsp map 03
	c := &IntersectionContext{
		psx: 6274,
		psy: -821,
		pex: 6218,
		pey: -790,
		lsx: 3520,
		lsy: 704,
		lex: 3520,
		ley: 703,
	}

	/*
		// part linedef number 447 watrsp map 03
		// check linedef number 12908 watrsp map 03
		c := &IntersectionContext{
			psx: 3520,
			psy: 704,
			pex: 3520,
			pey: 703,
			lsx: 6274,
			lsy: -821,
			lex: 6218,
			ley: -790,
		}*/

	c.pdx = c.pex - c.psx
	c.pdy = c.pey - c.psy
	val := doLinesIntersectStandard(c) // gives wrong answer
	t.Logf("val = %d\n", val)
	val = doLinesIntersectDetail(c) // gives wrong answer
	t.Logf("val = %d\n", val)
	val = ZdoLinesIntersect_Proto(c) // gives correct answer
	t.Logf("val = %d\n", val)

	dx2 := c.psx - c.lsx // Checking line -> partition
	dy2 := c.psy - c.lsy
	dx3 := c.psx - c.lex
	dy3 := c.psy - c.ley

	a := c.pdy*dx2 - c.pdx*dy2
	b := c.pdy*dx3 - c.pdx*dy3
	/*a := WideNumber(c.pdy)*WideNumber(dx2) - WideNumber(c.pdx)*WideNumber(dy2)
	b := WideNumber(c.pdy)*WideNumber(dx3) - WideNumber(c.pdx)*WideNumber(dy3)*/
	t.Logf("dx2 = %d dy2 = %d dx3 = %d dy3 = %d\n", dx2, dy2, dx3, dy3)
	t.Logf("a = %d b = %d\n", a, b)

	x, y := c.computeIntersection()
	dx2 = c.lsx - x // Find distance from line start
	dy2 = c.lsy - y // to split point
	l := WideNumber(dx2)*WideNumber(dx2) + WideNumber(dy2)*WideNumber(dy2)
	t.Logf("dx2 = %d dy2 = %d l = %d\n", dx2, dy2, l)

	cmp := float64(b) * float64(b) / float64(c.pdx*c.pdx+c.pdy*c.pdy)
	t.Logf("cmp = %v\n", cmp)
}
130 |
131 | func TestFlipVal(t *testing.T) {
132 | answers := make([][2]uint8, 0)
133 | answers = append(answers,
134 | [2]uint8{SIDENESS_COLLINEAR, SIDENESS_COLLINEAR},
135 | [2]uint8{SIDENESS_INTERSECT, SIDENESS_INTERSECT},
136 | [2]uint8{SIDENESS_LEFT, SIDENESS_RIGHT},
137 | [2]uint8{SIDENESS_RIGHT, SIDENESS_LEFT},
138 | )
139 | for i, answer := range answers {
140 | val := flipVal(answer[0])
141 | if val != answer[1] {
142 | t.Errorf("flip val id=%d given %d expected %d received %d\n",
143 | i, answer[0], answer[1], val)
144 | }
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
/other/rejectfixtest_20220621.wad:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VigilantDoomer/vigilantbsp/57ce4bc6a60a386d5f624026fc92df02d5c21b7d/other/rejectfixtest_20220621.wad
--------------------------------------------------------------------------------
/other/vigilantbsp.cfg:
--------------------------------------------------------------------------------
1 |
2 | compilers
3 | {
4 | // This defines what files a compiler uses
5 | // The setting named "program" defines what .exe to run
6 | vigilantbsp
7 | {
8 | interface = "NodesCompiler";
9 | program = "vigilantbsp.exe";
10 | }
11 | }
12 |
13 |
14 | // Below are configurations for this nodebuilder. If you want to make your own configurations,
15 | // it is recommended to do so in your own file as this file will be updated each release.
16 |
17 | // NOTE: Nodebuilder configuration key names defined here must be unique for all nodebuilders!
// It is recommended to start the key name with the name of the compiler, followed by an underscore and a specific name.
19 |
20 | // The "compiler" setting must refer to an existing compiler (such as defined above), but it
21 | // does not have to be a compiler defined in the same configuration file.
22 |
23 | nodebuilders
24 | {
25 | vigilantbsp_normal
26 | {
27 | title = "vigilantbsp - Default";
28 | compiler = "vigilantbsp";
29 | parameters = "%FI -o %FO";
30 | }
31 |
32 | vigilantbsp_noreject
33 | {
34 | title = "vigilantbsp - Zero reject";
35 | compiler = "vigilantbsp";
36 | parameters = "-rz %FI -o %FO";
37 | }
38 |
39 | vigilantbsp_visplane
40 | {
41 | title = "vigilantbsp - Target vanilla (Reduce visplanes etc.)";
42 | compiler = "vigilantbsp";
43 | parameters = "-bo=3sa?e=1 -na=2 %FI -o %FO";
44 | }
45 |
46 | vigilantbsp_maelstrom
47 | {
48 | title = "vigilantbsp - Fastest (maelstrom, zero reject)";
49 | compiler = "vigilantbsp";
50 | parameters = "-na=3 -rz %FI -o %FO";
51 | }
52 |
53 | vigilantbsp_extnode
54 | {
55 | title = "vigilantbsp - Non-gl extended nodes, zero reject";
56 | compiler = "vigilantbsp";
57 | parameters = "-be=2 -nc=x -rz %FI -o %FO";
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/picknode_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2024, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // picknode_test.go
19 | package main
20 |
21 | import (
22 | "testing"
23 | )
24 |
25 | func TestHitsCrunchBenchmarkBootstrap(t *testing.T) {
26 | hc := hitsCrunchData
27 | for _, v := range hc.sectorHits {
28 | if v > 7 {
29 | t.Fatalf("sectorHits contains illegal data\n")
30 | }
31 | }
32 | }
33 |
// BenchmarkHitsCrunchNew times the switch-based formulation of the
// sector-hits tallying loop. It is deliberately paired with
// BenchmarkHitsCrunchOld below: the two bodies must keep their distinct
// loop/branch shapes, since that difference is exactly what is measured.
func BenchmarkHitsCrunchNew(b *testing.B) {
	hc := hitsCrunchData // fixture shared with the Old variant
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		for tot := range hc.sectorHits {
			// Fold each hit category into the counters in a single switch.
			switch hc.sectorHits[tot] {
			case 1:
				{
					hc.diff++
					hc.flat++
				}
			case 2:
				{
					hc.diff--
					hc.flat++
				}
			case 3:
				{
					hc.SectorsSplit++
					hc.flat++
				}
			case 4, 5, 6, 7:
				{
					hc.SectorsSplit++
					hc.unmerged++
					hc.flat++
				}
			}
		}
	}
}
66 |
// BenchmarkHitsCrunchOld times the older formulation of the same tallying
// loop (indexed loop, switch plus a chain of ifs), for comparison against
// BenchmarkHitsCrunchNew above. Keep the shape as-is — it is the subject
// of the benchmark.
func BenchmarkHitsCrunchOld(b *testing.B) {
	hc := hitsCrunchData // fixture shared with the New variant
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		for tot := 0; tot < len(hc.sectorHits); tot++ {
			switch hc.sectorHits[tot] {
			case 1:
				{
					hc.diff++
				}
			case 2:
				{
					hc.diff--
				}
			}

			if hc.sectorHits[tot] >= 3 {
				hc.SectorsSplit++
			}
			if hc.sectorHits[tot] >= 4 {
				hc.unmerged++
			}
			if hc.sectorHits[tot] != 0 {
				hc.flat++
			}
		}
	}
}
96 |
--------------------------------------------------------------------------------
/rejectDFS.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // rejectDFS
19 | package main
20 |
21 | // All the jazz related to speeding up reject builder with graphs goes into
22 | // this file.
23 |
// DFS performs a depth-first search from sector, adding every reachable
// sector to graph and computing the bookkeeping used for articulation
// point (cut vertex) detection:
//   - indexDFS: discovery order within the graph
//   - loDFS: lowest discovery index reachable from this sector's subtree
//   - hiDFS: highest discovery index inside this sector's subtree
//   - isArticulation: set when some child subtree cannot reach above this
//     sector (child.loDFS >= sector.indexDFS)
//
// Returns the number of DFS-tree children of sector; the caller
// (CreateGraph) uses this for the special articulation rule for the root.
func (w *RejectWork) DFS(graph *RejGraph, sector *RejSector) int {
	// Initialize the sector
	sector.graph = graph
	sector.indexDFS = graph.numSectors
	sector.loDFS = graph.numSectors
	sector.isArticulation = false

	// Add this sector to the graph
	graph.sectors[graph.numSectors] = sector
	graph.numSectors++

	numChildren := 0

	for _, child := range w.DFSGetNeighborsAndGroupsiblings(sector) {
		if child == nil {
			// neighbor lists may be nil-terminated (see
			// DFSGetNeighborsAndGroupsiblings)
			break
		}
		if child.graph != graph {
			// Not yet visited: becomes a DFS-tree child
			numChildren++
			child.graphParent = sector
			w.DFS(graph, child)
			if child.loDFS < sector.loDFS {
				sector.loDFS = child.loDFS
			}
			if child.loDFS >= sector.indexDFS {
				// The child's subtree cannot climb above us
				sector.isArticulation = true
			}
		} else if child != sector.graphParent {
			// Back edge to an already-visited sector
			if child.indexDFS < sector.loDFS {
				sector.loDFS = child.indexDFS
			}
		}
	}

	// By now the whole subtree has been numbered
	sector.hiDFS = graph.numSectors - 1

	return numChildren
}
62 |
63 | func (w *RejectWork) CreateGraph(root *RejSector) *RejGraph {
64 | w.graphTable.numGraphs++
65 | graph := &(w.graphTable.graphs[w.graphTable.numGraphs-1])
66 |
67 | graph.sectors = w.graphTable.sectorStart
68 | graph.numSectors = 0
69 |
70 | root.graphParent = nil
71 | if w.DFS(graph, root) > 1 {
72 | root.isArticulation = true
73 | } else {
74 | root.isArticulation = false
75 | }
76 |
77 | w.graphTable.sectorStart = w.graphTable.sectorStart[graph.numSectors:]
78 |
79 | return graph
80 | }
81 |
82 | func (w *RejectWork) HideComponents(oldGraph, newGraph *RejGraph) {
83 | for i := 0; i < oldGraph.numSectors; i++ {
84 | sec1 := oldGraph.sectors[i]
85 | if sec1.graph == oldGraph {
86 | for j := 0; j < newGraph.numSectors; j++ {
87 | sec2 := newGraph.sectors[j]
88 | w.markVisibility(sec1.index, sec2.index, VIS_HIDDEN)
89 | }
90 | }
91 | }
92 | }
93 |
94 | func (w *RejectWork) SplitGraph(oldGraph *RejGraph) {
95 | remainingSectors := oldGraph.numSectors - 1
96 |
97 | for i := 0; i < oldGraph.numSectors; i++ {
98 | sec := oldGraph.sectors[i]
99 | if sec.graph == oldGraph {
100 | newGraph := w.CreateGraph(sec)
101 | if newGraph.numSectors < remainingSectors {
102 | w.HideComponents(oldGraph, newGraph)
103 | }
104 | remainingSectors -= newGraph.numSectors - 1
105 | }
106 | }
107 | }
108 |
109 | func (w *RejectWork) InitializeGraphs(numSectors int) {
110 | Log.Verbose(1, "Creating sector graphs...\n")
111 |
112 | w.graphTable.numGraphs = 0
113 | w.graphTable.graphs = make([]RejGraph, w.numSectors*2)
114 | sectorPool := make([]*RejSector, w.numSectors*4)
115 | w.graphTable.sectorStart = sectorPool
116 |
117 | for _, v := range w.graphTable.graphs {
118 | v.numSectors = 0
119 | v.sectors = nil
120 | }
121 |
122 | for i, _ := range sectorPool {
123 | sectorPool[i] = nil
124 | }
125 |
126 | // Create the initial graph
127 | graph := &(w.graphTable.graphs[0])
128 | graph.numSectors = w.numSectors
129 | graph.sectors = w.graphTable.sectorStart
130 | w.graphTable.sectorStart = w.graphTable.sectorStart[w.numSectors:]
131 | w.graphTable.numGraphs++
132 |
133 | // Put all sectors in the initial graph
134 | for i := 0; i < w.numSectors; i++ {
135 | w.sectors[i].graph = graph
136 | graph.sectors[i] = &(w.sectors[i])
137 | }
138 |
139 | // Separate the individual graphs
140 | w.SplitGraph(graph)
141 |
142 | // Keep a permanent copy of the initial graph
143 | for i := 0; i < w.numSectors; i++ {
144 | w.sectors[i].baseGraph = w.sectors[i].graph
145 | }
146 |
147 | // Calculate the sector metrics
148 | for i := 1; i < w.graphTable.numGraphs; i++ {
149 | graph := &(w.graphTable.graphs[i])
150 | for j := 0; j < graph.numSectors; j++ {
151 | sec := graph.sectors[j]
152 | sum := 0
153 | left := graph.numSectors - 1
154 | for _, child := range w.DFSGetNeighborsAndGroupsiblings(sec) {
155 | if child == nil {
156 | break
157 | }
158 |
159 | if child.graphParent != sec {
160 | continue
161 | }
162 | if child.loDFS >= sec.indexDFS {
163 | num := child.hiDFS - child.indexDFS + 1
164 | left -= num
165 | sum += num * left
166 | }
167 | }
168 | sec.metric = sum
169 | }
170 | }
171 | // Hmm. It seems some maps have a lot of graphs. Could I utilize this to
172 | // spread reject building over multiple threads somehow? Reject building is
173 | // really slow operation
174 | Log.Verbose(1, "Reject: created %d graphs.\n", w.graphTable.numGraphs)
175 | }
176 |
177 | func (w *RejectWork) HideSectorFromComponents(root, sec *RejSector) {
178 | graph := sec.graph
179 | // Hide sec from all other sectors in its graph that are in different
180 | // bi-connected components
181 | for _, sec2 := range graph.sectors[:root.indexDFS] {
182 | w.markVisibility(sec.index, sec2.index, VIS_HIDDEN)
183 | }
184 | for _, sec2 := range graph.sectors[root.hiDFS+1 : graph.numSectors] {
185 | w.markVisibility(sec.index, sec2.index, VIS_HIDDEN)
186 | }
187 | }
188 |
// AddGraph re-runs the DFS numbering for a scratch graph rooted at sector
// (used by QuickGraph). It mirrors DFS, except that "not yet visited" is
// detected by the sector still pointing at its baseGraph, and no
// articulation flags are recomputed.
func (w *RejectWork) AddGraph(graph *RejGraph, sector *RejSector) {
	// Initialize the sector
	sector.graph = graph
	sector.indexDFS = graph.numSectors
	sector.loDFS = graph.numSectors

	// Add this sector to the graph
	graph.sectors[graph.numSectors] = sector
	graph.numSectors++

	// Add all this nodes children that aren't already in the graph
	for _, child := range w.DFSGetNeighborsAndGroupsiblings(sector) {
		if child == nil {
			// neighbor lists may be nil-terminated
			break
		}
		if child.graph == sector.baseGraph {
			// Still marked with the base graph => unvisited in this pass
			child.graphParent = sector
			w.AddGraph(graph, child)
			if child.loDFS < sector.loDFS {
				sector.loDFS = child.loDFS
			}
		} else if child != sector.graphParent {
			// Back edge to an already-renumbered sector
			if child.indexDFS < sector.loDFS {
				sector.loDFS = child.indexDFS
			}
		}
	}

	sector.hiDFS = graph.numSectors - 1
}
219 |
220 | func (w *RejectWork) QuickGraph(root *RejSector) *RejGraph {
221 | oldGraph := root.baseGraph
222 | for i := 0; i < oldGraph.numSectors; i++ {
223 | oldGraph.sectors[i].graph = oldGraph
224 | }
225 |
226 | graph := &(w.graphTable.graphs[w.graphTable.numGraphs])
227 |
228 | graph.sectors = w.graphTable.sectorStart
229 | graph.numSectors = 0
230 |
231 | root.graphParent = nil
232 |
233 | w.AddGraph(graph, root)
234 |
235 | return graph
236 | }
237 |
238 | func ShouldTest(src *TransLine, key uint16, tgt *TransLine, sector uint16) bool {
239 | y1 := src.DX*(tgt.start.Y-src.start.Y) - src.DY*(tgt.start.X-src.start.X)
240 | y2 := src.DX*(tgt.end.Y-src.start.Y) - src.DY*(tgt.end.X-src.start.X)
241 |
242 | if src.frontSector == key {
243 | if (y1 <= 0) && (y2 <= 0) {
244 | return false
245 | }
246 | } else if (y1 >= 0) && (y2 >= 0) {
247 | return false
248 | }
249 |
250 | x1 := tgt.DX*(src.start.Y-tgt.start.Y) - tgt.DY*(src.start.X-tgt.start.X)
251 | x2 := tgt.DX*(src.end.Y-tgt.start.Y) - tgt.DY*(src.end.X-tgt.start.X)
252 |
253 | if tgt.frontSector == sector {
254 | if (x1 <= 0) && (x2 <= 0) {
255 | return false
256 | }
257 | } else if (x1 >= 0) && (x2 >= 0) {
258 | return false
259 | }
260 |
261 | return true
262 | }
263 |
// ProcessSectorLines resolves visibility between the key sector and
// sector (recursing into sector's DFS-tree children). lines is the
// nil-terminated list of key's boundary lines leading toward this
// component. If visibility is still unknown, line pairs are tested until
// a sight line is found; if sector remains invisible, it — and, when it
// is not part of a loop, its whole DFS subtree — is hidden from the other
// components of the graph.
func (w *RejectWork) ProcessSectorLines(key, root, sector *RejSector,
	lines []*TransLine) {

	isVisible := *(w.rejectTableIJ(key.index, sector.index)) == VIS_VISIBLE
	isUnknown := *(w.rejectTableIJ(key.index, sector.index)) == VIS_UNKNOWN

	if isUnknown {
		// Try every source line against candidate target lines until one
		// pair proves visibility.
		ptr := lines
		for ptr[0] != nil {
			srcLine := ptr[0]
			ptr = ptr[1:]
			for i := 0; i < sector.numNeighbors; i++ {
				child := sector.neighbors[i]
				// Test each line that may lead back to the key sector (can reach higher up in the graph)
				if child.loDFS <= sector.indexDFS {
					for j := 0; j < sector.numLines; j++ {
						tgtLine := sector.lines[j]
						if (tgtLine.backSector == uint16(child.index)) || (tgtLine.frontSector == uint16(child.index)) {
							if ShouldTest(srcLine, uint16(key.index), tgtLine, uint16(sector.index)) {
								if w.testLinePair(srcLine, tgtLine) {
									w.markPairVisible(srcLine, tgtLine)
									goto done
								}
							}
						}
					}
				}
			}
		}
	}

	// Another f-rant about use of goto was here, since deleted
	if !isVisible { // if it was not made visible before, we hide it

		graph := sector.graph

		// See if we're in a loop
		if sector.loDFS == sector.indexDFS {

			// Nope. Hide ourself and all our children from the other components
			for i := sector.indexDFS; i <= sector.hiDFS; i++ {
				w.HideSectorFromComponents(root, graph.sectors[i])
			}

		} else {

			// Yep. Hide ourself
			w.HideSectorFromComponents(root, sector)

			for i := 0; i < sector.numNeighbors; i++ {
				child := sector.neighbors[i]
				if child.graphParent == sector {
					// Hide any child components that aren't in the loop
					if child.loDFS >= sector.indexDFS {
						for j := child.indexDFS; j <= child.hiDFS; j++ {
							w.HideSectorFromComponents(root, graph.sectors[j])
						}
					} else {
						w.ProcessSectorLines(key, root, child, lines)
					}
				}
			}
		}
		// Must not reach "done" label from here
		return
	}

done:
	// Continue checking all of our children
	for i := 0; i < sector.numNeighbors; i++ {
		child := sector.neighbors[i]
		if child.graphParent == sector {
			w.ProcessSectorLines(key, root, child, lines)
		}
	}
}
340 |
// ProcessSector resolves visibility from sector to the rest of its
// component. Articulation sectors get the graph-aware treatment: the
// graph is re-rooted at sector and each child component is processed with
// the subset of sector's lines that lead into it. Non-articulation
// sectors fall back to brute-force line pair testing against every sector
// in their base graph.
func (w *RejectWork) ProcessSector(sector *RejSector) {
	if sector.isArticulation {

		// For now, make sure this sector is at the top of the graph (keeps things simple)
		w.QuickGraph(sector)

		// +1 for the nil terminator expected by ProcessSectorLines
		lines := make([]*TransLine, sector.numLines+1)

		for i := 0; i < sector.numNeighbors; i++ {

			child := sector.neighbors[i]

			// Find each child that is the start of a component of this sector
			if child.graphParent == sector {

				// Make a list of lines that connect this component
				index := 0
				for j := 0; j < sector.numLines; j++ {
					line := sector.lines[j]
					if (line.backSector == uint16(child.index)) || (line.frontSector == uint16(child.index)) {
						lines[index] = line
						index++
					}
				}

				// If this child is part of a loop, add lines from all the other children in the loop too
				if child.loDFS < child.indexDFS {
					for j := i + 1; j < sector.numNeighbors; j++ {
						child2 := sector.neighbors[j]
						if child2.indexDFS <= child.hiDFS {
							for k := 0; k < sector.numLines; k++ {
								line := sector.lines[k]
								if (line.backSector == uint16(child2.index)) || (line.frontSector == uint16(child2.index)) {
									lines[index] = line
									index++
								}
							}
						}
					}
				}

				// Terminate the list and process this component
				lines[index] = nil
				w.ProcessSectorLines(sector, child, child, lines)
			}

		}

	} else {

		// Not an articulation point: test this sector against every other
		// sector of its component directly.
		graph := sector.baseGraph
		for i := 0; i < graph.numSectors; i++ {
			tgtSector := graph.sectors[i]
			if *(w.rejectTableIJ(sector.index, tgtSector.index)) == VIS_UNKNOWN {
				for j := 0; j < sector.numLines; j++ {
					srcLine := sector.lines[j]
					for k := 0; k < tgtSector.numLines; k++ {
						tgtLine := tgtSector.lines[k]
						if w.testLinePair(srcLine, tgtLine) {
							w.markPairVisible(srcLine, tgtLine)
							goto next
						}
					}
				}
				// No line pair could see each other: definitely hidden
				w.markVisibility(sector.index, tgtSector.index, VIS_HIDDEN)
			next:
			}
		}
	}
}
410 |
411 | func reSectorsCompare_WithGraphs(x reSectors_SorterWithGraphs, i, j int) int {
412 | sec1 := x[i]
413 | sec2 := x[j]
414 |
415 | // Favor the sector with the best metric (higher is better)
416 | if sec1.metric != sec2.metric {
417 | return sec2.metric - sec1.metric
418 | }
419 |
420 | // Favor the sector that is not part of a loop
421 | var sec1Loop, sec2Loop int
422 | if sec1.loDFS < sec1.indexDFS {
423 | sec1Loop = 1
424 | } else {
425 | sec1Loop = 0
426 | }
427 | if sec2.loDFS < sec2.indexDFS {
428 | sec2Loop = 1
429 | } else {
430 | sec2Loop = 0
431 | }
432 |
433 | if sec1Loop != sec2Loop {
434 | return sec1Loop - sec2Loop
435 | }
436 |
437 | // Favor the sector with the most neighbors
438 | if sec1.numNeighbors != sec2.numNeighbors {
439 | return sec2.numNeighbors - sec1.numNeighbors
440 | }
441 |
442 | // Favor the sector with the most visible lines
443 | if sec1.numLines != sec2.numLines {
444 | return sec2.numLines - sec1.numLines
445 | }
446 |
447 | // It's a tie - use the sector index - lower index favored
448 | return sec1.index - sec2.index
449 | }
450 |
// reSectors_SorterWithGraphs adapts a sector slice to sort.Interface,
// using the priority order defined by reSectorsCompare_WithGraphs.
type reSectors_SorterWithGraphs []*RejSector

func (x reSectors_SorterWithGraphs) Len() int { return len(x) }
func (x reSectors_SorterWithGraphs) Less(i, j int) bool {
	return reSectorsCompare_WithGraphs(x, i, j) < 0
}
func (x reSectors_SorterWithGraphs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
458 |
459 | func (w *RejectWork) setupMixer() {
460 | w.extra.mixer = make([]byte, len(w.rejectTable))
461 | copy(w.extra.mixer, w.rejectTable)
462 | }
463 |
464 | func (w *RejectWork) mergeAndDestroyMixer() {
465 | for i, v := range w.rejectTable {
466 | if v == VIS_UNKNOWN && w.extra.mixer[i] != VIS_UNKNOWN {
467 | w.rejectTable[i] = w.extra.mixer[i]
468 | }
469 | }
470 | w.extra.mixer = nil
471 | }
472 |
473 | // Returns not just sector's neighbors, but also sectors in the same group as
474 | // this one, IF in reject builder configuration groupShareVis == true.
475 | // Doesn't return their neighbors, though
476 | func (r *RejectWork) DFSGetNeighborsAndGroupsiblings(s *RejSector) []*RejSector {
477 | if !r.groupShareVis {
478 | return s.neighbors
479 | }
480 | group := r.groups[r.groups[s.index].parent]
481 | if len(group.sectors) == 1 {
482 | return s.neighbors
483 | }
484 | ret := make([]*RejSector, 0, len(s.neighbors)+len(group.sectors))
485 | filt := make(map[int]bool)
486 | for _, sec := range s.neighbors {
487 | // No test for duplicates here - there already must be none
488 | if sec == nil {
489 | // weird stuff like this exists because some questionable things
490 | // were ported over from Zennode's C++ code
491 | break
492 | }
493 | filt[sec.index] = true
494 | ret = append(ret, sec)
495 | }
496 | for _, si := range group.sectors {
497 | // but group siblings are likely to coincide with neighbors, as per
498 | // original intent of old RMB program. Also don't include self in list,
499 | // of course
500 | if s.index != si && !filt[si] {
501 | sec := &(r.sectors[si])
502 | ret = append(ret, sec)
503 | filt[si] = true
504 | }
505 | }
506 | return ret
507 | }
508 |
--------------------------------------------------------------------------------
/rejectSymmDefs.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // rejectSymmDefs.go
19 | package main
20 |
21 | import (
22 | "io"
23 | )
24 |
25 | // -----------------------------------------------------------------------------
26 | // Block of pragma directives
27 | //
28 | // The pragmas aren't part of Go languages blablabla... see rejectdefs.go
29 | // Anyway, this dictates how the code for symmetric reject generation is
30 | // generated
31 | // -----------------------------------------------------------------------------
32 |
33 | //
34 | // #pragma setprefix "Symmetric"
35 | // #pragma replace RejectExtraData with RejectSymmNoExtra
36 | // #pragma replace_prototype *RejectWork.rejectTableIJ with *RejectWork.rejectTableIJSymm
37 | // #pragma replace_prototype *RejectWork.getResult with *RejectWork.getResultSymm
38 | // #pragma replace_prototype *RejectWork.prepareReject with *RejectWork.prepareRejectSymm
39 | // #pragma replace_prototype *RejectWork.markVisibility with *RejectWork.markVisibilityOneWay
40 | // #pragma replace_prototype *RejectWork.markVisibilitySector with *RejectWork.markVisibilitySectorOneWay
41 | // #pragma replace_prototype *RejectWork.setupMixer with *RejectWork.setupMixerSymmNoop
42 | // #pragma replace_prototype *RejectWork.mergeAndDestroyMixer with *RejectWork.mergeAndDestroyMixerSymmNoop
43 | // #pragma replace_prototype *RejectWork.markVisibilityGroup with *RejectWork.markVisibilityGroupSymmNoop
44 | // #pragma replace_prototype *RejectWork.mixerIJ with *RejectWork.mixerIJSymmNoop
45 | // #pragma replace_prototype *RejectWork.DFSGetNeighborsAndGroupsiblings with *RejectWork.DFSGetNeighborsAndGroupsiblingsSymm
46 | // #pragma replace_prototype *RejectWork.reportDoForDistance with *RejectWork.reportDoForDistanceSymm
47 | // #pragma init getSymmRejectWorkIntf with morphed getRejectWorkIntf
48 |
49 | // -----------------------------------------------------------------------------
50 | // End block of pragma directives
51 | // -----------------------------------------------------------------------------
52 |
// RejectSymmNoExtra is the (empty) stand-in for RejectExtraData in the
// symmetric reject build, substituted by the pragma block above.
type RejectSymmNoExtra struct{}

// This callback must be overriden in init section of a go source file that is
// automatically generated
var getSymmRejectWorkIntf RejectWorkIntfMaker = nil
58 |
59 | // Here, not only dropping groups, but also dropping a call on
60 | // rejectTableIJ(j,i)
61 | func (r *RejectWork) markVisibilityOneWay(i, j int, visibility uint8) {
62 | cell1 := r.rejectTableIJ(i, j)
63 | if *cell1 == VIS_UNKNOWN {
64 | *cell1 = visibility
65 | }
66 | }
67 |
68 | func (r *RejectWork) markVisibilitySectorOneWay(i, j int, visibility uint8) {
69 | cell1 := r.rejectTableIJ(i, j)
70 | if *cell1 == VIS_UNKNOWN {
71 | *cell1 = visibility
72 | }
73 | }
74 |
// setupMixerSymmNoop replaces setupMixer in the symmetric build (see the
// pragma block above): no mixer table is used there, so this is a no-op.
func (w *RejectWork) setupMixerSymmNoop() {

}

// mergeAndDestroyMixerSymmNoop replaces mergeAndDestroyMixer: no-op.
func (w *RejectWork) mergeAndDestroyMixerSymmNoop() {

}

// markVisibilityGroupSymmNoop replaces markVisibilityGroup: no-op in the
// symmetric build.
func (r *RejectWork) markVisibilityGroupSymmNoop(i, j int, visibility uint8) {

}

// mixerIJSymmNoop replaces mixerIJ: always nil, as there is no mixer
// table in the symmetric build.
func (r *RejectWork) mixerIJSymmNoop(i, j int) *uint8 {
	return nil
}
90 |
// The core piece of symmetric reject processing - only store data for row<=col
func (r *RejectWork) rejectTableIJSymm(i, j int) *uint8 {
	// Normalize so that i <= j: each pair is stored exactly once.
	if i > j {
		i, j = j, i
	}
	// Triangular row-major layout, each row one cell shorter than the
	// previous. With symmShim == 2*numSectors-1 (set in prepareRejectSymm)
	// the cell address is i*(2N-1-i)/2 + j, which equals
	// (cells before row i) + (j - i).
	return &r.rejectTable[int64(i)*(r.symmShim-int64(i))>>1+
		int64(j)]
}
99 |
100 | func (r *RejectWork) getResultSymm() []byte {
101 | rejectSize := rejectLumpSize_nonUDMF(r.numSectors)
102 | result := make([]byte, rejectSize, rejectSize)
103 | //tbIdx := 0
104 | i := 0
105 | j := 0
106 | for k := 0; k < rejectSize; k++ {
107 | bits := 0
108 | if isHidden(*r.rejectTableIJ(i, j)) {
109 | bits = bits | 0x01
110 | }
111 | r.symmMoveIJ(&i, &j)
112 | if isHidden(*r.rejectTableIJ(i, j)) {
113 | bits = bits | 0x02
114 | }
115 | r.symmMoveIJ(&i, &j)
116 | if isHidden(*r.rejectTableIJ(i, j)) {
117 | bits = bits | 0x04
118 | }
119 | r.symmMoveIJ(&i, &j)
120 | if isHidden(*r.rejectTableIJ(i, j)) {
121 | bits = bits | 0x08
122 | }
123 | r.symmMoveIJ(&i, &j)
124 | if isHidden(*r.rejectTableIJ(i, j)) {
125 | bits = bits | 0x10
126 | }
127 | r.symmMoveIJ(&i, &j)
128 | if isHidden(*r.rejectTableIJ(i, j)) {
129 | bits = bits | 0x20
130 | }
131 | r.symmMoveIJ(&i, &j)
132 | if isHidden(*r.rejectTableIJ(i, j)) {
133 | bits = bits | 0x40
134 | }
135 | r.symmMoveIJ(&i, &j)
136 | if isHidden(*r.rejectTableIJ(i, j)) {
137 | bits = bits | 0x80
138 | }
139 | r.symmMoveIJ(&i, &j)
140 | result[k] = uint8(bits)
141 | }
142 | return result
143 | }
144 |
145 | func (r *RejectWork) symmMoveIJ(i, j *int) {
146 | (*j)++
147 | if *j >= r.numSectors && (*i+1) < r.numSectors {
148 | *j = 0
149 | (*i)++
150 | }
151 | }
152 |
153 | func (r *RejectWork) prepareRejectSymm() {
154 | // The working table size (uses bytes not bits).
155 | // Extra 7 bytes to simplify getResult() method
156 | tableSize := uint64(r.numSectors)*uint64(r.numSectors) -
157 | uint64(r.numSectors-1)*uint64(r.numSectors)/2 + 7
158 | r.rejectTable = make([]uint8, tableSize, tableSize)
159 | for i, _ := range r.rejectTable {
160 | r.rejectTable[i] = 0
161 | }
162 | r.symmShim = int64(r.numSectors)<<1 - 1
163 | }
164 |
// DFSGetNeighborsAndGroupsiblingsSymm replaces
// DFSGetNeighborsAndGroupsiblings in the symmetric build (see pragma
// block): groups are not supported there, so only plain neighbors are
// returned.
func (r *RejectWork) DFSGetNeighborsAndGroupsiblingsSymm(s *RejSector) []*RejSector {
	return s.neighbors
}
168 |
169 | func (r *RejectWork) reportDoForDistanceSymm(w io.Writer, distance uint16) {
170 | r.printfln(w, "# %s All sectors with LOS distance>%d are reported",
171 | r.mapName, distance)
172 | for i := 0; i < r.numSectors; i++ {
173 | for j := i + 1; j < r.numSectors; j++ {
174 | // According to manual, only _mutually_ visible sectors that
175 | // exceed the specified length are to be reported
176 | // But in symmetric reject case, mutual visibility can be checked
177 | // in just one direction...
178 | if *(r.distanceTableIJ(i, j)) > distance &&
179 | !isHidden(*(r.rejectTableIJ(i, j))) {
180 | r.printfln(w, "%d,%d", i, j)
181 | }
182 | }
183 | }
184 | }
185 |
--------------------------------------------------------------------------------
/rejectdefs.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2023, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // rejectdefs.go
19 | package main
20 |
21 | // -----------------------------------------------------------------------------
22 | // Block of pragma directives
23 | //
24 | // The pragmas aren't part of Go languages and are not parsed by Go compiler or
25 | // the go build command. Instead, go generate will be used to call a special
26 | // program I wrote that will parse source code into ASTs, apply modifications to
27 | // it, and then produce a new file (see gen/codegen.go in VigilantBSP source
28 | // tree)
29 | //
30 | // The idea was first used to support Zdoom extended nodes without penalizing
31 | // performance of generating nodes in vanilla/DeeP format (see zdefs.go). That
32 | // case is probably easier to get into.
33 | //
34 | // This current file has NOTHING to do with nodes, but instead concerns the
35 | // generation of "fast" REJECT builder, which fast variation is the one that
36 | // will cover most used cases actually, by replacing certain functions/methods
37 | // with easier-to-inline for Go compiler. It solves the issue of performance
38 | // loss that occurs from implementing RMB effects such as GROUP
39 | // -----------------------------------------------------------------------------
40 |
41 | //
42 | // #pragma setprefix "Fast"
43 | // #pragma replace RejectExtraData with RejectNoExtra
44 | // #pragma replace_prototype *RejectWork.markVisibility with *RejectWork.markVisibilityFast
45 | // #pragma replace_prototype *RejectWork.setupMixer with *RejectWork.setupMixerNoop
46 | // #pragma replace_prototype *RejectWork.mergeAndDestroyMixer with *RejectWork.mergeAndDestroyMixerNoop
47 | // #pragma replace_prototype *RejectWork.markVisibilityGroup with *RejectWork.markVisibilityGroupNoop
48 | // #pragma replace_prototype *RejectWork.mixerIJ with *RejectWork.mixerIJNoop
49 | // #pragma replace_prototype *RejectWork.DFSGetNeighborsAndGroupsiblings with *RejectWork.DFSGetNeighborsAndGroupsiblingsFast
50 | // #pragma init getFastRejectWorkIntf with morphed getRejectWorkIntf
51 |
52 | // -----------------------------------------------------------------------------
53 | // End block of pragma directives
54 | // -----------------------------------------------------------------------------
55 |
// RejectNoExtra is the (empty) stand-in for RejectExtraData in the "fast"
// reject build, substituted by the pragma block above.
type RejectNoExtra struct{}

// RejectWorkIntfMaker constructs a RejectWorkIntf implementation.
type RejectWorkIntfMaker func() RejectWorkIntf

// This callback must be overriden in init section of a go source file that is
// automatically generated
var getFastRejectWorkIntf RejectWorkIntfMaker = nil

// RejectWorkIntf is the entry point each generated reject builder variant
// exposes.
type RejectWorkIntf interface {
	main(input RejectInput, hasGroups bool, groupShareVis bool,
		groups []RejGroup) []byte
}
68 |
69 | // This whole unit started around this - the version of markVisibility without
70 | // support for groups (RMF effect called GROUP). This *must* be inlinable. The
71 | // version with groups that it is replacing is not inlinable, which results in
72 | // noticeable performance loss
func (r *RejectWork) markVisibilityFast(i, j int, visibility uint8) {
	// Marks both (i,j) and (j,i), touching only still-undecided cells.
	// Keep this body minimal: per the comment above, it must stay within
	// the Go inliner's cost budget.
	cell1 := r.rejectTableIJ(i, j)
	if *cell1 == VIS_UNKNOWN {
		*cell1 = visibility
	}

	cell2 := r.rejectTableIJ(j, i)
	if *cell2 == VIS_UNKNOWN {
		*cell2 = visibility
	}
}
84 |
85 | func (w *RejectWork) setupMixerNoop() {
86 |
87 | }
88 |
// mergeAndDestroyMixerNoop replaces mergeAndDestroyMixer when GROUP support
// is compiled out: nothing to merge, nothing to destroy.
func (w *RejectWork) mergeAndDestroyMixerNoop() {

}
92 |
// markVisibilityGroupNoop replaces markVisibilityGroup when GROUP support is
// compiled out: without groups there is no group visibility to propagate.
func (r *RejectWork) markVisibilityGroupNoop(i, j int, visibility uint8) {

}
96 |
// mixerIJNoop replaces mixerIJ when GROUP support is compiled out. Always
// returns nil — callers in the fast path must never dereference it.
func (r *RejectWork) mixerIJNoop(i, j int) *uint8 {
	return nil
}
100 |
// DFSGetNeighborsAndGroupsiblingsFast replaces DFSGetNeighborsAndGroupsiblings
// when GROUP support is compiled out: with no groups, the DFS frontier is just
// the sector's own neighbor list.
func (r *RejectWork) DFSGetNeighborsAndGroupsiblingsFast(s *RejSector) []*RejSector {
	return s.neighbors
}
104 |
--------------------------------------------------------------------------------
/ring.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | // Implements ring buffer (a fixed size power of two queue). Not intended to
20 | // be thread-safe or such, just when I need a fast queue.
21 | // I encountered this article by someone actually good at programming (unlike
22 | // me) when was searching for a smart way to do this, it is probable my
23 | // implementation does not do it justice, though:
24 | // https://www.snellman.net/blog/archive/2016-12-13-ring-buffers/
25 |
// MAX_RING_CAPACITY is the largest permitted (power of two) capacity: 2^31.
const MAX_RING_CAPACITY = uint32(2147483648)

// RingU16 is so called because the values stored in it are uint16 integers
// In case another buffer appears that stores something else, that could be
// named something else
// Beware: the routines perform no overflow or underflow checking for Enqueue's
// and Dequeue's. The end user is solely responsible to ascertain they don't
// dequeue an empty ring or enqueue a full ring.
type RingU16 struct {
	read uint32 // total dequeues so far; unmasked, wraps modulo 2^32
	write uint32 // total enqueues so far; unmasked, wraps modulo 2^32
	capacity uint32 // never changes after initialization
	buf []uint16 // backing storage, len == capacity (a power of two)
}
40 |
41 | // The argument capacity is how much data you expect to hold in ring buffer.
42 | // This function will upsize it automatically to a power of two if non-power of
43 | // two capacity is provided.
44 | func CreateRingU16(capacity uint32) *RingU16 {
45 | iCap := RoundPOW2_Uint32(capacity)
46 | if iCap < capacity {
47 | Log.Panic("Integer overflow when computing ring capacity (before rounding up to power of two: %d). Specified capacity clearly exceeds the possible maximum\n",
48 | capacity)
49 | }
50 | if iCap > MAX_RING_CAPACITY {
51 | Log.Panic("Exceeds maximum ring capacity: %d (%d rounded up to power of two)\n",
52 | iCap, capacity)
53 | }
54 | capacity = iCap
55 | return &RingU16{
56 | read: 0,
57 | write: 0,
58 | capacity: capacity,
59 | buf: make([]uint16, capacity, capacity),
60 | }
61 | }
62 |
// RoundPOW2_Uint32 rounds x up to the nearest power of two. Values 0, 1 and 2
// are returned unchanged. For x > 2^31 the result wraps around to 0, which
// callers detect via result < x.
func RoundPOW2_Uint32(x uint32) uint32 {
	if x <= 2 {
		return x
	}
	// Smear the highest set bit of x-1 down into every lower position,
	// then add one to land on the next power of two.
	x--
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	return x + 1
}
76 |
// mask maps an ever-growing read/write counter onto a valid buffer index.
// Correct because capacity is always a power of two.
func (r *RingU16) mask(val uint32) uint32 {
	return val & (r.capacity - 1)
}
80 |
// Enqueue appends item at the tail. No fullness check is performed (see the
// RingU16 warning): enqueueing into a full ring overwrites the slot holding
// the oldest element.
func (r *RingU16) Enqueue(item uint16) {
	r.buf[r.mask(r.write)] = item
	r.write++
}
85 |
// Dequeue removes and returns the item at the head. No emptiness check is
// performed: dequeueing an empty ring returns stale buffer contents.
func (r *RingU16) Dequeue() uint16 {
	res := r.buf[r.mask(r.read)]
	r.read++
	return res
}
91 |
// Empty reports whether the ring currently holds no items.
func (r *RingU16) Empty() bool {
	return r.read == r.write
}
95 |
// Size returns the number of queued items. Unsigned subtraction keeps this
// correct even after the write/read counters wrap around uint32.
func (r *RingU16) Size() uint32 {
	return r.write - r.read
}
99 |
// Full reports whether the ring is at capacity (further Enqueue would clobber).
func (r *RingU16) Full() bool {
	return r.Size() == r.capacity
}
103 |
// Reset empties the ring by declaring everything written as already read.
// Buffer contents are left in place — values are not zeroed.
func (r *RingU16) Reset() {
	r.write = r.read
}
107 |
--------------------------------------------------------------------------------
/rmbunit.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | // This file defines all structures shared by those parts of the code that
20 | // need to deal with RMB: reject code and parser code.
21 |
22 | import (
23 | "fmt"
24 | )
25 |
const ( // RMB option/command types
	// Exact numeric values are irrelevant — only distinctness matters
	RMB_UNKNOWN = iota
	RMB_EXMY_MAP
	RMB_MAPXY_MAP
	RMB_ASSERT
	RMB_BAND
	RMB_BLIND
	RMB_BLOCK
	RMB_DISTANCE
	RMB_DOOR
	RMB_EXCLUDE
	RMB_GROUP
	RMB_INCLUDE
	RMB_INVERT
	RMB_LEFT
	RMB_LENGTH
	RMB_LINE
	RMB_NODOOR
	RMB_NOMAP
	RMB_NOPROCESS
	RMB_ONE
	RMB_PERFECT
	RMB_PREPROCESS
	RMB_PROCESS
	RMB_REPORT
	RMB_RIGHT
	RMB_SAFE
	RMB_TRACE
	RMB_VORTEX

	RMB_SIMPLE_BLIND // BLIND 0/1 might be replaced by this
	RMB_SIMPLE_SAFE // SAFE 0/1 might be replaced by this
)

const ( // RMB_FRAME_TYPE
	RMB_FRAME_GLOBAL = iota // applies to all levels
	RMB_FRAME_EXMY // applies to a specific ExMy level
	RMB_FRAME_MAPXY // applies to a specific MapXY level, or to UDMF level with arbitrary name
)
65 |
// RMB actually references to these as options, not commands, but from
// perspective of the reject builder these are instructions to follow
type RMBCommand struct {
	SrcLine int // index of line in source text (file) this command was parsed from
	Type int // one of the RMB_* option/command type constants
	Invert bool // whether invert prefix was applied to this option
	Band bool // whether band prefix was applied to this option
	// Banded bool
	Data [2]int // stores integer(s) specified for this option
	List [2][]int // stores list(s) of sectors specified for this option
	//
	WadFileName []byte // for NOPROCESS command, this can exist - the wad to get reject from
	MapLumpName []byte // for NOPROCESS command, second optional argument is map name. Since UDMF, it does not have to be MAPXY or EXMY anymore
	Frame *RMBFrame // which frame this belongs to. Only assigned at the end of LoadRMB
}

// RMBFrameId identifies a frame: frame kind (global / ExMy / MapXY) plus the
// map name for the non-global kinds.
type RMBFrameId struct {
	Type int // RMB_FRAME_TYPE constant
	Name string // name of map
}

// RMB allows specifying options:
// 1) to apply to all levels
// 2) to apply to specific level
// and the latter can also override the former. Thus, a two-tier structure
// based on the frame allows to track which options apply to the current level
type RMBFrame struct {
	Id RMBFrameId
	Commands []RMBCommand
	Parent *RMBFrame // nil for global. Map's frame may have global frame as its parent, if one exists
	RMB *LoadedRMB // which RMB this belongs too. Only assigned at the end of LoadRMB
}

// LoadedRMB is the complete parse result of one RMB options source.
type LoadedRMB struct {
	mapFrames map[RMBFrameId]*RMBFrame // per-map frames, keyed by frame id
	globalFrame *RMBFrame // frame applying to all levels, nil if absent
	srcFile string // source file name, used to prefix diagnostics
	CRLF bool // whether the source used CRLF line endings
}
105 |
106 | // Caution, may return a non-nil frame for non-existent map, if global frame
107 | // exists it will return that. This was done to simplify lump scheduler, it will
108 | // request frames for maps as it encounters them
109 | func (l *LoadedRMB) LookupRMBFrameForMap(frameId RMBFrameId) *RMBFrame {
110 | if l == nil {
111 | return nil
112 | }
113 | frame := l.mapFrames[frameId]
114 | if frame == nil {
115 | return l.globalFrame
116 | }
117 | return frame
118 | }
119 |
120 | // Wrapper over LookupRMBFrameForMap, this takes a name of lump that marks
121 | // the start of level lumps. There should be no bytes beyond the null byte in
122 | // the input marker name (ByteSliceBeforeTerm subroutine in main program takes
123 | // care of this)
124 | func (l *LoadedRMB) LookupRMBFrameForMapMarker(marker []byte) *RMBFrame {
125 | if l == nil {
126 | return nil
127 | }
128 | var frameId RMBFrameId
129 | if marker[0] == 'E' && MAP_ExMx.FindSubmatch(marker) != nil {
130 | frameId = RMBFrameId{
131 | Type: RMB_FRAME_EXMY,
132 | Name: string(marker),
133 | }
134 | } else {
135 | frameId = RMBFrameId{
136 | Type: RMB_FRAME_MAPXY,
137 | Name: string(marker),
138 | }
139 | }
140 | return l.LookupRMBFrameForMap(frameId)
141 | }
142 |
143 | func (l *RMBCommand) Error(s string, a ...interface{}) {
144 | fmtS := fmt.Sprintf("RMB %s%d error: %s", l.getFile(), l.SrcLine, s)
145 | Log.Error(fmtS, a...)
146 | }
147 |
148 | func (l *RMBCommand) Info(s string, a ...interface{}) {
149 | fmtS := fmt.Sprintf("RMB %s%d message: %s", l.getFile(), l.SrcLine, s)
150 | Log.Printf(fmtS, a...)
151 | }
152 |
153 | // returns filename with a colon appended at the end, or empty string if
154 | // filename is empty or couldn't be determined
155 | func (l *RMBCommand) getFile() string {
156 | if l.Frame == nil { // shouldn't happen
157 | return ""
158 | }
159 | if l.Frame.RMB == nil { // shouldn't happen
160 | return ""
161 | }
162 | ret := l.Frame.RMB.srcFile
163 | if len(ret) > 0 {
164 | return ret + ":"
165 | }
166 | return ret
167 | }
168 |
169 | func (f *RMBFrame) isEmpty() bool {
170 | return f == nil || (len(f.Commands) == 0 && f.Parent.isEmpty())
171 | }
172 |
173 | func (f *RMBFrame) Clone() *RMBFrame {
174 | if f == nil {
175 | return nil
176 | }
177 | ret := &RMBFrame{}
178 | *ret = *f
179 | ret.Parent = ret.Parent.Clone()
180 | ret.Commands = make([]RMBCommand, len(f.Commands))
181 | copy(ret.Commands, f.Commands)
182 | for i, _ := range ret.Commands {
183 | ret.Commands[i].Frame = ret
184 | }
185 | return ret
186 | }
187 |
--------------------------------------------------------------------------------
/segalias.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // segalias
19 | package main
20 |
const ALIAS_ROW_SIZE = 65536 / 8 // so can store 65536 aliases before need to grow

// Cheap integer aliases for colinear segs. Only a tiny bit of Zennode's original
// alias idea. Test one seg as a partition per all the colinear ones and receive
// a discount on execution time xD
type SegAliasHolder struct {
	visited []uint8 // bit array, bit set to 1 means = true (alias was visited)
	maxAlias int // max known alias so far. Incremented by Generate
}
30 |
31 | // Init must be called before SegAliasHolder can be used for the first time,
32 | // and is intended to be called only once
33 | func (s *SegAliasHolder) Init() {
34 | s.visited = make([]uint8, ALIAS_ROW_SIZE, ALIAS_ROW_SIZE) // can grow later
35 | s.maxAlias = 0
36 | }
37 |
38 | // Generate returns a new available alias that was not in use AND marks
39 | // it as visited. Minimal return value is 1, so that you can use 0 to mean
40 | // "no alias was assigned"
41 | func (s *SegAliasHolder) Generate() int {
42 | s.maxAlias++
43 | idx := s.maxAlias - 1
44 | bte := idx >> 3
45 | bit := idx & 0x07
46 | if bte > len(s.visited)-1 {
47 | s.grow()
48 | }
49 | s.visited[bte] = s.visited[bte] | (1 << bit)
50 | return s.maxAlias
51 | }
52 |
53 | func (s *SegAliasHolder) grow() {
54 | L := len(s.visited)
55 | tmp := make([]uint8, L+ALIAS_ROW_SIZE, L+ALIAS_ROW_SIZE)
56 | copy(tmp, s.visited)
57 | s.visited = tmp
58 | }
59 |
60 | // MarkAndRecall marks alias as visited but returns whether it was visited already
61 | func (s *SegAliasHolder) MarkAndRecall(alias int) bool {
62 | idx := alias - 1
63 | bte := idx >> 3
64 | bit := idx & 0x07
65 | b := s.visited[bte]&(1<>3 + 1
78 | if cutLen > len(s.visited) {
79 | cutLen = len(s.visited)
80 | }
81 | view := s.visited[:cutLen]
82 | for i, _ := range view {
83 | view[i] = 0
84 | }
85 | }
86 |
87 | func (s *SegAliasHolder) Clone() *SegAliasHolder {
88 | ret := new(SegAliasHolder)
89 | ret.maxAlias = s.maxAlias
90 | L := len(s.visited)
91 | ret.visited = make([]uint8, L, L)
92 | cutLen := s.maxAlias>>3 + 1
93 | if cutLen > len(s.visited) {
94 | cutLen = len(s.visited)
95 | }
96 | view := s.visited[:cutLen]
97 | for k, v := range view {
98 | ret.visited[k] = v
99 | }
100 | return ret
101 | }
102 |
--------------------------------------------------------------------------------
/solidblocks.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // solidblocks
19 | package main
20 |
// put constants for controlling goroutine and generating goroutine together,
// so they are assigned different ordinals and can't be mistaken for each other
const (
	// these are constants for solids-only blockmap CONTROLLER
	// they are used for BconRequest.Message
	BCON_NEED_SOLID_BLOCKMAP = iota
	BCON_NONEED_SOLID_BLOCKMAP
	BCON_DONE_WITH_SOLID_BLOCKMAP
	// internal constant for solids-only blockmap CONTROLLER
	// (initial "no request received yet from this user" state)
	BCON_WAIT_FOR_REQUEST
	// these are constants for solids-only blockmap GENERATOR
	// (used for BgenRequest.Action)
	BGEN_GENERATE_BLOCKMAP
	BGEN_RETRIEVE_BLOCKMAP
	BGEN_DIE
)

const (
	// Used in BconRequest.Sender
	SOLIDBLOCKS_NODES = iota
	SOLIDBLOCKS_REJECT
)
42 |
// SolidBlocks_Input bundles everything SolidBlocks_Control needs to supervise
// building a solids-only blockmap for internal use.
type SolidBlocks_Input struct {
	lines SolidLines
	bounds LevelBounds
	control <-chan BconRequest // user requests (need/noneed/done) arrive here
	genworker chan BgenRequest // channel to/for the generator goroutine
	linesToIgnore []bool // presumably indexed by linedef — confirm at callers
}

// BconRequest is a message to the solid blockmap CONTROLLER.
type BconRequest struct {
	Sender int // SOLIDBLOCKS_* constant identifying the requesting subsystem
	Message int // BCON_* constant
}

// BgenRequest is a message to the solid blockmap GENERATOR.
type BgenRequest struct {
	Action int // BGEN_* constant
	ReplyTo chan<- *Blockmap // where to send the blockmap for BGEN_RETRIEVE_BLOCKMAP
}
60 |
// Sentinel goroutine that keeps track of who requested solid-only blockmap,
// there may be several users or there may be none, the blockmap is built by
// another goroutine only if there is AT LEAST ONE request for it and is disposed
// of when it becomes certain NO one would be requesting it anymore for SURE
func SolidBlocks_Control(input SolidBlocks_Input) {
	users := bcon_SystemUsers()
	if len(users) == 0 {
		return
	}

	// Input for the generator goroutine, assembled once per level
	bmi := BlockmapInput{
		lines: input.lines,
		bounds: input.bounds,
		XOffset: 0,
		YOffset: 0,
		useZeroHeader: false,
		internalPurpose: true,
		gcShield: nil,
		linesToIgnore: input.linesToIgnore,
	}
	blockmapScheduled := false
	go bmi.SolidBlocks_Generator(input.genworker)
	for msg := range input.control {
		switch msg.Message {
		// No, BCON_WAIT_FOR_REQUEST is not allowed here
		case BCON_NEED_SOLID_BLOCKMAP:
			{
				bcon_UpdateUserStatus(users, msg)
				// schedule generation only once, on the first interested user
				if !blockmapScheduled {
					blockmapScheduled = true
					input.genworker <- BgenRequest{
						Action: BGEN_GENERATE_BLOCKMAP,
						ReplyTo: nil,
					}

				}
			}
		case BCON_NONEED_SOLID_BLOCKMAP:
			{
				bcon_UpdateUserStatus(users, msg)
			}
		case BCON_DONE_WITH_SOLID_BLOCKMAP:
			{
				bcon_UpdateUserStatus(users, msg)
			}
		default:
			{ // Programmer error.
				Log.Printf("Illegal request to solid blockmap generator\n")
			}
		}
		// Quit once every registered user declared it no longer needs
		// (or is done with) the blockmap
		if bcon_NeedToDie(users) {
			break
		}
	}
	// Generator must die (quit)
	input.genworker <- BgenRequest{
		Action: BGEN_DIE,
		ReplyTo: nil,
	}
	// Now blockmap can be garbage collected, provided all users deleted
	// their reference to it
	// Sentinel also quits
}
124 |
// bcon_AddUser registers another prospective blockmap user; thin append
// wrapper kept for readability at the call sites in bcon_SystemUsers.
func bcon_AddUser(us []BconRequest, u BconRequest) []BconRequest {
	return append(us, u)
}
128 |
129 | func bcon_UpdateUserStatus(us []BconRequest, u BconRequest) {
130 | for i, _ := range us {
131 | if us[i].Sender == u.Sender {
132 | us[i].Message = u.Message
133 | return
134 | }
135 | }
136 | Log.Printf("Solid blockmap generator (internal purposes): update status fail: unregistered user!\n")
137 | }
138 |
139 | func bcon_SystemUsers() []BconRequest {
140 | us := make([]BconRequest, 0, 2)
141 | us = bcon_AddUser(us, BconRequest{
142 | Sender: SOLIDBLOCKS_REJECT,
143 | Message: BCON_WAIT_FOR_REQUEST,
144 | })
145 | us = bcon_AddUser(us, BconRequest{
146 | Sender: SOLIDBLOCKS_NODES,
147 | Message: BCON_WAIT_FOR_REQUEST,
148 | })
149 | return us
150 | }
151 |
152 | func bcon_NeedToDie(us []BconRequest) bool {
153 | for _, uu := range us {
154 | if uu.Message != BCON_NONEED_SOLID_BLOCKMAP && uu.Message != BCON_DONE_WITH_SOLID_BLOCKMAP {
155 | return false
156 | }
157 | }
158 | return true
159 | }
160 |
161 | func (input *BlockmapInput) SolidBlocks_Generator(recv <-chan BgenRequest) {
162 | var bm *Blockmap
163 | generated := false
164 | for msg := range recv {
165 | if msg.Action == BGEN_DIE {
166 | // Hopefully sent by control and not by impostor
167 | break
168 | }
169 | if msg.Action == BGEN_GENERATE_BLOCKMAP {
170 | if !generated {
171 | generated = true
172 | bm = CreateBlockmap(input)
173 | }
174 | }
175 | if msg.Action == BGEN_RETRIEVE_BLOCKMAP {
176 | if !generated {
177 | //Looks like a retrieve request might happen faster than control
178 | //notifies us of the need to generate of which control was itself
179 | //notified by the same thread we got request now from. Wow, race conditions are tough
180 | //Log.Printf("Solid blockmap requested but was not ever generated\n")
181 | generated = true
182 | bm = CreateBlockmap(input)
183 | }
184 | msg.ReplyTo <- bm
185 | }
186 | }
187 | }
188 |
--------------------------------------------------------------------------------
/sorthelpers.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // sorthelpers
19 | package main
20 |
21 | // Implementations of sort.Interface for _stock_ Go types go here.
22 | // Note: don't add implementations of sort.Interface of types invented for
23 | // the project, keep those in the file where those are declared
24 |
25 | type UInt32Slice []uint32
26 |
27 | func (x UInt32Slice) Len() int { return len(x) }
28 | func (x UInt32Slice) Less(i, j int) bool { return x[i] < x[j] }
29 | func (x UInt32Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
30 |
31 | type Uint16Slice []uint16
32 |
33 | func (x Uint16Slice) Len() int { return len(x) }
34 | func (x Uint16Slice) Less(i, j int) bool { return x[i] < x[j] }
35 | func (x Uint16Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
36 |
--------------------------------------------------------------------------------
/superblocks_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // superblocks_test.go
19 | package main
20 |
21 | import (
22 | "testing"
23 | )
24 |
25 | func TestSuperblocksConsts(t *testing.T) {
26 | marginLen := float64(IFFY_LEN * 1.5)
27 | if marginLen != float64(MARGIN_LEN) {
28 | t.Errorf("Const MARGIN_LEN must be equal to IFFY_LEN * 1.5n \n")
29 | }
30 | distMultiply := float64(1 << DIST_SHIFT)
31 | if 1.0/distMultiply != DIST_EPSILON {
32 | t.Errorf("Const DIST_SHIFT out of sync with DIST_EPSILON\n")
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/trollgen.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // trollgen
19 | package main
20 |
21 | import (
22 | "sort"
23 | )
24 |
25 | // Troll, Or Lump Starting Offset Dealiasing Broker
26 | //
27 | // Idea: WAD format allows sharing data between lumps, so do it
28 | // Multiple zero-filled reject lumps can be effortlessly compressed this way
29 | // Problem: SLADE3 drops all lumps that start on the same offset when loading
30 | // or saving wads (cause some "troll wads" used that)
31 | // Solution: make start offset different for each lump (but still share data).
32 | // Not too difficult and saves waiting for SLADE3 to remove the limitation
33 |
// OffsetForSize is a node in a singly-linked list of offsets reserved for
// lumps of one particular size.
type OffsetForSize struct {
	offset uint32 // starting offset within the shared zero-filled region
	next *OffsetForSize
}

// OffsetContainer is the head of the per-size offset list.
type OffsetContainer struct {
	first *OffsetForSize
}
42 |
// So, whether some lumps have the same size, or a different one for each lump,
// each of them needs UNIQUE offset in either case. Call AddSize for each
// ZERO-filled lump that'll be generated, not once per unique size, but once per lump
// (got 3 lumps of the same size - you call AddSize thrice with this size as parameter).
// Each later PopOffset call then yields a distinct offset for that size.
//
// Lifecycle: AddSize calls, then one Compile, then PopOffset calls.
//
// Beware: all methods are mutating object
type Troll struct {
	keyToIds map[uint32]int // size(key) to id(value), id = index in offsets slice
	allKeys []uint32 // tracks all sizes as well as their repetitions
	offsets []*OffsetContainer // index(id) is stored as value in a map keyToIds
	maxSize uint32 // the maximum size ever seen as a key
	compiled bool // set by Compile; guards against finalizing twice
}
60 |
61 | func CreateTroll() *Troll {
62 | na := Troll{
63 | keyToIds: make(map[uint32]int),
64 | allKeys: make([]uint32, 0, 99),
65 | offsets: make([]*OffsetContainer, 0, 99),
66 | maxSize: 0,
67 | compiled: false,
68 | }
69 | return &na
70 | }
71 |
72 | // Registers that a zero-byte filled lump of this size will be generated
73 | // If you have several lumps of the same size, you call this several times!
74 | func (t *Troll) AddSize(rejlump_size uint32) {
75 | t.allKeys = append(t.allKeys, rejlump_size)
76 | candidateId := len(t.offsets)
77 | if _, ok := t.keyToIds[rejlump_size]; !ok {
78 | t.keyToIds[rejlump_size] = candidateId
79 | dumb := new(OffsetContainer)
80 | dumb.first = nil
81 | t.offsets = append(t.offsets, dumb)
82 | }
83 | if t.maxSize < rejlump_size {
84 | t.maxSize = rejlump_size
85 | }
86 | }
87 |
// Returns how much zero bytes will be needed for "lump data" region that
// will be reused for ALL of your lumps you registered with AddSize
//
// How it works: sizes are sorted ascending, then walked from largest to
// smallest; every registered lump (repetitions included) receives the next
// sequential offset (0, 1, 2, ...), so each lump gets a UNIQUE start offset
// while all still share one zero-filled region. The region length grows
// whenever offset+size pokes past its current end. May only be called once.
func (t *Troll) Compile() uint32 {
	if t.compiled {
		panic("The offset generator was finalized already.")
	}
	zeroCount := t.maxSize
	sort.Sort(UInt32Slice(t.allKeys))
	newOffset := uint32(0)
	for i := len(t.allKeys) - 1; i >= 0; i-- {
		sz := t.allKeys[i]
		id, ok := t.keyToIds[sz]
		if !ok {
			// Must never happen
			Log.Panic("Inconsistent data (key not in map)")
		}
		p := t.offsets[id]
		// prepend this offset to the list for this size
		rec := new(OffsetForSize)
		rec.next = p.first
		rec.offset = newOffset
		if newOffset+sz > zeroCount {
			zeroCount = newOffset + sz
		}
		newOffset++
		p.first = rec
	}
	t.compiled = true
	return zeroCount
}
117 |
118 | // Call once for each lump (and not once for unique size) like you did AddSize.
119 | // Do not reuse the return value between several lumps of the same size!
120 | // You need unique offset for each lump, and if you called AddSize for each lump,
121 | // you'll get it. Order of PopOffset calls doesn't matter, but every call should
122 | // have been matched by a prior AddSize call with the same parameter
123 | func (t *Troll) PopOffset(rejlump_size uint32) uint32 {
124 | id, ok := t.keyToIds[rejlump_size]
125 | if !ok {
126 | // Must never occur
127 | Log.Panic("Invalid request to lump offset generator: programmer never called AddSize with %d size as an argument",
128 | rejlump_size)
129 | }
130 | p := t.offsets[id]
131 | if p.first == nil {
132 | // Must never occur
133 | Log.Panic("Couldn't fetch _next_ offset for %d size - programmer may have forgot to register (yet) another instance of it via AddSize",
134 | rejlump_size)
135 | }
136 | res := p.first.offset
137 | p.first = p.first.next
138 | return res
139 | }
140 |
--------------------------------------------------------------------------------
/udmf_level.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | import (
20 | "os"
21 | )
22 |
// UDMF_Level is the per-level working state for UDMF (TEXTMAP) format maps.
// A single instance is reused across all UDMF levels — DoLevel reinitializes
// every field.
type UDMF_Level struct {
	RejectLumpIdx int // reset to 0 each level; reject writing not implemented yet
	wriBus *WriteBusControl
	WroteRejectAlready bool
	le []LumpEntry // wad directory entries
	f *os.File // input file descriptor, is not always loaded into this structure
	fc *FileControl
	mapName string // name of the map's marker lump
}
32 |
// DoLevel is executed once per level, but UDMF_Level struct is allocated once and is
// reused for all levels, hence it must do reinitialization of all fields
//
// Current state: TEXTMAP is loaded and forwarded to the output verbatim (no
// parser exists yet), and every other level sub-lump is copied verbatim too.
// REJECT building / GL nodes for UDMF are TODO.
func (l *UDMF_Level) DoLevel(le []LumpEntry, idx int, action *ScheduledLump,
	f *os.File, wriBus *WriteBusControl, fileControl *FileControl) {
	// reinit all
	l.le = le
	l.f = f
	l.fc = fileControl
	l.mapName = GetLumpName(le, action)
	l.WroteRejectAlready = false
	l.RejectLumpIdx = 0
	// TODO grab TEXTMAP and parse it. Parser is yet to be written
	Log.Printf("(noop) Processing UDMF level %s:\n", l.mapName)
	textmap, err := LoadTEXTMAP(f, le, action.Level[0])
	if err != nil {
		Log.Panic("Couldn't read TEXTMAP: %s\n", err.Error())
	}
	// TEXTMAP is written to output file so as not forget it
	wriBus.SendRawLump(textmap, action.DirIndex, "", "")
	// TODO implement TEXTMAP parser
	// parser should ideally be scheduled concurrently with writing to the bus
	// when TEXTMAP parser failed (or doesn't exist lol), merely copy everything
	// but, uh, the first implementation doesn't have to be this efficient
	parseFail := true
	if config.Reject == REJECT_NORMAL {
		// currently no-op, but do check RMB
		if !action.RMBOptions.isEmpty() {
			Log.Printf("Not actually building REJECT, but acknowledging that a non-empty RMB frame exists:\n")
			inherited := action.RMBOptions.Parent == nil ||
				len(action.RMBOptions.Commands) == 0
			inheritedStr := "yes"
			if !inherited {
				inheritedStr = "no"
			}
			Log.Printf(" level RMB frame is inherited from global RMB frame: " + inheritedStr + "\n")
		}
	}
	if len(action.Level) > 1 { // supposed to be, normally, unless ENDMAP is missing
		for _, subaction := range action.Level[1:] {
			idx = subaction.DirIndex
			//bname := ByteSliceBeforeTerm(le[idx].Name[:])
			copyLump := true
			if !parseFail {
				// TODO REJECT, GL Nodes
				// might set copyLump == false
			}
			if copyLump {
				// NOTE(review): ReadAt error is silently ignored here — a
				// short read would forward truncated lump data. Worth a check.
				tmpBuf := make([]byte, le[idx].Size, le[idx].Size)
				f.ReadAt(tmpBuf, int64(le[idx].FilePos))
				wriBus.SendRawLump(tmpBuf, idx, "", "")
			}
		}
	}

	if !parseFail {
		l.WaitForAndWriteData()
	}
}
91 |
92 | func LoadTEXTMAP(f *os.File, le []LumpEntry, textmapAction *ScheduledLump) ([]byte, error) {
93 | fp := le[textmapAction.DirIndex].FilePos
94 | sz := le[textmapAction.DirIndex].Size
95 | if sz == 0 {
96 | return make([]byte, 0), nil
97 | }
98 | ret := make([]byte, sz)
99 | _, err := f.ReadAt(ret, int64(fp))
100 | if err != nil {
101 | return nil, err
102 | }
103 | return ret, nil
104 | }
105 |
// WaitForAndWriteData is a stub: once lump building is implemented for UDMF
// maps it will collect asynchronously built data and send it to the write bus.
func (l *UDMF_Level) WaitForAndWriteData() {
	// TODO when have data to write (first thing to support would be building
	// reject for UDMF maps as no one else can do it... yet. Later, GL nodes.
	// Blockmap and regular nodes should not be built for UDMF.)
}
111 |
--------------------------------------------------------------------------------
/vigilantbsp.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022-2025, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // -- This file is where the program entry is.
19 | // VigilantBSP uses algorithms and ideas from various other free software
20 | // nodebuilder programs, in addition to original research on topics such as
21 | // multithreading, autodetection of self-referencing sectors for visibility or
22 | // to avoid deleting them when deleting "invisible" 2-sided lines, enhancements
23 | // to visplane reduction heuristics, etc.
24 | // TODO document better which part was borrowed from whom. Rough breakdown:
25 | // 1. Blockmap: Marisa Heit's code in ZDBSP + insight from Zokumbsp;
26 | // multi-threading is mine, subset compression is also reimplemented by me
27 | // according to Doomwiki description of the technique, byte stealing is mine
28 | // invention, longest list placed last position idea is from Doomwiki article on
29 | // Block Rocking Bytes
30 | // 2. Nodes: port of BSP v5.2, heavily modified, ideas borrowed from Zennode,
31 | // Zokumbsp and AJ-BSP, ZDBSP as well (for extended nodes); I also implemented some
32 | // ideas proposed by Lee Killough that he considered hard at the time
33 | // 3. Reject: port of Zennode's reject code, bugfixes from Zokumbsp, further
34 | // optimization and self-referencing sector support is mine etc.
35 | package main
36 |
37 | import (
38 | "encoding/binary"
39 | "os"
40 | "path/filepath"
41 | "runtime/pprof"
42 | "time"
43 | )
44 |
// LevelBounds holds the axis-aligned bounding box of a level's geometry,
// as computed by GetBounds from its vertices. Coordinates are int16,
// matching the classic (non-UDMF) Doom map format.
type LevelBounds struct {
	Xmin int16
	Ymin int16
	Xmax int16
	Ymax int16
}
51 |
// main is the program entry point: parse configuration, optionally load an
// RMB options file, read the input wad directory, compute the output wad
// layout (including the shared zero-byte pool for zero-filled reject lumps),
// then stream copied and rebuilt lumps to the output file via the write bus.
func main() {
	timeStart := time.Now()

	var RMB *LoadedRMB

	// before config can be legitimately accessed, must call Configure()
	Configure()
	//Log.Verbose(2, "Parsing command line takes %s\n", time.Since(timeStart))

	var err error
	// Initialize wad file name from arguments
	// CPU profiling setup is best-effort: failures are logged, never fatal
	if config.Profile {
		f, err := os.Create(config.ProfilePath)
		if err != nil {
			Log.Printf("Could not create CPU profile: %s", err.Error())
		} else {
			defer f.Close()
			if err := pprof.StartCPUProfile(f); err != nil {
				Log.Printf("Could not start CPU profile: %s", err.Error())
			} else {
				defer pprof.StopCPUProfile()
			}
		}
	}

	// Do we have a file?
	if config.InputFileName == "" {
		Log.Error("You must specify an input file.\n")
		os.Exit(1)
	}
	config.InputFileName, _ = filepath.Abs(config.InputFileName)
	if config.OutputFileName != "" {
		config.OutputFileName, _ = filepath.Abs(config.OutputFileName)
		// Now that we can perform --eject , this check is much warranted as
		// user might have incentive to try to overcome the limitation (--eject
		// makes output file required).
		// Note that output file colliding with input file would produce bugs anyway.
		// And apparently Linux the kernel indeed allowed to produce corrupt file
		// in this case, and opening input file with read-only access and
		// output file with exclusive read-write access doesn't prevent this.
		// So this check is really not optional
		f1, err1 := os.Stat(config.InputFileName)
		f2, err2 := os.Stat(config.OutputFileName)
		if err1 == nil && err2 == nil {
			if os.SameFile(f1, f2) {
				Log.Error("You cannot specify output file that maps to the same input file (whether via same path and name, or hardlinks, or symlinks)\n")
				os.Exit(1)
			}
		}
	}

	mainFileControl := FileControl{}
	defer mainFileControl.Shutdown()

	// If RMB needs to be read, it is done first. This allows us to abort
	// early if RMB options file exists but is malformed, or contains incorrect
	// syntax.
	if config.UseRMB {
		RMB = LoadAssociatedRMB(config.InputFileName, &mainFileControl)
		if RMB != nil {
			Log.Printf("Successfully loaded RMB file %s\n",
				mainFileControl.rmbFileName)
		}
	}

	// Try open input wad
	f, err := mainFileControl.OpenInputFile(config.InputFileName)
	if err != nil {
		Log.Error("An error has occured while trying to read %s: %s\n",
			config.InputFileName, err)
		os.Exit(1)
	}

	wh := new(WadHeader)
	le, err2 := TryReadWadDirectory(true, f, wh)
	if err2 != nil {
		Log.Error(err2.Error() + "\n")
		os.Exit(1)
	}

	// Now that we are definitely having lumps, let's organize a schedule of
	// future "copy lump"/"process level" operations
	troll := CreateTroll()
	rejectsize := make(map[int]uint32)
	wadDir := LoadWadDirectory(true, le, rejectsize, troll, config.Eject, RMB)
	ScheduleRoot, validities, lvls := wadDir.scheduleRoot, wadDir.validities,
		wadDir.lvls
	mainFileControl.inputWad = &PinnedWad{
		le:           le,
		scheduleRoot: ScheduleRoot,
		readerAt:     f,
	}

	if ScheduleRoot == nil {
		Log.Error("No lumps - terminating.\n")
		os.Exit(1)
	}

	// Now resize directory, add (or even delete) entries where needed, and place
	// them in correct order. Levels missing mandatory lumps will be removed
	// from processing and made to be just copied instead
	le = UpdateDirectoryAndSchedule(le, &ScheduleRoot, validities)
	wh.LumpCount = uint32(len(le))

	// recount - UpdateDirectoryAndSchedule may have demoted broken levels
	lvls = FindValidLevels(ScheduleRoot)

	if lvls == 0 {
		Log.Error("Unable to find any levels I can rebuild - terminating.\n")
		os.Exit(1)
	}
	Log.Printf("Number of levels that will be rebuilt: %d\n", lvls)

	// All of our reject lumps will reuse a single pool of zeroes (lump
	// overlapping is allowed by wad format)
	rejectStart := uint32(binary.Size(wh))
	var zerosToInsert uint32
	if config.Reject == REJECT_ZEROFILLED {
		zerosToInsert = troll.Compile()
	} else {
		zerosToInsert = 0
	}
	// The reject zeroes will be placed after the header, and then directory will
	// follow.
	wh.DirectoryStart = uint32(binary.Size(wh)) + zerosToInsert
	// how many zero bytes we can obtain at the end of the header (taking
	// advantage of the fact that last field of wad's header - LumpCount - is
	// stored in LittleEndian)
	var reusableFromHeader uint32
	if zerosToInsert > 0 { // only seek savings if anything was meant to be used to begin with
		reusableFromHeader = RightTrailZeros(wh.DirectoryStart)
	} else {
		reusableFromHeader = 0
	}
	// Shrink the explicit zero pool by however many zero bytes the header
	// tail already provides (clamped so neither count goes negative)
	if zerosToInsert > reusableFromHeader {
		zerosToInsert = uint32(zerosToInsert - reusableFromHeader)
	} else {
		if reusableFromHeader > zerosToInsert {
			reusableFromHeader = zerosToInsert
		}
		zerosToInsert = 0
	}
	// expected to never evaluate < 0, as sizeof(header) > sizeof(header.lastfield)
	rejectStart = rejectStart - reusableFromHeader
	// After decrement, the number of right trailing zeros in little endian
	// may only increase, but we won't check it any further yet
	wh.DirectoryStart = wh.DirectoryStart - reusableFromHeader

	ZeroOffsetFirstLump := false

	if (zerosToInsert > 0) && (le[0].Size == 0) {
		// And if first entry in directory is a lump of zero size, 8 ZERO bytes may
		// become available! Remember to set offset to 0 for that lump, though
		// 2025-01-23 this means that if we could create lump at that place,
		// we could have had an error, as new lumps are created with zero size as
		// well, but they don't stay that way. Fortunately, this never happens for
		// as long as we only make lumps inside level, where they are always preceded
		// by an existing level marker
		// TODO insert a safeguard nonetheless for sanity's sake -- test if entry was
		// pre-existing
		decr := uint32(8)
		ZeroOffsetFirstLump = true
		if zerosToInsert > 8 {
			zerosToInsert = uint32(zerosToInsert - 8)
		} else {
			decr = 8 - zerosToInsert // how much to move directory back
			zerosToInsert = 0
		}
		wh.DirectoryStart = wh.DirectoryStart - decr
	}

	if config.Reject == REJECT_ZEROFILLED {
		Log.Verbose(1, "Will need to insert %d zero bytes between header and directory. Reject lumps will begin at offset %d.\n",
			zerosToInsert, rejectStart)
	}
	Log.Verbose(1, "Directory starts at offset %d.\n", wh.DirectoryStart)

	outFileName := config.OutputFileName
	if outFileName == "" {
		Log.Printf("Preparing to write %s - will create a temp file first.\n", config.InputFileName)
	} else {
		Log.Printf("Preparing to write %s...\n", outFileName)
	}
	fout, outFileName, ferr := mainFileControl.OpenOutputFile(config.OutputFileName)
	if ferr != nil {
		Log.Error("An error has occured while trying to create/modify %s: %s\n", outFileName, ferr)
		os.Exit(1)
	}

	// in the future, will probably store new wad's header separately
	err = binary.Write(fout, binary.LittleEndian, wh)
	if err != nil {
		Log.Error("An error has occured while trying to create/modify %s: %s\n", outFileName, err)
		os.Exit(1)
	}

	if zerosToInsert > 0 {
		WriteNZerosOrFail(fout, zerosToInsert, outFileName)
	}
	// skip directory also
	WriteNZerosOrFail(fout, uint32(binary.Size(le)), outFileName)
	action := ScheduleRoot
	curPos := uint32(binary.Size(wh)) + zerosToInsert + uint32(binary.Size(le))
	wriBus := StartWriteBus(fout, le, curPos)
	lvl := new(Level)          // reusable
	udmfLvl := new(UDMF_Level) // likewise
	// Walk the schedule: each node is either copied verbatim or, for level
	// markers with work attached, dispatched to the level processor
	for action != nil {
		if action.Drop {
			action = action.Next
			continue
		}
		// This part copies non-level lump (contents and directory entry) as well
		// as a marker lump
		// ! lumps that are part of a level are stored with the marker lump,
		// they are not getting copied in this part
		idx := action.DirIndex
		if le[idx].Size != 0 {
			tmpBuf := make([]byte, le[idx].Size, le[idx].Size)
			f.ReadAt(tmpBuf, int64(le[idx].FilePos))
			wriBus.SendRawLump(tmpBuf, idx, "", "")
		} else if (idx == 0) && ZeroOffsetFirstLump {
			// first lump has zero size, if we set zero offset we can steal some bytes
			// for our zero byte pool for empty zero-filled reject lumps. Do it.
			le[idx].FilePos = 0
		}

		// Now see if we are on a level marker lump and got stuff to process
		if action.Level != nil {
			// action.Level is an array of lumps belonging to the level. This
			// is where all stuff goes
			if action.LevelFormat == FORMAT_UDMF {
				udmfLvl.DoLevel(le, idx, action, f, wriBus, &mainFileControl)
			} else {
				lvl.DoLevel(le, idx, rejectsize, troll, action, rejectStart,
					f, wriBus, &mainFileControl)
			}
			wriBus.Sync() // make sure all that is to be logged is there before new level is processed
		}
		action = action.Next
	}
	wriBus.Shutdown()
	// Directory was skipped with zeros earlier; now seek back and write the
	// (possibly updated) entries for real
	fout.Seek(int64(wh.DirectoryStart), 0)
	binary.Write(fout, binary.LittleEndian, le)
	suc := mainFileControl.Success()
	if suc {
		trFileName := outFileName
		if mainFileControl.UsingTmp() {
			// we were using tmp file, which means our real output file is same
			// as input one. So make sure user sees that we written (overwritten)
			// the desired file instead of some temp file
			trFileName = config.InputFileName
		}
		Log.Printf("%s successfully written \n ", trFileName)
	} else {
		Log.Printf("I/O error on flushing data / closing files. The data might not have been saved!\n")
	}
	if config.DumpSegsFlag {
		DebugSaveDumpedSegs(config.SegDumpFile)
	}
	if config.MemProfile {
		DumpMemoryProfile(config.MemProfilePath)
	}
	Log.Printf("Total time: %s\n", time.Since(timeStart))
}
315 |
316 | func GetBounds(vertices []Vertex) LevelBounds {
317 | Xmin := int16(32767)
318 | Ymin := int16(32767)
319 | Xmax := int16(-32768)
320 | Ymax := int16(-32768)
321 | for _, v := range vertices {
322 | if v.XPos < Xmin {
323 | Xmin = v.XPos
324 | }
325 | if v.YPos < Ymin {
326 | Ymin = v.YPos
327 | }
328 | if v.XPos > Xmax {
329 | Xmax = v.XPos
330 | }
331 | if v.YPos > Ymax {
332 | Ymax = v.YPos
333 | }
334 | }
335 | return LevelBounds{
336 | Xmin: Xmin,
337 | Ymin: Ymin,
338 | Xmax: Xmax,
339 | Ymax: Ymax,
340 | }
341 | }
342 |
--------------------------------------------------------------------------------
/writebus.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 |
18 | // Bus for organized writes to destination file
19 | package main
20 |
21 | import (
22 | "os"
23 | )
24 |
// Request type discriminators for WriteBusRequest.tpeIndex, selecting
// which payload field of the request is meaningful.
const (
	WRI_TYPE_BYTES     = iota // bData: raw bytes, written as-is
	WRI_TYPE_INTERFACE        // intfData: needs byte-order conversion before write
	WRI_TYPE_DEEPNODES        // deepData: DeepNodes signature prepended + conversion
	WRI_TYPE_SYNC             // no payload: flush log, acknowledge on sync channel
)
31 |
// WriteBusRequest is one unit of work submitted to the write bus. Exactly
// one of the payload fields (bData / intfData / deepData) is meaningful,
// selected by tpeIndex (a WRI_TYPE_* constant).
type WriteBusRequest struct {
	tpeIndex int          // typeIndex: one of WRI_TYPE_*
	bData    []byte       // payload for WRI_TYPE_BYTES
	intfData interface{}  // payload for WRI_TYPE_INTERFACE
	deepData []DeepNode   // payload for WRI_TYPE_DEEPNODES
	lumpIdx  int          // directory index of the lump being written
	lumpName string       // lump name for messages (may be empty)
	postfix  string       // extra message postfix (may be empty)
}
41 |
// WriteBus performs sequential writes of lump data to the output file,
// advancing curPos and updating the corresponding directory entries in le.
// It is driven by a single goroutine running WriteBusLoop.
type WriteBus struct {
	fout   *os.File
	le     []LumpEntry
	curPos uint32 // offset in fout where the next lump's data goes
}
47 |
// WriteBusControl is the caller-side handle for a running WriteBus
// goroutine: requests are sent over ch, shutdown completion is received on
// finisher, and Sync acknowledgements arrive on sync.
type WriteBusControl struct {
	bus      *WriteBus
	ch       chan<- WriteBusRequest
	finisher <-chan bool
	sync     <-chan bool
}
54 |
55 | func StartWriteBus(fout *os.File, le []LumpEntry, curPos uint32) *WriteBusControl {
56 | bus := &WriteBus{
57 | fout: fout,
58 | le: le,
59 | curPos: curPos,
60 | }
61 | ch := make(chan WriteBusRequest)
62 | finisher := make(chan bool)
63 | sync := make(chan bool)
64 | go bus.WriteBusLoop(ch, finisher, sync)
65 | return &WriteBusControl{
66 | bus: bus,
67 | ch: ch,
68 | finisher: finisher,
69 | sync: sync,
70 | }
71 | }
72 |
73 | func (b *WriteBus) WriteBusLoop(ch <-chan WriteBusRequest, chFinish chan<- bool,
74 | chSync chan<- bool) {
75 | for req := range ch {
76 | switch req.tpeIndex {
77 | case WRI_TYPE_BYTES:
78 | {
79 | WriteSliceLump(req.bData, &(b.curPos), b.fout, b.le, req.lumpIdx,
80 | req.lumpName, req.postfix)
81 | }
82 | case WRI_TYPE_INTERFACE:
83 | {
84 | ConvertAndWriteGenericLump(req.intfData, &(b.curPos), b.fout,
85 | b.le, req.lumpIdx, req.lumpName, req.postfix)
86 | }
87 | case WRI_TYPE_DEEPNODES:
88 | {
89 | ConvertAndWriteDeepNodes(req.deepData, &(b.curPos), b.fout,
90 | b.le, req.lumpIdx, req.lumpName, req.postfix)
91 | }
92 | case WRI_TYPE_SYNC:
93 | {
94 | Log.Sync()
95 | chSync <- true
96 | }
97 | default:
98 | {
99 | Log.Error("Unknown request at WriteBusLoop (%d)\n", req.tpeIndex)
100 | }
101 | }
102 | }
103 | chFinish <- true
104 | }
105 |
106 | // Asynchronous calls that instruct bus to write lump data, stored in varied
107 | // forms before writing
108 |
109 | // Sends lump to bus to write
110 | // Lump data is stored in byte form, no conversion needed
111 | func (c *WriteBusControl) SendRawLump(data []byte, lumpIdx int, s string,
112 | postfix string) {
113 | envl := WriteBusRequest{
114 | tpeIndex: WRI_TYPE_BYTES,
115 | bData: data,
116 | lumpIdx: lumpIdx,
117 | lumpName: s,
118 | postfix: postfix,
119 | }
120 | c.ch <- envl
121 | }
122 |
123 | // Sends lump to bus to write
124 | // Lump data is represented as a structure or an array(slice) of something
125 | // different than bytes - conversion to respective byte order must be applied
126 | func (c *WriteBusControl) SendGenericLump(data interface{},
127 | lumpIdx int, s string, postfix string) {
128 | envl := WriteBusRequest{
129 | tpeIndex: WRI_TYPE_INTERFACE,
130 | intfData: data,
131 | lumpIdx: lumpIdx,
132 | lumpName: s,
133 | postfix: postfix,
134 | }
135 | c.ch <- envl
136 | }
137 |
138 | // Sends lump to bus to write
139 | // A DeepNodes signature needs to be prepended to data, and data needs to be
140 | // converted to a proper byte order
141 | func (c *WriteBusControl) SendDeepNodesLump(data []DeepNode, lumpIdx int,
142 | s string, postfix string) {
143 | envl := WriteBusRequest{
144 | tpeIndex: WRI_TYPE_DEEPNODES,
145 | deepData: data,
146 | lumpIdx: lumpIdx,
147 | lumpName: s,
148 | postfix: postfix,
149 | }
150 | c.ch <- envl
151 | }
152 |
153 | func (c *WriteBusControl) Shutdown() {
154 | close(c.ch)
155 | <-c.finisher
156 | }
157 |
158 | // This service call is needed to prevent actions pertaining to the previous
159 | // level being logged to output after processing of new level is announced
160 | func (c *WriteBusControl) Sync() {
161 | envl := WriteBusRequest{
162 | tpeIndex: WRI_TYPE_SYNC,
163 | }
164 | c.ch <- envl
165 | <-c.sync
166 | }
167 |
--------------------------------------------------------------------------------
/zenscore.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
16 | // along with VigilantBSP. If not, see .
17 | package main
18 |
19 | import (
20 | "sort"
21 | )
22 |
// VERY_BAD_SCORE (math.MinInt32) forces a partition candidate to rank last.
const VERY_BAD_SCORE = -2147483648
24 |
25 | // zenscore module - provides the partition score system from Zennode family
26 | // of nodebuilders. It is designed to reduce depth.
27 | // This code is basically ported from Zennode v1.2.1 (c) Mark Rousseau and
28 | // ZokumBSP v 1.0.11 nodebuilders and
29 | // contains logic that, as anecdotal evidence tends to suggest, can reduce
30 | // BSP tree depth provided appropriate magic constants are used in its
31 | // arcane formula
32 |
33 | // metric = S? (L * R) / (X1 ? X1 * S / X2 : 1) - (X3 * S + X4) * S : (L * R)
34 | // where ? : is a C ternary operator, L = number of segs to the left,
35 | // R = number of segs to the right, S = number of split segs, and X1-4 are
36 | // magic numbers defined in constants below. They were derived empirically
37 | // by Mark Rousseau based on how they affected the wads he had at his disposal
38 | // to try and find the best possible depth-reducing algorithm
39 | // In all honesty, I (VigilantDoomer) have no idea how this formula works
40 | // towards depth reduction - unlike BSP's visplane reduction algorithm, there
41 | // is no rationale to be found behind either numbers or the formula
42 |
// X1-X4: magic constants of the seg metric formula above, derived
// empirically by Mark Rousseau.
const ORIG_ZEN_X1 = 20

const ORIG_ZEN_X2 = 10

const ORIG_ZEN_X3 = 1

const ORIG_ZEN_X4 = 25

// similar metric, but for sectors instead of segs
// Note: Zennode and ZokumBSP defined the values found below, and even allowed
// to modify them with environment variables, but used X-constants in their
// place, while these remained unused

const ORIG_ZEN_Y1 = 1

const ORIG_ZEN_Y2 = 7

const ORIG_ZEN_Y3 = 1

const ORIG_ZEN_Y4 = 0

// These variables are stored as globals and not as fields in config variable
// (runtime-adjustable copies of the ORIG_* defaults above)

var ZEN_X1 = ORIG_ZEN_X1
var ZEN_X2 = ORIG_ZEN_X2
var ZEN_X3 = ORIG_ZEN_X3
var ZEN_X4 = ORIG_ZEN_X4
var ZEN_Y1 = ORIG_ZEN_Y1
var ZEN_Y2 = ORIG_ZEN_Y2
var ZEN_Y3 = ORIG_ZEN_Y3
var ZEN_Y4 = ORIG_ZEN_Y4
74 |
75 | // So, important note about working on it: this metric is not cost, so a greater
76 | // value may mean better, not worse for some of the fields
77 | // Also, maybe this will have to redefine all secondary modes, not just depth
78 | // one
79 |
80 | // Use secondary score for partition selection:
81 | // - Split minimization (no tree balancing)
82 | // - Minimize depth, favor no split segs
83 | // - Minimize depth, favor fewer subsectors
84 | // - Minimize depth, favor both of above equally
85 |
// DepthScoreBundle accumulates the Zennode-style ranking data for a single
// partition candidate seg. Note the mixed comparison polarity: some fields
// rank better when lesser, others when greater - see per-field comments.
type DepthScoreBundle struct {
	seg            *NodeSeg
	preciousSplit  int // lesser value wins
	equivSplit     int // equivalencies split. Lesser value wins
	segSplit       int // lesser value wins (tie-breaker in DepthScoresByTotal)
	diagonalFactor int // if diagonal penalty is used - not part of original Zennode logic
	scoreSeg       int // greater value wins
	scoreSector    int // greater value wins
	scoreTotal     int // lesser value wins (got this wrong at first)
}
96 |
// ZenIntermediary holds the raw left/right/split counts of segs and
// sectors gathered while evaluating one partition candidate
// (see evalPartitionWorker_Zen and ZenComputeScores).
type ZenIntermediary struct {
	segL    int // segs entirely on the left of the partition
	segR    int // segs entirely on the right
	segS    int // segs split by the partition
	sectorL int // sectors hit only on the left (mask 0x0F)
	sectorR int // sectors hit only on the right (mask 0xF0)
	sectorS int // sectors hit on both sides (mask 0xFF)
}
105 |
106 | type DepthScoresBySeg []DepthScoreBundle
107 |
108 | func (x DepthScoresBySeg) Len() int { return len(x) }
109 | func (x DepthScoresBySeg) Less(i, j int) bool {
110 | if x[i].scoreSeg < x[j].scoreSeg {
111 | return false
112 | }
113 | if x[i].scoreSeg > x[j].scoreSeg {
114 | return true
115 | }
116 | if x[i].scoreSector < x[j].scoreSector {
117 | return false
118 | }
119 | if x[i].scoreSector > x[j].scoreSeg {
120 | return true
121 | }
122 | return x[i].seg.Linedef < x[j].seg.Linedef
123 | }
124 | func (x DepthScoresBySeg) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
125 |
126 | type DepthScoresBySector []DepthScoreBundle
127 |
128 | func (x DepthScoresBySector) Len() int { return len(x) }
129 | func (x DepthScoresBySector) Less(i, j int) bool {
130 | if x[i].scoreSector < x[j].scoreSector {
131 | return false
132 | }
133 | if x[i].scoreSector > x[j].scoreSeg {
134 | return true
135 | }
136 | if x[i].scoreSeg < x[j].scoreSeg {
137 | return false
138 | }
139 | if x[i].scoreSeg > x[j].scoreSeg {
140 | return true
141 | }
142 | return x[i].seg.Linedef < x[j].seg.Linedef
143 | }
144 | func (x DepthScoresBySector) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
145 |
146 | type DepthScoresByTotal []DepthScoreBundle
147 |
148 | func (x DepthScoresByTotal) Len() int { return len(x) }
149 | func (x DepthScoresByTotal) Less(i, j int) bool {
150 | if x[i].preciousSplit < x[j].preciousSplit {
151 | return true
152 | } else if x[i].preciousSplit > x[j].preciousSplit {
153 | return false
154 | }
155 |
156 | if x[i].preciousSplit > 0 {
157 | // equal non-zero precious split value - prefer orthogonal partition
158 | // line for better Hexen polyobj support
159 | axisAlignedI := x[i].seg.pdx == 0 || x[i].seg.pdy == 0
160 | axisAlignedJ := x[j].seg.pdx == 0 || x[j].seg.pdy == 0
161 | if axisAlignedI && !axisAlignedJ {
162 | return true
163 | }
164 | if axisAlignedJ && !axisAlignedI {
165 | return false
166 | }
167 | }
168 |
169 | if x[i].scoreTotal < x[j].scoreTotal {
170 | return true
171 | } else if x[i].scoreTotal > x[j].scoreTotal {
172 | return false
173 | }
174 |
175 | // The above is not sufficient to produce an unique candidate. Let's try
176 | // more checks, these ones are new to VigilantBSP, not borrowed elsewhere
177 |
178 | /*iSum := x[i].equivSplit + x[i].segSplit
179 | jSum := x[j].equivSplit + x[j].segSplit
180 |
181 | if iSum < jSum {
182 | return true
183 | } else {
184 | return false
185 | }*/
186 |
187 | if x[i].equivSplit < x[j].equivSplit {
188 | return true
189 | } else if x[i].equivSplit > x[j].equivSplit {
190 | return false
191 | }
192 |
193 | if x[i].segSplit < x[j].segSplit {
194 | return true
195 | } else if x[i].segSplit > x[j].segSplit {
196 | return false
197 | }
198 |
199 | // Hexen maps (or if diagonal penalty is enabled) may need to prefer
200 | // orthogonal partitions amongst best picks
201 | if x[i].diagonalFactor < x[j].diagonalFactor {
202 | return true
203 | } else if x[i].diagonalFactor > x[j].diagonalFactor {
204 | return false
205 | }
206 |
207 | return x[i].seg.Linedef < x[j].seg.Linedef
208 | }
209 | func (x DepthScoresByTotal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
210 |
211 | // Sorts the argument with respect to several metric evaluations so that
212 | // the best record is the first (#0) in array
213 | // The argument is mutated. Code is courtesy of Zennode (c) Mark Rousseau
214 | // February 2023 - fixed significant typos (was clobbering other fields instead
215 | // of assigning scoreTotal), now functions correctly
216 | func ZenPickBestScore(sc []DepthScoreBundle) {
217 | sort.Sort(DepthScoresBySeg(sc))
218 | rank := 0
219 | for i := 0; i < len(sc); i++ {
220 | sc[i].scoreTotal = rank
221 | if i < len(sc)-1 && sc[i].scoreSeg != sc[i+1].scoreSeg {
222 | rank++
223 | }
224 | }
225 | sort.Sort(DepthScoresBySector(sc))
226 | rank = 0
227 | for i := 0; i < len(sc); i++ {
228 | sc[i].scoreTotal += rank
229 | if i < len(sc)-1 && sc[i].scoreSector != sc[i+1].scoreSector {
230 | rank++
231 | }
232 | }
233 | sort.Sort(DepthScoresByTotal(sc))
234 | // The #0 element now contains the best choice
235 | }
236 |
237 | // These builds seg and sector scores for the entire array. Other fields
238 | // not touched
239 | func (w *NodesWork) ZenComputeScores(super *Superblock, sc []DepthScoreBundle,
240 | sectorHits []uint8, depthArtifacts bool) {
241 | var c IntersectionContext
242 | for i, _ := range sc {
243 | inter := ZenIntermediary{
244 | segL: 0,
245 | segR: 0,
246 | segS: 0,
247 | sectorL: 0,
248 | sectorR: 0,
249 | sectorS: 0,
250 | }
251 | // Fill sectorHits array with zeros FAST (fewer bound checks)
252 | // Credit: gist github.com taylorza GO-Fillslice.md
253 | sectorHits[0] = 0
254 | hitArrayLen := len(sectorHits)
255 | for j := 1; j < hitArrayLen; j = j << 1 {
256 | copy(sectorHits[j:], sectorHits[:j])
257 | }
258 | // Obtain data for current partitition by evaluating all segs against
259 | // it again, as we avoided doing so earlier (performance reasons).
260 | part := sc[i].seg
261 | c.psx = part.psx
262 | c.psy = part.psy
263 | c.pex = part.pex
264 | c.pey = part.pey
265 | c.pdx = c.psx - c.pex
266 | c.pdy = c.psy - c.pey
267 | w.evalPartitionWorker_Zen(super, &(sc[i]), &inter, sectorHits, &c)
268 | for j := 0; j < len(sectorHits); j++ {
269 | switch sectorHits[j] {
270 | case 0x0F:
271 | {
272 | inter.sectorL++
273 | }
274 | case 0xF0:
275 | {
276 | inter.sectorR++
277 | }
278 | case 0xFF:
279 | {
280 | inter.sectorS++
281 | }
282 | }
283 | }
284 |
285 | scoreIntermediate(&(sc[i]), &inter, depthArtifacts)
286 | }
287 | }
288 |
289 | func scoreIntermediate(rec *DepthScoreBundle, inter *ZenIntermediary,
290 | depthArtifacts bool) {
291 | // Compute both metrics now
292 | rec.scoreSeg = (inter.segL + inter.segS) * (inter.segR + inter.segS)
293 | rec.scoreSector = (inter.sectorL + inter.sectorS) * (inter.sectorR + inter.sectorS)
294 |
295 | if rec.scoreSeg == 0 {
296 | // one-sided (bad). Force it to rank lowest
297 | rec.scoreSeg = VERY_BAD_SCORE
298 | rec.scoreSector = VERY_BAD_SCORE
299 | return
300 | }
301 |
302 | // TODO computations below feature both multiplication and division,
303 | // there might be condition when they overflow on 32-bit system
304 |
305 | // Finish computing seg metric
306 | if inter.segS > 0 {
307 | // Have seg splits, so
308 | tmp := ZEN_X1 * inter.segS
309 | if ZEN_X2 < tmp {
310 | rec.scoreSeg = ZEN_X2 * rec.scoreSeg / tmp
311 | }
312 | if depthArtifacts {
313 | // ZokumBSP calibrated formula differently
314 | rec.scoreSeg -= (ZEN_X3*inter.segS*(inter.segS/3) +
315 | ZEN_X4) * inter.segS
316 | } else {
317 | rec.scoreSeg -= (ZEN_X3*inter.segS + ZEN_X4) * inter.segS
318 | }
319 | } else { // Logic introduced in ZokumBSP, activated when no SEG splits are preferred
320 | // It also makes better balanced partitions with respect to SEG
321 | // count on both sides score better (higher)
322 | if config.EffectiveSecondary != SECONDARY_PRIORITY_SUBSECTORS { // reference to global: config
323 | rec.scoreSeg = 0x7FFFFFFF - Abs(inter.segL-
324 | inter.segR)
325 | }
326 | }
327 |
328 | // Finish computing sector metric
329 | if depthArtifacts {
330 | // Ok, both Zennode and ZokumBSP use ZEN_X* rather than ZEN_Y*
331 | // consts. That is not the only change here, however
332 | if inter.sectorS > 0 {
333 | tmp := ZEN_X1 * inter.sectorS
334 | if ZEN_X2 < tmp {
335 | rec.scoreSector = ZEN_X2 * rec.scoreSector / tmp
336 | }
337 | // Bye bye sanity - yes the last multiplicative is number
338 | // of split segs not sectors in Zennode and ZokumBSP
339 | // --VigilantDoomer
340 | rec.scoreSector -= (ZEN_X3*inter.sectorS + ZEN_X4) * inter.segS
341 | } else { // Logic introduced in ZokumBSP, activated when no SECTOR splits are preferred
342 | // It also makes better balanced partitions with respect to
343 | // SECTOR count on both sides score better (higher)
344 | if config.EffectiveSecondary != SECONDARY_PRIORITY_SEGS { // reference to global: config
345 | rec.scoreSector = 0x7FFFFFFF - Abs(inter.sectorL-
346 | inter.sectorR)
347 | }
348 | }
349 | } else {
350 | // What I though it would be
351 | if inter.sectorS > 0 {
352 | tmp := ZEN_Y1 * inter.sectorS
353 | if ZEN_Y2 < tmp {
354 | rec.scoreSector = ZEN_Y2 * rec.scoreSector / tmp
355 | }
356 | // Next formula is not what Zennode and ZokumBSP defacto
357 | // use
358 | rec.scoreSector -= (ZEN_Y3*inter.sectorS + ZEN_Y4) * inter.sectorS
359 | } else { // Logic introduced in ZokumBSP, activated when no SECTOR splits are preferred
360 | // It also makes better balanced partitions with respect to
361 | // SECTOR count on both sides score better (higher)
362 | if config.EffectiveSecondary != SECONDARY_PRIORITY_SEGS { // reference to global: config
363 | rec.scoreSector = 0x7FFFFFFF - Abs(inter.sectorL-
364 | inter.sectorR)
365 | }
366 | }
367 | }
368 | }
369 |
// evalPartitionWorker_Zen recursively walks the superblock tree, tallying
// into intermediate how many segs lie left/right of - or are split by -
// the partition candidate rec.seg, and OR-ing per-sector side masks into
// sectorHits (0x0F = left, 0xF0 = right; a sector seen on both sides ends
// up 0xFF).
func (w *NodesWork) evalPartitionWorker_Zen(block *Superblock,
	rec *DepthScoreBundle, intermediate *ZenIntermediary, sectorHits []uint8,
	c *IntersectionContext) {
	part := rec.seg
	num := BoxOnLineSide(block, part)
	if num < 0 {
		// LEFT: whole block is on the left side of the partition line -
		// count all of its segs at once, no need to descend
		intermediate.segL += block.realNum
		block.MarkSectorsHitNoCached(sectorHits, uint8(0x0F))
		return
	} else if num > 0 {
		// RIGHT: whole block is on the right side - likewise
		intermediate.segR += block.realNum
		block.MarkSectorsHitNoCached(sectorHits, uint8(0xF0))
		return
	}

	for check := block.segs; check != nil; check = check.nextInSuper { // Check partition against all Segs
		// get state of lines' relation to each other
		// NOTE(review): the bit meanings of val below are assumed from
		// usage here and mirror the checks in the BSP-derived picknode
		// code - confirm against doLinesIntersect's definition
		leftside := false
		mask := uint8(0xF0)
		c.lsx = check.StartVertex.X
		c.lsy = check.StartVertex.Y
		c.lex = check.EndVertex.X
		c.ley = check.EndVertex.Y
		val := w.doLinesIntersect(c) // use more accurate side evaluation
		if ((val&2 != 0) && (val&64 != 0)) || ((val&4 != 0) && (val&32 != 0)) {
			// Split line
			intermediate.segS++
			mask = uint8(0xFF)
		} else {
			if check == part || check == part.partner {
				// Partition itself or its partner
				leftside = check == part.partner
				if leftside {
					intermediate.segL++
				} else {
					intermediate.segR++
				}
			} else {
				if val&34 != 0 {
					// to the left
					leftside = true
					intermediate.segL++
				}
				if val&68 != 0 {
					// to the right
					intermediate.segR++
				}
				if (val&1 != 0) && (val&16 != 0) {
					// Collinear seg: side decided by whether it points the
					// same way as the partition (dot product of directions)
					if check.pdx*part.pdx+check.pdy*part.pdy < 0 {
						leftside = true
						intermediate.segL++
					} else {
						intermediate.segR++
					}
				}
			}
		}
		if leftside {
			mask = uint8(0x0F)
		}

		sectorHits[check.sector] |= mask
	}

	// handle sub-blocks recursively
	for num := 0; num < 2; num++ {
		if block.subs[num] == nil {
			continue
		}

		w.evalPartitionWorker_Zen(block.subs[num], rec, intermediate,
			sectorHits, c)
	}

}
448 |
449 | func ZenSegMinorToDepthScores(input []SegMinorBundle) []DepthScoreBundle {
450 | res := make([]DepthScoreBundle, len(input))
451 | for i, entry := range input {
452 | res[i].seg = entry.seg
453 | res[i].preciousSplit = entry.minor.PreciousSplit
454 | res[i].scoreSeg = 0
455 | res[i].scoreSector = 0
456 | res[i].scoreTotal = 0
457 | res[i].equivSplit = entry.minor.SectorsSplit
458 | res[i].segSplit = entry.minor.SegsSplit
459 | }
460 | return res
461 | }
462 |
--------------------------------------------------------------------------------
/zstream.go:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022, VigilantDoomer
2 | //
3 | // This file is part of VigilantBSP program.
4 | //
5 | // VigilantBSP is free software: you can redistribute it
6 | // and/or modify it under the terms of GNU General Public License
7 | // as published by the Free Software Foundation, either version 2 of
8 | // the License, or (at your option) any later version.
9 | //
10 | // VigilantBSP is distributed in the hope that it will be useful,
11 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | // GNU General Public License for more details.
14 | //
15 | // You should have received a copy of the GNU General Public License
// along with VigilantBSP. If not, see <https://www.gnu.org/licenses/>.
17 |
18 | // zstream.go implements stream that is optionally compressed, to reduce
19 | // boilerplate that needs to handle both compressed and uncompressed Zdoom node
20 | // format cases.
21 | // With the ZStream object, only initialization one-liner will differ, the rest
22 | // (writing and obtaining final value) will be the same regardless of whether
23 | // compression was used
24 | package main
25 |
26 | import (
27 | "bytes"
28 | "compress/zlib"
29 | )
30 |
// ZStream is a write-only, in-memory stream that optionally zlib-compresses
// everything written to it. Create instances via CreateZStream and retrieve
// the accumulated bytes with FinalizeAndGetBytes.
type ZStream struct {
	raw *bytes.Buffer // destination buffer: holds the uncompressed header (if any) plus all subsequently written data
	compressor *zlib.Writer // non-nil when writes are compressed; wraps raw
}
35 |
36 | func (z *ZStream) Write(p []byte) (int, error) {
37 | if z.compressor != nil {
38 | n, err := z.compressor.Write(p)
39 | return n, err
40 | }
41 | n, err := z.raw.Write(p)
42 | return n, err
43 | }
44 |
45 | // This is how the data that was written can actually be read
46 | // Note that this function can only be called once!
47 | func (z *ZStream) FinalizeAndGetBytes() ([]byte, error) {
48 | if z.raw == nil {
49 | Log.Panic("ZStream.FinalizeAndGetBytes() can be called only once\n")
50 | }
51 | if z.compressor != nil {
52 | err := z.compressor.Close()
53 | z.compressor = nil
54 | if err != nil {
55 | return nil, err
56 | }
57 | }
58 | btes := z.raw.Bytes()
59 | z.raw = nil
60 | return btes, nil
61 | }
62 |
63 | // Initializes internal buffer with header already written (if non-nil), the
64 | // header is always non-compressed.
65 | // All writes on returned value will occur past that header, optionally
66 | // undergoing automatic compression (if compressed == true), or without that
67 | // (otherwise)
68 | // After you finished writing, you are obligated to call FinalizeAndGetBytes()
69 | // (the returned value will also give you data you have written in its final
70 | // byte sequence form), as in case of compression that calls Close() on the
71 | // compressor used to perform it - and it is pretty much obligatory
72 | func CreateZStream(header []byte, compressed bool) *ZStream {
73 | z := &ZStream{
74 | raw: &bytes.Buffer{},
75 | compressor: nil,
76 | }
77 | if header != nil {
78 | z.raw.Write(header)
79 | }
80 | if compressed {
81 | z.compressor, _ = zlib.NewWriterLevel(z.raw, config.ZdoomCompression) // reference to global: config
82 | }
83 | return z
84 | }
85 |
--------------------------------------------------------------------------------