Last active
February 13, 2025 03:52
-
-
Save AndyGrant/d11a38d392e613ee13f8835010f54c2a to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/bin/python3 | |
| import numpy as np | |
| import argparse | |
| from network import Network | |
def check_for_overflow(acc_weights, acc_biases):
    # Given the accum weights (L1, 22528), and accum biases (L1), determine a bound on
    # the smallest and largest possible outputs for an individual neuron in the accum.
    #
    # For each output neuron in the accumulator, and for each possible King Bucket, we
    # estimate the upper bound as follows. The lower bound is simply a negation...
    #
    # - We identify the largest weight for a KxK relation, and place the enemy king there.
    # - We identify the 15 squares which have the largest possible weight for one of our
    #   own pieces. Usually this largest weight will be from placing a friendly Queen on
    #   the square in question. But we take the max of all PNBRQ weights anyway.
    # - We repeat the same process, but for the enemy pieces.
    # - For those 15+15 weights, we sum up all the weights that are greater than 0
    # - Lastly, we add the bias for the neuron in question, alongside the King weight.
    #
    # This means this estimate is valid for the following rules of setting up a chess
    # board. The rules are meant VERY literally:
    #
    # - There is exactly one King of each colour on the board
    # - There are no more than 15 friendly non-King pieces, on different squares, on the board.
    # - There are no more than 15 enemy non-King pieces, on different squares, on the board.
    #
    # The following are the known sources of over-estimation. They could be corrected, but
    # doing so requires a significant increase in code and computational complexity.
    #
    # - The Kings may both be on the same square
    # - Either King may be on the same square as a friendly or enemy non-King piece
    # - Each square may contain both a friendly, and an enemy, non-King piece
    # - We allow material configurations that are illegal, like having too many of a given piece.

    lowest = highest = 0

    n_kbuckets, n_squares, n_relations = (32, 64, 11)
    n_inputs = n_kbuckets * n_squares * n_relations

    assert acc_weights.shape[1] == acc_biases.shape[0]
    assert n_squares * n_relations * n_kbuckets == acc_weights.shape[0]

    for col_idx in range(acc_weights.shape[1]):

        # Hoisted out of the bucket loop: the neuron's column and bias are
        # invariant across King Buckets.
        neuron = acc_weights[:, col_idx]
        bias = acc_biases[col_idx]

        for start in range(0, n_inputs, n_squares * n_relations):

            # Weights for a single King Bucket, indexed by [square][relation]
            chunk = neuron[start:start + n_squares * n_relations]
            chunk = chunk.reshape((n_relations, n_squares)).T

            king_weights = chunk[:, 10]                      # King x King
            our_piece_weights = chunk[:, [0, 2, 4, 6, 8]]    # King x Our PNBRQ
            their_piece_weights = chunk[:, [1, 3, 5, 7, 9]]  # King x Their PNBRQ

            max_our_weights = np.max(our_piece_weights, axis=1)      # Most positive friendly relation
            min_our_weights = np.min(our_piece_weights, axis=1)      # Most negative friendly relation
            max_their_weights = np.max(their_piece_weights, axis=1)  # Most positive enemy relation
            min_their_weights = np.min(their_piece_weights, axis=1)  # Most negative enemy relation

            # np.partition yields the same 15 extreme values as argpartition
            # followed by fancy-indexing, without the index indirection.
            our_big_15 = np.partition(max_our_weights, -15)[-15:]
            our_small_15 = np.partition(min_our_weights, 15)[:15]
            their_big_15 = np.partition(max_their_weights, -15)[-15:]
            their_small_15 = np.partition(min_their_weights, 15)[:15]

            # Only weights that help (hurt) the bound are placed on the board.
            upper_bound = our_big_15[our_big_15 > 0].sum() \
                        + their_big_15[their_big_15 > 0].sum() \
                        + np.max(king_weights) + bias

            lower_bound = our_small_15[our_small_15 < 0].sum() \
                        + their_small_15[their_small_15 < 0].sum() \
                        + np.min(king_weights) + bias

            highest = max(highest, upper_bound)
            lowest = min(lowest, lower_bound)

    return (lowest, highest)
if __name__ == '__main__':

    # The only input is the path to the NNUE file to analyze
    parser = argparse.ArgumentParser()
    parser.add_argument('--network', type=str, help='NNUE Input File', required=True)
    arguments = parser.parse_args()

    network = Network()
    network.read_network(arguments.network)

    # This analysis reads the raw accumulator arrays, which are only
    # available on uncompressed networks.
    if network.is_compressed:
        raise Exception('This script cannot be run on compressed Networks')

    bounds = check_for_overflow(network.accumulator['weights'], network.accumulator['biases'])
    print ('Most Extreme Neuron Outputs:', bounds)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment