| Bytes | Lang | Time | Link |
|---|---|---|---|
| nan | Wolfram Language Mathematica | 241220T021645Z | 138 Aspe |
| nan | Rust | 240929T015852Z | 138 Aspe |
| nan | Python 3.8+ | 240908T183007Z | Jonathan |
| 161 | Maple | 240909T164645Z | Sophia A |
| 2222 | C++ gcc | 240909T133454Z | jdt |
| 061 | Charcoal | 240908T232023Z | Neil |
Wolfram Language (Mathematica), ~8.49784
(* Memoised recurrence PT[n, k]. PT[1, k] = 1 exactly when k > 0, so the
   single-node tree has height 1 -- height is presumably counted in nodes
   and PT[n, k] counts rooted trees with n nodes and height at most k;
   TODO confirm against the challenge statement. *)
Clear["Global`*"];
(*div[n]: the list of positive divisors of n*)div[n_Integer?Positive] := Divisors[n]
(*PT memoises itself via the PT[n, k] = ... self-assignment on first evaluation*)
PT[0, 0] := 0;
PT[n_Integer?Positive, k_Integer?NonNegative] :=
PT[n, k] =
If[n == 1,
If[k > 0, 1,
0], (Sum[
Sum[d*PT[i + 1, k]*PT[d, k - 1], {d, div[n - i - 1]}], {i, 0,
n - 2}])/(n - 1)]
Table[PT[n, k], {n, 0, 64}, {k, 0, n}] // Grid // AbsoluteTiming //Print (* 0.744101 seconds on my laptop *)
Table[PT[n, k], {n, 0, 128}, {k, 0, n}] // Grid // AbsoluteTiming //Print (* 6.02482 seconds on my laptop *)
Rust
// Cargo.toml
// [package]
// name = "polya_trees_count"
// version = "0.1.0"
// edition = "2021"
// # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
// [dependencies]
// num-bigint = "0.4"
// num-traits = "0.2"
// [profile.release]
// lto = true
// panic = "abort"
// $ cargo build --release
// $ cargo run --release
use std::collections::HashMap;
use std::time::Instant;
use num_bigint::BigInt;
use num_traits::One;
type BigIntCache = HashMap<(i32, i32), BigInt>;
/// Memo table mapping `n` to its sorted list of positive divisors.
type DivisorCache = HashMap<i32, Vec<i32>>;
/// Returns all positive divisors of `n` in ascending order, memoised in
/// `cache`. Returns an owned `Vec` so callers can iterate while the cache
/// is borrowed elsewhere.
fn divisors(n: i32, cache: &mut DivisorCache) -> Vec<i32> {
    // Entry API: one hash lookup serves both the hit and miss paths, and the
    // miss path no longer clones the vector an extra time just to insert it.
    cache
        .entry(n)
        .or_insert_with(|| (1..=n).filter(|d| n % d == 0).collect())
        .clone()
}
/// Memoised count of rooted trees with `nodes` nodes and height at most
/// `max_height` (height counted in nodes: the single-node tree has height 1).
/// Rust port of Jonathan Allan's Python recurrence.
fn tree_count(nodes: i32, max_height: i32, cache: &mut BigIntCache, divisor_cache: &mut DivisorCache) -> BigInt {
    // Base case: the single-node tree exists iff at least height 1 is allowed.
    if nodes == 1 {
        return if max_height > 0 { BigInt::one() } else { BigInt::from(0) };
    }
    if let Some(count) = cache.get(&(nodes, max_height)) {
        return count.clone();
    }
    let next_height = max_height - 1;
    let mut total = BigInt::from(0);
    for i in 1..nodes {
        // Hoisted: this factor does not depend on `d`, so compute it once per
        // `i` instead of once per divisor (as the Python version does via
        // divisor_weighted_tree_count).
        let left = tree_count(i, max_height, cache, divisor_cache);
        for &d in &divisors(nodes - i, divisor_cache) {
            total += &left * BigInt::from(d) * tree_count(d, next_height, cache, divisor_cache);
        }
    }
    total /= BigInt::from(nodes - 1);
    cache.insert((nodes, max_height), total.clone());
    total
}
/// Builds the triangular table of tree counts for all `n < size`,
/// `0 <= h <= n`, printing and returning the elapsed wall-clock seconds
/// together with the table.
fn measure(size: usize) -> (f64, Vec<BigInt>) {
    let mut divisor_cache = DivisorCache::new();
    let mut tree_count_cache = BigIntCache::new();
    let clock = Instant::now();
    let mut results = Vec::with_capacity(size * (size + 1) / 2);
    for nodes in 0..size as i32 {
        for height in 0..=nodes {
            results.push(tree_count(nodes, height, &mut tree_count_cache, &mut divisor_cache));
        }
    }
    let seconds = clock.elapsed().as_secs_f64();
    println!("{}: {} seconds", size, seconds);
    (seconds, results)
}
/// Times the benchmark at 64 and 128 rows, prints the size-doubling ratio,
/// then dumps the values of the larger table.
fn main() {
    let base_size = 64;
    let (small_time, _small_table) = measure(base_size);
    let (large_time, large_table) = measure(base_size * 2);
    println!("Ratio: {}", large_time / small_time);
    print!("[");
    for entry in &large_table {
        print!("{}, ", entry);
    }
    println!("]");
}
Rust port of Jonathan Allan's answer. The ratio looks about the same:
64: 0.0767605 seconds
128: 0.8220573 seconds
Ratio: 10.709379172881887
64: 0.0745293 seconds
128: 0.824322 seconds
Ratio: 11.060374912953696
64: 0.0759358 seconds
128: 0.881342 seconds
Ratio: 11.606409624972674
Python 3.8+, ~8.3 – 11.5 (previously ~6.8 – 10.9)
Thanks to gsitcia for improving speed by extracting a memoised weighted tree count function and for providing a nice timing framework.
import functools
@functools.lru_cache(None)
def divisors(n):
    """Return all positive divisors of n, largest first (memoised)."""
    # Complement trick: as q runs upward through the divisors of n,
    # n // q runs downward through them, matching range(n, 0, -1) order.
    return [n // q for q in range(1, n + 1) if n % q == 0]
@functools.lru_cache(None)
def divisor_weighted_tree_count(nodes, max_height):
    """Memoised sum of d * tree_count(d, max_height) over all divisors d of nodes."""
    total = 0
    for d in divisors(nodes):
        total += d * tree_count(d, max_height)
    return total
@functools.lru_cache(None)
def tree_count(nodes, max_height):
    """Memoised count of rooted trees with `nodes` nodes and height <= max_height.

    Height is counted in nodes: the single-node tree has height 1, as the
    base case below shows.
    """
    if nodes == 1:
        return int(max_height > 0)
    if max_height == 0:
        return 0
    # Children of the remaining nodes may be at most one level shorter.
    total = 0
    for left in range(1, nodes):
        total += tree_count(left, max_height) * divisor_weighted_tree_count(nodes - left, max_height - 1)
    return total // (nodes - 1)
Try it online! (The ratio at TIO is not as good as locally in Python 3.11 on Windows 11 for some reason, I used the latter for my headline ratio range.)
Here are the results of the same performance measuring as in the TIO code run six times locally (Python 3.11, Windows 11, AMD Ryzen 9 5900HX):
ratios=[7.547238551824045, 7.7139313128133695, 8.107636991904526, 8.47469155964413, 8.515605405050453, 9.172278506610215, 9.264749488149516, 9.557285255101801, 9.732214553686939, 10.802841012657689]
average_of_non_outlier_ratios=8.81729913412012
ratios=[8.762047219247435, 9.363372687067253, 10.145566544719255, 10.273140281551237, 10.487873752826854, 10.565872639113923, 10.64029167782034, 10.851233192929538, 10.912942606522284]
average_of_non_outlier_ratios=10.332478682289771
ratios=[8.382434555765164, 8.908493460199315, 9.066042017779642, 9.246098538177652, 9.304737133942659, 9.344529276678669, 9.546876520275672, 9.67229994476119, 9.823606780332556, 10.052732405298912, 10.14929796045462]
average_of_non_outlier_ratios=9.440601786382919
ratios=[7.803052242564962, 7.932774450890674, 8.347134115632969, 8.852060665356214, 8.994566905937972, 9.0039864187857, 9.498503106794697, 9.546413522518069, 9.691255389690461, 9.854593372412797, 9.937521780512633, 10.19999157374594]
average_of_non_outlier_ratios=9.165880972853216
ratios=[7.099565478498517, 8.512732314924916, 9.046465299081852, 9.084934322568525, 9.327585485547285, 9.338322586851053, 9.863004542840564, 10.110251878795596, 10.392818699806847, 10.751848197312722]
average_of_non_outlier_ratios=9.459514391302081
ratios=[8.368777843440359, 8.996354403223918, 9.304374924721822, 9.380879393237096, 9.64438818894823, 9.765254594302451, 10.050183387024001, 10.396627186085164, 10.611003799304875]
average_of_non_outlier_ratios=9.648294582506097
Each ratio in ratios is the minimum time across ten trials at size \$128\$ divided by the minimum time across ten trials at size \$64\$. Each run of trials stops when it does not appear that the next trial will finish within \$59\$ seconds of starting the run. The average of the ratios after discarding the minimum and maximum (average_of_non_outlier_ratios) is then displayed.
Yes, there is quite a large variance in the results, I'm not sure how to better measure this, but it looks like it's roughly \$8=2^3\$.
Maple, 161 bytes, ~9.8
Uncompressed:
# div(n): the set of positive divisors of n.
div := n -> numtheory:-divisors(n):
# PT(n, k): the memoised tree-count recurrence from the reference answer;
# "option remember" caches each (n, k) result.
PT := proc(n, k) option remember; local d, i;
# Single node: a tree exists iff at least height 1 is allowed (k > 0).
if n = 1 then ifelse(k > 0, 1, 0) else
add(add(d * PT(i + 1, k) * PT(d, k - 1),
d = div(n - i - 1)), i = 0..n - 2) / (n - 1) fi end:
Benchmark:
# Tri(rows): the flattened triangle PT(n, k) for n = 0..rows, k = 0..n.
Tri := rows -> local n, k; seq(seq(PT(n, k), k=0..n), n=0..rows):
CodeTools:-Usage(Tri(64));
memory used=97.22MiB, alloc change=32.00MiB,
cpu time=1.42s, real time=1.47s, gc time=0ns
CodeTools:-Usage(Tri(128));
memory used=1.02GiB, alloc change=97.00MiB,
cpu time=14.03s, real time=14.26s, gc time=93.75ms
cpu: 14.03/1.42 ~ 9.8;
real: 14.26/1.47 ~ 9.7;
C++ (gcc), 2222 bytes
#include <chrono>
#include <map>
#include <iostream>
#include <vector>
#include <boost/multiprecision/cpp_int.hpp>
using bigint = boost::multiprecision::cpp_int;
std::map<int, std::vector<int>> divisor_cache;
std::map<std::pair<int, int>, bigint> tree_count_cache;
// Returns all positive divisors of n in ascending order.
// Results are memoised in the global divisor_cache; the returned reference
// stays valid because std::map never invalidates references on insertion.
const std::vector<int>& divisors(int n) {
    auto it = divisor_cache.find(n);
    if (it != divisor_cache.end()) {
        return it->second;
    }
    // Default-construct the cache slot and fill it in place. The previous
    // version built a local vector (shadowing the function name), copied it
    // into the map, then looked the key up a third time to return it.
    std::vector<int>& divs = divisor_cache[n];
    for (int d = 1; d <= n; ++d) {
        if (n % d == 0) {
            divs.push_back(d);
        }
    }
    return divs;
}
// Memoised count of rooted trees with `nodes` nodes and height at most
// max_height (height counted in nodes: the single-node tree has height 1).
bigint tree_count(int nodes, int max_height) {
    if (nodes == 1) {
        // The single-node tree exists iff at least height 1 is allowed.
        return max_height > 0 ? 1 : 0;
    }
    auto cache_key = std::make_pair(nodes, max_height);
    // Single lookup; the old code did find() followed by operator[].
    auto cached = tree_count_cache.find(cache_key);
    if (cached != tree_count_cache.end()) {
        return cached->second;
    }
    int next_height = max_height - 1;
    bigint total = 0;
    for (int i = 1; i < nodes; ++i) {
        // This factor does not depend on d, so hoist it out of the divisor
        // loop instead of recomputing (a cache hit, but still a map lookup
        // and a bigint copy) once per divisor.
        const bigint left = tree_count(i, max_height);
        for (const auto& d : divisors(nodes - i)) {
            total += left * d * tree_count(d, next_height);
        }
    }
    total /= (nodes - 1);
    tree_count_cache[cache_key] = total;
    return total;
}
// Times the construction of the triangular tree-count table (all n < size,
// 0 <= h <= n) from cold caches; prints and returns the elapsed seconds
// together with the table.
std::pair<double, std::vector<bigint>> measure(int size) {
    divisor_cache.clear();
    tree_count_cache.clear();
    const auto start = std::chrono::steady_clock::now();
    std::vector<bigint> table;
    table.reserve(size * (size + 1) / 2);
    for (int nodes = 0; nodes < size; ++nodes) {
        for (int height = 0; height <= nodes; ++height) {
            table.push_back(tree_count(nodes, height));
        }
    }
    const std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    std::cout << size << ": " << elapsed.count() << " seconds" << std::endl;
    return { elapsed.count(), table };
}
// Runs the benchmark at 64 and 128 rows, prints the size-doubling timing
// ratio, then dumps the values of the larger table.
int main() {
    const int base_size = 64;
    auto [small_time, small_table] = measure(base_size);
    auto [large_time, large_table] = measure(base_size * 2);
    std::cout << "Ratio: " << large_time / small_time << "\n[";
    for (const auto& value : large_table) {
        std::cout << value << ", ";
    }
    std::cout << "]\n";
    return 0;
}
C++ port of Jonathan Allan's answer. The ratio looks about the same:
64: 0.115634 seconds
64: 0.116452 seconds
64: 0.114451 seconds
64: 0.118066 seconds
64: 0.118879 seconds
64: 0.117783 seconds
64: 0.116525 seconds
64: 0.117978 seconds
64: 0.116567 seconds
64: 0.117778 seconds
128: 1.38461 seconds
128: 1.40242 seconds
128: 1.39773 seconds
128: 1.41565 seconds
128: 1.36205 seconds
128: 1.36052 seconds
128: 1.38965 seconds
128: 1.38398 seconds
128: 1.34887 seconds
128: 1.32237 seconds
Ratio: 11.7663
Charcoal, 61 bytes, score ~9.4
Nθ⊞υ⟦⁰⟧Fθ⊞υE⊕θ∧κ∨¬ι÷ΣEιΣE÷ι⊕μ××⊕ξ§§υ⊕ξ⊖κ§§υ⁻ι⊖×⊕ξ⊕μκιEυ⭆¹…ι⊕κ
Try it online! Link is to verbose version of code. Explanation: Initially based on the reference implementation but using dynamic programming rather than recursion and memoisation, I replaced i with nodes - i and then switched the order of the double sum, allowing me to replace the inner loop with a stepped loop.
Before the last optimisation this code definitely exhibited O(n⁴) complexity and could only manage n=32 online. However if I Attempt This Online! I get timings of ~1s for n=16, ~4s for n=32 and ~32s for n=64, so this version might actually be better than that.
Edit: Testing locally, 10 runs for n=128 averaged out at 2.63 minutes each while 10 runs for n=64 averaged out at 0.28 minutes each, about 9.4 times faster.