Problem Info
Problem link here.
My Work
#include <iostream>
#include <fstream>
#include <vector>
#include <map>
#include <algorithm>
// #include "debugging.h"
using std::cout;
using std::endl;
using std::vector;
using std::pair;
// 2019 dec plat
int main() {
std::ifstream read("pieaters.in");
int pie_num;
int cow_num;
read >> pie_num >> cow_num;
std::map<pair<int, int>, int> weights;
for (int c = 0; c < cow_num; c++) {
int weight;
int start;
int end;
read >> weight >> start >> end;
weights[{--start, --end}] = weight;
}
// max weight given that we can only use pies in a certain range
vector<vector<int>> max_weight(pie_num, vector<int>(pie_num));
for (int i = 0; i < pie_num; i++) {
max_weight[i][i] = weights[{i, i}];
}
for (int len = 2; len <= pie_num; len++) {
// ranges that can be considered given the length
vector<pair<int, int>> rel_ranges;
for (const auto& [range, w] : weights) {
if (range.second - range.first + 1 <= len) {
rel_ranges.push_back(range);
}
}
for (int start = 0; start + len - 1 < pie_num; start++) {
int end = start + len - 1;
for (const pair<int, int> r : rel_ranges) {
if (!(start <= r.first && r.second <= end)) {
continue;
}
// go through each range and try to add it to the current range
for (int i = r.first; i <= r.second; i++) {
int l_seg = 0;
if (i != start) {
l_seg = max_weight[start][i - 1];
}
int r_seg = 0;
if (i != end) {
r_seg = max_weight[i + 1][end];
}
max_weight[start][end] = std::max(
max_weight[start][end],
l_seg + r_seg + weights[r]
);
}
}
}
}
int total_max = max_weight[0][pie_num - 1];
cout << total_max << endl;
std::ofstream("pieaters.out") << total_max << endl;
}
It does a \mathcal{O}(n^3m) range DP. For each range, it goes through all the cows that eat pies within that range, giving a guaranteed pie for each cow and adding that cow’s weight to the left and right segments calculated previously.
This manages to pass on around 1/2 of the test cases.
Thing is, I’m not sure how to optimize out the factor of m. Can someone help?