Power Network Fixes
Fix power distribution so it is spread evenly across all machines. Fix a glitch where removing a machine from the network could cause other machines to stop working.
This commit is contained in:
parent
b9ca4fc778
commit
b9de6b6ef6
@ -4,6 +4,7 @@ ele.register_conduit("elepower_dynamics:conduit", {
|
|||||||
description = "Power Conduit",
|
description = "Power Conduit",
|
||||||
tiles = {"elepower_conduit.png"},
|
tiles = {"elepower_conduit.png"},
|
||||||
use_texture_alpha = "clip",
|
use_texture_alpha = "clip",
|
||||||
|
ele_conductor_density = 1/8,
|
||||||
groups = {oddly_breakable_by_hand = 1, cracky = 1}
|
groups = {oddly_breakable_by_hand = 1, cracky = 1}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -257,8 +257,8 @@ function ele.register_base_device(nodename, nodedef)
|
|||||||
-- Override destruct callback
|
-- Override destruct callback
|
||||||
local original_after_destruct = nodedef.after_destruct
|
local original_after_destruct = nodedef.after_destruct
|
||||||
nodedef.after_destruct = function (pos)
|
nodedef.after_destruct = function (pos)
|
||||||
ele.clear_networks(pos)
|
|
||||||
|
|
||||||
|
ele.clear_networks(pos)
|
||||||
if original_after_destruct then
|
if original_after_destruct then
|
||||||
original_after_destruct(pos)
|
original_after_destruct(pos)
|
||||||
end
|
end
|
||||||
|
@ -139,7 +139,8 @@ local function give_node_power(pos, available)
|
|||||||
local capacity = ele.helpers.get_node_property(user_meta, pos, "capacity")
|
local capacity = ele.helpers.get_node_property(user_meta, pos, "capacity")
|
||||||
local inrush = ele.helpers.get_node_property(user_meta, pos, "inrush")
|
local inrush = ele.helpers.get_node_property(user_meta, pos, "inrush")
|
||||||
local storage = user_meta:get_int("storage")
|
local storage = user_meta:get_int("storage")
|
||||||
|
local want = capacity - storage
|
||||||
|
|
||||||
local total_add = 0
|
local total_add = 0
|
||||||
|
|
||||||
if available >= inrush then
|
if available >= inrush then
|
||||||
@ -157,9 +158,10 @@ local function give_node_power(pos, available)
|
|||||||
storage = capacity
|
storage = capacity
|
||||||
end
|
end
|
||||||
|
|
||||||
return total_add, storage
|
return total_add, storage, want
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
||||||
minetest.register_abm({
|
minetest.register_abm({
|
||||||
nodenames = {"group:ele_provider"},
|
nodenames = {"group:ele_provider"},
|
||||||
label = "elepower Power Transfer Tick",
|
label = "elepower Power Transfer Tick",
|
||||||
@ -218,32 +220,73 @@ minetest.register_abm({
|
|||||||
|
|
||||||
if p_output and pw_storage >= p_output then
|
if p_output and pw_storage >= p_output then
|
||||||
pw_supply = pw_supply + p_output
|
pw_supply = pw_supply + p_output
|
||||||
|
|
||||||
elseif p_output and pw_storage < p_output then
|
elseif p_output and pw_storage < p_output then
|
||||||
pw_supply = pw_supply + pw_storage
|
pw_supply = pw_supply + pw_storage
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
-- Give power to users
|
-- Give power to users
|
||||||
for _,ndv in ipairs(users) do
|
local divide_power = {}
|
||||||
if pw_demand > pw_supply then
|
|
||||||
break
|
for _,ndv in ipairs(users) do --ndv = pos table
|
||||||
end
|
|
||||||
|
-- Check how much power a node wants and can get ie is it close to full charge
|
||||||
-- Sharing: Determine how much each user gets
|
local user_gets, user_storage, user_want = give_node_power(ndv, (pw_supply - pw_demand))
|
||||||
local user_gets, user_storage = give_node_power(ndv, (pw_supply - pw_demand))
|
|
||||||
pw_demand = pw_demand + user_gets
|
-- Add the node_users wanting power to table for later power division
|
||||||
|
if user_gets > 0 then
|
||||||
if user_gets > 0 then
|
table.insert(divide_power,{pos = ndv,user_gets = user_gets, user_storage = user_storage})
|
||||||
local user_meta = minetest.get_meta(ndv)
|
end
|
||||||
user_meta:set_int("storage", user_storage + user_gets)
|
|
||||||
|
|
||||||
-- Set timer on this node
|
|
||||||
ele.helpers.start_timer(ndv)
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
|
||||||
|
-- The below shares available power from a network between node_users
|
||||||
|
-- Only whole numbers are accepted so any remainders are added to
|
||||||
|
-- the first few node_users. If divided power is less than 1 the
|
||||||
|
-- network is overloaded and delivers no power to any nodes.
|
||||||
|
-- A node_user can receive power from two different networks
|
||||||
|
-- if pw_supply ~= 0 then minetest.debug(node.name.." - Power Supplied: "..pw_supply) end --debug line
|
||||||
|
|
||||||
|
local num_users = #divide_power
|
||||||
|
local div_pwr = pw_supply/num_users
|
||||||
|
local whole_pwr_num = math.floor(div_pwr)
|
||||||
|
local remainder_pwr_num = (math.fmod(div_pwr,1))*num_users
|
||||||
|
|
||||||
|
if div_pwr < 1 then
|
||||||
|
num_users = 0 -- network overload
|
||||||
|
end
|
||||||
|
|
||||||
|
local i = 1
|
||||||
|
|
||||||
|
while(num_users >= i)do
|
||||||
|
local final_pwr_num
|
||||||
|
|
||||||
|
if remainder_pwr_num > 0.5 then
|
||||||
|
final_pwr_num = whole_pwr_num + 1
|
||||||
|
remainder_pwr_num = remainder_pwr_num - 1
|
||||||
|
|
||||||
|
else
|
||||||
|
final_pwr_num = whole_pwr_num
|
||||||
|
end
|
||||||
|
|
||||||
|
if final_pwr_num > divide_power[i].user_gets then
|
||||||
|
final_pwr_num = divide_power[i].user_gets
|
||||||
|
end
|
||||||
|
|
||||||
|
--minetest.debug("node_user "..minetest.pos_to_string(divide_power[i].pos).." Power Supplied:"..final_pwr_num) -- debug line
|
||||||
|
|
||||||
|
local user_meta = minetest.get_meta(divide_power[i].pos)
|
||||||
|
user_meta:set_int("storage", divide_power[i].user_storage + final_pwr_num)
|
||||||
|
pw_demand = pw_demand + final_pwr_num
|
||||||
|
|
||||||
|
ele.helpers.start_timer(divide_power[i].pos)
|
||||||
|
|
||||||
|
i = i+1
|
||||||
|
end
|
||||||
|
|
||||||
-- Take the power from provider nodes
|
-- Take the power from provider nodes
|
||||||
if pw_demand > 0 then
|
if pw_demand > 0 then
|
||||||
|
|
||||||
for _, spos in ipairs(providers) do
|
for _, spos in ipairs(providers) do
|
||||||
if pw_demand == 0 then break end
|
if pw_demand == 0 then break end
|
||||||
local smeta = minetest.get_meta(spos)
|
local smeta = minetest.get_meta(spos)
|
||||||
@ -260,9 +303,13 @@ minetest.register_abm({
|
|||||||
ele.helpers.start_timer(spos)
|
ele.helpers.start_timer(spos)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
--if pw_supply ~= 0 then minetest.debug("end_run") end -- debug line
|
||||||
end,
|
end,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
------------------------
|
||||||
|
-- Network Add/Remove --
|
||||||
|
------------------------
|
||||||
local function check_connections(pos)
|
local function check_connections(pos)
|
||||||
local connections = {}
|
local connections = {}
|
||||||
local positions = {
|
local positions = {
|
||||||
@ -290,14 +337,17 @@ function ele.clear_networks(pos)
|
|||||||
local placed = name ~= "air"
|
local placed = name ~= "air"
|
||||||
local positions = check_connections(pos)
|
local positions = check_connections(pos)
|
||||||
if #positions < 1 then return end
|
if #positions < 1 then return end
|
||||||
|
|
||||||
local hash_pos = minetest.hash_node_position(pos)
|
local hash_pos = minetest.hash_node_position(pos)
|
||||||
local dead_end = #positions == 1
|
local dead_end = #positions == 1
|
||||||
|
|
||||||
for _,connected_pos in ipairs(positions) do
|
for _,connected_pos in ipairs(positions) do
|
||||||
|
|
||||||
local networks = ele.graphcache.devices[minetest.hash_node_position(connected_pos)] or
|
local networks = ele.graphcache.devices[minetest.hash_node_position(connected_pos)] or
|
||||||
{minetest.pos_to_string(connected_pos)}
|
{minetest.pos_to_string(connected_pos)}
|
||||||
|
|
||||||
for _,net in ipairs(networks) do
|
for _,net in ipairs(networks) do
|
||||||
if net and ele.graphcache[net] then
|
if net and ele.graphcache[net] then
|
||||||
-- This is so we can break the pipeline instead of the network search loop
|
-- This is so we can break the pipeline instead of the network search loop
|
||||||
while true do
|
while true do
|
||||||
if dead_end and placed then
|
if dead_end and placed then
|
||||||
@ -344,6 +394,7 @@ function ele.clear_networks(pos)
|
|||||||
local network_ids = ele.graphcache.devices[minetest.hash_node_position(positions[1])] or
|
local network_ids = ele.graphcache.devices[minetest.hash_node_position(positions[1])] or
|
||||||
{minetest.pos_to_string(positions[1])}
|
{minetest.pos_to_string(positions[1])}
|
||||||
|
|
||||||
|
|
||||||
if not #network_ids then
|
if not #network_ids then
|
||||||
-- We're evidently not on a network, nothing to remove ourselves from
|
-- We're evidently not on a network, nothing to remove ourselves from
|
||||||
break
|
break
|
||||||
@ -360,14 +411,17 @@ function ele.clear_networks(pos)
|
|||||||
ele.graphcache.devices[pos1] = nil
|
ele.graphcache.devices[pos1] = nil
|
||||||
end
|
end
|
||||||
ele.graphcache[int_net] = nil
|
ele.graphcache[int_net] = nil
|
||||||
|
|
||||||
else
|
else
|
||||||
-- Search for and remove device
|
-- Search for and remove device
|
||||||
|
-- This checks and removes from network.users,
|
||||||
|
-- network.conductors and network.providers
|
||||||
ele.graphcache.devices[hash_pos] = nil
|
ele.graphcache.devices[hash_pos] = nil
|
||||||
for tblname, table in pairs(network) do
|
for tblname, tables in pairs(network) do
|
||||||
if type(table) == "table" then
|
if type(tables) == "table" then
|
||||||
for devicenum, device in pairs(table) do
|
for devicenum, device in pairs(tables) do
|
||||||
if vector.equals(device, pos) then
|
if vector.equals(device, pos) then
|
||||||
table[devicenum] = nil
|
table.remove(tables,devicenum)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -382,11 +436,11 @@ function ele.clear_networks(pos)
|
|||||||
local pos1 = minetest.hash_node_position(v)
|
local pos1 = minetest.hash_node_position(v)
|
||||||
ele.graphcache.devices[pos1] = nil
|
ele.graphcache.devices[pos1] = nil
|
||||||
end
|
end
|
||||||
ele.graphcache[net] = nil
|
ele.graphcache[net] = nil
|
||||||
break
|
break
|
||||||
end
|
end
|
||||||
break
|
break
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
Loading…
Reference in New Issue
Block a user