Merge branch 'resource-limit-tests' into resource-limit
This includes the changes that are on the `fix/tests` branch.
David Ellis committed Oct 20, 2020
2 parents baeff32 + bb06cdf commit ba55eca
Showing 8 changed files with 379 additions and 329 deletions.
16 changes: 11 additions & 5 deletions odd-jobs.cabal
@@ -1,10 +1,10 @@
cabal-version: 1.12

-- This file has been generated from package.yaml by hpack version 0.31.2.
-- This file has been generated from package.yaml by hpack version 0.33.0.
--
-- see: https://github.com/sol/hpack
--
-- hash: e6616995768a7a1fd8654b632013cf28cfa6e31926533c4470e8e970d1d2d3bd
-- hash: 559bd60bc7a5df63861a213412268e713c83719c49399149be8dcbcc620aa0e5

name: odd-jobs
version: 0.2.2
@@ -208,11 +208,18 @@ test-suite jobrunner
main-is: Test.hs
other-modules:
CliParser
Try
Try2
OddJobs.Cli
OddJobs.ConfigBuilder
OddJobs.Endpoints
OddJobs.Job
OddJobs.Migrations
OddJobs.Types
OddJobs.Web
UI
Paths_odd_jobs
hs-source-dirs:
test
src
default-extensions: NamedFieldPuns LambdaCase TemplateHaskell ScopedTypeVariables GeneralizedNewtypeDeriving QuasiQuotes OverloadedStrings
ghc-options: -Wall -fno-warn-orphans -fno-warn-unused-imports -fno-warn-dodgy-exports -Werror=missing-fields -threaded -with-rtsopts=-N -main-is Test
build-depends:
@@ -236,7 +243,6 @@ test-suite jobrunner
, monad-control
, monad-logger
, mtl
, odd-jobs
, optparse-applicative
, postgresql-simple
, random
5 changes: 3 additions & 2 deletions package.yaml
@@ -133,13 +133,14 @@ tests:
- -threaded
- -with-rtsopts=-N
main: Test
source-dirs: test
source-dirs:
- test
- src
dependencies:
- tasty
- tasty-discover
- hedgehog
- tasty-hedgehog
- odd-jobs
- tasty-hunit
- random
- monad-control
37 changes: 34 additions & 3 deletions src/OddJobs/ConfigBuilder.hs
@@ -53,7 +53,39 @@ mkConfig :: (LogLevel -> LogEvent -> IO ())
-- function, unless you know what you're doing.
-> Config
-- ^ The final 'Config' that can be used to start various job-runners
mkConfig logger tname dbpool ccControl jrunner configOverridesFn =
mkConfig logger tname =
mkResourceConfig logger (simpleTableNames tname)

-- | This function gives you a 'Config' with a bunch of sensible defaults
-- already applied, but it allows the specification of all database table
-- names. It requires the bare minimum of other configuration parameters that
-- this library cannot assume on your behalf.
--
-- It makes a few __important assumptions__ about your 'jobPayload' JSON, which
-- are documented in 'defaultJobType'.
mkResourceConfig :: (LogLevel -> LogEvent -> IO ())
-- ^ "Structured logging" function. Ref: 'cfgLogger'
-> TableNames
-- ^ DB tables which hold your jobs and resources
-> Pool Connection
-- ^ DB connection-pool to be used by job-runner. Ref: 'cfgDbPool'
-> ConcurrencyControl
-- ^ Concurrency configuration. Ref: 'cfgConcurrencyControl'
-> (Job -> IO ())
-- ^ The actual "job runner" which contains your application code. Ref: 'cfgJobRunner'
-> (Config -> Config)
-- ^ A function that allows you to modify the \"interim config\". The
-- \"interim config\" will cotain a bunch of in-built default config
-- params, along with the config params that you\'ve just provided
-- (i.e. logging function, table name, DB pool, etc). You can use this
-- function to override values in the \"interim config\". If you do not
-- wish to modify the \"interim config\" just pass 'Prelude.id' as an
-- argument to this parameter. __Note:__ it is strongly recommended
-- that you __do not__ modify the generated 'Config' outside of this
-- function, unless you know what you're doing.
-> Config
-- ^ The final 'Config' that can be used to start various job-runners
mkResourceConfig logger tnames dbpool ccControl jrunner configOverridesFn =
let cfg = configOverridesFn $ Config
{ cfgPollingInterval = defaultPollingInterval
, cfgOnJobSuccess = (const $ pure ())
@@ -63,7 +95,7 @@ mkConfig logger tname dbpool ccControl jrunner configOverridesFn =
, cfgDbPool = dbpool
, cfgOnJobStart = (const $ pure ())
, cfgDefaultMaxAttempts = 10
, cfgTableNames = simpleTableNames tname
, cfgTableNames = tnames
, cfgOnJobTimeout = (const $ pure ())
, cfgConcurrencyControl = ccControl
, cfgDefaultResourceLimit = 1
@@ -77,7 +109,6 @@ mkConfig logger tname dbpool ccControl jrunner configOverridesFn =
in cfg



-- | If you aren't interested in structured logging, you can use this function
-- to emit plain-text logs (or define your own).
defaultLogStr :: (Job -> Text)
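For context, here is a minimal sketch of how the new `mkResourceConfig` and `createJobTables` entry points might be wired together. The connection string, pool sizing, table names, and job handler are assumptions for illustration only, and the exact module exports may differ slightly from what is shown in this diff.

{-# LANGUAGE OverloadedStrings #-}
import Data.Pool (createPool, withResource)
import qualified Database.PostgreSQL.Simple as PGS
import OddJobs.ConfigBuilder (mkResourceConfig)
import OddJobs.Job (startJobRunner)
import OddJobs.Migrations (createJobTables)
import OddJobs.Types

main :: IO ()
main = do
  -- Hypothetical connection string and pool sizing.
  pool <- createPool (PGS.connectPostgreSQL "dbname=myapp") PGS.close 1 10 5
  -- Positional order follows the `TableNames tname _` pattern in Migrations.hs:
  -- job table first, resource table second.
  let tnames = TableNames "jobs" "job_resources"
  withResource pool $ \conn -> createJobTables conn tnames
  let jobRunner :: Job -> IO ()
      jobRunner _job = putStrLn "application code goes here"
      -- No-op logger for the sketch; plug in real structured logging instead.
      logger _level _event = pure ()
  startJobRunner $
    mkResourceConfig logger tnames pool (MaxConcurrentJobs 5) jobRunner id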
1 change: 1 addition & 0 deletions src/OddJobs/Job.hs
@@ -394,6 +394,7 @@ runJob jid = do
Just job -> do
startTime <- liftIO getCurrentTime
lockTimeout <- getDefaultJobTimeout
log LevelInfo $ LogJobStart job
(flip catches) [Handler $ timeoutHandler job startTime, Handler $ exceptionHandler job startTime] $ do
runJobWithTimeout lockTimeout job
endTime <- liftIO getCurrentTime
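Because `runJob` now emits a `LogJobStart` event before handing the job to the application code, a custom `cfgLogger` can observe job pick-up directly. A rough sketch, assuming `LogEvent` is exported from OddJobs.Types and using a wildcard so other constructors are left untouched:

import Control.Monad.Logger (LogLevel)
import OddJobs.Types (LogEvent (..))

-- Illustrative logger: react to the new LogJobStart event and ignore the rest
-- (in practice, forward the rest to your normal structured logger).
jobStartAwareLogger :: LogLevel -> LogEvent -> IO ()
jobStartAwareLogger _level event = case event of
  LogJobStart{} -> putStrLn "a job was just picked up and started"
  _ -> pure ()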
46 changes: 35 additions & 11 deletions src/OddJobs/Migrations.hs
@@ -8,6 +8,29 @@ import Database.PostgreSQL.Simple as PGS
import Data.Functor (void)
import OddJobs.Types

-- | Create database objects based on the name given for the primary job table,
-- with the names of other objects being generated automatically.
createJobTable :: Connection -> TableName -> IO ()
createJobTable conn tname = createJobTables conn $ simpleTableNames tname

-- | Create database objects based on the name given for the primary job table
-- and the associated resource table. The names of indexes, functions, and
-- triggers are generated automatically.
createJobTables :: Connection -> TableNames -> IO ()
createJobTables conn tnames = do
void $ PGS.execute_ conn (createJobTableQuery tnames)
void $ PGS.execute_ conn (createResourceTableQuery tnames)
void $ PGS.execute_ conn (createNotificationTrigger tnames)

-- | Remove all Odd Jobs objects from the database
dropJobTables :: Connection -> TableNames -> IO ()
dropJobTables conn tnames = do
void $ PGS.execute_ conn $ dropObject "table" $ tnResource tnames
void $ PGS.execute_ conn $ dropObject "table" $ tnJob tnames
void $ PGS.execute_ conn $ dropObject "function" $ notifyFunctionName tnames
where
dropObject typ obj = "drop " <> typ <> " if exists " <> obj <> ";"

createJobTableQuery :: TableNames -> Query
createJobTableQuery (TableNames tname _) = "CREATE TABLE " <> tname <>
"( id serial primary key" <>
@@ -39,23 +62,24 @@ createResourceTableQuery tnames = "create table " <> tnResource tnames <>

createNotificationTrigger :: TableNames -> Query
createNotificationTrigger tnames = "create or replace function " <> fnName <> "() returns trigger as $$" <>

createResourceTableQuery :: TableNames -> Query
createResourceTableQuery tnames = "create table " <> tnResource tnames <>
"( resource_id text primary key" <>
", resource_limit int" <>
");"

createNotificationTrigger :: TableNames -> Query
createNotificationTrigger tnames = "create or replace function " <> notifyFunctionName tnames <> "() returns trigger as $$" <>
"begin \n" <>
" perform pg_notify('" <> pgEventName tnames <> "', \n" <>
" json_build_object('id', new.id, 'run_at', new.run_at, 'locked_at', new.locked_at)::text); \n" <>
" return new; \n" <>
"end; \n" <>
"$$ language plpgsql;" <>
"create trigger " <> trgName <> " after insert on " <> tnJob tnames <> " for each row execute procedure " <> fnName <> "();"
"create trigger " <> trgName <> " after insert on " <> tnJob tnames <> " for each row execute procedure " <> notifyFunctionName tnames <> "();"
where
fnName = "notify_job_monitor_for_" <> tnJob tnames
trgName = "trg_notify_job_monitor_for_" <> tnJob tnames


createJobTable :: Connection -> TableName -> IO ()
createJobTable conn tname = createJobTables conn $ simpleTableNames tname

createJobTables :: Connection -> TableNames -> IO ()
createJobTables conn tnames = do
void $ PGS.execute_ conn (createJobTableQuery tnames)
void $ PGS.execute_ conn (createResourceTableQuery tnames)
void $ PGS.execute_ conn (createNotificationTrigger tnames)
notifyFunctionName :: TableNames -> Query
notifyFunctionName tnames = "notify_job_monitor_for_" <> tnJob tnames
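The new `createJobTables` / `dropJobTables` pair lends itself to test setup and teardown around a shared database. An illustrative sketch, with connection handling and naming assumed for the example:

import Control.Exception (bracket_)
import qualified Database.PostgreSQL.Simple as PGS
import OddJobs.Migrations (createJobTables, dropJobTables)
import OddJobs.Types (TableNames)

-- Create the job and resource tables before an action and drop them afterwards,
-- e.g. around a test-suite run.
withFreshJobTables :: PGS.Connection -> TableNames -> IO a -> IO a
withFreshJobTables conn tnames =
  bracket_ (createJobTables conn tnames) (dropJobTables conn tnames)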
