LeonardoBerti committed on
Commit 97822f9 · verified · 1 Parent(s): 2d4e738

Upload 243 files

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +12 -0
  2. .gitignore +20 -0
  3. .vscode/launch.json +45 -0
  4. ABIDES/.gitignore +13 -0
  5. ABIDES/Kernel.py +575 -0
  6. ABIDES/LICENSE.txt +89 -0
  7. ABIDES/README.md +4 -0
  8. ABIDES/abides.py +32 -0
  9. ABIDES/agent/Agent.py +185 -0
  10. ABIDES/agent/ExchangeAgent.py +415 -0
  11. ABIDES/agent/FinancialAgent.py +36 -0
  12. ABIDES/agent/FundamentalTrackingAgent.py +64 -0
  13. ABIDES/agent/HeuristicBeliefLearningAgent.py +174 -0
  14. ABIDES/agent/NoiseAgent.py +161 -0
  15. ABIDES/agent/OrderBookImbalanceAgent.py +193 -0
  16. ABIDES/agent/TradingAgent.py +643 -0
  17. ABIDES/agent/ValueAgent.py +260 -0
  18. ABIDES/agent/WorldAgent.py +871 -0
  19. ABIDES/agent/ZeroIntelligenceAgent.py +320 -0
  20. ABIDES/agent/__init__.py +0 -0
  21. ABIDES/agent/etf/EtfArbAgent.py +197 -0
  22. ABIDES/agent/etf/EtfMarketMakerAgent.py +241 -0
  23. ABIDES/agent/etf/EtfPrimaryAgent.py +166 -0
  24. ABIDES/agent/etf/__init__.py +0 -0
  25. ABIDES/agent/examples/ExampleExperimentalAgent.py +146 -0
  26. ABIDES/agent/examples/ImpactAgent.py +160 -0
  27. ABIDES/agent/examples/MarketReplayAgent.py +125 -0
  28. ABIDES/agent/examples/MomentumAgent.py +80 -0
  29. ABIDES/agent/examples/QLearningAgent.py +213 -0
  30. ABIDES/agent/examples/ShockAgent.py +171 -0
  31. ABIDES/agent/examples/SubscriptionAgent.py +52 -0
  32. ABIDES/agent/examples/SumClientAgent.py +107 -0
  33. ABIDES/agent/examples/SumServiceAgent.py +77 -0
  34. ABIDES/agent/examples/__init__.py +0 -0
  35. ABIDES/agent/examples/crypto/PPFL_ClientAgent.py +380 -0
  36. ABIDES/agent/examples/crypto/PPFL_ServiceAgent.py +155 -0
  37. ABIDES/agent/examples/crypto/PPFL_TemplateClientAgent.py +311 -0
  38. ABIDES/agent/execution/ExecutionAgent.py +106 -0
  39. ABIDES/agent/execution/POVExecutionAgent.py +93 -0
  40. ABIDES/agent/execution/TWAPExecutionAgent.py +38 -0
  41. ABIDES/agent/execution/VWAPExecutionAgent.py +55 -0
  42. ABIDES/agent/market_makers/AdaptiveMarketMakerAgent.py +298 -0
  43. ABIDES/agent/market_makers/MarketMakerAgent.py +130 -0
  44. ABIDES/agent/market_makers/POVMarketMakerAgent.py +195 -0
  45. ABIDES/agent/market_makers/SpreadBasedMarketMakerAgent.py +282 -0
  46. ABIDES/cli/book_plot.py +259 -0
  47. ABIDES/cli/dump.py +23 -0
  48. ABIDES/cli/event_midpoint.py +208 -0
  49. ABIDES/cli/event_ticker.py +152 -0
  50. ABIDES/cli/intraday_index.py +255 -0
.gitattributes ADDED
@@ -0,0 +1,12 @@
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ .csv filter=lfs diff=lfs merge=lfs -text
+ *.csv filter=lfs diff=lfs merge=lfs -text
+ ABIDES/data/synthetic_fundamental.bz2 filter=lfs diff=lfs merge=lfs -text
+ ABIDES/data/trades/trades_2014/ct_20140128.bgz filter=lfs diff=lfs merge=lfs -text
+ ABIDES/realism/visualizations/market_replay_TSLA_2015-01-29_11-00-00_pov_0.1_40.png filter=lfs diff=lfs merge=lfs -text
+ ABIDES/util/plotting/rmsc03_two_hour.png filter=lfs diff=lfs merge=lfs -text
+ ABIDES/util/plotting/world_agent_sim.png filter=lfs diff=lfs merge=lfs -text
+ data/architecture.jpg filter=lfs diff=lfs merge=lfs -text
+ data/INTC/INTC.zip filter=lfs diff=lfs merge=lfs -text
+ data/simulations-1.png filter=lfs diff=lfs merge=lfs -text
+ data/simulations.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,20 @@
+ __pycache__
+ data/reconstructions
+ data/TSLA
+ data/INTC/INTC_*
+ data/INTC/t*
+ data/INTC/v*
+ data/plot
+ data/checkpoints
+ ABIDEs/log/paper/IABS*
+ ABIDES/log
+ *.pdf
+ trash
+ lightning_logs/
+ data/generated_orders.csv
+ data/real_orders.csv
+ wandb/
+ output
+ launch.sh
+ env
+ launch_sim.sh
.vscode/launch.json ADDED
@@ -0,0 +1,45 @@
+ {
+     // Use IntelliSense to learn about possible attributes.
+     // Hover to view descriptions of existing attributes.
+     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python: Current File2",
+             "type": "debugpy",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "args": ["-u", "ABIDES/abides.py", "-m", "TRADES", "-c", "world_agent_sim", "-t", "TSLA", "-date", "20150129", "-seed", "40", "-st", "'09:30:00'", "-et", "'10:00:00'", "-d", "True", "-id", "0.811", "-type", "DDIM"]
+         },
+         {
+             "name": "Python: Current File",
+             "type": "debugpy",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "args": [
+                 "-u",
+                 "ABIDES/abides.py",
+                 "-c",
+                 "world_agent_sim",
+                 "-t",
+                 "INTC",
+                 "-date",
+                 "20150130",
+                 "-s",
+                 "1234",
+                 "-d",
+                 "True",
+                 "-m",
+                 "CGAN",
+                 "-st",
+                 "'09:30:00'",
+                 "-et",
+                 "'10:00:00'",
+                 "-id",
+                 "-1.05518"
+             ]
+         }
+     ],
+ }
ABIDES/.gitignore ADDED
@@ -0,0 +1,13 @@
+
+ __pycache__/
+ batch_output/
+ cache/
+ local/
+ .idea/
+ results/
+ util/crypto/datasets
+ venv/
+ .idea/
+ .ipynb/
+ .ipynb_checkpoints/
+ log/IABS_1/
ABIDES/Kernel.py ADDED
@@ -0,0 +1,575 @@
+ import numpy as np
+ import pandas as pd
+
+ import os, queue, sys
+ from message.Message import MessageType
+
+ from util.util import log_print
+ from util.plotting import liquidity_telemetry
+ from main import plot_graphs
+ from evaluation.quantitative_eval import predictive_lstm
+
+ class Kernel:
+
+     def __init__(self, kernel_name, random_state = None):
+         # kernel_name is for human readers only.
+         self.name = kernel_name
+         self.random_state = random_state
+
+         if not random_state:
+             raise ValueError("A valid, seeded np.random.RandomState object is required " +
+                              "for the Kernel", self.name)
+             sys.exit()
+
+         # A single message queue to keep everything organized by increasing
+         # delivery timestamp.
+         self.messages = queue.PriorityQueue()
+
+         # currentTime is None until after kernelStarting() event completes
+         # for all agents. This is a pd.Timestamp that includes the date.
+         self.currentTime = None
+
+         # Timestamp at which the Kernel was created. Primarily used to
+         # create a unique log directory for this run. Also used to
+         # print some elapsed time and messages per second statistics.
+         self.kernelWallClockStart = pd.Timestamp('now')
+
+         # TODO: This is financial, and so probably should not be here...
+         self.meanResultByAgentType = {}
+         self.agentCountByType = {}
+
+         # The Kernel maintains a summary log to which agents can write
+         # information that should be centralized for very fast access
+         # by separate statistical summary programs. Detailed event
+         # logging should go only to the agent's individual log. This
+         # is for things like "final position value" and such.
+         self.summaryLog = []
+
+         log_print ("Kernel initialized: {}", self.name)
+
+
+     # This is called to actually start the simulation, once all agent
+     # configuration is done.
+     def runner(self, agents = [], startTime = None, stopTime = None,
+                num_simulations = 1, defaultComputationDelay = 1,
+                defaultLatency = 1, agentLatency = None, latencyNoise = [ 1.0 ],
+                agentLatencyModel = None, skip_log = False,
+                seed = None, oracle = None, log_dir = None):
+
+         # agents must be a list of agents for the simulation,
+         # based on class agent.Agent
+         self.agents = agents
+
+         # Simulation custom state in a freeform dictionary. Allows config files
+         # that drive multiple simulations, or require the ability to generate
+         # special logs after simulation, to obtain needed output without special
+         # case code in the Kernel. Per-agent state should be handled using the
+         # provided updateAgentState() method.
+         self.custom_state = {}
+
+         # The kernel start and stop time (first and last timestamp in
+         # the simulation, separate from anything like exchange open/close).
+         self.startTime = startTime
+         self.stopTime = stopTime
+
+         # The global seed, NOT used for anything agent-related.
+         self.seed = seed
+
+         # Should the Kernel skip writing agent logs?
+         self.skip_log = skip_log
+
+         # The data oracle for this simulation, if needed.
+         self.oracle = oracle
+
+         # If a log directory was not specified, use the initial wallclock.
+         self.log_dir = log_dir
+
+         # The kernel maintains a current time for each agent to allow
+         # simulation of per-agent computation delays. The agent's time
+         # is pushed forward (see below) each time it awakens, and it
+         # cannot receive new messages/wakeups until the global time
+         # reaches the agent's time. (i.e. it cannot act again while
+         # it is still "in the future")
+
+         # This also nicely enforces agents being unable to act before
+         # the simulation startTime.
+         self.agentCurrentTimes = [self.startTime] * len(agents)
+
+         # agentComputationDelays is in nanoseconds, starts with a default
+         # value from config, and can be changed by any agent at any time
+         # (for itself only). It represents the time penalty applied to
+         # an agent each time it is awakened (wakeup or recvMsg). The
+         # penalty applies _after_ the agent acts, before it may act again.
+         # TODO: this might someday change to pd.Timedelta objects.
+         self.agentComputationDelays = [defaultComputationDelay] * len(agents)
+
+         # If an agentLatencyModel is defined, it will be used instead of
+         # the older, non-model-based attributes.
+         self.agentLatencyModel = agentLatencyModel
+
+         # If an agentLatencyModel is NOT defined, the older parameters:
+         # agentLatency (or defaultLatency) and latencyNoise should be specified.
+         # These should be considered deprecated and will be removed in the future.
+
+         # If agentLatency is not defined, define it using the defaultLatency.
+         # This matrix defines the communication delay between every pair of
+         # agents.
+         if agentLatency is None:
+             self.agentLatency = [[defaultLatency] * len(agents)] * len(agents)
+         else:
+             self.agentLatency = agentLatency
+
+         # There is a noise model for latency, intended to be a one-sided
+         # distribution with the peak at zero. By default there is no noise
+         # (100% chance to add zero ns extra delay). Format is a list with
+         # list index = ns extra delay, value = probability of this delay.
+         self.latencyNoise = latencyNoise
+
+         # The kernel maintains an accumulating additional delay parameter
+         # for the current agent. This is applied to each message sent
+         # and upon return from wakeup/receiveMessage, in addition to the
+         # agent's standard computation delay. However, it never carries
+         # over to future wakeup/receiveMessage calls. It is useful for
+         # staggering of sent messages.
+         self.currentAgentAdditionalDelay = 0
+
+         log_print ("Kernel started: {}", self.name)
+         log_print ("Simulation started!")
+
+         # Note that num_simulations has not yet been really used or tested
+         # for anything. Instead we have been running multiple simulations
+         # with coarse parallelization from a shell script.
+         for sim in range(num_simulations):
+             log_print ("Starting sim {}", sim)
+
+             # Event notification for kernel init (agents should not try to
+             # communicate with other agents, as order is unknown). Agents
+             # should initialize any internal resources that may be needed
+             # to communicate with other agents during agent.kernelStarting().
+             # Kernel passes self-reference for agents to retain, so they can
+             # communicate with the kernel in the future (as it does not have
+             # an agentID).
+             log_print ("\n--- Agent.kernelInitializing() ---")
+             for agent in self.agents:
+                 agent.kernelInitializing(self)
+
+             # Event notification for kernel start (agents may set up
+             # communications or references to other agents, as all agents
+             # are guaranteed to exist now). Agents should obtain references
+             # to other agents they require for proper operation (exchanges,
+             # brokers, subscription services...). Note that we generally
+             # don't (and shouldn't) permit agents to get direct references
+             # to other agents (like the exchange) as they could then bypass
+             # the Kernel, and therefore simulation "physics" to send messages
+             # directly and instantly or to perform disallowed direct inspection
+             # of the other agent's state. Agents should instead obtain the
+             # agent ID of other agents, and communicate with them only via
+             # the Kernel. Direct references to utility objects that are not
+             # agents are acceptable (e.g. oracles).
+             log_print ("\n--- Agent.kernelStarting() ---")
+             for agent in self.agents:
+                 agent.kernelStarting(self.startTime)
+
+             # Set the kernel to its startTime.
+             self.currentTime = self.startTime
+             log_print ("\n--- Kernel Clock started ---")
+             log_print ("Kernel.currentTime is now {}", self.currentTime)
+
+             # Start processing the Event Queue.
+             log_print ("\n--- Kernel Event Queue begins ---")
+             log_print ("Kernel will start processing messages. Queue length: {}", len(self.messages.queue))
+
+             # Track starting wall clock time and total message count for stats at the end.
+             eventQueueWallClockStart = pd.Timestamp('now')
+             ttl_messages = 0
+
+             # Process messages until there aren't any (at which point there never can
+             # be again, because agents only "wake" in response to messages), or until
+             # the kernel stop time is reached.
+             while not self.messages.empty() and self.currentTime and (self.currentTime <= self.stopTime):
+                 # Get the next message in timestamp order (delivery time) and extract it.
+                 self.currentTime, event = self.messages.get()
+                 msg_recipient, msg_type, msg = event
+
+                 # Periodically print the simulation time and total messages, even if muted.
+                 if ttl_messages % 100000 == 0:
+                     print("msg_recipient: ", msg_recipient, "msg_type: ", msg_type, "msg: ", msg)
+                     print ("\n--- Simulation time: {}, messages processed: {}, wallclock elapsed: {} ---\n".format(
+                         self.fmtTime(self.currentTime), ttl_messages, pd.Timestamp('now') - eventQueueWallClockStart))
+
+                 #log_print ("\n--- Kernel Event Queue pop ---")
+                 #log_print ("Kernel handling {} message for agent {} at time {}", msg_type, msg_recipient, self.fmtTime(self.currentTime))
+
+                 ttl_messages += 1
+
+                 # In between messages, always reset the currentAgentAdditionalDelay.
+                 self.currentAgentAdditionalDelay = 0
+
+                 # Dispatch message to agent.
+                 if msg_type == MessageType.WAKEUP:
+
+                     # Who requested this wakeup call?
+                     agent = msg_recipient
+
+                     # Test to see if the agent is already in the future. If so,
+                     # delay the wakeup until the agent can act again.
+                     if self.agentCurrentTimes[agent] > self.currentTime:
+                         # Push the wakeup call back into the PQ with a new time.
+                         self.messages.put((self.agentCurrentTimes[agent],
+                                            (msg_recipient, msg_type, msg)))
+                         #log_print ("Agent in future: wakeup requeued for {}", self.fmtTime(self.agentCurrentTimes[agent]))
+                         continue
+
+                     # Set agent's current time to global current time for start
+                     # of processing.
+                     self.agentCurrentTimes[agent] = self.currentTime
+
+                     # Wake the agent.
+                     agents[agent].wakeup(self.currentTime)
+
+                     # Delay the agent by its computation delay plus any transient additional delay requested.
+                     self.agentCurrentTimes[agent] += pd.Timedelta(self.agentComputationDelays[agent] +
+                                                                   self.currentAgentAdditionalDelay)
+
+                     #log_print ("After wakeup return, agent {} delayed from {} to {}", agent, self.fmtTime(self.currentTime), self.fmtTime(self.agentCurrentTimes[agent]))
+
+                 elif msg_type == MessageType.MESSAGE:
+
+                     # Who is receiving this message?
+                     agent = msg_recipient
+
+                     # Test to see if the agent is already in the future. If so,
+                     # delay the message until the agent can act again.
+                     if self.agentCurrentTimes[agent] > self.currentTime:
+                         # Push the message back into the PQ with a new time.
+                         self.messages.put((self.agentCurrentTimes[agent],
+                                            (msg_recipient, msg_type, msg)))
+                         #log_print ("Agent in future: message requeued for {}", self.fmtTime(self.agentCurrentTimes[agent]))
+                         continue
+
+                     # Set agent's current time to global current time for start
+                     # of processing.
+                     self.agentCurrentTimes[agent] = self.currentTime
+
+                     # Deliver the message.
+                     agents[agent].receiveMessage(self.currentTime, msg)
+
+                     # Delay the agent by its computation delay plus any transient additional delay requested.
+                     self.agentCurrentTimes[agent] += pd.Timedelta(self.agentComputationDelays[agent] +
+                                                                   self.currentAgentAdditionalDelay)
+
+                     #log_print ("After receiveMessage return, agent {} delayed from {} to {}",
+                     #           agent, self.fmtTime(self.currentTime), self.fmtTime(self.agentCurrentTimes[agent]))
+
+                 else:
+                     raise ValueError("Unknown message type found in queue",
+                                      "currentTime:", self.currentTime,
+                                      "messageType:", msg_type)
+
+             if self.messages.empty():
+                 log_print ("\n--- Kernel Event Queue empty ---")
+
+             if self.currentTime and (self.currentTime > self.stopTime):
+                 log_print ("\n--- Kernel Stop Time surpassed ---")
+
+             # Record wall clock stop time and elapsed time for stats at the end.
+             eventQueueWallClockStop = pd.Timestamp('now')
+
+             eventQueueWallClockElapsed = eventQueueWallClockStop - eventQueueWallClockStart
+
+             # Event notification for kernel end (agents may communicate with
+             # other agents, as all agents are still guaranteed to exist).
+             # Agents should not destroy resources they may need to respond
+             # to final communications from other agents.
+             log_print ("\n--- Agent.kernelStopping() ---")
+             for agent in agents:
+                 agent.kernelStopping()
+
+             # Event notification for kernel termination (agents should not
+             # attempt communication with other agents, as order of termination
+             # is unknown). Agents should clean up all used resources as the
+             # simulation program may not actually terminate if num_simulations > 1.
+             log_print ("\n--- Agent.kernelTerminating() ---")
+             for agent in agents:
+                 agent.kernelTerminating()
+
+             print ("Event Queue elapsed: {}, messages: {}, messages per second: {:0.1f}".format(
+                 eventQueueWallClockElapsed, ttl_messages,
+                 ttl_messages / (eventQueueWallClockElapsed / (np.timedelta64(1, 's')))))
+             log_print ("Ending sim {}", sim)
+
+
+         # The Kernel adds a handful of custom state results for all simulations,
+         # which configurations may use, print, log, or discard.
+         self.custom_state['kernel_event_queue_elapsed_wallclock'] = eventQueueWallClockElapsed
+         self.custom_state['kernel_slowest_agent_finish_time'] = max(self.agentCurrentTimes)
+
+         # Agents will request the Kernel to serialize their agent logs, usually
+         # during kernelTerminating, but the Kernel must write out the summary
+         # log itself.
+         self.writeSummaryLog()
+
+         # This should perhaps be elsewhere, as it is explicitly financial, but it
+         # is convenient to have a quick summary of the results for now.
+         print ("Mean ending value by agent type:")
+         for a in self.meanResultByAgentType:
+             value = self.meanResultByAgentType[a]
+             count = self.agentCountByType[a]
+             print ("{}: {:d}".format(a, int(round(value / count))))
+
+         print ("Simulation ending!")
+         stock_name = self.log_dir.split("_")[2]
+         date = self.log_dir.split("_")[3]
+         time = self.log_dir.split("_")[4]
+         path = os.path.join(os.getcwd(), "ABIDES", "log", self.log_dir)
+         liquidity_telemetry.main(
+             exchange_path=path+"/EXCHANGE_AGENT.bz2",
+             ob_path=path+f"/ORDERBOOK_{stock_name}_FULL.bz2",
+             outfile=path+"/world_agent_sim.png",
+             plot_config="ABIDES/util/plotting/configs/plot_09.30_12.00.json"
+         )
+         real_data_path = os.path.join(os.getcwd(), "ABIDES", "log", "paper", f"market_replay_{stock_name}_{date}_{time}", "processed_orders.csv")
+         trades_data_path = os.path.join(os.getcwd(), "ABIDES", "log", self.log_dir, "processed_orders.csv")
+         plot_graphs(real_data_path, trades_data_path, trades_data_path, trades_data_path)
+         predictive_lstm.main(real_data_path, trades_data_path)
+         return self.custom_state
+
+
+     def sendMessage(self, sender = None, recipient = None, msg = None, delay = 0):
+         # Called by an agent to send a message to another agent. The kernel
+         # supplies its own currentTime (i.e. "now") to prevent possible
+         # abuse by agents. The kernel will handle computational delay penalties
+         # and/or network latency. The message must derive from the message.Message class.
+         # The optional delay parameter represents an agent's request for ADDITIONAL
+         # delay (beyond the Kernel's mandatory computation + latency delays) to represent
+         # parallel pipeline processing delays (that should delay the transmission of messages
+         # but do not make the agent "busy" and unable to respond to new messages).
+
+         if sender is None:
+             raise ValueError("sendMessage() called without valid sender ID",
+                              "sender:", sender, "recipient:", recipient,
+                              "msg:", msg)
+
+         if recipient is None:
+             raise ValueError("sendMessage() called without valid recipient ID",
+                              "sender:", sender, "recipient:", recipient,
+                              "msg:", msg)
+
+         if msg is None:
+             raise ValueError("sendMessage() called with message == None",
+                              "sender:", sender, "recipient:", recipient,
+                              "msg:", msg)
+
+         # Apply the agent's current computation delay to effectively "send" the message
+         # at the END of the agent's current computation period when it is done "thinking".
+         # NOTE: sending multiple messages on a single wake will transmit all at the same
+         # time, at the end of computation. To avoid this, use Agent.delay() to accumulate
+         # a temporary delay (current cycle only) that will also stagger messages.
+
+         # The optional pipeline delay parameter DOES push the send time forward, since it
+         # represents "thinking" time before the message would be sent. We don't use this
+         # for much yet, but it could be important later.
+
+         # This means message delay (before latency) is the agent's standard computation delay
+         # PLUS any accumulated delay for this wake cycle PLUS any one-time requested delay
+         # for this specific message only.
+         sentTime = self.currentTime + pd.Timedelta(self.agentComputationDelays[sender] +
+                                                    self.currentAgentAdditionalDelay + delay)
+
+         # Apply communication delay per the agentLatencyModel, if defined, or the
+         # agentLatency matrix [sender][recipient] otherwise.
+         if self.agentLatencyModel is not None:
+             latency = self.agentLatencyModel.get_latency(sender_id = sender, recipient_id = recipient)
+             deliverAt = sentTime + pd.Timedelta(latency)
+             log_print ("Kernel applied latency {}, accumulated delay {}, one-time delay {} on sendMessage from: {} to {}, scheduled for {}",
+                        latency, self.currentAgentAdditionalDelay, delay, self.agents[sender].name, self.agents[recipient].name,
+                        self.fmtTime(deliverAt))
+         else:
+             latency = self.agentLatency[sender][recipient]
+             noise = self.random_state.choice(len(self.latencyNoise), 1, p=self.latencyNoise)[0]
+             deliverAt = sentTime + pd.Timedelta(latency + noise)
+             log_print ("Kernel applied latency {}, noise {}, accumulated delay {}, one-time delay {} on sendMessage from: {} to {}, scheduled for {}",
+                        latency, noise, self.currentAgentAdditionalDelay, delay, self.agents[sender].name, self.agents[recipient].name,
+                        self.fmtTime(deliverAt))
+
+         # Finally drop the message in the queue with priority == delivery time.
+         self.messages.put((deliverAt, (recipient, MessageType.MESSAGE, msg)))
+
+         log_print ("Sent time: {}, current time {}, computation delay {}", sentTime, self.currentTime, self.agentComputationDelays[sender])
+         log_print ("Message queued: {}", msg)
+
+
+
+     def setWakeup(self, sender = None, requestedTime = None):
+         # Called by an agent to receive a "wakeup call" from the kernel
+         # at some requested future time. Defaults to the next possible
+         # timestamp. Wakeup time cannot be the current time or a past time.
+         # Sender is required and should be the ID of the agent making the call.
+         # The agent is responsible for maintaining any required state; the
+         # kernel will not supply any parameters to the wakeup() call.
+
+         if requestedTime is None:
+             requestedTime = self.currentTime + pd.Timedelta(1)
+
+         if sender is None:
+             raise ValueError("setWakeup() called without valid sender ID",
+                              "sender:", sender, "requestedTime:", requestedTime)
+
+         if self.currentTime and (requestedTime < self.currentTime):
+             raise ValueError("setWakeup() called with requested time not in future",
+                              "currentTime:", self.currentTime,
+                              "requestedTime:", requestedTime)
+
+         #log_print ("Kernel adding wakeup for agent {} at time {}",
+         #           sender, self.fmtTime(requestedTime))
+
+         self.messages.put((requestedTime,
+                            (sender, MessageType.WAKEUP, None)))
+
+
+     def getAgentComputeDelay(self, sender = None):
+         # Allows an agent to query its current computation delay.
+         return self.agentComputationDelays[sender]
+
+
+     def setAgentComputeDelay(self, sender = None, requestedDelay = None):
+         # Called by an agent to update its computation delay. This does
+         # not initiate a global delay, nor an immediate delay for the
+         # agent. Rather it sets the new default delay for the calling
+         # agent. The delay will be applied upon every return from wakeup
+         # or recvMsg. Note that this delay IS applied to any messages
+         # sent by the agent during the current wake cycle (simulating the
+         # messages popping out at the end of its "thinking" time).
+
+         # Also note that we DO permit a computation delay of zero, but this should
+         # really only be used for special or massively parallel agents.
+
+         # requestedDelay should be in whole nanoseconds.
+         if not type(requestedDelay) is int:
+             raise ValueError("Requested computation delay must be whole nanoseconds.",
+                              "requestedDelay:", requestedDelay)
+
+         # requestedDelay must be non-negative.
+         if not requestedDelay >= 0:
+             raise ValueError("Requested computation delay must be non-negative nanoseconds.",
+                              "requestedDelay:", requestedDelay)
+
+         self.agentComputationDelays[sender] = requestedDelay
+
+
+
+     def delayAgent(self, sender = None, additionalDelay = None):
+         # Called by an agent to accumulate temporary delay for the current wake cycle.
+         # This will apply the total delay (at time of sendMessage) to each message,
+         # and will modify the agent's next available time slot. These happen on top
+         # of the agent's compute delay BUT DO NOT ALTER IT. (i.e. effects are transient)
+         # Mostly useful for staggering outbound messages.
+
+         # additionalDelay should be in whole nanoseconds.
+         if not type(additionalDelay) is int:
+             raise ValueError("Additional delay must be whole nanoseconds.",
+                              "additionalDelay:", additionalDelay)
+
+         # additionalDelay must be non-negative.
+         if not additionalDelay >= 0:
+             raise ValueError("Additional delay must be non-negative nanoseconds.",
+                              "additionalDelay:", additionalDelay)
+
+         self.currentAgentAdditionalDelay += additionalDelay
+
+
+
+     def findAgentByType(self, type):
+         # Called to request an arbitrary agent ID that matches the class or base class
+         # passed as "type". For example, any ExchangeAgent, or any NasdaqExchangeAgent.
+         # This method is rather expensive, so the results should be cached by the caller!
+
+         for agent in self.agents:
+             if isinstance(agent, type):
+                 return agent.id
+
+
+     def writeLog (self, sender, dfLog, filename=None):
+         # Called by any agent, usually at the very end of the simulation just before
+         # kernel shutdown, to write to disk any log dataframe it has been accumulating
+         # during simulation. The format can be decided by the agent, although changes
+         # will require a special tool to read and parse the logs. The Kernel places
+         # the log in a unique directory per run, with one filename per agent, also
+         # decided by the Kernel using agent type, id, etc.
+
+         # If there are too many agents, placing all these files in a directory might
+         # be unfortunate. Also if there are too many agents, or if the logs are too
+         # large, memory could become an issue. In this case, we might have to take
+         # a speed hit to write logs incrementally.
+
+         # If filename is not None, it will be used as the filename. Otherwise,
+         # the Kernel will construct a filename based on the name of the Agent
+         # requesting log archival.
+
+         if self.skip_log: return
+
+         path = os.path.join(os.getcwd(), "ABIDES", "log", self.log_dir)
+
+         if filename:
+             file = "{}.bz2".format(filename)
+         else:
+             file = "{}.bz2".format(self.agents[sender].name.replace(" ",""))
+
+         if not os.path.exists(path):
+             os.makedirs(path)
+
+         dfLog.to_pickle(os.path.join(path, file), compression='bz2')
+
+
+     def appendSummaryLog (self, sender, eventType, event):
+         # We don't even include a timestamp, because this log is for one-time-only
+         # summary reporting, like starting cash, or ending cash.
+         self.summaryLog.append({ 'AgentID' : sender,
+                                  'AgentStrategy' : self.agents[sender].type,
+                                  'EventType' : eventType, 'Event' : event })
+
+
+     def writeSummaryLog (self):
+         path = os.path.join("ABIDES", "log", self.log_dir)
+         file = "summary_log.bz2"
+
+         if not os.path.exists(path):
+             os.makedirs(path)
+
+         dfLog = pd.DataFrame(self.summaryLog)
+
+         dfLog.to_pickle(os.path.join(path, file), compression='bz2')
+
+
+     def updateAgentState (self, agent_id, state):
+         """ Called by an agent that wishes to replace its custom state in the dictionary
+             the Kernel will return at the end of simulation. Shared state must be set directly,
+             and agents should coordinate that non-destructively.
+
+             Note that it is never necessary to use this kernel state dictionary for an agent
+             to remember information about itself, only to report it back to the config file.
+         """
+
+         if 'agent_state' not in self.custom_state: self.custom_state['agent_state'] = {}
+         self.custom_state['agent_state'][agent_id] = state
+
+
+     @staticmethod
+     def fmtTime(simulationTime):
+         # The Kernel class knows how to pretty-print time. It is assumed simulationTime
+         # is in nanoseconds since midnight. Note this is a static method which can be
+         # called either on the class or an instance.
+
+         # Try just returning the pd.Timestamp now.
+         return (simulationTime)
+
+         ns = simulationTime
+         hr = int(ns / (1000000000 * 60 * 60))
+         ns -= (hr * 1000000000 * 60 * 60)
+         m = int(ns / (1000000000 * 60))
+         ns -= (m * 1000000000 * 60)
+         s = int(ns / 1000000000)
+         ns = int(ns - (s * 1000000000))
+
+         return "{:02d}:{:02d}:{:02d}.{:09d}".format(hr, m, s, ns)
+
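Note on the delay model: the bookkeeping in runner() and sendMessage() above is the part most easily misread, so here is a small worked example of the arithmetic those methods perform. The variable names mirror the code; the nanosecond values are made up for illustration and do not come from the repository.

import pandas as pd

# Illustrative numbers only: the sender's computation delay is 50 ns, it has
# accumulated 20 ns via delayAgent() this wake cycle, it asked for a one-time
# delay of 10 ns on this message, and the latency matrix entry is 300 ns (no noise).
currentTime = pd.Timestamp('2015-01-30 09:30:00')
computation_delay, accumulated_delay, one_time_delay, latency, noise = 50, 20, 10, 300, 0

# sendMessage(): the message leaves at the end of the sender's "thinking" time...
sentTime = currentTime + pd.Timedelta(computation_delay + accumulated_delay + one_time_delay)
# ...and is delivered after the network latency (plus any sampled noise).
deliverAt = sentTime + pd.Timedelta(latency + noise)
print(deliverAt - currentTime)   # 380 nanoseconds after the sender's currentTime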
ABIDES/LICENSE.txt ADDED
@@ -0,0 +1,89 @@
+ The authors of ABIDES are David Byrd and Dr. Tucker Balch of the Georgia Institute of Technology.
+
+
+ BSD 3-Clause License
+
+ Copyright (c) 2019 Georgia Tech Research Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+ Funding Acknowledgement
+
+ This material is based upon work supported by the National Science Foundation under Award No. 1741026. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.
+
+
+
+ This software uses the pandas library for Python:
+
+
+ BSD 3-Clause License
+
+ Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+ This software uses the NumPy library for Python:
+
+
+ Copyright © 2005-2018, NumPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ Neither the name of the NumPy Developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
ABIDES/README.md ADDED
@@ -0,0 +1,4 @@
+ # ABIDES: Agent-Based Interactive Discrete Event Simulation environment
+
+ > ABIDES is an Agent-Based Interactive Discrete Event Simulation environment. It was developed by David Byrd, Mahmoud Mahfouz, Danial Dervovic, Maria Hybinette and Tucker Hybinette Balch.
+ > We only extended it to perform market simulations with our world agent, represented by the diffusion model. Our contributions are the files WorldAgent.py in the agent directory and world_agent_sim.py in the config directory.
ABIDES/abides.py ADDED
@@ -0,0 +1,32 @@
+ import argparse
+ import importlib
+ import os
+ import sys
+
+ if __name__ == '__main__':
+
+     # Print system banner.
+     system_name = "ABIDES: Agent-Based Interactive Discrete Event Simulation"
+     sys.path.insert(0, os.getcwd())
+     print ("=" * len(system_name))
+     print (system_name)
+     print ("=" * len(system_name))
+     print ()
+
+     # Test command line parameters. Only peel off the config file.
+     # Anything else should be left FOR the config file to consume as agent
+     # or experiment parameterization.
+     parser = argparse.ArgumentParser(description='Simulation configuration.')
+     parser.add_argument('-c', '--config', required=True,
+                         help='Name of config file to execute')
+     parser.add_argument('--config-help', action='store_true',
+                         help='Print argument options for the specific config file.')
+
+     args, config_args = parser.parse_known_args()
+
+     # First parameter supplied is config file.
+     config_file = args.config
+
+     config = importlib.import_module('config.{}'.format(config_file),
+                                      package=None)
+
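abides.py deliberately parses only -c/--config and hands every remaining flag to the imported config module, which is why the launch.json configurations above pass options such as -t, -date, -s/-seed, -st, -et, -m and -id that do not appear in this parser. A small, self-contained illustration of how parse_known_args() splits the command line, using a subset of the INTC arguments from launch.json (the argument values here are just examples):

import argparse

# Mirror the -c/--config argument defined in abides.py; everything else is "unknown"
# and is returned in config_args for config/world_agent_sim.py to parse itself.
parser = argparse.ArgumentParser(description='Simulation configuration.')
parser.add_argument('-c', '--config', required=True)
args, config_args = parser.parse_known_args(
    ['-c', 'world_agent_sim', '-t', 'INTC', '-date', '20150130', '-s', '1234'])
print(args.config)     # 'world_agent_sim'
print(config_args)     # ['-t', 'INTC', '-date', '20150130', '-s', '1234']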
ABIDES/agent/Agent.py ADDED
@@ -0,0 +1,185 @@
+ import pandas as pd
+
+ from copy import deepcopy
+ from util.util import log_print
+
+ class Agent:
+
+     def __init__ (self, id, name, type, random_state, log_to_file=True):
+
+         # ID must be a unique number (usually autoincremented).
+         # Name is for human consumption, should be unique (often type + number).
+         # Type is for machine aggregation of results, should be same for all
+         # agents following the same strategy (incl. parameter settings).
+         # Every agent is given a random state to use for any stochastic needs.
+         # This is an np.random.RandomState object, already seeded.
+         self.id = id
+         self.name = name
+         self.type = type
+         self.log_to_file = log_to_file
+         self.random_state = random_state
+
+         if not random_state:
+             raise ValueError("A valid, seeded np.random.RandomState object is required " +
+                              "for every agent.Agent", self.name)
+             sys.exit()
+
+         # Kernel is supplied via kernelInitializing method of kernel lifecycle.
+         self.kernel = None
+
+         # What time does the agent think it is? Should be updated each time
+         # the agent wakes via wakeup or receiveMessage. (For convenience
+         # of reference throughout the Agent class hierarchy, NOT THE
+         # CANONICAL TIME.)
+         self.currentTime = None
+
+         # Agents may choose to maintain a log. During simulation,
+         # it should be stored as a list of dictionaries. The expected
+         # keys by default are: EventTime, EventType, Event. Other
+         # Columns may be added, but will then require specializing
+         # parsing and will increase output dataframe size. If there
+         # is a non-empty log, it will be written to disk as a Dataframe
+         # at kernel termination.
+
+         # It might, or might not, make sense to formalize these log Events
+         # as a class, with enumerated EventTypes and so forth.
+         self.log = []
+         self.logEvent("AGENT_TYPE", type)
+
+
+     ### Flow of required kernel listening methods:
+     ### init -> start -> (entire simulation) -> end -> terminate
+
+     def kernelInitializing (self, kernel):
+         # Called by kernel one time when simulation first begins.
+         # No other agents are guaranteed to exist at this time.
+
+         # Kernel reference must be retained, as this is the only time the
+         # agent can "see" it.
+
+         self.kernel = kernel
+
+         log_print ("{} exists!", self.name)
+
+
+     def kernelStarting (self, startTime):
+         # Called by kernel one time _after_ simulationInitializing.
+         # All other agents are guaranteed to exist at this time.
+         # startTime is the earliest time for which the agent can
+         # schedule a wakeup call (or could receive a message).
+
+         # Base Agent schedules a wakeup call for the first available timestamp.
+         # Subclass agents may override this behavior as needed.
+
+         log_print ("Agent {} ({}) requesting kernel wakeup at time {}",
+                    self.id, self.name, self.kernel.fmtTime(startTime))
+
+         self.setWakeup(startTime)
+
+
+     def kernelStopping (self):
+         # Called by kernel one time _before_ simulationTerminating.
+         # All other agents are guaranteed to exist at this time.
+
+         pass
+
+
+     def kernelTerminating (self):
+         # Called by kernel one time when simulation terminates.
+         # No other agents are guaranteed to exist at this time.
+
+         # If this agent has been maintaining a log, convert it to a Dataframe
+         # and request that the Kernel write it to disk before terminating.
+         if self.log and self.log_to_file:
+             dfLog = pd.DataFrame(self.log)
+             dfLog.set_index('EventTime', inplace=True)
+             self.writeLog(dfLog)
+
+
+     ### Methods for internal use by agents (e.g. bookkeeping).
+
+     def logEvent (self, eventType, event = '', appendSummaryLog = False):
+         # Adds an event to this agent's log. The deepcopy of the Event field,
+         # often an object, ensures later state changes to the object will not
+         # retroactively update the logged event.
+
+         # We can make a single copy of the object (in case it is an arbitrary
+         # class instance) for both potential log targets, because we don't
+         # alter logs once recorded.
+         e = deepcopy(event)
+         self.log.append({ 'EventTime' : self.currentTime, 'EventType' : eventType,
+                           'Event' : e })
+
+         if appendSummaryLog: self.kernel.appendSummaryLog(self.id, eventType, e)
+
+
+     ### Methods required for communication from other agents.
+     ### The kernel will _not_ call these methods on its own behalf,
+     ### only to pass traffic from other agents..
+
+     def receiveMessage (self, currentTime, msg):
+         # Called each time a message destined for this agent reaches
+         # the front of the kernel's priority queue. currentTime is
+         # the simulation time at which the kernel is delivering this
+         # message -- the agent should treat this as "now". msg is
+         # an object guaranteed to inherit from the message.Message class.
+
+         self.currentTime = currentTime
+
+         log_print ("At {}, agent {} ({}) received: {}",
+                    self.kernel.fmtTime(currentTime), self.id, self.name, msg)
+
+
+     def wakeup (self, currentTime):
+         # Agents can request a wakeup call at a future simulation time using
+         # Agent.setWakeup(). This is the method called when the wakeup time
+         # arrives.
+
+         self.currentTime = currentTime
+
+         log_print ("At {}, agent {} ({}) received wakeup.",
+                    self.kernel.fmtTime(currentTime), self.id, self.name)
+
+
+     ### Methods used to request services from the Kernel. These should be used
+     ### by all agents. Kernel methods should _not_ be called directly!
+
+     ### Presently the kernel expects agent IDs only, not agent references.
+     ### It is possible this could change in the future. Normal agents will
+     ### not typically wish to request additional delay.
+     def sendMessage (self, recipientID, msg, delay = 0):
+         self.kernel.sendMessage(self.id, recipientID, msg, delay=delay)
+
+     def setWakeup (self, requestedTime):
+         self.kernel.setWakeup(self.id, requestedTime)
+
+     def getComputationDelay (self):
+         return self.kernel.getAgentComputeDelay(sender = self.id)
+
+     def setComputationDelay (self, requestedDelay):
+         self.kernel.setAgentComputeDelay(sender = self.id, requestedDelay = requestedDelay)
+
+     def delay (self, additionalDelay):
+         self.kernel.delayAgent(sender = self.id, additionalDelay = additionalDelay)
+
+     def writeLog (self, dfLog, filename=None):
+         self.kernel.writeLog(self.id, dfLog, filename)
+
+     def updateAgentState (self, state):
+         """ Agents should use this method to replace their custom state in the dictionary
+             the Kernel will return to the experimental config file at the end of the
+             simulation. This is intended to be write-only, and agents should not use
+             it to store information for their own later use.
+         """
+
+         self.kernel.updateAgentState(self.id, state)
+
+
+     ### Internal methods that should not be modified without a very good reason.
+
+     def __lt__(self, other):
+         # Required by Python3 for this object to be placed in a priority queue.
+
+         return ("{}".format(self.id) <
+                 "{}".format(other.id))
+
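Agent is the abstract base class for everything the Kernel schedules: concrete agents override wakeup() and/or receiveMessage() and interact with the world only through the kernel-service wrappers defined above (setWakeup, sendMessage, logEvent, ...). A minimal toy subclass, written here only to illustrate the lifecycle and not taken from the repository:

import pandas as pd

from agent.Agent import Agent   # import path as used in this upload


class HeartbeatAgent(Agent):
    """Toy agent that wakes once per simulated minute and logs the event."""

    def wakeup(self, currentTime):
        super().wakeup(currentTime)                       # updates self.currentTime and logs the wakeup
        self.logEvent("HEARTBEAT", str(currentTime))
        # Request the next wakeup one simulated minute from now.
        self.setWakeup(currentTime + pd.Timedelta(minutes=1))

The base class already requests the first wakeup at startTime in kernelStarting(), so the subclass only has to keep rescheduling itself.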
ABIDES/agent/ExchangeAgent.py ADDED
@@ -0,0 +1,415 @@
+ # The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp to open and close trading,
+ # a list of equity symbols for which it should create order books, a frequency at which to archive snapshots
+ # of its order books, a pipeline delay (in ns) for order activity, the exchange computation delay (in ns),
+ # the levels of order stream history to maintain per symbol (maintains all orders that led to the last N trades),
+ # whether to log all order activity to the agent log, and a random state object (already seeded) to use
+ # for stochasticity.
+ from agent.FinancialAgent import FinancialAgent
+ from message.Message import Message
+ from util.OrderBook import OrderBook
+ from util.util import log_print
+
+ import datetime as dt
+
+ import warnings
+ warnings.simplefilter(action='ignore', category=FutureWarning)
+ warnings.simplefilter(action='ignore', category=UserWarning)
+
+ import pandas as pd
+ pd.set_option('display.max_rows', 500)
+
+ from copy import deepcopy
+
+
+ class ExchangeAgent(FinancialAgent):
+
+     def __init__(self, id, name, type, mkt_open, mkt_close, symbols, book_freq='S', wide_book=False, pipeline_delay = 40000,
+                  computation_delay = 1, stream_history = 0, log_orders = False, random_state = None):
+
+         super().__init__(id, name, type, random_state)
+
+         # Do not request repeated wakeup calls.
+         self.reschedule = False
+
+         # Store this exchange's open and close times.
+         self.mkt_open = mkt_open
+         self.mkt_close = mkt_close
+
+         # Right now, only the exchange agent has a parallel processing pipeline delay. This is an additional
+         # delay added only to order activity (placing orders, etc) and not simple inquiries (market operating
+         # hours, etc).
+         self.pipeline_delay = pipeline_delay
+
+         # Computation delay is applied on every wakeup call or message received.
+         self.computation_delay = computation_delay
+
+         # The exchange maintains an order stream of all orders leading to the last L trades
+         # to support certain agents from the auction literature (GD, HBL, etc).
+         self.stream_history = stream_history
+
+         # Log all order activity?
+         self.log_orders = log_orders
+
+         # Create an order book for each symbol.
+         self.order_books = {}
+
+         for symbol in symbols:
+             self.order_books[symbol] = OrderBook(self, symbol)
+
+         # At what frequency will we archive the order books for visualization and analysis?
+         self.book_freq = book_freq
+
+         # Store orderbook in wide format? ONLY WORKS with book_freq == 0
+         self.wide_book = wide_book
+
+         # The subscription dict is a dictionary with the key = agent ID,
+         # value = dict (key = symbol, value = list [levels (no of levels to recieve updates for),
+         # frequency (min number of ns between messages), last agent update timestamp]
+         # e.g. {101 : {'AAPL' : [1, 10, pd.Timestamp(10:00:00)}}
+         self.subscription_dict = {}
+
+     # The exchange agent overrides this to obtain a reference to an oracle.
+     # This is needed to establish a "last trade price" at open (i.e. an opening
+     # price) in case agents query last trade before any simulated trades are made.
+     # This can probably go away once we code the opening cross auction.
+     def kernelInitializing (self, kernel):
+         super().kernelInitializing(kernel)
+
+         self.oracle = self.kernel.oracle
+
+         # Obtain opening prices (in integer cents). These are not noisy right now.
+         for symbol in self.order_books:
+             try:
+                 self.order_books[symbol].last_trade = self.oracle.getDailyOpenPrice(symbol, self.mkt_open)
+                 #log_print ("Opening price for {} is {}", symbol, self.order_books[symbol].last_trade)
+             except AttributeError as e:
+                 log_print(str(e))
+
+
+     # The exchange agent overrides this to additionally log the full depth of its
+     # order books for the entire day.
+     def kernelTerminating (self):
+         super().kernelTerminating()
+
+         # If the oracle supports writing the fundamental value series for its
+         # symbols, write them to disk.
+         if hasattr(self.oracle, 'f_log'):
+             for symbol in self.oracle.f_log:
+                 dfFund = pd.DataFrame(self.oracle.f_log[symbol])
+                 if not dfFund.empty:
+                     dfFund.set_index('FundamentalTime', inplace=True)
+                     self.writeLog(dfFund, filename='fundamental_{}'.format(symbol))
+         #log_print("Fundamental archival complete.")
+         if self.book_freq is None: return
+         else:
+             # Iterate over the order books controlled by this exchange.
+             for symbol in self.order_books:
+                 start_time = dt.datetime.now()
+                 self.logOrderBookSnapshots(symbol)
+                 end_time = dt.datetime.now()
+                 print("Time taken to log the order book: {}".format(end_time - start_time))
+             print("Order book archival complete.")
+
+     def receiveMessage(self, currentTime, msg):
+         super().receiveMessage(currentTime, msg)
+
+         # Unless the intent of an experiment is to examine computational issues within an Exchange,
+         # it will typically have either 1 ns delay (near instant but cannot process multiple orders
+         # in the same atomic time unit) or 0 ns delay (can process any number of orders, always in
+         # the atomic time unit in which they are received). This is separate from, and additional
+         # to, any parallel pipeline delay imposed for order book activity.
+
+         # Note that computation delay MUST be updated before any calls to sendMessage.
+         self.setComputationDelay(self.computation_delay)
+
+         # Is the exchange closed? (This block only affects post-close, not pre-open.)
+         if currentTime > self.mkt_close:
+             # Most messages after close will receive a 'MKT_CLOSED' message in response. A few things
+             # might still be processed, like requests for final trade prices or such.
+             if msg.body['msg'] in ['LIMIT_ORDER', 'MARKET_ORDER', 'CANCEL_ORDER', 'MODIFY_ORDER']:
+                 #log_print("{} received {}: {}", self.name, msg.body['msg'], msg.body['order'])
+                 self.sendMessage(msg.body['sender'], Message({"msg": "MKT_CLOSED"}))
+
+                 # Don't do any further processing on these messages!
+                 return
+             elif 'QUERY' in msg.body['msg']:
+                 # Specifically do allow querying after market close, so agents can get the
+                 # final trade of the day as their "daily close" price for a symbol.
+                 pass
+             else:
+                 #log_print("{} received {}, discarded: market is closed.", self.name, msg.body['msg'])
+                 self.sendMessage(msg.body['sender'], Message({"msg": "MKT_CLOSED"}))
+
+                 # Don't do any further processing on these messages!
+                 return
+
+         # Log order messages only if that option is configured. Log all other messages.
+         if msg.body['msg'] in ['LIMIT_ORDER', 'MARKET_ORDER', 'CANCEL_ORDER', 'MODIFY_ORDER']:
+             if self.log_orders: self.logEvent(msg.body['msg'], msg.body['order'].to_dict())
+         else:
+             self.logEvent(msg.body['msg'], msg.body['sender'])
+
+         # Handle the DATA SUBSCRIPTION request and cancellation messages from the agents.
+         if msg.body['msg'] in ["MARKET_DATA_SUBSCRIPTION_REQUEST", "MARKET_DATA_SUBSCRIPTION_CANCELLATION"]:
+             #log_print("{} received {} request from agent {}", self.name, msg.body['msg'], msg.body['sender'])
+             self.updateSubscriptionDict(msg, currentTime)
+
+         # Handle all message types understood by this exchange.
+         if msg.body['msg'] == "WHEN_MKT_OPEN":
+             #log_print("{} received WHEN_MKT_OPEN request from agent {}", self.name, msg.body['sender'])
+
+             # The exchange is permitted to respond to requests for simple immutable data (like "what are your
+             # hours?") instantly. This does NOT include anything that queries mutable data, like equity
+             # quotes or trades.
+             self.setComputationDelay(0)
+
+             self.sendMessage(msg.body['sender'], Message({"msg": "WHEN_MKT_OPEN", "data": self.mkt_open}))
+         elif msg.body['msg'] == "WHEN_MKT_CLOSE":
+             #log_print("{} received WHEN_MKT_CLOSE request from agent {}", self.name, msg.body['sender'])
+
+             # The exchange is permitted to respond to requests for simple immutable data (like "what are your
+             # hours?") instantly. This does NOT include anything that queries mutable data, like equity
+             # quotes or trades.
+             self.setComputationDelay(0)
+
+             self.sendMessage(msg.body['sender'], Message({"msg": "WHEN_MKT_CLOSE", "data": self.mkt_close}))
+         elif msg.body['msg'] == "QUERY_LAST_TRADE":
+             symbol = msg.body['symbol']
+             if symbol not in self.order_books:
+                 log_print("Last trade request discarded. Unknown symbol: {}", symbol)
+             else:
+                 #log_print("{} received QUERY_LAST_TRADE ({}) request from agent {}", self.name, symbol, msg.body['sender'])
+
+                 # Return the single last executed trade price (currently not volume) for the requested symbol.
+                 # This will return the average share price if multiple executions resulted from a single order.
+                 self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_LAST_TRADE", "symbol": symbol,
+                                                               "data": self.order_books[symbol].last_trade,
+                                                               "mkt_closed": True if currentTime > self.mkt_close else False}))
+         elif msg.body['msg'] == "QUERY_SPREAD":
+             symbol = msg.body['symbol']
+             depth = msg.body['depth']
+             if symbol not in self.order_books:
+                 log_print("Bid-ask spread request discarded. Unknown symbol: {}", symbol)
+             else:
+                 #log_print("{} received QUERY_SPREAD ({}:{}) request from agent {}", self.name, symbol, depth, msg.body['sender'])
+
+                 # Return the requested depth on both sides of the order book for the requested symbol.
+                 # Returns price levels and aggregated volume at each level (not individual orders).
+                 self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_SPREAD", "symbol": symbol, "depth": depth,
+                                                               "bids": self.order_books[symbol].getInsideBids(depth),
+                                                               "asks": self.order_books[symbol].getInsideAsks(depth),
+                                                               "data": self.order_books[symbol].last_trade,
+                                                               "mkt_closed": True if currentTime > self.mkt_close else False,
+                                                               "book": ''}))
+
+                 # It is possible to also send the pretty-printed order book to the agent for logging, but forcing pretty-printing
+                 # of a large order book is very slow, so we should only do it with good reason. We don't currently
+                 # have a configurable option for it.
+                 # "book": self.order_books[symbol].prettyPrint(silent=True) }))
+         elif msg.body['msg'] == "QUERY_ORDER_STREAM":
+             symbol = msg.body['symbol']
+             length = msg.body['length']
+
+             if symbol not in self.order_books:
+                 log_print("Order stream request discarded. Unknown symbol: {}", symbol)
+             else:
+ log_print("{} received QUERY_ORDER_STREAM ({}:{}) request from agent {}", self.name, symbol, length, msg.body['sender'])
217
+
218
+ # We return indices [1:length] inclusive because the agent will want "orders leading up to the last
219
+ # L trades", and the items under index 0 are more recent than the last trade.
220
+ self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_ORDER_STREAM", "symbol": symbol, "length": length,
221
+ "mkt_closed": True if currentTime > self.mkt_close else False,
222
+ "orders": self.order_books[symbol].history[1:length + 1]
223
+ }))
224
+ elif msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME':
225
+ symbol = msg.body['symbol']
226
+ lookback_period = msg.body['lookback_period']
227
+ if symbol not in self.order_books:
228
+ log_print("Order stream request discarded. Unknown symbol: {}", symbol)
229
+ else:
230
+ log_print("{} received QUERY_TRANSACTED_VOLUME ({}:{}) request from agent {}", self.name, symbol, lookback_period, msg.body['sender'])
231
+ self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_TRANSACTED_VOLUME", "symbol": symbol,
232
+ "transacted_volume": self.order_books[symbol].get_transacted_volume(lookback_period),
233
+ "mkt_closed": True if currentTime > self.mkt_close else False
234
+ }))
235
+ elif msg.body['msg'] == "LIMIT_ORDER":
236
+ order = msg.body['order']
237
+ #log_print("{} received LIMIT_ORDER: {}", self.name, order)
238
+ if order.symbol not in self.order_books:
239
+ log_print("Limit Order discarded. Unknown symbol: {}", order.symbol)
240
+ else:
241
+ # Hand the order to the order book for processing.
242
+ self.order_books[order.symbol].handleLimitOrder(deepcopy(order))
243
+ #self.publishOrderBookData()
244
+ elif msg.body['msg'] == "MARKET_ORDER":
245
+ order = msg.body['order']
246
+ #log_print("{} received MARKET_ORDER: {}", self.name, order)
247
+ if order.symbol not in self.order_books:
248
+ log_print("Market Order discarded. Unknown symbol: {}", order.symbol)
249
+ else:
250
+ # Hand the market order to the order book for processing.
251
+ self.order_books[order.symbol].handleMarketOrder(deepcopy(order))
252
+
253
+ elif msg.body['msg'] == "CANCEL_ORDER":
254
+ # Note: this is somewhat open to abuse, as in theory agents could cancel other agents' orders.
255
+ # An agent could also become confused if they receive a (partial) execution on an order they
256
+ # then successfully cancel, but receive the cancel confirmation first. Things to think about
257
+ # for later...
258
+ order = msg.body['order']
259
+ #log_print("{} received CANCEL_ORDER: {}", self.name, order)
260
+ if order.symbol not in self.order_books:
261
+ log_print("Cancellation request discarded. Unknown symbol: {}", order.symbol)
262
+ else:
263
+ # Hand the order to the order book for processing.
264
+ self.order_books[order.symbol].cancelOrder(deepcopy(order))
265
+ self.publishOrderBookData()
266
+ elif msg.body['msg'] == 'MODIFY_ORDER':
267
+ # Replace an existing order with a modified order. There could be some timing issues
268
+ # here. What if an order is partially executed, but the submitting agent has not
269
+ # yet received the notification, and submits a modification to the quantity of the
270
+ # (already partially executed) order? I guess it is okay if we just think of this
271
+ # as "delete and then add new" and make it the agent's problem if anything weird
272
+ # happens.
273
+ order = msg.body['order']
274
+ new_order = msg.body['new_order']
275
+ #log_print("{} received MODIFY_ORDER: {}, new order: {}".format(self.name, order, new_order))
276
+ if order.symbol not in self.order_books:
277
+ log_print("Modification request discarded. Unknown symbol: {}".format(order.symbol))
278
+ else:
279
+ self.order_books[order.symbol].modifyOrder(deepcopy(order), deepcopy(new_order))
280
+ self.publishOrderBookData()
281
+
282
+ def updateSubscriptionDict(self, msg, currentTime):
283
+ # The subscription dict is a dictionary with the key = agent ID,
284
+ # value = dict (key = symbol, value = list [levels (number of levels to receive updates for),
285
+ # frequency (min number of ns between messages), last agent update timestamp]
286
+ # e.g. {101 : {'AAPL' : [1, 10, pd.Timestamp('10:00:00')]}}
287
+ if msg.body['msg'] == "MARKET_DATA_SUBSCRIPTION_REQUEST":
288
+ agent_id, symbol, levels, freq = msg.body['sender'], msg.body['symbol'], msg.body['levels'], msg.body['freq']
289
+ self.subscription_dict[agent_id] = {symbol: [levels, freq, currentTime]}
290
+ elif msg.body['msg'] == "MARKET_DATA_SUBSCRIPTION_CANCELLATION":
291
+ agent_id, symbol = msg.body['sender'], msg.body['symbol']
292
+ del self.subscription_dict[agent_id][symbol]
293
+
294
+ def publishOrderBookData(self):
295
+ '''
296
+ The exchange agent sends an order book update to the agents using the subscription API if one of the following
297
+ conditions is met:
298
+ 1) agent requests ALL order book updates (freq == 0)
299
+ 2) the order book has been updated since the agent's last update AND the time elapsed since that last
300
+ agent update is at least the period specified in the freq parameter.
301
+ '''
302
+ for agent_id, params in self.subscription_dict.items():
303
+ for symbol, values in params.items():
304
+ levels, freq, last_agent_update = values[0], values[1], values[2]
305
+ orderbook_last_update = self.order_books[symbol].last_update_ts
306
+ if (freq == 0) or \
307
+ ((orderbook_last_update > last_agent_update) and ((orderbook_last_update - last_agent_update).delta >= freq)):
308
+ self.sendMessage(agent_id, Message({"msg": "MARKET_DATA",
309
+ "symbol": symbol,
310
+ "bids": self.order_books[symbol].getInsideBids(levels),
311
+ "asks": self.order_books[symbol].getInsideAsks(levels),
312
+ "last_transaction": self.order_books[symbol].last_trade,
313
+ "exchange_ts": self.currentTime}))
314
+ self.subscription_dict[agent_id][symbol][2] = orderbook_last_update
315
+
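A quick standalone sketch of the throttling rule documented above; the helper name should_publish and the timestamps are illustrative, not part of the uploaded file, and freq is in nanoseconds as stored in the subscription dict:

import pandas as pd

def should_publish(freq, last_agent_update, orderbook_last_update):
    # freq == 0 means the subscriber wants every order book update.
    if freq == 0:
        return True
    # Otherwise require a newer book and at least `freq` ns since the last push.
    # .delta gives nanoseconds, matching the check above (newer pandas uses .value).
    elapsed_ns = (orderbook_last_update - last_agent_update).delta
    return (orderbook_last_update > last_agent_update) and (elapsed_ns >= freq)

# A 10,000 ns (10 microsecond) subscription:
t0 = pd.Timestamp('2020-06-01 10:00:00')
print(should_publish(10_000, t0, t0 + pd.Timedelta(5_000, unit='ns')))    # False
print(should_publish(10_000, t0, t0 + pd.Timedelta(20_000, unit='ns')))   # True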
316
+ def logOrderBookSnapshots(self, symbol):
317
+ """
318
+ Log full depth quotes (price, volume) from this order book at some pre-determined frequency. Here we are looking at
319
+ the actual log for this order book (i.e. are there snapshots to export, independent of the requested frequency).
320
+ """
321
+ def get_quote_range_iterator(s):
322
+ """ Helper method for order book logging. Takes pandas Series and returns python range() from first to last
323
+ element.
324
+ """
325
+ forbidden_values = [0, 19999900] # TODO: Put constant value in more sensible place!
326
+ quotes = sorted(s)
327
+ for val in forbidden_values:
328
+ try: quotes.remove(val)
329
+ except ValueError:
330
+ pass
331
+ return quotes
332
+
333
+ book = self.order_books[symbol]
334
+
335
+ if book.book_log:
336
+
337
+ print("Logging order book to file...")
338
+ dfLog = book.book_log_to_df()
339
+ dfLog.set_index('QuoteTime', inplace=True)
340
+ dfLog = dfLog[~dfLog.index.duplicated(keep='last')]
341
+ dfLog.sort_index(inplace=True)
342
+
343
+ if str(self.book_freq).isdigit() and int(self.book_freq) == 0: # Save all possible information
344
+ # Get the full range of quotes at the finest possible resolution.
345
+ quotes = get_quote_range_iterator(dfLog.columns.unique())
346
+
347
+ # Restructure the log to have multi-level rows of all possible pairs of time and quote
348
+ # with volume as the only column.
349
+ if not self.wide_book:
350
+ filledIndex = pd.MultiIndex.from_product([dfLog.index, quotes], names=['time', 'quote'])
351
+ dfLog = dfLog.stack()
352
+ dfLog = dfLog.reindex(filledIndex)
353
+
354
+ filename = f'ORDERBOOK_{symbol}_FULL'
355
+
356
+ else: # Sample at frequency self.book_freq
357
+ # With multiple quotes in a nanosecond, use the last one, then resample to the requested freq.
358
+ dfLog = dfLog.resample(self.book_freq).ffill()
359
+ dfLog.sort_index(inplace=True)
360
+
361
+ # Create a fully populated index at the desired frequency from market open to close.
362
+ # Then project the logged data into this complete index.
363
+ time_idx = pd.date_range(self.mkt_open, self.mkt_close, freq=self.book_freq, closed='right')
364
+ dfLog = dfLog.reindex(time_idx, method='ffill')
365
+ dfLog.sort_index(inplace=True)
366
+
367
+ if not self.wide_book:
368
+ dfLog = dfLog.stack()
369
+ dfLog.sort_index(inplace=True)
370
+
371
+ # Get the full range of quotes at the finest possible resolution.
372
+ quotes = get_quote_range_iterator(dfLog.index.get_level_values(1).unique())
373
+
374
+ # Restructure the log to have multi-level rows of all possible pairs of time and quote
375
+ # with volume as the only column.
376
+ filledIndex = pd.MultiIndex.from_product([time_idx, quotes], names=['time', 'quote'])
377
+ dfLog = dfLog.reindex(filledIndex)
378
+
379
+ filename = f'ORDERBOOK_{symbol}_FREQ_{self.book_freq}'
380
+
381
+ # Final cleanup
382
+ if not self.wide_book:
383
+ dfLog = dfLog.rename('Volume')
384
+ df = pd.SparseDataFrame(index=dfLog.index)
385
+ df['Volume'] = dfLog
386
+ else:
387
+ df = dfLog
388
+ df = df.reindex(sorted(df.columns), axis=1)
389
+
390
+ # Archive the order book snapshots directly to a file named with the symbol, rather than
391
+ # to the exchange agent log.
392
+ self.writeLog(df, filename=filename)
393
+ print("Order book logging complete!")
394
+
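To make the sampled branch above concrete, here is a small standalone illustration (made-up prices and volumes, wide-book layout assumed): irregular snapshots are resampled to book_freq and then forward-filled onto a regular open-to-close index.

import pandas as pd

mkt_open = pd.Timestamp('2020-06-01 09:30:00')
mkt_close = pd.Timestamp('2020-06-01 09:30:05')
book_freq = '1S'

# Two snapshots (QuoteTime index, one volume column per price level in cents).
dfLog = pd.DataFrame({100100: [300, 250], 100200: [120, 180]},
                     index=pd.to_datetime(['2020-06-01 09:30:00.4',
                                           '2020-06-01 09:30:02.7']))

dfLog = dfLog.resample(book_freq).ffill()
# closed= matches the call above; newer pandas versions rename it to inclusive=.
time_idx = pd.date_range(mkt_open, mkt_close, freq=book_freq, closed='right')
dfLog = dfLog.reindex(time_idx, method='ffill')
print(dfLog)   # one row per second from 09:30:01 through 09:30:05, volumes carried forward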
395
+ def sendMessage (self, recipientID, msg):
396
+ # The ExchangeAgent automatically applies appropriate parallel processing pipeline delay
397
+ # to those message types which require it.
398
+ # TODO: probably organize the order types into categories once there are more, so we can
399
+ # take action by category (e.g. ORDER-related messages) instead of enumerating all message
400
+ # types to be affected.
401
+ if msg.body['msg'] in ['ORDER_ACCEPTED', 'ORDER_CANCELLED', 'ORDER_EXECUTED']:
402
+ # Messages that require order book modification (not simple queries) incur the additional
403
+ # parallel processing delay as configured.
404
+ super().sendMessage(recipientID, msg, delay = self.pipeline_delay)
405
+ if self.log_orders: self.logEvent(msg.body['msg'], msg.body['order'].to_dict())
406
+ else:
407
+ # Other message types incur only the currently-configured computation delay for this agent.
408
+ super().sendMessage(recipientID, msg)
409
+
410
+ # Simple accessor methods for the market open and close times.
411
+ def getMarketOpen(self):
412
+ return self.__mkt_open
413
+
414
+ def getMarketClose(self):
415
+ return self.__mkt_close
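As a summary of the sendMessage override above (an illustrative restatement, not code from the uploaded file): order lifecycle notifications carry the extra parallel pipeline delay, while queries and market-hours responses incur only the exchange's computation delay.

PIPELINED_MESSAGES = {'ORDER_ACCEPTED', 'ORDER_CANCELLED', 'ORDER_EXECUTED'}

def extra_delay_ns(msg_type, pipeline_delay_ns):
    # Mirrors the branch in ExchangeAgent.sendMessage: only order book results
    # are pushed through the parallel pipeline delay.
    return pipeline_delay_ns if msg_type in PIPELINED_MESSAGES else 0

assert extra_delay_ns('ORDER_EXECUTED', 40_000) == 40_000   # 40 microseconds, example value only
assert extra_delay_ns('QUERY_SPREAD', 40_000) == 0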
ABIDES/agent/FinancialAgent.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent.Agent import Agent
2
+ import sys
3
+ import traceback
4
+
5
+ # The FinancialAgent class contains attributes and methods that should be available
6
+ # to all agent types (traders, exchanges, etc) in a financial market simulation.
7
+ # To be honest, it mainly exists because the base Agent class should not have any
8
+ # finance-specific aspects and it doesn't make sense for ExchangeAgent to inherit
9
+ # from TradingAgent. Hopefully we'll find more common ground for traders and
10
+ # exchanges to make this more useful later on.
11
+ class FinancialAgent(Agent):
12
+
13
+ def __init__(self, id, name, type, random_state, log_to_file=True):
14
+ # Base class init.
15
+ super().__init__(id, name, type, random_state, log_to_file)
16
+
17
+ # Used by any subclass to dollarize an int-cents price for printing.
18
+ def dollarize (self, cents):
19
+ return dollarize(cents)
20
+
21
+ pass
22
+
23
+
24
+ # Dollarizes int-cents prices for printing. Defined outside the class for
25
+ # utility access by non-agent classes.
26
+
27
+ def dollarize(cents):
28
+ if type(cents) is list:
29
+ return ( [ dollarize(x) for x in cents ] )
30
+ elif type(cents) is int:
31
+ return "${:0.2f}".format(cents / 100)
32
+ else:
33
+ # If cents is already a float, there is an error somewhere.
34
+ print ("ERROR: dollarize(cents) called without int or list of ints: {}".format(cents))
35
+ traceback.print_stack()
36
+ sys.exit()
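Usage sketch for the dollarize helper above (values chosen for illustration):

from agent.FinancialAgent import dollarize

print(dollarize(10050))           # $100.50
print(dollarize([10050, 9999]))   # ['$100.50', '$99.99']
# Passing a float (e.g. 100.5) prints an error and exits, since prices should be int cents.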
ABIDES/agent/FundamentalTrackingAgent.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent.TradingAgent import TradingAgent
2
+ from util.util import log_print
3
+ import numpy as np
4
+ import pandas as pd
5
+
6
+
7
+ class FundamentalTrackingAgent(TradingAgent):
8
+ """ Agent who collects and saves to disk noise-free observations of the fundamental. """
9
+
10
+ def __init__(self, id, name, type, log_frequency, symbol, log_orders=False):
11
+ """ Constructor for FundamentalTrackingAgent
12
+
13
+ :param log_frequency: Frequency to update log (in nanoseconds)
14
+ :param symbol: symbol for which fundamental is being logged
15
+ """
16
+ super().__init__(id, name, type, starting_cash=0, log_orders=log_orders,
17
+ random_state=np.random.RandomState(seed=np.random.randint(low=0,
18
+ high=2 ** 32,
19
+ dtype='uint64')))
20
+
21
+ self.log_frequency = log_frequency
22
+ self.fundamental_series = []
23
+ self.symbol = symbol
24
+
25
+ def kernelStarting(self, startTime):
26
+ # self.kernel is set in Agent.kernelInitializing()
27
+ # self.exchangeID is set in TradingAgent.kernelStarting()
28
+ super().kernelStarting(startTime)
29
+ self.oracle = self.kernel.oracle
30
+
31
+
32
+ def kernelStopping(self):
33
+ """ Stops kernel and saves fundamental series to disk. """
34
+ # Always call parent method to be safe.
35
+ super().kernelStopping()
36
+ self.writeFundamental()
37
+
38
+ def measureFundamental(self):
39
+ """ Saves the fundamental value at self.currentTime to self.fundamental_series. """
40
+ obs_t = self.oracle.observePrice(self.symbol, self.currentTime, sigma_n=0)
41
+ self.fundamental_series.append({'FundamentalTime': self.currentTime, 'FundamentalValue': obs_t})
42
+
43
+ def wakeup(self, currentTime):
44
+ """ Advances agent in time and takes measurement of fundamental. """
45
+ # Parent class handles discovery of exchange times and market_open wakeup call.
46
+ super().wakeup(currentTime)
47
+
48
+ if not self.mkt_open or not self.mkt_close:
49
+ # No logging if market is closed
50
+ return
51
+
52
+ self.measureFundamental()
53
+ self.setWakeup(currentTime + self.getWakeFrequency())
54
+
55
+ def writeFundamental(self):
56
+ """ Logs fundamental series to file. """
57
+ dfFund = pd.DataFrame(self.fundamental_series)
58
+ dfFund.set_index('FundamentalTime', inplace=True)
59
+ self.writeLog(dfFund, filename=f'fundamental_{self.symbol}_freq_{self.log_frequency}_ns')
60
+
61
+ print("Noise-free fundamental archival complete.")
62
+
63
+ def getWakeFrequency(self):
64
+ return pd.Timedelta(self.log_frequency, unit='ns')
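A construction sketch for the tracker above; the id, name and symbol are illustrative, and log_frequency is in nanoseconds, so the value below wakes the agent once per simulated millisecond:

import pandas as pd

# tracker = FundamentalTrackingAgent(id=1, name='FUND_TRACKER_1',
#                                    type='FundamentalTrackingAgent',
#                                    log_frequency=1_000_000, symbol='IBM')
print(pd.Timedelta(1_000_000, unit='ns'))   # 0 days 00:00:00.001000 -> the wake interval used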
ABIDES/agent/HeuristicBeliefLearningAgent.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent.ZeroIntelligenceAgent import ZeroIntelligenceAgent
2
+ from message.Message import Message
3
+ from util.util import log_print
4
+
5
+ from math import sqrt
6
+ import numpy as np
7
+ import pandas as pd
8
+ import sys
9
+
10
+ np.set_printoptions(threshold=np.inf)
11
+
12
+
13
+ class HeuristicBeliefLearningAgent(ZeroIntelligenceAgent):
14
+
15
+ def __init__(self, id, name, type, symbol='IBM', starting_cash=100000, sigma_n=1000,
16
+ r_bar=100000, kappa=0.05, sigma_s=100000, q_max=10, sigma_pv=5000000, R_min=0,
17
+ R_max=250, eta=1.0, lambda_a=0.005, L=8, log_orders=False,
18
+ random_state=None):
19
+
20
+ # Base class init.
21
+ super().__init__(id, name, type, symbol=symbol, starting_cash=starting_cash, sigma_n=sigma_n,
22
+ r_bar=r_bar, kappa=kappa, sigma_s=sigma_s, q_max=q_max, sigma_pv=sigma_pv, R_min=R_min,
23
+ R_max=R_max, eta=eta, lambda_a=lambda_a, log_orders=log_orders,
24
+ random_state=random_state)
25
+
26
+ # Store important parameters particular to the HBL agent.
27
+ self.L = L # length of order book history to use (number of transactions)
28
+
29
+ def wakeup(self, currentTime):
30
+ # Parent class handles discovery of exchange times and market_open wakeup call.
31
+ # Also handles ZI-style "background agent" needs that are not specific to HBL.
32
+ super().wakeup(currentTime)
33
+
34
+ # Only if the superclass leaves the state as ACTIVE should we proceed with our
35
+ # trading strategy.
36
+ if self.state != 'ACTIVE': return
37
+
38
+ # To make trade decisions, the HBL agent requires recent order stream information.
39
+ self.getOrderStream(self.symbol, length=self.L)
40
+ self.state = 'AWAITING_STREAM'
41
+
42
+ def placeOrder(self):
43
+ # Called when it is time for the agent to determine a limit price and place an order.
44
+ # This method implements the HBL strategy and falls back to the ZI (superclass)
45
+ # strategy if there is not enough information for the HBL strategy.
46
+
47
+ # See if there is enough history for HBL. If not, we will _exactly_ perform the
48
+ # ZI placeOrder(). If so, we will use parts of ZI but compute our limit price
49
+ # differently. Note that we are not given orders more recent than the most recent
50
+ # trade.
51
+
52
+ if len(self.stream_history[self.symbol]) < self.L:
53
+ # Not enough history for HBL.
54
+ log_print("Insufficient history for HBL: length {}, L {}", len(self.stream_history[self.symbol]), self.L)
55
+ super().placeOrder()
56
+ return
57
+
58
+ # There is enough history for HBL.
59
+
60
+ # Use the superclass (ZI) method to obtain an observation, update internal estimate
61
+ # parameters, decide to buy or sell, and calculate the total unit valuation, because
62
+ # all of this logic is identical to ZI.
63
+ v, buy = self.updateEstimates()
64
+
65
+ # Walk through the visible order history and accumulate values needed for HBL's
66
+ # estimation of successful transaction by limit price.
67
+ low_p = sys.maxsize
68
+ high_p = 0
69
+
70
+ # Find the lowest and highest observed prices in the order history.
71
+ for h in self.stream_history[self.symbol]:
72
+ for id, order in h.items():
73
+ p = order['limit_price']
74
+ if p < low_p: low_p = p
75
+ if p > high_p: high_p = p
76
+
77
+ # Set up the ndarray we will use for our computation.
78
+ # idx 0-7 are sa, sb, ua, ub, num, denom, Pr, Es
79
+ nd = np.zeros((high_p - low_p + 1, 8))
80
+
81
+ # Iterate through the history and compile our observations.
82
+ for h in self.stream_history[self.symbol]:
83
+ # h follows increasing "transactions into the past", with index zero being orders
84
+ # after the most recent transaction.
85
+ for id, order in h.items():
86
+ p = order['limit_price']
87
+ if p < low_p: low_p = p
88
+ if p > high_p: high_p = p
89
+
90
+ # For now if there are any transactions, consider the order successful. For single
91
+ # unit orders, this is sufficient. For multi-unit orders,
92
+ # we may wish to switch to a proportion of shares executed.
93
+ if order['is_buy_order']:
94
+ if order['transactions']:
95
+ nd[p - low_p, 1] += 1
96
+ else:
97
+ nd[p - low_p, 3] += 1
98
+ else:
99
+ if order['transactions']:
100
+ nd[p - low_p, 0] += 1
101
+ else:
102
+ nd[p - low_p, 2] += 1
103
+
104
+ # Compute the sums and cumulative sums required, from our observations,
105
+ # to drive the HBL's transaction probability estimates.
106
+ if buy:
107
+ nd[:, [0, 1, 2]] = np.cumsum(nd[:, [0, 1, 2]], axis=0)
108
+ nd[::-1, 3] = np.cumsum(nd[::-1, 3], axis=0)
109
+ nd[:, 4] = np.sum(nd[:, [0, 1, 2]], axis=1)
110
+ else:
111
+ nd[::-1, [0, 1, 3]] = np.cumsum(nd[::-1, [0, 1, 3]], axis=0)
112
+ nd[:, 2] = np.cumsum(nd[:, 2], axis=0)
113
+ nd[:, 4] = np.sum(nd[:, [0, 1, 3]], axis=1)
114
+
115
+ nd[:, 5] = np.sum(nd[:, 0:4], axis=1)
116
+
117
+ # Okay to ignore divide by zero errors here because we expect that in
118
+ # some cases (0/0 can happen) and we immediately convert the resulting
119
+ # nan to zero, which is the right answer for us.
120
+
121
+ # Compute probability estimates for successful transaction at all price levels.
122
+ with np.errstate(divide='ignore', invalid='ignore'):
123
+ nd[:, 6] = np.nan_to_num(np.divide(nd[:, 4], nd[:, 5]))
124
+
125
+ # Compute expected surplus for all price levels.
126
+ if buy:
127
+ nd[:, 7] = nd[:, 6] * (v - np.arange(low_p, high_p + 1))
128
+ else:
129
+ nd[:, 7] = nd[:, 6] * (np.arange(low_p, high_p + 1) - v)
130
+
131
+ # Extract the price and other data for the maximum expected surplus.
132
+ best_idx = np.argmax(nd[:, 7])
133
+ best_Es, best_Pr = nd[best_idx, [7, 6]]
134
+ best_p = low_p + best_idx
135
+
136
+ # If the best expected surplus is positive, go for it.
137
+ if best_Es > 0:
138
+ log_print("Numpy: {} selects limit price {} with expected surplus {} (Pr = {:0.4f})", self.name, best_p,
139
+ int(round(best_Es)), best_Pr)
140
+
141
+ # Place the constructed order.
142
+ self.placeLimitOrder(self.symbol, 100, buy, int(round(best_p)))
143
+ else:
144
+ # Do nothing if best limit price has negative expected surplus with below code.
145
+ log_print("Numpy: {} elects not to place an order (best expected surplus <= 0)", self.name)
146
+
147
+ # OTHER OPTION 1: Allow negative expected surplus with below code.
148
+ # log_print ("Numpy: {} placing undesirable order (best expected surplus <= 0)", self.name)
149
+ # self.placeLimitOrder(self.symbol, 1, buy, int(round(best_p)))
150
+
151
+ # OTHER OPTION 2: Force fallback to ZI logic on negative surplus with below code (including return).
152
+ # log_print ("Numpy: no desirable order for {}, acting as ZI", self.name)
153
+ # super().placeOrder()
154
+
155
+ def receiveMessage(self, currentTime, msg):
156
+
157
+ # We have been awakened by something other than our scheduled wakeup.
158
+ # If our internal state indicates we were waiting for a particular event,
159
+ # check if we can transition to a new state.
160
+
161
+ # Allow parent class to handle state + message combinations it understands.
162
+ super().receiveMessage(currentTime, msg)
163
+
164
+ # Do our special stuff.
165
+ if self.state == 'AWAITING_STREAM':
166
+ # We were waiting to receive the recent order stream.
167
+ if msg.body['msg'] == 'QUERY_ORDER_STREAM':
168
+ # This is what we were waiting for.
169
+
170
+ # But if the market is now closed, don't advance.
171
+ if self.mkt_closed: return
172
+
173
+ self.getCurrentSpread(self.symbol)
174
+ self.state = 'AWAITING_SPREAD'
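The heart of the HBL step above is: estimate the execution probability of each candidate limit price from the last L transactions' order outcomes, then pick the price that maximizes probability times surplus. A simplified, self-contained illustration for the buy side (the probabilities here are made up; the real code derives them with the cumulative sums shown above):

import numpy as np

v = 100020                                       # private valuation, in cents
prices = np.array([100000, 100005, 100010, 100015])
pr_exec = np.array([0.20, 0.45, 0.70, 0.95])     # estimated P(buy limit at p transacts)

expected_surplus = pr_exec * (v - prices)        # buy side: surplus at price p is v - p
best = int(np.argmax(expected_surplus))
print(prices[best], expected_surplus[best])      # 100010 7.0 -> place the bid at 100010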
ABIDES/agent/NoiseAgent.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent.TradingAgent import TradingAgent
2
+ from util.util import log_print
3
+
4
+ from math import sqrt
5
+ import numpy as np
6
+ import pandas as pd
7
+
8
+
9
+ class NoiseAgent(TradingAgent):
10
+
11
+ def __init__(self, id, name, type, symbol='IBM', starting_cash=100000,
12
+ log_orders=False, log_to_file=True, random_state=None, wakeup_time = None ):
13
+
14
+ # Base class init.
15
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders,
16
+ log_to_file=log_to_file, random_state=random_state)
17
+
18
+ self.wakeup_time = wakeup_time
19
+
20
+ self.symbol = symbol # symbol to trade
21
+
22
+ # The agent uses this to track whether it has begun its strategy or is still
23
+ # handling pre-market tasks.
24
+ self.trading = False
25
+
26
+ # The agent begins in its "complete" state, not waiting for
27
+ # any special event or condition.
28
+ self.state = 'AWAITING_WAKEUP'
29
+
30
+ # The agent must track its previous wake time, so it knows how many time
31
+ # units have passed.
32
+ self.prev_wake_time = None
33
+
34
+ self.size = np.random.randint(20, 50)
35
+
36
+ def kernelStarting(self, startTime):
37
+ # self.kernel is set in Agent.kernelInitializing()
38
+ # self.exchangeID is set in TradingAgent.kernelStarting()
39
+
40
+ super().kernelStarting(startTime)
41
+
42
+ self.oracle = self.kernel.oracle
43
+
44
+ def kernelStopping(self):
45
+ # Always call parent method to be safe.
46
+ super().kernelStopping()
47
+ return
48
+ # Print end of day valuation.
49
+ H = int(round(self.getHoldings(self.symbol), -2) / 100)
50
+
51
+ #noise trader surplus is marked to EOD
52
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
53
+
54
+ if bid and ask:
55
+ rT = int(bid + ask)/2
56
+ else:
57
+ rT = self.last_trade[self.symbol]
58
+
59
+ # final (real) fundamental value times shares held.
60
+ surplus = rT * H
61
+
62
+ log_print("surplus after holdings: {}", surplus)
63
+
64
+ # Add ending cash value and subtract starting cash value.
65
+ surplus += self.holdings['CASH'] - self.starting_cash
66
+ surplus = float(surplus) / self.starting_cash
67
+
68
+ self.logEvent('FINAL_VALUATION', surplus, True)
69
+
70
+ log_print(
71
+ "{} final report. Holdings {}, end cash {}, start cash {}, final fundamental {}, surplus {}",
72
+ self.name, H, self.holdings['CASH'], self.starting_cash, rT, surplus)
73
+
74
+ print("Final relative surplus", self.name, surplus)
75
+
76
+ def wakeup(self, currentTime):
77
+ # Parent class handles discovery of exchange times and market_open wakeup call.
78
+ super().wakeup(currentTime)
79
+
80
+ self.state = 'INACTIVE'
81
+
82
+ if not self.mkt_open or not self.mkt_close:
83
+ # TradingAgent handles discovery of exchange times.
84
+ return
85
+ else:
86
+ if not self.trading:
87
+ self.trading = True
88
+
89
+ # Time to start trading!
90
+ log_print("{} is ready to start trading now.", self.name)
91
+
92
+ # Steady state wakeup behavior starts here.
93
+
94
+ # If we've been told the market has closed for the day, we will only request
95
+ # final price information, then stop.
96
+ if self.mkt_closed and (self.symbol in self.daily_close_price):
97
+ # Market is closed and we already got the daily close price.
98
+ return
99
+
100
+ if self.wakeup_time > currentTime:
101
+ self.setWakeup(self.wakeup_time)
102
+
103
+ if self.mkt_closed and (not self.symbol in self.daily_close_price):
104
+ self.getCurrentSpread(self.symbol)
105
+ self.state = 'AWAITING_SPREAD'
106
+ return
107
+
108
+ if type(self) == NoiseAgent:
109
+ self.getCurrentSpread(self.symbol)
110
+ self.state = 'AWAITING_SPREAD'
111
+ else:
112
+ self.state = 'ACTIVE'
113
+
114
+ def placeOrder(self):
115
+ #place order in random direction at a mid
116
+ buy_indicator = np.random.randint(0, 1 + 1)
117
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
118
+
119
+ if buy_indicator and ask:
120
+ self.placeLimitOrder(self.symbol, self.size, buy_indicator, ask)
121
+ elif not buy_indicator and bid:
122
+ self.placeLimitOrder(self.symbol, self.size, buy_indicator, bid)
123
+
124
+ def receiveMessage(self, currentTime, msg):
125
+ # Parent class schedules market open wakeup call once market open/close times are known.
126
+ super().receiveMessage(currentTime, msg)
127
+
128
+ # We have been awakened by something other than our scheduled wakeup.
129
+ # If our internal state indicates we were waiting for a particular event,
130
+ # check if we can transition to a new state.
131
+
132
+ if self.state == 'AWAITING_SPREAD':
133
+ # We were waiting to receive the current spread/book. Since we don't currently
134
+ # track timestamps on retained information, we rely on actually seeing a
135
+ # QUERY_SPREAD response message.
136
+
137
+ if msg.body['msg'] == 'QUERY_SPREAD':
138
+ # This is what we were waiting for.
139
+
140
+ # But if the market is now closed, don't advance to placing orders.
141
+ if self.mkt_closed: return
142
+
143
+ # We now have the information needed to place a limit order at the current
144
+ # best bid or ask, in a random direction.
145
+ self.placeOrder()
146
+ self.state = 'AWAITING_WAKEUP'
147
+
148
+ # Internal state and logic specific to this agent subclass.
149
+
150
+ # Cancel all open orders.
151
+ # Return value: did we issue any cancellation requests?
152
+ def cancelOrders(self):
153
+ if not self.orders: return False
154
+
155
+ for id, order in self.orders.items():
156
+ self.cancelOrder(order)
157
+
158
+ return True
159
+
160
+ def getWakeFrequency(self):
161
+ return pd.Timedelta(self.random_state.randint(low=0, high=100), unit='ns')
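Behavior sketch of NoiseAgent.placeOrder above, with made-up quotes: the direction is a fair coin flip, and the limit price is set at the opposite touch so the order is immediately marketable.

import numpy as np

bid, ask = 99990, 100010                  # best known quotes in cents (illustrative)
buy = bool(np.random.randint(0, 2))       # 0 or 1 with equal probability
size = np.random.randint(20, 50)          # same size range drawn in __init__
price = ask if buy else bid               # buys lift the ask, sells hit the bid
print('BUY' if buy else 'SELL', size, price)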
ABIDES/agent/OrderBookImbalanceAgent.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent.TradingAgent import TradingAgent
2
+ import pandas as pd
3
+ from util.util import log_print
4
+
5
+ import matplotlib
6
+ matplotlib.use('TkAgg')
7
+ import matplotlib.pyplot as plt
8
+
9
+
10
+ class OrderBookImbalanceAgent(TradingAgent):
11
+
12
+ # The OrderBookImbalanceAgent is a simple example of an agent that predicts the short term
13
+ # price movement of a security by the preponderance of directionality in the limit order
14
+ # book. Specifically, it predicts the price will fall if the ratio of bid volume to total volume
15
+ # in the first N levels of the book is greater than a configurable threshold, and will rise if
16
+ # that ratio is below a symmetric value. There is a trailing stop on the bid_pct to exit.
17
+ #
18
+ # Note that this means the current iteration of the OBI agent is treating order book imbalance
19
+ # as an oscillating indicator rather than a momentum indicator. (Imbalance to the buy side
20
+ # indicates "overbought" rather than an upward trend.)
21
+ #
22
+ # Parameters unique to this agent:
23
+ # Levels: how much order book depth should be considered when evaluating imbalance?
24
+ # Entry Threshold: how much imbalance is required before the agent takes a non-flat position?
25
+ # For example, entry_threshold=0.1 causes long entry at 0.6 or short entry at 0.4.
26
+ # Trail Dist: how far behind the peak bid_pct should the trailing stop follow?
27
+
28
+ def __init__(self, id, name, type, symbol=None, levels=10, entry_threshold=0.17, trail_dist=0.085, freq=3600000000000, starting_cash=1000000, log_orders=True, random_state=None):
29
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
30
+ self.symbol = symbol
31
+ self.levels = levels
32
+ self.entry_threshold = entry_threshold
33
+ self.trail_dist = trail_dist
34
+ self.freq = freq
35
+ self.last_market_data_update = None
36
+ self.is_long = False
37
+ self.is_short = False
38
+
39
+ self.trailing_stop = None
40
+ self.plotme = []
41
+
42
+
43
+ def kernelStarting(self, startTime):
44
+ super().kernelStarting(startTime)
45
+
46
+ def wakeup(self, currentTime):
47
+ super().wakeup(currentTime)
48
+ super().requestDataSubscription(self.symbol, levels=self.levels, freq=self.freq)
49
+ self.setComputationDelay(1)
50
+
51
+ def receiveMessage(self, currentTime, msg):
52
+ super().receiveMessage(currentTime, msg)
53
+ if msg.body['msg'] == 'MARKET_DATA':
54
+ self.cancelOrders()
55
+
56
+ self.last_market_data_update = currentTime
57
+ bids, asks = msg.body['bids'], msg.body['asks']
58
+
59
+ bid_liq = sum(x[1] for x in bids)
60
+ ask_liq = sum(x[1] for x in asks)
61
+
62
+ log_print("bid, ask levels: {}", len(bids), len(asks))
63
+ log_print("bids: {}, asks: {}", bids, asks)
64
+
65
+ # OBI strategy.
66
+ target = 0
67
+
68
+ if bid_liq == 0 or ask_liq == 0:
69
+ log_print("OBI agent inactive: zero bid or ask liquidity")
70
+ return
71
+ else:
72
+ # bid_pct encapsulates both sides of the question, as a normalized expression
73
+ # representing what fraction of total visible volume is on the buy side.
74
+ bid_pct = bid_liq / (bid_liq + ask_liq)
75
+
76
+ # If we are short, we need to decide if we should hold or exit.
77
+ if self.is_short:
78
+ # Update trailing stop.
79
+ if bid_pct - self.trail_dist > self.trailing_stop:
80
+ log_print("Trailing stop updated: new > old ({:2f} > {:2f})", bid_pct - self.trail_dist, self.trailing_stop)
81
+ self.trailing_stop = bid_pct - self.trail_dist
82
+ else:
83
+ log_print("Trailing stop remains: potential < old ({:2f} < {:2f})", bid_pct - self.trail_dist, self.trailing_stop)
84
+
85
+ # Check the trailing stop.
86
+ if bid_pct < self.trailing_stop:
87
+ log_print("OBI agent exiting short position: bid_pct < trailing_stop ({:2f} < {:2f})", bid_pct, self.trailing_stop)
88
+ target = 0
89
+ self.is_short = False
90
+ self.trailing_stop = None
91
+ else:
92
+ log_print("OBI agent holding short position: bid_pct > trailing_stop ({:2f} > {:2f})", bid_pct, self.trailing_stop)
93
+ target = -100
94
+ # If we are long, we need to decide if we should hold or exit.
95
+ elif self.is_long:
96
+ if bid_pct + self.trail_dist < self.trailing_stop:
97
+ log_print("Trailing stop updated: new < old ({:2f} < {:2f})", bid_pct + self.trail_dist, self.trailing_stop)
98
+ self.trailing_stop = bid_pct + self.trail_dist
99
+ else:
100
+ log_print("Trailing stop remains: potential > old ({:2f} > {:2f})", bid_pct + self.trail_dist, self.trailing_stop)
101
+
102
+ # Check the trailing stop.
103
+ if bid_pct > self.trailing_stop:
104
+ log_print("OBI agent exiting long position: bid_pct > trailing_stop ({:2f} > {:2f})", bid_pct, self.trailing_stop)
105
+ target = 0
106
+ self.is_long = False
107
+ self.trailing_stop = None
108
+ else:
109
+ log_print("OBI agent holding long position: bid_pct < trailing_stop ({:2f} < {:2f})", bid_pct, self.trailing_stop)
110
+ target = 100
111
+ # If we are flat, we need to decide if we should enter (long or short).
112
+ else:
113
+ if bid_pct > (0.5 + self.entry_threshold):
114
+ log_print("OBI agent entering long position: bid_pct < entry_threshold ({:2f} < {:2f})", bid_pct, 0.5 - self.entry_threshold)
115
+ target = 100
116
+ self.is_long = True
117
+ self.trailing_stop = bid_pct + self.trail_dist
118
+ log_print("Initial trailing stop: {:2f}", self.trailing_stop)
119
+ elif bid_pct < (0.5 - self.entry_threshold):
120
+ log_print("OBI agent entering short position: bid_pct > entry_threshold ({:2f} > {:2f})", bid_pct, 0.5 + self.entry_threshold)
121
+ target = -100
122
+ self.is_short = True
123
+ self.trailing_stop = bid_pct - self.trail_dist
124
+ log_print("Initial trailing stop: {:2f}", self.trailing_stop)
125
+ else:
126
+ log_print("OBI agent staying flat: long_entry < bid_pct < short_entry ({:2f} < {:2f} < {:2f})", 0.5 - self.entry_threshold, bid_pct, 0.5 + self.entry_threshold)
127
+ target = 0
128
+
129
+
130
+ self.plotme.append( { 'currentTime' : self.currentTime, 'midpoint' : (asks[0][0] + bids[0][0]) / 2, 'bid_pct' : bid_pct } )
131
+
132
+
133
+ # Adjust holdings to target.
134
+ holdings = self.holdings[self.symbol] if self.symbol in self.holdings else 0
135
+ delta = target - holdings
136
+ direction = True if delta > 0 else False
137
+ price = self.computeRequiredPrice(direction, abs(delta), bids, asks)
138
+
139
+ log_print("Current holdings: {}", self.holdings)
140
+
141
+ if delta == 0:
142
+ log_print("No adjustments to holdings needed.")
143
+ else:
144
+ log_print("Adjusting holdings by {}", delta)
145
+ self.placeLimitOrder(self.symbol, abs(delta), direction, price)
146
+
147
+
148
+ def getWakeFrequency(self):
149
+ return pd.Timedelta('1s')
150
+
151
+
152
+ # Computes required limit price to immediately execute a trade for the specified quantity
153
+ # of shares.
154
+ def computeRequiredPrice (self, direction, shares, known_bids, known_asks):
155
+ book = known_asks if direction else known_bids
156
+
157
+ # Start at the inside and add up the shares.
158
+ t = 0
159
+
160
+ for i in range(len(book)):
161
+ p, v = book[i]
162
+ t += v
163
+
164
+ # If we have accumulated enough shares, return this price.
165
+ if t >= shares:
166
+ return p
167
+
168
+ # Not enough shares. Just return worst price (highest ask, lowest bid).
169
+ return book[-1][0]
170
+
171
+
172
+ # Cancel all open orders.
173
+ # Return value: did we issue any cancellation requests?
174
+ def cancelOrders(self):
175
+ if not self.orders: return False
176
+
177
+ for id, order in self.orders.items():
178
+ self.cancelOrder(order)
179
+
180
+ return True
181
+
182
+
183
+ # Lifecycle.
184
+ def kernelTerminating(self):
185
+ # Plotting code is probably not needed here long term, but helps during development.
186
+
187
+ #df = pd.DataFrame(self.plotme)
188
+ #df.set_index('currentTime', inplace=True)
189
+ #df.rolling(30).mean().plot(secondary_y=['bid_pct'], figsize=(12,9))
190
+ ##plt.show()
191
+ super().kernelTerminating()
192
+
193
+
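A worked example of the order book imbalance arithmetic above, using the default entry_threshold=0.17 and trail_dist=0.085 (the liquidity figures are invented):

bid_liq, ask_liq = 1200, 400
bid_pct = bid_liq / (bid_liq + ask_liq)        # 0.75

entry_threshold, trail_dist = 0.17, 0.085
long_entry = 0.5 + entry_threshold             # 0.67
short_entry = 0.5 - entry_threshold            # 0.33

assert bid_pct > long_entry                    # 0.75 > 0.67 -> enter long, target +100 shares
trailing_stop = bid_pct + trail_dist           # 0.835; the stop ratchets down on later updates
print(bid_pct, trailing_stop)                  # exit the long once bid_pct rises above the stop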
ABIDES/agent/TradingAgent.py ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent.FinancialAgent import FinancialAgent
2
+ from agent.ExchangeAgent import ExchangeAgent
3
+ from message.Message import Message
4
+ from util.order.LimitOrder import LimitOrder
5
+ from util.order.MarketOrder import MarketOrder
6
+ from util.util import log_print
7
+
8
+ from copy import deepcopy
9
+ import sys
10
+
11
+ # The TradingAgent class (via FinancialAgent, via Agent) is intended as the
12
+ # base class for all trading agents (i.e. not things like exchanges) in a
13
+ # market simulation. It handles a lot of messaging (inbound and outbound)
14
+ # and state maintenance automatically, so subclasses can focus just on
15
+ # implementing a strategy without too much bookkeeping.
16
+ class TradingAgent(FinancialAgent):
17
+
18
+ def __init__(self, id, name, type, random_state=None, starting_cash=100000, log_orders=False, log_to_file=True):
19
+ # Base class init.
20
+ super().__init__(id, name, type, random_state, log_to_file)
21
+
22
+ # We don't yet know when the exchange opens or closes.
23
+ self.mkt_open = None
24
+ self.mkt_close = None
25
+
26
+ # Log order activity?
27
+ self.log_orders = log_orders
28
+
29
+ # Log all activity to file?
30
+ if log_orders is None:
31
+ self.log_orders = False
32
+ self.log_to_file = False
33
+
34
+ # Store starting_cash in case we want to refer to it for performance stats.
35
+ # It should NOT be modified. Use the 'CASH' key in self.holdings.
36
+ # 'CASH' is always in cents! Note that agents are limited by their starting
37
+ # cash, currently without leverage. Taking short positions is permitted,
38
+ # but does NOT increase the amount of at-risk capital allowed.
39
+ self.starting_cash = starting_cash
40
+
41
+ # TradingAgent has constants to support simulated market orders.
42
+ self.MKT_BUY = sys.maxsize
43
+ self.MKT_SELL = 0
44
+
45
+ # The base TradingAgent will track its holdings and outstanding orders.
46
+ # Holdings is a dictionary of symbol -> shares. CASH is a special symbol
47
+ # worth one cent per share. Orders is a dictionary of active, open orders
48
+ # (not cancelled, not fully executed) keyed by order_id.
49
+ self.holdings = { 'CASH' : starting_cash }
50
+ self.orders = {}
51
+
52
+ # The base TradingAgent also tracks last known prices for every symbol
53
+ # for which it has received a QUERY_LAST_TRADE message. Subclass
54
+ # agents may use or ignore this as they wish. Note that the subclass
55
+ # agent must request pricing when it wants it. This agent does NOT
56
+ # automatically generate such requests, though it has a helper function
57
+ # that can be used to make it happen.
58
+ self.last_trade = {}
59
+
60
+ # used in subscription mode to record the timestamp for which the data was current in the ExchangeAgent
61
+ self.exchange_ts = {}
62
+
63
+ # When a last trade price comes in after market close, the trading agent
64
+ # automatically records it as the daily close price for a symbol.
65
+ self.daily_close_price = {}
66
+
67
+ self.nav_diff = 0
68
+ self.basket_size = 0
69
+
70
+ # The agent remembers the last known bids and asks (with variable depth,
71
+ # showing only aggregate volume at each price level) when it receives
72
+ # a response to QUERY_SPREAD.
73
+ self.known_bids = {}
74
+ self.known_asks = {}
75
+
76
+ # The agent remembers the order history communicated by the exchange
77
+ # when such is requested by an agent (for example, a heuristic belief
78
+ # learning agent).
79
+ self.stream_history = {}
80
+
81
+ # The agent records the total transacted volume in the exchange for a given symbol and lookback period
82
+ self.transacted_volume = {}
83
+
84
+ # Each agent can choose to log the orders executed
85
+ self.executed_orders = []
86
+
87
+ # For special logging at the first moment the simulator kernel begins
88
+ # running (which is well after agent init), it is useful to keep a simple
89
+ # boolean flag.
90
+ self.first_wake = True
91
+
92
+ # Remember whether we have already passed the exchange close time, as far
93
+ # as we know.
94
+ self.mkt_closed = False
95
+
96
+ # This is probably a transient feature, but for now we permit the exchange
97
+ # to return the entire order book sometimes, for development and debugging.
98
+ # It is very expensive to pass this around, and breaks "simulation physics",
99
+ # but can really help understand why agents are making certain decisions.
100
+ # Subclasses should NOT rely on this feature as part of their strategy,
101
+ # as it will go away.
102
+ self.book = ''
103
+
104
+
105
+ # Simulation lifecycle messages.
106
+
107
+ def kernelStarting(self, startTime):
108
+ # self.kernel is set in Agent.kernelInitializing()
109
+ self.logEvent('STARTING_CASH', self.starting_cash, True)
110
+
111
+ # Find an exchange with which we can place orders. It is guaranteed
112
+ # to exist by now (if there is one).
113
+ self.exchangeID = self.kernel.findAgentByType(ExchangeAgent)
114
+
115
+ log_print ("Agent {} requested agent of type Agent.ExchangeAgent. Given Agent ID: {}",
116
+ self.id, self.exchangeID)
117
+
118
+ # Request a wake-up call as in the base Agent.
119
+ super().kernelStarting(startTime)
120
+
121
+
122
+ def kernelStopping (self):
123
+ # Always call parent method to be safe.
124
+ super().kernelStopping()
125
+
126
+ # Print end of day holdings.
127
+ self.logEvent('FINAL_HOLDINGS', self.fmtHoldings(self.holdings))
128
+ self.logEvent('FINAL_CASH_POSITION', self.holdings['CASH'], True)
129
+
130
+ # Mark to market.
131
+ cash = self.markToMarket(self.holdings)
132
+
133
+ self.logEvent('ENDING_CASH', cash, True)
134
+ print ("Final holdings for {}: {}. Marked to market: {}".format(self.name, self.fmtHoldings(self.holdings),
135
+ cash))
136
+
137
+ # Record final results for presentation/debugging. This is an ugly way
138
+ # to do this, but it is useful for now.
139
+ mytype = self.type
140
+ gain = cash - self.starting_cash
141
+
142
+ if mytype in self.kernel.meanResultByAgentType:
143
+ self.kernel.meanResultByAgentType[mytype] += gain
144
+ self.kernel.agentCountByType[mytype] += 1
145
+ else:
146
+ self.kernel.meanResultByAgentType[mytype] = gain
147
+ self.kernel.agentCountByType[mytype] = 1
148
+
149
+
150
+ # Simulation participation messages.
151
+
152
+ def wakeup (self, currentTime):
153
+ super().wakeup(currentTime)
154
+
155
+ if self.first_wake:
156
+ # Log initial holdings.
157
+ self.logEvent('HOLDINGS_UPDATED', self.holdings)
158
+ self.first_wake = False
159
+
160
+ if self.mkt_open is None:
161
+ # Ask our exchange when it opens and closes.
162
+ self.sendMessage(self.exchangeID, Message({ "msg" : "WHEN_MKT_OPEN", "sender": self.id }))
163
+ self.sendMessage(self.exchangeID, Message({ "msg" : "WHEN_MKT_CLOSE", "sender": self.id }))
164
+
165
+ # For the sake of subclasses, TradingAgent now returns a boolean
166
+ # indicating whether the agent is "ready to trade" -- has it received
167
+ # the market open and closed times, and is the market not already closed.
168
+ return (self.mkt_open and self.mkt_close) and not self.mkt_closed
169
+
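For a subclass, the boolean returned by wakeup above is the usual gate before doing any strategy work; a minimal sketch (ExampleStrategyAgent and self.symbol are hypothetical, not part of the upload):

class ExampleStrategyAgent(TradingAgent):
    def wakeup(self, currentTime):
        can_trade = super().wakeup(currentTime)
        if not can_trade:
            # Either market hours are still unknown or the market has closed.
            return
        self.getCurrentSpread(self.symbol)     # assumes self.symbol was set in __init__
        self.state = 'AWAITING_SPREAD'         # answered later via receiveMessage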
170
+ def requestDataSubscription(self, symbol, levels, freq):
171
+ self.sendMessage(recipientID = self.exchangeID,
172
+ msg = Message({"msg": "MARKET_DATA_SUBSCRIPTION_REQUEST",
173
+ "sender": self.id, "symbol": symbol, "levels": levels, "freq": freq}))
174
+
175
+ # Used by any Trading Agent subclass to cancel subscription to market data from the Exchange Agent
176
+ def cancelDataSubscription(self, symbol):
177
+ self.sendMessage(recipientID=self.exchangeID,
178
+ msg=Message({"msg": "MARKET_DATA_SUBSCRIPTION_CANCELLATION",
179
+ "sender": self.id, "symbol": symbol}))
180
+
181
+
182
+ def receiveMessage (self, currentTime, msg):
183
+ super().receiveMessage(currentTime, msg)
184
+
185
+ # Do we know the market hours?
186
+ had_mkt_hours = self.mkt_open is not None and self.mkt_close is not None
187
+
188
+ # Record market open or close times.
189
+ if msg.body['msg'] == "WHEN_MKT_OPEN":
190
+ self.mkt_open = msg.body['data']
191
+
192
+ log_print ("Recorded market open: {}", self.kernel.fmtTime(self.mkt_open))
193
+
194
+ elif msg.body['msg'] == "WHEN_MKT_CLOSE":
195
+ self.mkt_close = msg.body['data']
196
+
197
+ log_print ("Recorded market close: {}", self.kernel.fmtTime(self.mkt_close))
198
+
199
+ elif msg.body['msg'] == "ORDER_EXECUTED":
200
+ # Call the orderExecuted method, which subclasses should extend. This parent
201
+ # class could implement default "portfolio tracking" or "returns tracking"
202
+ # behavior.
203
+ order = msg.body['order']
204
+
205
+ self.orderExecuted(order)
206
+
207
+ elif msg.body['msg'] == "ORDER_ACCEPTED":
208
+ # Call the orderAccepted method, which subclasses should extend.
209
+ order = msg.body['order']
210
+
211
+ self.orderAccepted(order)
212
+
213
+ elif msg.body['msg'] == "ORDER_CANCELLED":
214
+ # Call the orderCancelled method, which subclasses should extend.
215
+ order = msg.body['order']
216
+
217
+ self.orderCancelled(order)
218
+
219
+ elif msg.body['msg'] == "MKT_CLOSED":
220
+ # We've tried to ask the exchange for something after it closed. Remember this
221
+ # so we stop asking for things that can't happen.
222
+
223
+ self.marketClosed()
224
+
225
+ elif msg.body['msg'] == 'QUERY_LAST_TRADE':
226
+ # Call the queryLastTrade method, which subclasses may extend.
227
+ # Also note if the market is closed.
228
+ if msg.body['mkt_closed']: self.mkt_closed = True
229
+
230
+ self.queryLastTrade(msg.body['symbol'], msg.body['data'])
231
+
232
+ elif msg.body['msg'] == 'QUERY_SPREAD':
233
+ # Call the querySpread method, which subclasses may extend.
234
+ # Also note if the market is closed.
235
+ if msg.body['mkt_closed']: self.mkt_closed = True
236
+
237
+ self.querySpread(msg.body['symbol'], msg.body['data'], msg.body['bids'], msg.body['asks'], msg.body['book'])
238
+
239
+ elif msg.body['msg'] == 'QUERY_ORDER_STREAM':
240
+ # Call the queryOrderStream method, which subclasses may extend.
241
+ # Also note if the market is closed.
242
+ if msg.body['mkt_closed']: self.mkt_closed = True
243
+
244
+ self.queryOrderStream(msg.body['symbol'], msg.body['orders'])
245
+
246
+ elif msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME':
247
+ if msg.body['mkt_closed']: self.mkt_closed = True
248
+ self.query_transacted_volume(msg.body['symbol'], msg.body['transacted_volume'])
249
+
250
+ elif msg.body['msg'] == 'MARKET_DATA':
251
+ self.handleMarketData(msg)
252
+
253
+ # Now do we know the market hours?
254
+ have_mkt_hours = self.mkt_open is not None and self.mkt_close is not None
255
+
256
+ # Once we know the market open and close times, schedule a wakeup call for market open.
257
+ # Only do this once, when we first have both items.
258
+ if have_mkt_hours and not had_mkt_hours:
259
+ # Agents are asked to generate a wake offset from the market open time. We structure
260
+ # this as a subclass request so each agent can supply an appropriate offset relative
261
+ # to its trading frequency.
262
+ ns_offset = self.getWakeFrequency()
263
+
264
+ self.setWakeup(currentTime + ns_offset)
265
+
266
+
267
+ # Used by any Trading Agent subclass to query the last trade price for a symbol.
268
+ # This activity is not logged.
269
+ def getLastTrade (self, symbol):
270
+ self.sendMessage(self.exchangeID, Message({ "msg" : "QUERY_LAST_TRADE", "sender": self.id,
271
+ "symbol" : symbol }))
272
+
273
+
274
+ # Used by any Trading Agent subclass to query the current spread for a symbol.
275
+ # This activity is not logged.
276
+ def getCurrentSpread (self, symbol, depth=1):
277
+ self.sendMessage(self.exchangeID, Message({ "msg" : "QUERY_SPREAD", "sender": self.id,
278
+ "symbol" : symbol, "depth" : depth }))
279
+
280
+
281
+ # Used by any Trading Agent subclass to query the recent order stream for a symbol.
282
+ def getOrderStream (self, symbol, length=1):
283
+ self.sendMessage(self.exchangeID, Message({ "msg" : "QUERY_ORDER_STREAM", "sender": self.id,
284
+ "symbol" : symbol, "length" : length }))
285
+
286
+ def get_transacted_volume(self, symbol, lookback_period='10min'):
287
+ """ Used by any trading agent subclass to query the total transacted volume in a given lookback period """
288
+ self.sendMessage(self.exchangeID, Message({ "msg": "QUERY_TRANSACTED_VOLUME", "sender": self.id,
289
+ "symbol": symbol, "lookback_period": lookback_period}))
290
+
291
+ # Used by any Trading Agent subclass to place a limit order. Parameters expect:
292
+ # string (valid symbol), int (positive share quantity), bool (True == BUY), int (price in cents).
293
+ # The call may optionally specify an order_id (otherwise global autoincrement is used) and
294
+ # whether cash or risk limits should be enforced or ignored for the order.
295
+ def placeLimitOrder (self, symbol, quantity, is_buy_order, limit_price, order_id=None, ignore_risk = True, tag = None):
296
+ order = LimitOrder(self.id, self.currentTime, symbol, quantity, is_buy_order, limit_price, order_id, tag)
297
+
298
+ if quantity > 0:
299
+ # Test if this order can be permitted given our at-risk limits.
300
+ new_holdings = self.holdings.copy()
301
+
302
+ q = order.quantity if order.is_buy_order else -order.quantity
303
+
304
+ if order.symbol in new_holdings: new_holdings[order.symbol] += q
305
+ else: new_holdings[order.symbol] = q
306
+
307
+ # If at_risk is lower, always allow. Otherwise, new_at_risk must be below starting cash.
308
+ if not ignore_risk:
309
+ # Compute before and after at-risk capital.
310
+ at_risk = self.markToMarket(self.holdings) - self.holdings['CASH']
311
+ new_at_risk = self.markToMarket(new_holdings) - new_holdings['CASH']
312
+
313
+ if (new_at_risk > at_risk) and (new_at_risk > self.starting_cash):
314
+ log_print ("TradingAgent ignored limit order due to at-risk constraints: {}\n{}", order, self.fmtHoldings(self.holdings))
315
+ return
316
+
317
+ # Copy the intended order for logging, so any changes made to it elsewhere
318
+ # don't retroactively alter our "as placed" log of the order. Eventually
319
+ # it might be nice to make the whole history of the order into transaction
320
+ # objects inside the order (we're halfway there) so there CAN be just a single
321
+ # object per order, that never alters its original state, and eliminate all these copies.
322
+ self.orders[order.order_id] = deepcopy(order)
323
+ self.sendMessage(self.exchangeID, Message({ "msg" : "LIMIT_ORDER", "sender": self.id,
324
+ "order" : order }))
325
+
326
+ # Log this activity.
327
+ if self.log_orders: self.logEvent('ORDER_SUBMITTED', order.to_dict())
328
+
329
+ else:
330
+ log_print ("TradingAgent ignored limit order of quantity zero: {}", order)
331
+
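A worked sketch of the at-risk test used above when ignore_risk is False; mark_to_market below is a simplified stand-in for the agent's markToMarket call, and all figures are invented (cents throughout):

starting_cash = 10_000_000
holdings = {'CASH': 4_000_000, 'IBM': 50}
last_trade = {'IBM': 100_000}                     # last known price per share

def mark_to_market(h):
    return h['CASH'] + sum(q * last_trade[s] for s, q in h.items() if s != 'CASH')

at_risk = mark_to_market(holdings) - holdings['CASH']               # 5,000,000
new_holdings = dict(holdings)
new_holdings['IBM'] += 60                                            # proposed buy of 60 shares
new_at_risk = mark_to_market(new_holdings) - new_holdings['CASH']    # 11,000,000
blocked = (new_at_risk > at_risk) and (new_at_risk > starting_cash)
print(at_risk, new_at_risk, blocked)                                  # 5000000 11000000 True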
332
+ def placeMarketOrder(self, symbol, quantity, is_buy_order, order_id=None, ignore_risk = True, tag=None):
333
+ """
334
+ Used by any Trading Agent subclass to place a market order. The market order is created as multiple limit orders
335
+ crossing the spread walking the book until all the quantities are matched.
336
+ :param symbol (str): name of the stock traded
337
+ :param quantity (int): order quantity
338
+ :param is_buy_order (bool): True if Buy else False
339
+ :param order_id: Order ID for market replay
340
+ :param ignore_risk (bool): Determines whether cash or risk limits should be enforced or ignored for the order
341
+ :return:
342
+ """
343
+ order = MarketOrder(self.id, self.currentTime, symbol, quantity, is_buy_order, order_id)
344
+ if quantity > 0:
345
+ # compute new holdings
346
+ new_holdings = self.holdings.copy()
347
+ q = order.quantity if order.is_buy_order else -order.quantity
348
+ if order.symbol in new_holdings: new_holdings[order.symbol] += q
349
+ else: new_holdings[order.symbol] = q
350
+
351
+ if not ignore_risk:
352
+ # Compute before and after at-risk capital.
353
+ at_risk = self.markToMarket(self.holdings) - self.holdings['CASH']
354
+ new_at_risk = self.markToMarket(new_holdings) - new_holdings['CASH']
355
+
356
+ if (new_at_risk > at_risk) and (new_at_risk > self.starting_cash):
357
+ log_print("TradingAgent ignored market order due to at-risk constraints: {}\n{}",
358
+ order, self.fmtHoldings(self.holdings))
359
+ return
360
+ self.orders[order.order_id] = deepcopy(order)
361
+ self.sendMessage(self.exchangeID, Message({"msg" : "MARKET_ORDER", "sender": self.id, "order": order}))
362
+ if self.log_orders: self.logEvent('ORDER_SUBMITTED', order.to_dict())
363
+ else:
364
+ log_print("TradingAgent ignored market order of quantity zero: {}", order)
365
+
366
+ def cancelOrder(self, order):
367
+ """Used by any Trading Agent subclass to cancel any order. The order must currently
368
+ appear in the agent's open orders list."""
369
+ if isinstance(order, LimitOrder):
370
+ self.sendMessage(self.exchangeID, Message({"msg": "CANCEL_ORDER", "sender": self.id,
371
+ "order": order}))
372
+ # Log this activity.
373
+ if self.log_orders: self.logEvent('CANCEL_SUBMITTED', order.to_dict())
374
+ else:
375
+ log_print("order {} of type, {} cannot be cancelled", order, type(order))
376
+
377
+ def modifyOrder (self, order, newOrder):
378
+ """ Used by any Trading Agent subclass to modify any existing limit order. The order must currently
379
+ appear in the agent's open orders list. Some additional tests might be useful here
380
+ to ensure the old and new orders are the same in some way."""
381
+ self.sendMessage(self.exchangeID, Message({ "msg" : "MODIFY_ORDER", "sender": self.id,
382
+ "order" : order, "new_order" : newOrder}))
383
+
384
+ # Log this activity.
385
+ if self.log_orders: self.logEvent('MODIFY_ORDER', order.to_dict())
386
+
387
+
388
+ # Handles ORDER_EXECUTED messages from an exchange agent. Subclasses may wish to extend,
389
+ # but should still call parent method for basic portfolio/returns tracking.
390
+ def orderExecuted (self, order):
391
+ log_print ("Received notification of execution for: {}", order)
392
+
393
+ # Log this activity.
394
+ if self.log_orders: self.logEvent('ORDER_EXECUTED', order.to_dict())
395
+
396
+ # At the very least, we must update CASH and holdings at execution time.
397
+ qty = order.quantity if order.is_buy_order else -1 * order.quantity
398
+ sym = order.symbol
399
+
400
+ if sym in self.holdings:
401
+ self.holdings[sym] += qty
402
+ else:
403
+ self.holdings[sym] = qty
404
+
405
+ if self.holdings[sym] == 0: del self.holdings[sym]
406
+
407
+ # As with everything else, CASH holdings are in CENTS.
408
+ self.holdings['CASH'] -= (qty * order.fill_price)
409
+
410
+ # If this original order is now fully executed, remove it from the open orders list.
411
+ # Otherwise, decrement by the quantity filled just now. It is _possible_ that due
412
+ # to timing issues, it might not be in the order list (e.g. we issued a cancellation
413
+ # but the order was executed first).
414
+ if order.order_id in self.orders:
415
+ o = self.orders[order.order_id]
416
+
417
+ if order.quantity >= o.quantity: del self.orders[order.order_id]
418
+ else: o.quantity -= order.quantity
419
+
420
+ else:
421
+ log_print ("Execution received for order not in orders list: {}", order)
422
+
423
+ log_print ("After execution, agent open orders: {}", self.orders)
424
+
425
+ # After execution, log holdings.
426
+ self.logEvent('HOLDINGS_UPDATED', self.holdings)
427
+
428
+
429
+ # Handles ORDER_ACCEPTED messages from an exchange agent. Subclasses may wish to extend.
430
+ def orderAccepted (self, order):
431
+ log_print ("Received notification of acceptance for: {}", order)
432
+
433
+ # Log this activity.
434
+ if self.log_orders: self.logEvent('ORDER_ACCEPTED', order.to_dict())
435
+
436
+
437
+ # We may later wish to add a status to the open orders so an agent can tell whether
438
+ # a given order has been accepted or not (instead of needing to override this method).
439
+
440
+
441
+ # Handles ORDER_CANCELLED messages from an exchange agent. Subclasses may wish to extend.
442
+ def orderCancelled (self, order):
443
+ log_print ("Received notification of cancellation for: {}", order)
444
+
445
+ # Log this activity.
446
+ if self.log_orders: self.logEvent('ORDER_CANCELLED', order.to_dict())
447
+
448
+ # Remove the cancelled order from the open orders list. We may of course wish to have
449
+ # additional logic here later, so agents can easily "look for" cancelled orders. Of
450
+ # course they can just override this method.
451
+ if order.order_id in self.orders:
452
+ del self.orders[order.order_id]
453
+ else:
454
+ log_print ("Cancellation received for order not in orders list: {}", order)
455
+
456
+
457
+ # Handles MKT_CLOSED messages from an exchange agent. Subclasses may wish to extend.
458
+ def marketClosed (self):
459
+ log_print ("Received notification of market closure.")
460
+
461
+ # Log this activity.
462
+ self.logEvent('MKT_CLOSED')
463
+
464
+ # Remember that this has happened.
465
+ self.mkt_closed = True
466
+
467
+
468
+ # Handles QUERY_LAST_TRADE messages from an exchange agent.
469
+ def queryLastTrade (self, symbol, price):
470
+ self.last_trade[symbol] = price
471
+
472
+ log_print ("Received last trade price of {} for {}.", self.last_trade[symbol], symbol)
473
+
474
+ if self.mkt_closed:
475
+ # Note this as the final price of the day.
476
+ self.daily_close_price[symbol] = self.last_trade[symbol]
477
+
478
+ log_print ("Received daily close price of {} for {}.", self.last_trade[symbol], symbol)
479
+
480
+
481
+ # Handles QUERY_SPREAD messages from an exchange agent.
482
+ def querySpread (self, symbol, price, bids, asks, book):
483
+ # The spread message now also includes last price for free.
484
+ self.queryLastTrade(symbol, price)
485
+
486
+ self.known_bids[symbol] = bids
487
+ self.known_asks[symbol] = asks
488
+
489
+ if bids: best_bid, best_bid_qty = (bids[0][0], bids[0][1])
490
+ else: best_bid, best_bid_qty = ('No bids', 0)
491
+
492
+ if asks: best_ask, best_ask_qty = (asks[0][0], asks[0][1])
493
+ else: best_ask, best_ask_qty = ('No asks', 0)
494
+
495
+ log_print ("Received spread of {} @ {} / {} @ {} for {}", best_bid_qty, best_bid, best_ask_qty, best_ask, symbol)
496
+
497
+ self.logEvent("BID_DEPTH", bids)
498
+ self.logEvent("ASK_DEPTH", asks)
499
+ self.logEvent("IMBALANCE", [sum([x[1] for x in bids]), sum([x[1] for x in asks])])
500
+
501
+ self.book = book
502
+
503
+ def handleMarketData(self, msg):
504
+ '''
505
+ Handles Market Data messages for agents using subscription mechanism
506
+ '''
507
+ symbol = msg.body['symbol']
508
+ self.known_asks[symbol] = msg.body['asks']
509
+ self.known_bids[symbol] = msg.body['bids']
510
+ self.last_trade[symbol] = msg.body['last_transaction']
511
+ self.exchange_ts[symbol] = msg.body['exchange_ts']
512
+
513
+
514
+ # Handles QUERY_ORDER_STREAM messages from an exchange agent.
515
+ def queryOrderStream (self, symbol, orders):
516
+ # It is up to the requesting agent to do something with the data, which is a list of dictionaries keyed
517
+ # by order id. The list index is 0 for orders since the most recent trade, 1 for orders that led up to
518
+ # the most recent trade, and so on. Agents are not given index 0 (orders more recent than the last
519
+ # trade).
520
+ self.stream_history[self.symbol] = orders
521
+
522
+ def query_transacted_volume(self, symbol, transacted_volume):
523
+ """ Handles the QUERY_TRANSACTED_VOLUME messages from the exchange agent"""
524
+ self.transacted_volume[symbol] = transacted_volume
525
+
526
+ # Utility functions that perform calculations from available knowledge, but implement no
527
+ # particular strategy.
528
+
529
+
530
+ # Extract the current known bid and asks. This does NOT request new information.
531
+ def getKnownBidAsk (self, symbol, best=True):
532
+ if best:
533
+ bid = self.known_bids[symbol][0][0] if self.known_bids[symbol] else None
534
+ ask = self.known_asks[symbol][0][0] if self.known_asks[symbol] else None
535
+ bid_vol = self.known_bids[symbol][0][1] if self.known_bids[symbol] else 0
536
+ ask_vol = self.known_asks[symbol][0][1] if self.known_asks[symbol] else 0
537
+ return bid, bid_vol, ask, ask_vol
538
+ else:
539
+ bids = self.known_bids[symbol] if self.known_bids[symbol] else None
540
+ asks = self.known_asks[symbol] if self.known_asks[symbol] else None
541
+ return bids, asks
542
+
543
+
544
+ # Extract the current bid and ask liquidity within a certain proportion of the
545
+ # inside bid and ask. (i.e. within=0.01 means to report total BID shares
546
+ # within 1% of the best bid price, and total ASK shares within 1% of the best
547
+ # ask price)
548
+ #
549
+ # Returns bid_liquidity, ask_liquidity. Note that this is from the order book
550
+ # perspective, not the agent perspective. (The agent would be selling into the
551
+ # bid liquidity, etc.)
552
+ def getKnownLiquidity (self, symbol, within=0.00):
553
+ bid_liq = self.getBookLiquidity(self.known_bids[symbol], within)
554
+ ask_liq = self.getBookLiquidity(self.known_asks[symbol], within)
555
+
556
+ log_print ("Bid/ask liq: {}, {}", bid_liq, ask_liq)
557
+ log_print ("Known bids: {}", self.known_bids[self.symbol])
558
+ log_print ("Known asks: {}", self.known_asks[self.symbol])
559
+
560
+ return bid_liq, ask_liq
561
+
562
+
563
+ # Helper function for the above. Checks one side of the known order book.
564
+ def getBookLiquidity (self, book, within):
565
+ liq = 0
566
+ for i, (price, shares) in enumerate(book):
567
+ if i == 0:
568
+ best = price
569
+
570
+ # Is this price within "within" proportion of the best price?
571
+ if abs(best - price) <= int(round(best * within)):
572
+ log_print ("Within {} of {}: {} with {} shares", within, best, price, shares)
573
+ liq += shares
574
+
575
+ return liq
576
+
577
+
578
+ # Marks holdings to market (including cash).
579
+ def markToMarket (self, holdings, use_midpoint=False):
580
+ cash = holdings['CASH']
581
+
582
+ cash += self.basket_size * self.nav_diff
583
+
584
+ for symbol, shares in holdings.items():
585
+ if symbol == 'CASH': continue
586
+ if self.last_trade[symbol] is None:
587
+ self.logEvent('MARKED_TO_MARKET', cash)
588
+ return cash
589
+ if use_midpoint:
590
+ bid, ask, midpoint = self.getKnownBidAskMidpoint(symbol)
591
+ if bid is None or ask is None or midpoint is None:
592
+ value = self.last_trade[symbol] * shares
593
+ else:
594
+ value = midpoint * shares
595
+ else:
596
+ value = self.last_trade[symbol] * shares
597
+
598
+ cash += value
599
+
600
+ self.logEvent('MARK_TO_MARKET', "{} {} @ {} == {}".format(shares, symbol,
601
+ self.last_trade[symbol], value))
602
+
603
+ self.logEvent('MARKED_TO_MARKET', cash)
604
+
605
+ return cash
606
+
607
+
608
+ # Gets holdings. Returns zero for any symbol not held.
609
+ def getHoldings (self, symbol):
610
+ if symbol in self.holdings: return self.holdings[symbol]
611
+ return 0
612
+
613
+
614
+ # Get the known best bid, ask, and bid/ask midpoint from cached data. No volume.
615
+ def getKnownBidAskMidpoint (self, symbol) :
616
+ bid = self.known_bids[symbol][0][0] if self.known_bids[symbol] else None
617
+ ask = self.known_asks[symbol][0][0] if self.known_asks[symbol] else None
618
+
619
+ midpoint = int(round((bid + ask) / 2)) if bid is not None and ask is not None else None
620
+
621
+ return bid, ask, midpoint
622
+
623
+ def get_average_transaction_price(self):
624
+ """ Calculates the average price paid (weighted by the order size) """
625
+ return round(sum(executed_order.quantity * executed_order.fill_price for executed_order in self.executed_orders) / \
626
+ sum(executed_order.quantity for executed_order in self.executed_orders), 2)
627
+
628
+ # Prints holdings. Standard dictionary->string representation is almost fine, but it is
629
+ # less confusing to see the CASH holdings in dollars and cents, instead of just integer
630
+ # cents. We could change to a Holdings object that knows to print CASH "special".
631
+ def fmtHoldings (self, holdings):
632
+ h = ''
633
+ for k,v in sorted(holdings.items()):
634
+ if k == 'CASH': continue
635
+ h += "{}: {}, ".format(k,v)
636
+
637
+ # There must always be a CASH entry.
638
+ h += "{}: {}".format('CASH', holdings['CASH'])
639
+ h = '{ ' + h + ' }'
640
+ return h
641
+
642
+
643
+ pass
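
A minimal standalone sketch (not part of the uploaded TradingAgent.py) of the at-risk rule that placeLimitOrder and placeMarketOrder apply above when ignore_risk is False. The holdings, prices, and helper names below are hypothetical and simplified; the real markToMarket also handles missing last-trade prices and ETF NAV adjustments.

# Simplified illustration of the at-risk check above; all values are hypothetical.
def mark_to_market(holdings, last_trade):
    """CASH plus shares valued at the last trade price, all in cents (simplified)."""
    cash = holdings['CASH']
    for symbol, shares in holdings.items():
        if symbol != 'CASH':
            cash += shares * last_trade[symbol]
    return cash

def order_allowed(holdings, last_trade, starting_cash, symbol, quantity, is_buy_order):
    new_holdings = dict(holdings)
    q = quantity if is_buy_order else -quantity
    new_holdings[symbol] = new_holdings.get(symbol, 0) + q
    at_risk = mark_to_market(holdings, last_trade) - holdings['CASH']
    new_at_risk = mark_to_market(new_holdings, last_trade) - new_holdings['CASH']
    # Permit the order unless it both increases risk and pushes risk above starting cash.
    return not (new_at_risk > at_risk and new_at_risk > starting_cash)

holdings = {'CASH': 10_000_000, 'IBM': 50}   # $100,000 cash, 50 shares (hypothetical)
last_trade = {'IBM': 100_000}                # $1,000.00 per share, in cents
print(order_allowed(holdings, last_trade, 10_000_000, 'IBM', 100, True))   # False: at-risk grows to $150,000
print(order_allowed(holdings, last_trade, 10_000_000, 'IBM', 40, False))   # True: at-risk shrinks
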
ABIDES/agent/ValueAgent.py ADDED
@@ -0,0 +1,260 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ from util.util import log_print
3
+
4
+ from math import sqrt
5
+ import numpy as np
6
+ import pandas as pd
7
+
8
+
9
+ class ValueAgent(TradingAgent):
10
+
11
+ def __init__(self, id, name, type, symbol='IBM', starting_cash=100000, sigma_n=10000,
12
+ r_bar=100000, kappa=0.05, sigma_s=100000,
13
+ lambda_a=0.005, log_orders=False, log_to_file=True, random_state=None, wakeup_time=pd.Timedelta('0s')):
14
+
15
+ # Base class init.
16
+ super().__init__(id, name, type, starting_cash=starting_cash,
17
+ log_orders=log_orders, log_to_file=log_to_file, random_state=random_state)
18
+
19
+ # Store important parameters particular to the ZI agent.
20
+ self.symbol = symbol # symbol to trade
21
+ self.sigma_n = sigma_n # observation noise variance
22
+ self.r_bar = r_bar # true mean fundamental value
23
+ self.kappa = kappa # mean reversion parameter
24
+ self.sigma_s = sigma_s # shock variance
25
+ self.lambda_a = lambda_a # mean arrival rate of ZI agents
26
+ self.wakeup_time = wakeup_time
27
+ # The agent uses this to track whether it has begun its strategy or is still
28
+ # handling pre-market tasks.
29
+ self.trading = False
30
+
31
+ # The agent begins in its "complete" state, not waiting for
32
+ # any special event or condition.
33
+ self.state = 'AWAITING_WAKEUP'
34
+
35
+ # The agent maintains two priors: r_t and sigma_t (value and error estimates).
36
+ self.r_t = r_bar
37
+ self.sigma_t = 0
38
+
39
+ # The agent must track its previous wake time, so it knows how many time
40
+ # units have passed.
41
+ self.prev_wake_time = None
42
+
43
+ self.percent_aggr = 0.1  # fraction of wakeups on which the agent crosses the spread (trades aggressively)
44
+ self.size = np.random.randint(20, 50)  # order size the agent will place
45
+ self.depth_spread = 2
46
+
47
+ def kernelStarting(self, startTime):
48
+ # self.kernel is set in Agent.kernelInitializing()
49
+ # self.exchangeID is set in TradingAgent.kernelStarting()
50
+
51
+ super().kernelStarting(startTime)
52
+
53
+ self.oracle = self.kernel.oracle
54
+
55
+ def kernelStopping(self):
56
+ # Always call parent method to be safe.
57
+ super().kernelStopping()
58
+ return  # early return: the end-of-day valuation code below is currently skipped
59
+ # Print end of day valuation.
60
+ H = int(round(self.getHoldings(self.symbol), -2) / 100)
61
+ # May request real fundamental value from oracle as part of final cleanup/stats.
62
+ #marked to fundamental
63
+ rT = self.oracle.observePrice(self.symbol, self.currentTime, sigma_n=0, random_state=self.random_state)
64
+
65
+ # final (real) fundamental value times shares held.
66
+ surplus = rT * H
67
+
68
+ log_print("surplus after holdings: {}", surplus)
69
+
70
+ # Add ending cash value and subtract starting cash value.
71
+ surplus += self.holdings['CASH'] - self.starting_cash
72
+ surplus = float( surplus )/self.starting_cash
73
+
74
+ self.logEvent('FINAL_VALUATION', surplus, True)
75
+
76
+ log_print(
77
+ "{} final report. Holdings {}, end cash {}, start cash {}, final fundamental {}, surplus {}",
78
+ self.name, H, self.holdings['CASH'], self.starting_cash, rT, surplus)
79
+
80
+ #print("Final surplus", self.name, surplus)
81
+
82
+ def wakeup(self, currentTime):
83
+ # Parent class handles discovery of exchange times and market_open wakeup call.
84
+ super().wakeup(currentTime)
85
+
86
+ self.state = 'INACTIVE'
87
+
88
+ if not self.mkt_open or not self.mkt_close:
89
+ # TradingAgent handles discovery of exchange times.
90
+ return
91
+ else:
92
+ if not self.trading:
93
+ self.trading = True
94
+
95
+ # Time to start trading!
96
+ log_print("{} is ready to start trading now.", self.name)
97
+
98
+ # Steady state wakeup behavior starts here.
99
+
100
+ # If we've been told the market has closed for the day, we will only request
101
+ # final price information, then stop.
102
+ if self.mkt_closed and (self.symbol in self.daily_close_price):
103
+ # Market is closed and we already got the daily close price.
104
+ return
105
+
106
+ delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)
107
+ self.setWakeup(currentTime + pd.Timedelta('{}ns'.format(int(round(delta_time)))))
108
+
109
+ if currentTime <= self.wakeup_time:
110
+ # The agent's designated start time has not arrived yet; do nothing on this wakeup.
111
+ return
112
+
113
+ if self.mkt_closed and (not self.symbol in self.daily_close_price):
114
+ self.getCurrentSpread(self.symbol)
115
+ self.state = 'AWAITING_SPREAD'
116
+ return
117
+
118
+ self.cancelOrders()
119
+
120
+ if type(self) == ValueAgent:
121
+ self.getCurrentSpread(self.symbol)
122
+ self.state = 'AWAITING_SPREAD'
123
+ else:
124
+ self.state = 'ACTIVE'
125
+
126
+ def updateEstimates(self):
127
+ # Called by a background agent that wishes to obtain a new fundamental observation,
128
+ # update its internal estimation parameters, and compute a new total valuation for the
129
+ # action it is considering.
130
+
131
+ # The agent obtains a new noisy observation of the current fundamental value
132
+ # and uses this to update its internal estimates in a Bayesian manner.
133
+ obs_t = self.oracle.observePrice(self.symbol, self.currentTime, sigma_n=self.sigma_n,
134
+ random_state=self.random_state)
135
+
136
+ log_print("{} observed {} at {}", self.name, obs_t, self.currentTime)
137
+
138
+ # Update internal estimates of the current fundamental value and our error of same.
139
+
140
+ # If this is our first estimate, treat the previous wake time as "market open".
141
+ if self.prev_wake_time is None: self.prev_wake_time = self.mkt_open
142
+
143
+ # First, obtain an intermediate estimate of the fundamental value by advancing
144
+ # time from the previous wake time to the current time, performing mean
145
+ # reversion at each time step.
146
+
147
+ # delta must be integer time steps since last wake
148
+ delta = (self.currentTime - self.prev_wake_time) / np.timedelta64(1, 'ns')
149
+
150
+ # Update r estimate for time advancement.
151
+ r_tprime = (1 - (1 - self.kappa) ** delta) * self.r_bar
152
+ r_tprime += ((1 - self.kappa) ** delta) * self.r_t
153
+
154
+ # Update sigma estimate for time advancement.
155
+ sigma_tprime = ((1 - self.kappa) ** (2 * delta)) * self.sigma_t
156
+ sigma_tprime += ((1 - (1 - self.kappa) ** (2 * delta)) / (1 - (1 - self.kappa) ** 2)) * self.sigma_s
157
+
158
+ # Apply the new observation, with "confidence" in the observation inversely proportional
159
+ # to the observation noise, and "confidence" in the previous estimate inversely proportional
160
+ # to the shock variance.
161
+ self.r_t = (self.sigma_n / (self.sigma_n + sigma_tprime)) * r_tprime
162
+ self.r_t += (sigma_tprime / (self.sigma_n + sigma_tprime)) * obs_t
163
+
164
+ self.sigma_t = (self.sigma_n * self.sigma_t) / (self.sigma_n + self.sigma_t)
165
+
166
+ # Now having a best estimate of the fundamental at time t, we can make our best estimate
167
+ # of the final fundamental (for time T) as of current time t. Delta is now the number
168
+ # of time steps remaining until the simulated exchange closes.
169
+ delta = max(0, (self.mkt_close - self.currentTime) / np.timedelta64(1, 'ns'))
170
+
171
+ # IDEA: instead of letting agent "imagine time forward" to the end of the day,
172
+ # impose a maximum forward delta, like ten minutes or so. This could make
173
+ # them think more like traders and less like long-term investors. Add
174
+ # this line of code (keeping the max() line above) to try it.
175
+ # delta = min(delta, 1000000000 * 60 * 10)
176
+
177
+ r_T = (1 - (1 - self.kappa) ** delta) * self.r_bar
178
+ r_T += ((1 - self.kappa) ** delta) * self.r_t
179
+
180
+ # Our final fundamental estimate should be quantized to whole units of value.
181
+ r_T = int(round(r_T))
182
+
183
+ # Finally (for the final fundamental estimation section) remember the current
184
+ # time as the previous wake time.
185
+ self.prev_wake_time = self.currentTime
186
+
187
+ log_print("{} estimates r_T = {} as of {}", self.name, r_T, self.currentTime)
188
+
189
+ return r_T
190
+
191
+ def placeOrder(self):
192
+ #estimate final value of the fundamental price
193
+ #used for surplus calculation
194
+ r_T = self.updateEstimates()
195
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
196
+
197
+ if bid and ask:
198
+ mid = int((ask+bid)/2)
199
+ spread = abs(ask - bid)
200
+
201
+ if np.random.rand() < self.percent_aggr:
202
+ adjust_int = 0
203
+ else:
204
+ adjust_int = np.random.randint( 0, self.depth_spread*spread )
205
+ #adjustment to the limit price, allowed to post inside the spread
206
+ #or deeper in the book as a passive order to maximize surplus
207
+
208
+ if r_T < mid:
209
+ #fundamental belief that price will go down, place a sell order
210
+ buy = False
211
+ p = bid + adjust_int  # a marketable sell when adjust_int == 0, otherwise a limit order inside the spread or deeper in the book
212
+ elif r_T >= mid:
213
+ #fundamental belief that price will go up, buy order
214
+ buy = True
215
+ p = ask - adjust_int  # a marketable buy when adjust_int == 0, otherwise a limit order inside the spread or deeper in the book
216
+ else:
217
+ # initialize randomly
218
+ buy = np.random.randint(0, 1 + 1)
219
+ p = r_T
220
+
221
+ # Place the order
222
+ self.placeLimitOrder(self.symbol, self.size, buy, p)
223
+
224
+ def receiveMessage(self, currentTime, msg):
225
+ # Parent class schedules market open wakeup call once market open/close times are known.
226
+ super().receiveMessage(currentTime, msg)
227
+
228
+ # We have been awakened by something other than our scheduled wakeup.
229
+ # If our internal state indicates we were waiting for a particular event,
230
+ # check if we can transition to a new state.
231
+
232
+ if self.state == 'AWAITING_SPREAD':
233
+ # We were waiting to receive the current spread/book. Since we don't currently
234
+ # track timestamps on retained information, we rely on actually seeing a
235
+ # QUERY_SPREAD response message.
236
+
237
+ if msg.body['msg'] == 'QUERY_SPREAD':
238
+ # This is what we were waiting for.
239
+
240
+ # But if the market is now closed, don't advance to placing orders.
241
+ if self.mkt_closed: return
242
+
243
+ # We now have the information needed to place a limit order based on our
244
+ # fundamental value estimate.
245
+ self.placeOrder()
246
+ self.state = 'AWAITING_WAKEUP'
247
+
248
+ # Cancel all open orders.
249
+ # Return value: did we issue any cancellation requests?
250
+
251
+ def cancelOrders(self):
252
+ if not self.orders: return False
253
+
254
+ for id, order in self.orders.items():
255
+ self.cancelOrder(order)
256
+
257
+ return True
258
+
259
+ def getWakeFrequency(self):
260
+ return pd.Timedelta(self.random_state.randint(low=0, high=100), unit='ns')
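
The Bayesian update inside updateEstimates above can be read more easily as a standalone function. This sketch repeats the same formulas without the agent plumbing; the parameter values in the example call are hypothetical, and time is expressed in whatever unit kappa is defined over (nanoseconds in the simulation).

def value_agent_estimate(r_t, sigma_t, obs_t, steps_since_wake, steps_to_close,
                         r_bar, kappa, sigma_s, sigma_n):
    # Mean-revert the previous estimate forward to the current time.
    decay = (1 - kappa) ** steps_since_wake
    r_prime = (1 - decay) * r_bar + decay * r_t
    sigma_prime = (decay ** 2) * sigma_t + \
        ((1 - decay ** 2) / (1 - (1 - kappa) ** 2)) * sigma_s

    # Blend the noisy observation with the prior, weighted by the variances.
    w = sigma_prime / (sigma_n + sigma_prime)
    r_t = (1 - w) * r_prime + w * obs_t
    sigma_t = (sigma_n * sigma_t) / (sigma_n + sigma_t)

    # Project the estimate forward to the market close.
    decay_T = (1 - kappa) ** steps_to_close
    r_T = (1 - decay_T) * r_bar + decay_T * r_t
    return int(round(r_T)), r_t, sigma_t

# Hypothetical call: prior at the true mean, one noisy observation slightly above it.
print(value_agent_estimate(r_t=100000, sigma_t=0, obs_t=100500,
                           steps_since_wake=1e9, steps_to_close=1e9,
                           r_bar=100000, kappa=0.05, sigma_s=100000, sigma_n=10000))
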
ABIDES/agent/WorldAgent.py ADDED
@@ -0,0 +1,871 @@
1
+ import os
2
+ from copy import deepcopy
3
+
4
+ from scipy import stats
5
+ import torch
6
+ from agent.Agent import Agent
7
+ from agent.ExchangeAgent import ExchangeAgent
8
+ from agent.TradingAgent import TradingAgent
9
+ from torch import nn
10
+ from torch.nn import functional as F
11
+ from util.order.LimitOrder import LimitOrder
12
+ from util.util import log_print
13
+ from message.Message import Message
14
+
15
+ from math import sqrt
16
+ import numpy as np
17
+ import pandas as pd
18
+ import datetime
19
+
20
+ from ABIDES.util.order.MarketOrder import MarketOrder
21
+ from utils.utils_data import reset_indexes, normalize_messages, one_hot_encoding_type, to_sparse_representation, tanh_encoding_type
22
+ import constants as cst
23
+
24
+ class WorldAgent(Agent):
25
+ # The objective of this world agent is to replay the historical market for an initial window (15 minutes by default) and then
26
+ # generate new orders with the help of a diffusion model for the rest of the day;
27
+ # the diffusion model takes as input the last orders or the last snapshot of the order book
28
+ # and generates new orders for the next time step.
29
+
30
+
31
+ def __init__(self, id, name, type, symbol, date, date_trading_days, model, data_dir, log_orders=True, random_state=None, normalization_terms=None,
32
+ using_diffusion=False, chosen_model=None, seq_len=256, cond_seq_size=255, cond_type='full', size_type_emb=3, gen_seq_size=1):
33
+
34
+ super().__init__(id, name, type, random_state=random_state, log_to_file=log_orders)
35
+ self.count_neg_size = 0
36
+ self.next_historical_orders_index = 0
37
+ self.lob_snapshots = []
38
+ self.sparse_lob_snapshots = []
39
+ self.symbol = symbol
40
+ self.date = date
41
+ self.gen_seq_size = gen_seq_size
42
+ self.size_type_emb = size_type_emb
43
+ self.log_orders = log_orders
44
+ self.executed_trades = dict()
45
+ self.state = 'AWAITING_WAKEUP'
46
+ self.model = model
47
+ self.historical_orders, self.historical_lob = self._load_orders_lob(self.symbol, data_dir, self.date, date_trading_days)
48
+ self.historical_order_ids = self.historical_orders[:, 2]
49
+ self.unused_order_ids = np.setdiff1d(np.arange(0, 99999999), self.historical_order_ids)
50
+ self.next_orders = None
51
+ self.subscription_requested = False
52
+ self.date_trading_days = date_trading_days
53
+ self.first_wakeup = True
54
+ self.active_limit_orders = {}
55
+ self.placed_orders = []
56
+ self.count_diff_placed_orders = 0
57
+ self.count_modify = 0
58
+ self.cond_type = cond_type
59
+ self.cond_seq_size = cond_seq_size
60
+ self.seq_len = seq_len
61
+ self.first_generation = True
62
+ self.normalization_terms = normalization_terms
63
+ self.ignored_cancel = 0
64
+ self.generated_orders_out_of_depth = 0
65
+ self.generated_cancel_orders_empty_depth = 0
66
+ self.diff_limit_order_placed = 0
67
+ self.diff_market_order_placed = 0
68
+ self.diff_cancel_order_placed = 0
69
+ self.depth_rounding = 0
70
+ self.last_bid_price = 0
71
+ self.last_ask_price = 0
72
+ self.using_diffusion = using_diffusion
73
+ self.chosen_model = chosen_model
74
+ if using_diffusion:
75
+ self.starting_time_diffusion = '15min'
76
+ #self.model.type_embedder.weight.data = torch.tensor([[ 0.4438, -0.2984, 0.2888], [ 0.8249, 0.5847, 0.1448], [ 1.5600, -1.2847, 1.0294]], device=cst.DEVICE, dtype=torch.float32)
77
+ #print(self.model.type_embedder.weight.data)
78
+ #self.model.type_embedder.weight.data = torch.tensor([[ 0.1438, -0.4984, 0.5888], [ 0.8249, 0.3847, 0.0448], [ 1.6600, -1.9847, 1.7294]], device=cst.DEVICE, dtype=torch.float32)
79
+ #exit()
80
+ else:
81
+ self.starting_time_diffusion = '157780min'
82
+
83
+ def kernelStarting(self, startTime):
84
+ # self.kernel is set in Agent.kernelInitializing()
85
+ super().kernelStarting(startTime)
86
+ self.oracle = self.kernel.oracle
87
+ self.exchangeID = self.kernel.findAgentByType(ExchangeAgent)
88
+ self.mkt_open = startTime
89
+
90
+ def kernelTerminating(self):
91
+ # self.kernel is set in Agent.kernelInitializing()
92
+ super().kernelTerminating()
93
+ print("World Agent terminating.")
94
+ print("World Agent ignored {} cancel orders".format(self.ignored_cancel))
95
+
96
+ def requestDataSubscription(self, symbol, levels):
97
+ self.sendMessage(recipientID=self.exchangeID,
98
+ msg=Message({"msg": "MARKET_DATA_SUBSCRIPTION_REQUEST",
99
+ "sender": self.id,
100
+ "symbol": symbol,
101
+ "levels": levels,
102
+ "freq": 0}) # if freq is 0 all the LOB updates will be provided
103
+ )
104
+
105
+ def cancelDataSubscription(self):
106
+ self.sendMessage(recipientID=self.exchangeID,
107
+ msg=Message({"msg": "CANCEL_MARKET_DATA_SUBSCRIPTION",
108
+ "sender": self.id,
109
+ "symbol": self.symbol})
110
+ )
111
+
112
+ def wakeup(self, currentTime):
113
+ super().wakeup(currentTime)
114
+ # print a status message every 5 minutes
115
+
116
+ if currentTime.minute % 5 == 0 and currentTime.second == 00:
117
+ print("Current time: {}".format(currentTime))
118
+ #print("Number of generated orders out of depth: {}".format(self.generated_orders_out_of_depth))
119
+ #print("Number of generated cancel orders unmatched: {}".format(self.generated_cancel_orders_empty_depth))
120
+ #print("Number of generated cancel orders matched: {}".format(self.diff_cancel_order_placed))
121
+ #print("Number of negative size: {}".format(self.count_neg_size))
122
+ #print("Number of generated placed orders: {}".format(self.count_diff_placed_orders))
123
+ #print("Of which {} market order and {} limit order".format(self.diff_market_order_placed, self.diff_limit_order_placed))
124
+ now = datetime.datetime.now()
125
+ current_time = now.strftime("%H:%M:%S")
126
+ #print("Current Time =", current_time)
127
+
128
+ if self.first_wakeup:
129
+ self.state = 'PRE_GENERATING'
130
+ offset = datetime.timedelta(seconds=self.historical_orders[0, 0])
131
+ time_next_wakeup = currentTime + offset
132
+ self.setWakeup(time_next_wakeup)
133
+ self.requestDataSubscription(self.symbol, levels=10)
134
+ self.first_wakeup = False
135
+
136
+ # during the initial replay window (e.g. 09:30-09:45 with the 15-minute default) we replay the historical orders
137
+ elif self.mkt_open <= currentTime <= self.mkt_open + pd.Timedelta(self.starting_time_diffusion):
138
+ next_order = self.historical_orders[self.next_historical_orders_index]
139
+ self.last_offset_time = next_order[0]
140
+ self.placeOrder(currentTime, next_order)
141
+ self.next_historical_orders_index += 1
142
+ if self.next_historical_orders_index < len(self.historical_orders):
143
+ offset = datetime.timedelta(seconds=self.historical_orders[self.next_historical_orders_index, 0])
144
+ self.setWakeup(currentTime + offset + datetime.timedelta(microseconds=1))
145
+ else:
146
+ return
147
+
148
+ elif currentTime > self.mkt_open + pd.Timedelta(self.starting_time_diffusion) and not self.using_diffusion:
149
+ print("cancelling data subscription")
150
+ self.cancelDataSubscription()
151
+
152
+ elif currentTime > self.mkt_open + pd.Timedelta(self.starting_time_diffusion) and self.using_diffusion:
153
+ self.state = 'GENERATING'
154
+ # we generate the first order; the others will be generated every time we receive an update of the LOB
155
+ if self.first_generation:
156
+ if self.chosen_model == 'CGAN':
157
+ # we need to fit the temporal distance with a gamma distribution
158
+ temporal_distance = self.historical_orders[:, 0]
159
+ # remove all the zeros from the temporal distance
160
+ temporal_distance = temporal_distance[temporal_distance > 0]
161
+ self.shape_temp_distance, self.loc_temp_distance, self.scale_temp_distance = stats.gamma.fit(temporal_distance)
162
+
163
+ generated_orders = self._generate_order(currentTime)
164
+ self.next_orders = generated_orders
165
+ self.first_generation = False
166
+ offset_time = datetime.timedelta(seconds=generated_orders[0][0])
167
+ self.setWakeup(currentTime + offset_time + datetime.timedelta(microseconds=1))
168
+ return
169
+
170
+ # check whether the kernel message queue still holds messages addressed to this agent
171
+ wait = False
172
+ for timestamp, msg in self.kernel.messages.queue:
173
+ if msg[0] == self.id:
174
+ wait = True
175
+
176
+ # first place the previously generated order, then generate the next one
177
+ if self.next_orders is not None and not wait:
178
+ self.placeOrder(currentTime, self.next_orders[0])
179
+ self.next_orders = self.next_orders[1:]
180
+
181
+ elif wait:
182
+ self.setWakeup(currentTime + datetime.timedelta(microseconds=1))
183
+ return
184
+
185
+ if len(self.next_orders) == 0:
186
+ self.next_orders = self._generate_order(currentTime)
187
+ offset_time = datetime.timedelta(seconds=self.next_orders[0][0])
188
+ self.setWakeup(currentTime + offset_time + datetime.timedelta(microseconds=1))
189
+ return
190
+
191
+
192
+ def receiveMessage(self, currentTime, msg):
193
+ if currentTime > self.mkt_open + pd.Timedelta(self.starting_time_diffusion) and not self.using_diffusion:
194
+ return
195
+
196
+ super().receiveMessage(currentTime, msg)
197
+ if msg.body['msg'] == 'MARKET_DATA':
198
+ self._update_lob_snapshot(msg)
199
+ self._update_active_limit_orders()
200
+
201
+ # if one of our market orders is executed, we receive the notification for the resting limit order it filled, so a buy produces a sell-side message and vice versa
202
+ elif msg.body['msg'] == 'ORDER_EXECUTED':
203
+ direction = 1 if msg.body['order'].is_buy_order else -1
204
+ self.placed_orders.append(np.array([self.last_offset_time, 4, msg.body['order'].order_id, msg.body['order'].quantity, msg.body['order'].limit_price, direction]))
205
+ self.logEvent('ORDER_EXECUTED', msg.body['order'].to_dict())
206
+
207
+ elif msg.body['msg'] == 'ORDER_ACCEPTED':
208
+ direction = 1 if msg.body['order'].is_buy_order else -1
209
+ self.placed_orders.append(np.array([self.last_offset_time, 1, msg.body['order'].order_id, msg.body['order'].quantity, msg.body['order'].limit_price, direction]))
210
+ self.logEvent('ORDER_ACCEPTED', msg.body['order'].to_dict())
211
+
212
+ elif msg.body['msg'] == 'ORDER_CANCELLED':
213
+ direction = 1 if msg.body['order'].is_buy_order else -1
214
+ self.placed_orders.append(np.array([self.last_offset_time, 3, msg.body['order'].order_id, msg.body['order'].quantity, msg.body['order'].limit_price, direction]))
215
+ self.logEvent('ORDER_CANCELLED', msg.body['order'].to_dict())
216
+
217
+ def placeOrder(self, currentTime, order):
218
+ order_id = order[2]
219
+ type = order[1]
220
+ quantity = order[3]
221
+ price = int(order[4])
222
+ direction = order[5]
223
+ if quantity > 0:
224
+ direction = False if direction == -1 else True
225
+ if type == 1:
226
+ self.placeLimitOrder(self.symbol, quantity, is_buy_order=direction, limit_price=price, order_id=order_id)
227
+
228
+ elif type == 2 or type == 3:
229
+ if order_id in self.active_limit_orders:
230
+ old_order = self.active_limit_orders[order_id]
231
+ del self.active_limit_orders[order_id]
232
+ else:
233
+ self.ignored_cancel += 1
234
+ return
235
+ # raise Exception("trying to cancel an order that doesn't exist")
236
+ if type == 3:
237
+ # total deletion of a limit order
238
+ self.cancelOrder(old_order)
239
+ elif type == 2:
240
+ # partial deletion of a limit order
241
+ new_order = LimitOrder(
242
+ agent_id=self.id,
243
+ time_placed=self.currentTime,
244
+ symbol=self.symbol,
245
+ quantity=old_order.quantity-quantity,
246
+ is_buy_order=old_order.is_buy_order,
247
+ limit_price=old_order.limit_price,
248
+ order_id=old_order.order_id,
249
+ tag=None
250
+ )
251
+ self.modifyOrder(old_order, new_order)
252
+ self.placed_orders.append(np.array([order[0], 2, new_order.order_id, quantity, new_order.limit_price, direction]))
253
+
254
+ elif type == 4:
255
+ # type == 4 is an execution order: for the execution of a sell limit order
256
+ # we place a buy market order of the same quantity, and vice versa
257
+ is_buy_order = False if direction else True
258
+ # the current order_id is the order_id of the sell (buy) limit order filled,
259
+ # so we need to assign to the market order another order_id
260
+ order_id = self.unused_order_ids[0]
261
+ self.unused_order_ids = self.unused_order_ids[1:]
262
+ self.placeMarketOrder(self.symbol, quantity, is_buy_order=is_buy_order, order_id=order_id)
263
+ else:
264
+ log_print("Agent ignored order of quantity zero: {}", order)
265
+
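
For reference, a compact summary of the LOBSTER-style event types that placeOrder above dispatches on, written as a small standalone snippet; the dictionary is illustrative only and is not part of the agent or of any ABIDES API.

# Summary of how placeOrder above handles each LOBSTER event type (illustrative only).
LOBSTER_EVENT_ACTIONS = {
    1: "new limit order: submit it with the historical order_id",
    2: "partial cancellation: modify the resting order down to the remaining quantity",
    3: "total deletion: cancel the resting order",
    4: "execution of a resting order: submit a market order on the opposite side with a fresh order_id",
}

for event_type, action in LOBSTER_EVENT_ACTIONS.items():
    print(event_type, "->", action)
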
266
+ def _generate_order(self, currentTime):
267
+ generated = None
268
+ post_processed_orders = []
269
+ while len(post_processed_orders) == 0:
270
+ if self.chosen_model == 'TRADES':
271
+ if self.cond_type == 'full':
272
+ orders = np.array(self.placed_orders[-self.cond_seq_size:])
273
+ cond_orders = self._preprocess_orders_for_diff_cond(orders, np.array(self.lob_snapshots[-self.cond_seq_size -1:]))
274
+ lob_snapshots = np.array(self.lob_snapshots[-self.cond_seq_size-1:])
275
+ cond_lob = torch.from_numpy(self._z_score_orderbook(lob_snapshots)).to(cst.DEVICE, torch.float32)
276
+ cond_lob = cond_lob.unsqueeze(0)
277
+ elif self.cond_type == 'only_event':
278
+ orders = np.array(self.placed_orders[-self.cond_seq_size:])
279
+ cond_orders = self._preprocess_orders_for_diff_cond(orders, np.array(self.lob_snapshots[-self.cond_seq_size -1:]))
280
+ cond_lob = None
281
+ else:
282
+ raise ValueError("cond_type not recognized")
283
+ cond_orders = cond_orders.unsqueeze(0)
284
+ x = torch.zeros(1, self.gen_seq_size, cst.LEN_ORDER, device=cst.DEVICE, dtype=torch.float32)
285
+ generated = self.model.sample(cond_orders=cond_orders, x=x, cond_lob=cond_lob)
286
+ post_processed_orders = []
287
+ for i in range(generated.shape[1]):
288
+ order = self._postprocess_generated_TRADES(generated[0, i, :])
289
+ if order is not None:
290
+ post_processed_orders.append(order)
291
+
292
+ elif self.chosen_model == 'CGAN':
293
+ cond_market_features = self._preprocess_market_features_for_cgan(np.array(self.lob_snapshots[-(self.seq_len)*2+1:]))
294
+ '''
295
+ cond_market_features =
296
+ ['volume_imbalance_1',
297
+ 'volume_imbalance_5',
298
+ 'absolute_volume_1',
299
+ 'absolute_volume_5',
300
+ 'spread',
301
+ 'order_sign_imbalance_256',
302
+ 'order_sign_imbalance_128',
303
+ 'returns_1',
304
+ 'returns_50']
305
+ '''
306
+ noise = torch.randn(1, 1, self.model.generator_lstm_hidden_state_dim).to(cst.DEVICE, torch.float32)
307
+ generated = self.model.sample(noise=noise, cond_market_features=cond_market_features)
308
+ generated = self.model.post_process_order(generated)
309
+ # generated = ['event_type', 'size', 'direction', 'depth', 'cancel_depth', 'quantity_100', 'quantity_type']
310
+ generated = generated[0, 0, :]
311
+ generated = self._postprocess_generated_gan(generated)
312
+ if generated is not None:
313
+ post_processed_orders = [generated]
314
+ # generated = [offset, order_type, order_id, size, price, direction]
315
+ return post_processed_orders
316
+
317
+
318
+ def placeLimitOrder(self, symbol, quantity, is_buy_order, limit_price, order_id=None, ignore_risk=True, tag=None):
319
+ order = LimitOrder(self.id, self.currentTime, symbol, quantity, is_buy_order, limit_price, order_id, tag)
320
+ self.sendMessage(self.exchangeID, Message({"msg": "LIMIT_ORDER", "sender": self.id, "order": order}))
321
+ # Log this activity.
322
+ if self.log_orders: self.logEvent('ORDER_SUBMITTED', order.to_dict())
323
+
324
+ def placeMarketOrder(self, symbol, quantity, is_buy_order, order_id=None, ignore_risk=True, tag=None):
325
+ """
326
+ The market order is filled by crossing the spread and walking the book, matching resting limit orders until the full quantity is executed.
327
+ """
328
+ order = MarketOrder(self.id, self.currentTime, symbol, quantity, is_buy_order, order_id)
329
+ self.sendMessage(self.exchangeID, Message({"msg": "MARKET_ORDER", "sender": self.id, "order": order}))
330
+ if self.log_orders: self.logEvent('ORDER_SUBMITTED', order.to_dict())
331
+
332
+ def cancelOrder(self, order):
333
+ """Used by any Trading Agent subclass to cancel any order.
334
+ The order must currently appear in the agent's open orders list."""
335
+ if isinstance(order, LimitOrder):
336
+ self.sendMessage(self.exchangeID, Message({"msg": "CANCEL_ORDER", "sender": self.id,
337
+ "order": order}))
338
+ # Log this activity.
339
+ if self.log_orders: self.logEvent('CANCEL_SUBMITTED', order.to_dict())
340
+ else:
341
+ log_print("order {} of type, {} cannot be cancelled", order, type(order))
342
+
343
+ def modifyOrder(self, order, newOrder):
344
+ """ Used by any Trading Agent subclass to modify any existing limit order. The order must currently
345
+ appear in the agent's open orders list. Some additional tests might be useful here
346
+ to ensure the old and new orders are the same in some way."""
347
+ self.sendMessage(self.exchangeID, Message({"msg": "MODIFY_ORDER", "sender": self.id,
348
+ "order": order, "new_order": newOrder}))
349
+ # Log this activity.
350
+ if self.log_orders: self.logEvent('MODIFY_ORDER', order.to_dict())
351
+
352
+ def _postprocess_generated_gan(self, generated):
353
+ ''' we need to go from the output of the cgan model to an actual order '''
354
+ generated = generated.cpu().detach().numpy()
355
+ # firstly we generate the offset
356
+ offset = stats.gamma.rvs(self.shape_temp_distance, self.loc_temp_distance, self.scale_temp_distance)
357
+
358
+ direction = generated[2]
359
+ quantity_type = generated[6]
360
+ order_type = generated[0]
361
+ # order type == -1 -> limit order
362
+ # order type == 0 -> cancel order
363
+ # order type == 1 -> market order
364
+ order_type += 2
365
+ if order_type == 3 or order_type == 2:
366
+ order_type += 1
367
+ # order type == 1 -> limit order
368
+ # order type == 3 -> cancel order
369
+ # order type == 4 -> market order
370
+
371
+ # we return the depth, the cancel depth, the size and the quantity100 to the original scale
372
+ mean_depth = self.normalization_terms["lob"][12]
373
+ std_depth = self.normalization_terms["lob"][13]
374
+ mean_cancel_depth = self.normalization_terms["lob"][8]
375
+ std_cancel_depth = self.normalization_terms["lob"][9]
376
+ mean_size_100 = self.normalization_terms["lob"][10]
377
+ std_size_100 = self.normalization_terms["lob"][11]
378
+ mean_size = self.normalization_terms["lob"][14]
379
+ std_size = self.normalization_terms["lob"][15]
380
+ depth = int(generated[3] * std_depth + mean_depth)
381
+ cancel_depth = int(generated[4] * std_cancel_depth + mean_cancel_depth)
382
+ # we are considering only the first 10 levels of the order book so we need to check if the cancel depth is greater than 9
383
+ if cancel_depth > 9:
384
+ return None
385
+ size_100 = generated[5] * std_size_100 + mean_size_100
386
+ size = int(generated[1] * std_size + mean_size)
387
+
388
+ if quantity_type == -1:
389
+ size = int(size_100)*100
390
+
391
+ if order_type == 1:
392
+ order_id = self.unused_order_ids[0]
393
+ self.unused_order_ids = self.unused_order_ids[1:]
394
+ if direction == 1:
395
+ bid_side = self.lob_snapshots[-1][2::4]
396
+ bid_price = bid_side[0]
397
+ if bid_price == 0:
398
+ bid_price = self.last_bid_price
399
+ else:
400
+ self.last_bid_price = bid_price
401
+ last_price = bid_side[-1]
402
+ price = bid_price - depth*100
403
+ # if the first 10 levels are full and the price is less than the last price we generate another order
404
+ # because we consider only the first 10 levels
405
+ if price < last_price and last_price > 0:
406
+ self.generated_orders_out_of_depth += 1
407
+ return None
408
+ self.diff_limit_order_placed += 1
409
+ else:
410
+ ask_side = self.lob_snapshots[-1][0::4]
411
+ ask_price = ask_side[0]
412
+ if ask_price == 0:
413
+ ask_price = self.last_ask_price
414
+ else:
415
+ self.last_ask_price = ask_price
416
+ last_price = ask_side[-1]
417
+ price = ask_price + depth*100
418
+ if price > last_price and last_price > 0:
419
+ self.generated_orders_out_of_depth += 1
420
+ return None
421
+ self.diff_limit_order_placed += 1
422
+
423
+ elif order_type == 3:
424
+ if direction == 1:
425
+ bid_side = self.lob_snapshots[-1][2::4]
426
+ bid_price = bid_side[0]
427
+ if bid_price == 0:
428
+ return None
429
+ else:
430
+ self.last_bid_price = bid_price
431
+ #select the price at depth = cancel_depth
432
+ price = bid_side[cancel_depth]
433
+ # search all the active limit orders with the same price
434
+ orders_with_same_price = [order for order in self.active_limit_orders.values() if order.limit_price == price]
435
+ # if there are no orders with the same price then we generate another order
436
+ if len(orders_with_same_price) == 0:
437
+ self.generated_cancel_orders_empty_depth += 1
438
+ # check if there are buy limit orders active
439
+ if len([order for order in self.active_limit_orders.values() if order.is_buy_order]) == 0:
440
+ return None
441
+ # find the order with the closest price and quantity
442
+ order_id = min(self.active_limit_orders.values(), key=lambda x: (abs(x.limit_price - price), abs(x.quantity - size))).order_id
443
+ else:
444
+ # we select the order whose quantity is closest to the generated quantity
445
+ order_id = min(orders_with_same_price, key=lambda x: abs(x.quantity - size)).order_id
446
+ self.diff_cancel_order_placed += 1
447
+
448
+ else:
449
+ ask_side = self.lob_snapshots[-1][0::4]
450
+ ask_price = ask_side[0]
451
+ if ask_price == 0:
452
+ return None
453
+ else:
454
+ self.last_ask_price = ask_price
455
+ price = ask_side[cancel_depth]
456
+ # search all the active limit orders in the same level
457
+ orders_with_same_price = [order for order in self.active_limit_orders.values() if order.limit_price == price]
458
+ # if there are no orders with the same price then we generate another order
459
+ if len(orders_with_same_price) == 0:
460
+ self.generated_cancel_orders_empty_depth += 1
461
+ # check if there are sell limit orders active
462
+ if len([order for order in self.active_limit_orders.values() if not order.is_buy_order]) == 0:
463
+ return None
464
+ order_id = min(self.active_limit_orders.values(), key=lambda x: (abs(x.limit_price - price), abs(x.quantity - size))).order_id
465
+ else:
466
+ # we select the order whose quantity is closest to the generated quantity
467
+ order_id = min(orders_with_same_price, key=lambda x: abs(x.quantity - size)).order_id
468
+ self.diff_cancel_order_placed += 1
469
+
470
+ elif order_type == 4:
471
+ self.diff_market_order_placed += 1
472
+ if direction == 1:
473
+ price = self.lob_snapshots[-1][0]
474
+ else:
475
+ price = self.lob_snapshots[-1][2]
476
+ order_id = 0
477
+ # the model outputs market orders rather than executions of limit orders,
478
+ # so we convert market orders into execution orders on the opposite side, matching the original message files
479
+ direction = -direction
480
+ self.count_diff_placed_orders += 1
481
+ return np.array([offset, order_type, order_id, size, price, direction])
482
+
483
+
484
+
485
+ def _postprocess_generated_TRADES(self, generated):
486
+ ''' we need to go from the output of the diffusion model to an actual order '''
487
+ direction = generated[self.size_type_emb+3]
488
+ if direction < 0:
489
+ direction = -1
490
+ else:
491
+ direction = 1
492
+
493
+ #order_type = torch.argmax(generated[1:self.size_type_emb+1]).item() + 1
494
+ order_type = torch.argmin(torch.sum(torch.abs(self.model.type_embedder.weight.data - generated[1:self.size_type_emb+1]), dim=1)).item()+1
495
+ #print(order_type)
496
+
497
+ if order_type == 3 or order_type == 2:
498
+ order_type += 1
499
+ # order type == 1 -> limit order
500
+ # order type == 3 -> cancel order
501
+ # order type == 4 -> market order
502
+
503
+ # we return the size and the time to the original scale
504
+ size = round(generated[self.size_type_emb+1].item() * self.normalization_terms["event"][1] + self.normalization_terms["event"][0], ndigits=0)
505
+ depth = round(generated[-1].item() * self.normalization_terms["event"][7] + self.normalization_terms["event"][6], ndigits=0)
506
+ time = generated[0].item() * self.normalization_terms["event"][5] + self.normalization_terms["event"][4]
507
+
508
+ # if the size is negative or implausibly large (> 1000) we discard it and generate another order
509
+ if size < 0 or size > 1000:
510
+ self.count_neg_size += 1
511
+ return None
512
+
513
+ # if the generated time offset is non-positive, clamp it to a tiny positive value
514
+ if time <= 0:
515
+ time = 0.0000001
516
+
517
+ if order_type == 1:
518
+ order_id = self.unused_order_ids[0]
519
+ self.unused_order_ids = self.unused_order_ids[1:]
520
+ if direction == 1:
521
+ bid_side = self.lob_snapshots[-1][2::4]
522
+ bid_price = bid_side[0]
523
+ if bid_price == 0:
524
+ bid_price = self.last_bid_price
525
+ else:
526
+ self.last_bid_price = bid_price
527
+ last_price = bid_side[-1]
528
+ price = bid_price - depth*100
529
+ # if the first 10 levels are full and the price is less than the last price we generate another order
530
+ # because we consider only the first 10 levels
531
+ if price < last_price and last_price > 0:
532
+ self.generated_orders_out_of_depth += 1
533
+ return None
534
+ self.diff_limit_order_placed += 1
535
+ else:
536
+ ask_side = self.lob_snapshots[-1][0::4]
537
+ ask_price = ask_side[0]
538
+ if ask_price == 0:
539
+ ask_price = self.last_ask_price
540
+ else:
541
+ self.last_ask_price = ask_price
542
+ last_price = ask_side[-1]
543
+ price = ask_price + depth*100
544
+ if price > last_price and last_price > 0:
545
+ self.generated_orders_out_of_depth += 1
546
+ return None
547
+ self.diff_limit_order_placed += 1
548
+
549
+ elif order_type == 3:
550
+ if direction == 1:
551
+ bid_side = self.lob_snapshots[-1][2::4]
552
+ bid_price = bid_side[0]
553
+ if bid_price == 0:
554
+ return None
555
+ else:
556
+ self.last_bid_price = bid_price
557
+ price = bid_price - depth*100
558
+ # search all the active limit orders with the same price
559
+ orders_with_same_price = [order for order in self.active_limit_orders.values() if order.limit_price == price]
560
+ # if there are no orders with the same price then we generate another order
561
+ if len(orders_with_same_price) == 0:
562
+ self.generated_cancel_orders_empty_depth += 1
563
+ # check if there are buy limit orders active
564
+ if len([order for order in self.active_limit_orders.values() if order.is_buy_order]) == 0:
565
+ return None
566
+ # find the order with the closest price and quantity
567
+ order_id = min(self.active_limit_orders.values(), key=lambda x: (abs(x.limit_price - price), abs(x.quantity - size))).order_id
568
+ else:
569
+ # we select the order whose quantity is closest to the generated quantity
570
+ order_id = min(orders_with_same_price, key=lambda x: abs(x.quantity - size)).order_id
571
+ self.diff_cancel_order_placed += 1
572
+
573
+ else:
574
+ ask_side = self.lob_snapshots[-1][0::4]
575
+ ask_price = ask_side[0]
576
+ if ask_price == 0:
577
+ return None
578
+ else:
579
+ self.last_ask_price = ask_price
580
+ price = ask_price + depth*100
581
+ # search all the active limit orders in the same level
582
+ orders_with_same_price = [order for order in self.active_limit_orders.values() if order.limit_price == price]
583
+ # if there are no orders with the same price then we generate another order
584
+ if len(orders_with_same_price) == 0:
585
+ self.generated_cancel_orders_empty_depth += 1
586
+ # check if there are sell limit orders active
587
+ if len([order for order in self.active_limit_orders.values() if not order.is_buy_order]) == 0:
588
+ return None
589
+ order_id = min(self.active_limit_orders.values(), key=lambda x: (abs(x.limit_price - price), abs(x.quantity - size))).order_id
590
+ else:
591
+ # we select the order whose quantity is closest to the generated quantity
592
+ order_id = min(orders_with_same_price, key=lambda x: abs(x.quantity - size)).order_id
593
+ self.diff_cancel_order_placed += 1
594
+
595
+ elif order_type == 4:
596
+ self.diff_market_order_placed += 1
597
+ if direction == 1:
598
+ price = self.lob_snapshots[-1][0]
599
+ else:
600
+ price = self.lob_snapshots[-1][2]
601
+ order_id = 0
602
+ # the model outputs market orders rather than executions of limit orders,
603
+ # so we convert market orders into execution orders on the opposite side, matching the original message files
604
+ direction = -direction
605
+ self.count_diff_placed_orders += 1
606
+ return np.array([time, order_type, order_id, size, price, direction])
607
+
608
+
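
A standalone sketch of the two decoding steps used by _postprocess_generated_TRADES above: picking the event type as the nearest row of the type-embedding matrix, and undoing the z-score normalization of the size. The embedding values, normalization terms, and the fake model output below are made up for illustration; the indices assume size_type_emb = 3, as in the constructor default.

import torch

# Hypothetical 3x3 type-embedding matrix (rows correspond to types 1, 2, 3).
type_emb = torch.tensor([[0.44, -0.30, 0.29],
                         [0.82,  0.58, 0.14],
                         [1.56, -1.28, 1.03]])

generated = torch.tensor([0.1, 1.5, -1.2, 1.0, 0.7, -0.4, 0.3])  # fake model output

# Event type = nearest embedding row (L1 distance), shifted to the range 1..3.
event_vec = generated[1:4]
order_type = torch.argmin(torch.sum(torch.abs(type_emb - event_vec), dim=1)).item() + 1

# Undo the z-scoring: x_original = x_normalized * std + mean (hypothetical terms).
mean_size, std_size = 75.0, 120.0
size = round(generated[4].item() * std_size + mean_size)
print(order_type, size)
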
609
+ def _update_active_limit_orders(self):
610
+ asks = self.kernel.agents[0].order_books[self.symbol].asks
611
+ bids = self.kernel.agents[0].order_books[self.symbol].bids
612
+ self.active_limit_orders = {}
613
+ for level in asks:
614
+ for order in level:
615
+ self.active_limit_orders[order.order_id] = order
616
+ for level in bids:
617
+ for order in level:
618
+ self.active_limit_orders[order.order_id] = order
619
+
620
+
621
+ def _z_score_orderbook(self, orderbook):
622
+ orderbook[:, 0::2] = orderbook[:, 0::2] / 100
623
+ orderbook[:, 0::2] = (orderbook[:, 0::2] - self.normalization_terms["lob"][2]) / self.normalization_terms["lob"][3]
624
+ orderbook[:, 1::2] = (orderbook[:, 1::2] - self.normalization_terms["lob"][0]) / self.normalization_terms["lob"][1]
625
+ return orderbook
626
+
627
+
628
+ def _preprocess_orders_for_diff_cond(self, orders, lob_snapshots):
629
+ COLUMNS_NAMES = {"orderbook": ["sell1", "vsell1", "buy1", "vbuy1",
630
+ "sell2", "vsell2", "buy2", "vbuy2",
631
+ "sell3", "vsell3", "buy3", "vbuy3",
632
+ "sell4", "vsell4", "buy4", "vbuy4",
633
+ "sell5", "vsell5", "buy5", "vbuy5",
634
+ "sell6", "vsell6", "buy6", "vbuy6",
635
+ "sell7", "vsell7", "buy7", "vbuy7",
636
+ "sell8", "vsell8", "buy8", "vbuy8",
637
+ "sell9", "vsell9", "buy9", "vbuy9",
638
+ "sell10", "vsell10", "buy10", "vbuy10"],
639
+ "message": ["time", "event_type", "order_id", "size", "price", "direction"]}
640
+ orders_dataframe = pd.DataFrame(orders, columns=COLUMNS_NAMES["message"])
641
+ lob_dataframe = pd.DataFrame(lob_snapshots, columns=COLUMNS_NAMES["orderbook"])
642
+
643
+ # we compute the depth of the orders with respect to the orderbook
644
+ orders_dataframe["depth"] = 0
645
+ for j in range(0, orders_dataframe.shape[0]):
646
+ order_price = orders_dataframe["price"].iloc[j]
647
+ direction = orders_dataframe["direction"].iloc[j]
648
+ type = orders_dataframe["event_type"].iloc[j]
649
+ if type == 1:
650
+ index = j + 1
651
+ else:
652
+ index = j
653
+ if direction == 1:
654
+ bid_side = lob_dataframe.iloc[index, 2::4]
655
+ bid_price = bid_side[0]
656
+ depth = (bid_price - order_price) // 100
657
+ if depth < 0:
658
+ depth = 0
659
+ else:
660
+ ask_side = lob_dataframe.iloc[index, 0::4]
661
+ ask_price = ask_side[0]
662
+ depth = (order_price - ask_price) // 100
663
+ if depth < 0:
664
+ depth = 0
665
+ orders_dataframe.loc[j, "depth"] = depth
666
+
667
+ # if order type is 4, then we transform the execution of a sell limit order in a buy market order
668
+ orders_dataframe["direction"] = orders_dataframe["direction"] * orders_dataframe["event_type"].apply(
669
+ lambda x: -1 if x == 4 else 1)
670
+
671
+ # drop the order_id column
672
+ orders_dataframe = orders_dataframe.drop(columns=["order_id"])
673
+
674
+ # divide the message prices by 100 (LOB prices are rescaled the same way in _z_score_orderbook)
675
+ orders_dataframe["price"] = orders_dataframe["price"] / 100
676
+
677
+ # apply z score to orders
678
+ orders_dataframe, _, _, _, _, _, _, _, _ = normalize_messages(orders_dataframe,
679
+ mean_size=self.normalization_terms["event"][0],
680
+ mean_prices=self.normalization_terms["event"][2],
681
+ std_size=self.normalization_terms["event"][1],
682
+ std_prices=self.normalization_terms["event"][3],
683
+ mean_time=self.normalization_terms["event"][4],
684
+ std_time=self.normalization_terms["event"][5],
685
+ mean_depth=self.normalization_terms["event"][6],
686
+ std_depth=self.normalization_terms["event"][7]
687
+ )
688
+
689
+
690
+ return torch.from_numpy(orders_dataframe.to_numpy()).to(cst.DEVICE, torch.float32)
691
+
692
+
693
+ def _load_orders_lob(self, symbol, data_dir, date, date_trading_days):
694
+ path = "{}/{}/{}_{}_{}".format(
695
+ data_dir,
696
+ symbol,
697
+ symbol,
698
+ date_trading_days[0],
699
+ date_trading_days[1],
700
+ )
701
+ COLUMNS_NAMES = {"orderbook": ["sell1", "vsell1", "buy1", "vbuy1",
702
+ "sell2", "vsell2", "buy2", "vbuy2",
703
+ "sell3", "vsell3", "buy3", "vbuy3",
704
+ "sell4", "vsell4", "buy4", "vbuy4",
705
+ "sell5", "vsell5", "buy5", "vbuy5",
706
+ "sell6", "vsell6", "buy6", "vbuy6",
707
+ "sell7", "vsell7", "buy7", "vbuy7",
708
+ "sell8", "vsell8", "buy8", "vbuy8",
709
+ "sell9", "vsell9", "buy9", "vbuy9",
710
+ "sell10", "vsell10", "buy10", "vbuy10"],
711
+ "message": ["time", "event_type", "order_id", "size", "price", "direction"]}
712
+ for i, filename in enumerate(os.listdir(path)):
713
+ f = os.path.join(path, filename)
714
+ filename_splitted = filename.split('_')
715
+ file_date = filename_splitted[1]
716
+ if os.path.isfile(f) and file_date == date:
717
+ if filename_splitted[4] == "message":
718
+ events = pd.read_csv(f, header=None, names=COLUMNS_NAMES["message"])
719
+ elif filename_splitted[4] == "orderbook":
720
+ lob = pd.read_csv(f, header=None, names=COLUMNS_NAMES["orderbook"])
721
+ else:
722
+ raise ValueError("File name not recognized")
723
+
724
+ events, lob = self._preprocess_events_for_market_replay(events, lob)
725
+ # transform to numpy
726
+ lob = lob.to_numpy()
727
+ events = events.to_numpy()
728
+ return events, lob
729
+
730
+
731
+ def _preprocess_events_for_market_replay(self, events, lob):
732
+
733
+ # drop the rows with event_type = 5, 6, 7
734
+ indexes = events[events["event_type"].isin([5, 6, 7])].index
735
+ events = events.drop(indexes)
736
+ lob = lob.drop(indexes)
737
+
738
+ # take the row-by-row time differences in the messages and substitute the values with those differences
739
+ first = events["time"].iloc[0]
740
+ events["time"] = events["time"].diff()
741
+ events["time"].iloc[0] = first - 34200
742
+
743
+ dataframes = reset_indexes([events, lob])
744
+ events = dataframes[0]
745
+ lob = dataframes[1]
746
+ # get the order ids of the rows with order_type=1
747
+ order_ids = events.loc[events['event_type'] == 1, 'order_id']
748
+
749
+ # filter out the rows that have order_type != 1 and have an order id that is not in order_ids
750
+ filtered_df = events.loc[((events['event_type'] != 1) & ~(events['order_id'].isin(order_ids)))].index
751
+
752
+ events = events.drop(filtered_df)
753
+ lob = lob.drop(filtered_df)
754
+ dataframes = reset_indexes([events, lob])
755
+ return dataframes[0], dataframes[1]
756
+
757
+
758
+ def _update_lob_snapshot(self, msg):
759
+ last_lob_snapshot = []
760
+ min_actual_lob_level = min(len(msg.body['asks']), len(msg.body['bids']))
761
+ # we take the first 10 levels of the lob and update the list of lob snapshots
762
+ # to use for the conditioning of the diffusion model
763
+ for i in range(0, 10):
764
+ if i < min_actual_lob_level:
765
+ last_lob_snapshot.append(msg.body['asks'][i][0])
766
+ last_lob_snapshot.append(msg.body['asks'][i][1])
767
+ last_lob_snapshot.append(msg.body['bids'][i][0])
768
+ last_lob_snapshot.append(msg.body['bids'][i][1])
769
+ # we need the else branch in case the actual LOB has fewer than 10 levels
770
+ else:
771
+ if len(msg.body['asks']) > len(msg.body['bids']) and i < len(msg.body['asks']):
772
+ last_lob_snapshot.append(msg.body['asks'][i][0])
773
+ last_lob_snapshot.append(msg.body['asks'][i][1])
774
+ last_lob_snapshot.append(0)
775
+ last_lob_snapshot.append(0)
776
+ elif len(msg.body['bids']) > len(msg.body['asks']) and i < len(msg.body['bids']):
777
+ last_lob_snapshot.append(0)
778
+ last_lob_snapshot.append(0)
779
+ last_lob_snapshot.append(msg.body['bids'][i][0])
780
+ last_lob_snapshot.append(msg.body['bids'][i][1])
781
+ else:
782
+ for _ in range(4): last_lob_snapshot.append(0)
783
+ self.last_lob_snapshot = last_lob_snapshot
784
+ self.lob_snapshots.append(last_lob_snapshot)
785
+ self.sparse_lob_snapshots.append(to_sparse_representation(last_lob_snapshot, 100))
786
+
787
+
788
+ def _preprocess_market_features_for_cgan(self, lob_snapshots):
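+ # builds the nine market features used to condition the CGAN: volume imbalance and absolute volume (level 1 and levels 1-5), the spread, order-sign imbalance over the last 256 and 128 orders, and 1-step and 50-step mid-price returns, each z-scored with the precomputed terms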
789
+ lob_snapshots = np.array(lob_snapshots)
790
+ COLUMNS_NAMES = {"orderbook": ["sell1", "vsell1", "buy1", "vbuy1",
791
+ "sell2", "vsell2", "buy2", "vbuy2",
792
+ "sell3", "vsell3", "buy3", "vbuy3",
793
+ "sell4", "vsell4", "buy4", "vbuy4",
794
+ "sell5", "vsell5", "buy5", "vbuy5",
795
+ "sell6", "vsell6", "buy6", "vbuy6",
796
+ "sell7", "vsell7", "buy7", "vbuy7",
797
+ "sell8", "vsell8", "buy8", "vbuy8",
798
+ "sell9", "vsell9", "buy9", "vbuy9",
799
+ "sell10", "vsell10", "buy10", "vbuy10"],
800
+ }
801
+ lob_dataframe = pd.DataFrame(lob_snapshots, columns=COLUMNS_NAMES["orderbook"])
802
+ orders = np.array(self.placed_orders[-self.seq_len*2 +1:])
803
+ orders_dataframe = pd.DataFrame(orders, columns=["time", "type", "order_id", "quantity", "price", "direction"])
804
+ dataframes = [[orders_dataframe, lob_dataframe]]
805
+ mean_spread = self.normalization_terms["lob"][0]
806
+ std_spread = self.normalization_terms["lob"][1]
807
+ mean_return = self.normalization_terms["lob"][2]
808
+ std_return = self.normalization_terms["lob"][3]
809
+ mean_vol_imb = self.normalization_terms["lob"][4]
810
+ std_vol_imb = self.normalization_terms["lob"][5]
811
+ mean_abs_vol = self.normalization_terms["lob"][6]
812
+ std_abs_vol = self.normalization_terms["lob"][7]
813
+ for i in range(len(dataframes)):
814
+ lob_sizes = dataframes[i][1].iloc[:, 1::2]
815
+ lob_prices = dataframes[i][1].iloc[:, 0::2]
816
+ dataframes[i][1]["volume_imbalance_1"] = lob_sizes.iloc[:, 1] / (lob_sizes.iloc[:, 1] + lob_sizes.iloc[:, 0])
817
+ dataframes[i][1]["volume_imbalance_5"] = (lob_sizes.iloc[:, 1] + lob_sizes.iloc[:, 3] + lob_sizes.iloc[:, 5] + lob_sizes.iloc[:, 7] + lob_sizes.iloc[:, 9]) / (lob_sizes.iloc[:, :10].sum(axis=1))
818
+ dataframes[i][1]["absolute_volume_1"] = lob_sizes.iloc[:, 1] + lob_sizes.iloc[:, 0]
819
+ dataframes[i][1]["absolute_volume_5"] = lob_sizes.iloc[:, :10].sum(axis=1)
820
+ dataframes[i][1]["spread"] = lob_prices.iloc[:, 0] - lob_prices.iloc[:, 1]
821
+
822
+ for i in range(len(dataframes)):
823
+ order_sign_imbalance_256 = pd.Series(0, index=dataframes[i][1].index)
824
+ order_sign_imbalance_128 = pd.Series(0, index=dataframes[i][1].index)
825
+ returns_50 = pd.Series(0, index=dataframes[i][1].index)
826
+ returns_1 = pd.Series(0, index=dataframes[i][1].index)
827
+ lob_prices = dataframes[i][1].iloc[:, 0::2]
828
+ mid_prices = (lob_prices.iloc[:, 0] + lob_prices.iloc[:, 1]) / 2
829
+ for j in range(len(dataframes[i][1])-256):
830
+ order_sign_imbalance_256.iloc[j] = dataframes[i][0]["direction"].iloc[j:j+256].sum()
831
+ order_sign_imbalance_128.iloc[j] = dataframes[i][0]["direction"].iloc[j+128:j+256].sum()
832
+ returns_1.iloc[j] = mid_prices[j+255] / mid_prices[j+254] - 1
833
+ returns_50.iloc[j] = mid_prices[j+255] / mid_prices[j+205] - 1
834
+ dataframes[i][1] = dataframes[i][1].iloc[255:]
835
+ dataframes[i][1].loc[:, "order_sign_imbalance_256"] = order_sign_imbalance_256.iloc[:-255] / 256
836
+ dataframes[i][1].loc[:, "order_sign_imbalance_128"] = order_sign_imbalance_128.iloc[:-255] / 128
837
+ dataframes[i][1].loc[:, "returns_1"] = returns_1.iloc[:-255]
838
+ dataframes[i][1].loc[:, "returns_50"] = returns_50.iloc[:-255]
839
+ dataframes[i][1] = dataframes[i][1][["volume_imbalance_1", "volume_imbalance_5", "absolute_volume_1", "absolute_volume_5", "spread", "order_sign_imbalance_256", "order_sign_imbalance_128", "returns_1", "returns_50"]]
840
+
841
+ for i in range(len(dataframes)):
842
+ dataframes[i][1] = dataframes[i][1].reset_index(drop=True)
843
+
844
+ for i in range(len(dataframes)):
845
+ # replace NaN values with 0
846
+ dataframes[i][1] = dataframes[i][1].fillna(0)
847
+
848
+ market_features = dataframes[0][1]
849
+ market_features["returns_1"] = (market_features["returns_1"] - mean_return) / std_return
850
+ market_features["returns_50"] = (market_features["returns_50"] - mean_return) / std_return
851
+ market_features["volume_imbalance_1"] = (market_features["volume_imbalance_1"] - mean_vol_imb) / std_vol_imb
852
+ market_features["volume_imbalance_5"] = (market_features["volume_imbalance_5"] - mean_vol_imb) / std_vol_imb
853
+ market_features["absolute_volume_1"] = (market_features["absolute_volume_1"] - mean_abs_vol) / std_abs_vol
854
+ market_features["absolute_volume_5"] = (market_features["absolute_volume_5"] - mean_abs_vol) / std_abs_vol
855
+ market_features["spread"] = (market_features["spread"] - mean_spread) / std_spread
856
+ market_features = market_features.to_numpy()
857
+ market_features = torch.from_numpy(market_features).to(cst.DEVICE, torch.float32)
858
+ market_features = market_features.unsqueeze(0)
859
+ return market_features
860
+
861
+
862
+
863
+
864
+
865
+
866
+
867
+
868
+
869
+
870
+
871
+
ABIDES/agent/ZeroIntelligenceAgent.py ADDED
@@ -0,0 +1,320 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ from util.util import log_print
3
+
4
+ from math import sqrt
5
+ import numpy as np
6
+ import pandas as pd
7
+
8
+
9
+ class ZeroIntelligenceAgent(TradingAgent):
10
+
11
+ def __init__(self, id, name, type, symbol='IBM', starting_cash=100000, sigma_n=1000,
12
+ r_bar=100000, kappa=0.05, sigma_s=100000, q_max=10,
13
+ sigma_pv=5000000, R_min=0, R_max=250, eta=1.0,
14
+ lambda_a=0.005, log_orders=False, random_state=None):
15
+
16
+ # Base class init.
17
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
18
+
19
+ # Store important parameters particular to the ZI agent.
20
+ self.symbol = symbol # symbol to trade
21
+ self.sigma_n = sigma_n # observation noise variance
22
+ self.r_bar = r_bar # true mean fundamental value
23
+ self.kappa = kappa # mean reversion parameter
24
+ self.sigma_s = sigma_s # shock variance
25
+ self.q_max = q_max # max unit holdings
26
+ self.sigma_pv = sigma_pv # private value variance
27
+ self.R_min = R_min # min requested surplus
28
+ self.R_max = R_max # max requested surplus
29
+ self.eta = eta # strategic threshold
30
+ self.lambda_a = lambda_a # mean arrival rate of ZI agents
31
+
32
+ # The agent uses this to track whether it has begun its strategy or is still
33
+ # handling pre-market tasks.
34
+ self.trading = False
35
+
36
+ # The agent begins in its "complete" state, not waiting for
37
+ # any special event or condition.
38
+ self.state = 'AWAITING_WAKEUP'
39
+
40
+ # The agent maintains two priors: r_t and sigma_t (value and error estimates).
41
+ self.r_t = r_bar
42
+ self.sigma_t = 0
43
+
44
+ # The agent must track its previous wake time, so it knows how many time
45
+ # units have passed.
46
+ self.prev_wake_time = None
47
+
48
+ # The agent has a private value for each incremental unit.
49
+ self.theta = [int(x) for x in sorted(
50
+ np.round(self.random_state.normal(loc=0, scale=sqrt(sigma_pv), size=(q_max * 2))).tolist(),
51
+ reverse=True)]
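+ # theta holds 2 * q_max private values sorted in decreasing order, so the marginal private value falls as the position grows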
52
+
53
+ def kernelStarting(self, startTime):
54
+ # self.kernel is set in Agent.kernelInitializing()
55
+ # self.exchangeID is set in TradingAgent.kernelStarting()
56
+
57
+ super().kernelStarting(startTime)
58
+
59
+ self.oracle = self.kernel.oracle
60
+
61
+ def kernelStopping(self):
62
+ # Always call parent method to be safe.
63
+ super().kernelStopping()
64
+
65
+ # Print end of day valuation.
66
+ H = int(round(self.getHoldings(self.symbol), -2) / 100)
67
+ # May request real fundamental value from oracle as part of final cleanup/stats.
68
+ if self.symbol != 'ETF':
69
+ rT = self.oracle.observePrice(self.symbol, self.currentTime, sigma_n=0, random_state=self.random_state)
70
+ else:
71
+ portfolio_rT, rT = self.oracle.observePortfolioPrice(self.symbol, self.portfolio, self.currentTime,
72
+ sigma_n=0,
73
+ random_state=self.random_state)
74
+
75
+ # Start with surplus as private valuation of shares held.
76
+ if H > 0:
77
+ surplus = sum([self.theta[x + self.q_max - 1] for x in range(1, H + 1)])
78
+ elif H < 0:
79
+ surplus = -sum([self.theta[x + self.q_max - 1] for x in range(H + 1, 1)])
80
+ else:
81
+ surplus = 0
82
+
83
+ log_print("surplus init: {}", surplus)
84
+
85
+ # Add final (real) fundamental value times shares held.
86
+ surplus += rT * H
87
+
88
+ log_print("surplus after holdings: {}", surplus)
89
+
90
+ # Add ending cash value and subtract starting cash value.
91
+ surplus += self.holdings['CASH'] - self.starting_cash
92
+
93
+ self.logEvent('FINAL_VALUATION', surplus, True)
94
+
95
+ log_print(
96
+ "{} final report. Holdings {}, end cash {}, start cash {}, final fundamental {}, preferences {}, surplus {}",
97
+ self.name, H, self.holdings['CASH'], self.starting_cash, rT, self.theta, surplus)
98
+
99
+ def wakeup(self, currentTime):
100
+ # Parent class handles discovery of exchange times and market_open wakeup call.
101
+ super().wakeup(currentTime)
102
+
103
+ self.state = 'INACTIVE'
104
+
105
+ if not self.mkt_open or not self.mkt_close:
106
+ # TradingAgent handles discovery of exchange times.
107
+ return
108
+ else:
109
+ if not self.trading:
110
+ self.trading = True
111
+
112
+ # Time to start trading!
113
+ log_print("{} is ready to start trading now.", self.name)
114
+
115
+ # Steady state wakeup behavior starts here.
116
+
117
+ # If we've been told the market has closed for the day, we will only request
118
+ # final price information, then stop.
119
+ if self.mkt_closed and (self.symbol in self.daily_close_price):
120
+ # Market is closed and we already got the daily close price.
121
+ return
122
+
123
+ # Schedule a wakeup for the next time this agent should arrive at the market
124
+ # (following the conclusion of its current activity cycle).
125
+ # We do this early in case some of our expected message responses don't arrive.
126
+
127
+ # Agents should arrive according to a Poisson process. This is equivalent to
128
+ # each agent independently sampling its next arrival time from an exponential
129
+ # distribution with scale parameter Beta = 1 / lambda, where lambda
130
+ # is the mean arrival rate of the Poisson process.
131
+ delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)
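+ # e.g. with the default lambda_a = 0.005 the mean inter-arrival time is 1 / 0.005 = 200 ns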
132
+ self.setWakeup(currentTime + pd.Timedelta('{}ns'.format(int(round(delta_time)))))
133
+
134
+ # If the market has closed and we haven't obtained the daily close price yet,
135
+ # do that before we cease activity for the day. Don't do any other behavior
136
+ # after market close.
137
+ if self.mkt_closed and (not self.symbol in self.daily_close_price):
138
+ self.getCurrentSpread(self.symbol)
139
+ self.state = 'AWAITING_SPREAD'
140
+ return
141
+
142
+ # Issue cancel requests for any open orders. Don't wait for confirmation, as presently
143
+ # the only reason it could fail is that the order already executed. (But requests won't
144
+ # be generated for those, anyway, unless something strange has happened.)
145
+ self.cancelOrders()
146
+
147
+ # The ZI agent doesn't try to maintain a zero position, so there is no need to exit positions
148
+ # as some "active trading" agents might. It might exit a position based on its order logic,
149
+ # but this will be as a natural consequence of its beliefs.
150
+
151
+ # In order to use the "strategic threshold" parameter (eta), the ZI agent needs the current
152
+ # spread (inside bid/ask quote). It would not otherwise need any trade/quote information.
153
+
154
+ # If the calling agent is a subclass, don't initiate the strategy section of wakeup(), as it
155
+ # may want to do something different.
156
+
157
+ if type(self) == ZeroIntelligenceAgent:
158
+ self.getCurrentSpread(self.symbol)
159
+ self.state = 'AWAITING_SPREAD'
160
+ else:
161
+ self.state = 'ACTIVE'
162
+
163
+ def updateEstimates(self):
164
+ # Called by a background agent that wishes to obtain a new fundamental observation,
165
+ # update its internal estimation parameters, and compute a new total valuation for the
166
+ # action it is considering.
167
+
168
+ # The agent obtains a new noisy observation of the current fundamental value
169
+ # and uses this to update its internal estimates in a Bayesian manner.
170
+ obs_t = self.oracle.observePrice(self.symbol, self.currentTime, sigma_n=self.sigma_n,
171
+ random_state=self.random_state)
172
+
173
+ log_print("{} observed {} at {}", self.name, obs_t, self.currentTime)
174
+
175
+ # Flip a coin to decide if we will buy or sell a unit at this time.
176
+ q = int(self.getHoldings(self.symbol) / 100) # q now represents an index to how many 100 lots are held
177
+
178
+ if q >= self.q_max:
179
+ buy = False
180
+ log_print("Long holdings limit: agent will SELL")
181
+ elif q <= -self.q_max:
182
+ buy = True
183
+ log_print("Short holdings limit: agent will BUY")
184
+ else:
185
+ buy = bool(self.random_state.randint(0, 2))
186
+ log_print("Coin flip: agent will {}", "BUY" if buy else "SELL")
187
+
188
+ # Update internal estimates of the current fundamental value and our error of same.
189
+
190
+ # If this is our first estimate, treat the previous wake time as "market open".
191
+ if self.prev_wake_time is None: self.prev_wake_time = self.mkt_open
192
+
193
+ # First, obtain an intermediate estimate of the fundamental value by advancing
194
+ # time from the previous wake time to the current time, performing mean
195
+ # reversion at each time step.
196
+
197
+ # delta must be integer time steps since last wake
198
+ delta = (self.currentTime - self.prev_wake_time) / np.timedelta64(1, 'ns')
199
+
200
+ # Update r estimate for time advancement.
201
+ r_tprime = (1 - (1 - self.kappa) ** delta) * self.r_bar
202
+ r_tprime += ((1 - self.kappa) ** delta) * self.r_t
203
+
204
+ # Update sigma estimate for time advancement.
205
+ sigma_tprime = ((1 - self.kappa) ** (2 * delta)) * self.sigma_t
206
+ sigma_tprime += ((1 - (1 - self.kappa) ** (2 * delta)) / (1 - (1 - self.kappa) ** 2)) * self.sigma_s
207
+
208
+ # Apply the new observation, with "confidence" in the observation inversely proportional
209
+ # to the observation noise, and "confidence" in the previous estimate inversely proportional
210
+ # to the shock variance.
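+ # Equivalently: r_t = w * r_tprime + (1 - w) * obs_t, with weight w = sigma_n / (sigma_n + sigma_tprime).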
211
+ self.r_t = (self.sigma_n / (self.sigma_n + sigma_tprime)) * r_tprime
212
+ self.r_t += (sigma_tprime / (self.sigma_n + sigma_tprime)) * obs_t
213
+
214
+ self.sigma_t = (self.sigma_n * self.sigma_t) / (self.sigma_n + self.sigma_t)
215
+
216
+ # Now having a best estimate of the fundamental at time t, we can make our best estimate
217
+ # of the final fundamental (for time T) as of current time t. Delta is now the number
218
+ # of time steps remaining until the simulated exchange closes.
219
+ delta = max(0, (self.mkt_close - self.currentTime) / np.timedelta64(1, 'ns'))
220
+
221
+ # IDEA: instead of letting agent "imagine time forward" to the end of the day,
222
+ # impose a maximum forward delta, like ten minutes or so. This could make
223
+ # them think more like traders and less like long-term investors. Add
224
+ # this line of code (keeping the max() line above) to try it.
225
+ # delta = min(delta, 1000000000 * 60 * 10)
226
+
227
+ r_T = (1 - (1 - self.kappa) ** delta) * self.r_bar
228
+ r_T += ((1 - self.kappa) ** delta) * self.r_t
229
+
230
+ # Our final fundamental estimate should be quantized to whole units of value.
231
+ r_T = int(round(r_T))
232
+
233
+ # Finally (for the final fundamental estimation section) remember the current
234
+ # time as the previous wake time.
235
+ self.prev_wake_time = self.currentTime
236
+
237
+ log_print("{} estimates r_T = {} as of {}", self.name, r_T, self.currentTime)
238
+
239
+ # Determine the agent's total valuation.
240
+ q += (self.q_max - 1)
241
+ theta = self.theta[q + 1 if buy else q]
242
+ v = r_T + theta
243
+
244
+ log_print("{} total unit valuation is {} (theta = {})", self.name, v, theta)
245
+
246
+ # Return values needed to implement strategy and select limit price.
247
+ return v, buy
248
+
249
+ def placeOrder(self):
250
+ # Called when it is time for the agent to determine a limit price and place an order.
251
+ # updateEstimates() returns the agent's current total valuation for the share it
252
+ # is considering to trade and whether it will buy or sell that share.
253
+ v, buy = self.updateEstimates()
254
+
255
+ # Select a requested surplus for this trade.
256
+ R = self.random_state.randint(self.R_min, self.R_max + 1)
257
+
258
+ # Determine the limit price.
259
+ p = v - R if buy else v + R
260
+
261
+ # Either place the constructed order, or if the agent could secure (eta * R) surplus
262
+ # immediately by taking the inside bid/ask, do that instead.
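+ # e.g. with the default eta = 1.0 the agent crosses the spread only when the full requested surplus R is available at the inside quote; a smaller eta makes it more willing to take liquidity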
263
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
264
+ if buy and ask_vol > 0:
265
+ R_ask = v - ask
266
+ if R_ask >= (self.eta * R):
267
+ log_print("{} desired R = {}, but took R = {} at ask = {} due to eta", self.name, R, R_ask, ask)
268
+ p = ask
269
+ else:
270
+ log_print("{} demands R = {}, limit price {}", self.name, R, p)
271
+ elif (not buy) and bid_vol > 0:
272
+ R_bid = bid - v
273
+ if R_bid >= (self.eta * R):
274
+ log_print("{} desired R = {}, but took R = {} at bid = {} due to eta", self.name, R, R_bid, bid)
275
+ p = bid
276
+ else:
277
+ log_print("{} demands R = {}, limit price {}", self.name, R, p)
278
+
279
+ # Place the order.
280
+ size = 100
281
+ self.placeLimitOrder(self.symbol, size, buy, p)
282
+
283
+ def receiveMessage(self, currentTime, msg):
284
+ # Parent class schedules market open wakeup call once market open/close times are known.
285
+ super().receiveMessage(currentTime, msg)
286
+
287
+ # We have been awakened by something other than our scheduled wakeup.
288
+ # If our internal state indicates we were waiting for a particular event,
289
+ # check if we can transition to a new state.
290
+
291
+ if self.state == 'AWAITING_SPREAD':
292
+ # We were waiting to receive the current spread/book. Since we don't currently
293
+ # track timestamps on retained information, we rely on actually seeing a
294
+ # QUERY_SPREAD response message.
295
+
296
+ if msg.body['msg'] == 'QUERY_SPREAD':
297
+ # This is what we were waiting for.
298
+
299
+ # But if the market is now closed, don't advance to placing orders.
300
+ if self.mkt_closed: return
301
+
302
+ # We now have the information needed to place a limit order with the eta
303
+ # strategic threshold parameter.
304
+ self.placeOrder()
305
+ self.state = 'AWAITING_WAKEUP'
306
+
307
+ # Internal state and logic specific to this agent subclass.
308
+
309
+ # Cancel all open orders.
310
+ # Return value: did we issue any cancellation requests?
311
+ def cancelOrders(self):
312
+ if not self.orders: return False
313
+
314
+ for id, order in self.orders.items():
315
+ self.cancelOrder(order)
316
+
317
+ return True
318
+
319
+ def getWakeFrequency(self):
320
+ return pd.Timedelta(self.random_state.randint(low=0, high=100), unit='ns')
ABIDES/agent/__init__.py ADDED
File without changes
ABIDES/agent/etf/EtfArbAgent.py ADDED
@@ -0,0 +1,197 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ from util.util import log_print
3
+
4
+ import numpy as np
5
+ import pandas as pd
6
+
7
+
8
+ class EtfArbAgent(TradingAgent):
9
+
10
+ def __init__(self, id, name, type, portfolio = {}, gamma = 0, starting_cash=100000, lambda_a = 0.005,
11
+ log_orders = False, random_state = None):
12
+
13
+ # Base class init.
14
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state = random_state)
15
+
16
+ # Store important parameters particular to the ETF arbitrage agent.
17
+ self.inPrime = False # Determines if the agent also participates in the Primary ETF market
18
+ self.portfolio = portfolio # ETF portfolio
19
+ self.gamma = gamma # Threshold for difference between ETF and index to trade
20
+ self.messageCount = len(self.portfolio) + 1 # Tracks the number of spread responses still expected so that all limit orders
21
+ # are sent only after all mid prices are calculated
22
+ #self.q_max = q_max # max unit holdings
23
+ self.lambda_a = lambda_a # mean arrival rate of ETF arb agents (eventually change to a subscription)
24
+
25
+ # NEED TO TEST IF THERE ARE SYMBOLS IN PORTFOLIO
26
+
27
+ # The agent uses this to track whether it has begun its strategy or is still
28
+ # handling pre-market tasks.
29
+ self.trading = False
30
+
31
+ # The agent begins in its "complete" state, not waiting for
32
+ # any special event or condition.
33
+ self.state = 'AWAITING_WAKEUP'
34
+
35
+ def kernelStarting(self, startTime):
36
+ # self.kernel is set in Agent.kernelInitializing()
37
+ # self.exchangeID is set in TradingAgent.kernelStarting()
38
+
39
+ super().kernelStarting(startTime)
40
+
41
+ self.oracle = self.kernel.oracle
42
+
43
+
44
+ def kernelStopping (self):
45
+ # Always call parent method to be safe.
46
+ super().kernelStopping()
47
+
48
+ # Print end of day valuation.
49
+ H = {}
50
+ H['ETF'] = self.getHoldings('ETF')
51
+ for i,s in enumerate(self.portfolio):
52
+ H[s] = self.getHoldings(s)
53
+ print(H)
54
+ print(self.daily_close_price)
55
+
56
+
57
+ def wakeup (self, currentTime):
58
+ # Parent class handles discovery of exchange times and market_open wakeup call.
59
+ super().wakeup(currentTime)
60
+
61
+ self.state = 'INACTIVE'
62
+
63
+ if not self.mkt_open or not self.mkt_close:
64
+ return
65
+ else:
66
+ if not self.trading:
67
+ self.trading = True
68
+ # Time to start trading!
69
+ log_print ("{} is ready to start trading now.", self.name)
70
+
71
+
72
+ # Steady state wakeup behavior starts here.
73
+ # If we've been told the market has closed for the day, we will only request
74
+ # final price information, then stop.
75
+ # If the market has closed and we haven't obtained the daily close price yet,
76
+ # do that before we cease activity for the day. Don't do any other behavior
77
+ # after market close.
78
+ # If the calling agent is a subclass, don't initiate the strategy section of wakeup(), as it
79
+ # may want to do something different.
80
+ if self.mkt_closed and not self.inPrime:
81
+ for i,s in enumerate(self.portfolio):
82
+ if s not in self.daily_close_price:
83
+ self.getLastTrade(s)
84
+ self.state = 'AWAITING_LAST_TRADE'
85
+ if 'ETF' not in self.daily_close_price:
86
+ self.getLastTrade('ETF')
87
+ self.state = 'AWAITING_LAST_TRADE'
88
+ return
89
+
90
+ # Schedule a wakeup for the next time this agent should arrive at the market
91
+ # (following the conclusion of its current activity cycle).
92
+ # We do this early in case some of our expected message responses don't arrive.
93
+
94
+ # Agents should arrive according to a Poisson process. This is equivalent to
95
+ # each agent independently sampling its next arrival time from an exponential
96
+ # distribution with scale parameter Beta = 1 / lambda, where lambda
97
+ # is the mean arrival rate of the Poisson process.
98
+ elif not self.inPrime:
99
+ delta_time = self.random_state.exponential(scale = 1.0 / self.lambda_a)
100
+ self.setWakeup(currentTime + pd.Timedelta('{}ns'.format(int(round(delta_time)))))
101
+
102
+ # Issue cancel requests for any open orders. Don't wait for confirmation, as presently
103
+ # the only reason it could fail is that the order already executed. (But requests won't
104
+ # be generated for those, anyway, unless something strange has happened.)
105
+ self.cancelOrders()
106
+
107
+
108
+ # The ETF arb agent DOES try to maintain a zero position, so there IS need to exit positions
109
+ # as some "active trading" agents might. It might exit a position based on its order logic,
110
+ # but this will be as a natural consequence of its beliefs... but it submits marketable orders so...
111
+ for i,s in enumerate(self.portfolio):
112
+ self.getCurrentSpread(s)
113
+ self.getCurrentSpread('ETF')
114
+ self.state = 'AWAITING_SPREAD'
115
+
116
+ else:
117
+ self.state = 'ACTIVE'
118
+
119
+ def getPriceEstimates(self):
120
+ index_mids = np.empty(len(self.portfolio))
121
+ index_p = {}
122
+ empty_mid = False
123
+ for i,s in enumerate(self.portfolio):
124
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(s)
125
+ if bid is not None and ask is not None:
126
+ index_p[s] = {'bid': bid, 'ask': ask}
127
+ mid = 0.5 * (int(bid) + int(ask))
128
+ else:
129
+ mid = float()
130
+ index_p[s] = {'bid': float(), 'ask': float()}
131
+ empty_mid = True
132
+ index_mids[i] = mid
133
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk('ETF')
134
+ etf_p = {'bid': bid, 'ask': ask}
135
+ if bid is not None and ask is not None:
136
+ etf_mid = 0.5 * (int(bid) + int(ask))
137
+ else:
138
+ etf_mid = float()
139
+ empty_mid = True
140
+ index_mid = np.sum(index_mids)
141
+ return etf_mid, index_mid, etf_p, index_p, empty_mid
142
+
143
+ def placeOrder(self):
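+ # if the index mid exceeds the ETF mid by more than gamma the ETF looks cheap, so buy it at its ask;
+ # if the ETF mid exceeds the index mid by more than gamma the ETF looks rich, so sell it at its bid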
144
+ etf_mid, index_mid, etf_p, index_p, empty_mid = self.getPriceEstimates()
145
+ if empty_mid:
146
+ #print('no move because index or ETF was missing part of NBBO')
147
+ pass
148
+ elif (index_mid - etf_mid) > self.gamma:
149
+ self.placeLimitOrder('ETF', 1, True, etf_p['ask'])
150
+ elif (etf_mid - index_mid) > self.gamma:
151
+ self.placeLimitOrder('ETF', 1, False, etf_p['bid'])
152
+ else:
153
+ pass
154
+ #print('no move because abs(index - ETF mid) < gamma')
155
+
156
+ def receiveMessage(self, currentTime, msg):
157
+ # Parent class schedules market open wakeup call once market open/close times are known.
158
+ super().receiveMessage(currentTime, msg)
159
+
160
+ # We have been awakened by something other than our scheduled wakeup.
161
+ # If our internal state indicates we were waiting for a particular event,
162
+ # check if we can transition to a new state.
163
+
164
+ if self.state == 'AWAITING_SPREAD':
165
+ # We were waiting to receive the current spread/book. Since we don't currently
166
+ # track timestamps on retained information, we rely on actually seeing a
167
+ # QUERY_SPREAD response message.
168
+
169
+ if msg.body['msg'] == 'QUERY_SPREAD':
170
+ # This is what we were waiting for.
171
+ self.messageCount -= 1
172
+
173
+ # But if the market is now closed, don't advance to placing orders.
174
+ if self.mkt_closed: return
175
+
176
+ # We now have the information needed to place a limit order with the eta
177
+ # strategic threshold parameter.
178
+ if self.messageCount == 0:
179
+ self.placeOrder()
180
+ self.messageCount = len(self.portfolio) + 1
181
+ self.state = 'AWAITING_WAKEUP'
182
+
183
+
184
+ # Internal state and logic specific to this agent subclass.
185
+
186
+ # Cancel all open orders.
187
+ # Return value: did we issue any cancellation requests?
188
+ def cancelOrders (self):
189
+ if not self.orders: return False
190
+
191
+ for id, order in self.orders.items():
192
+ self.cancelOrder(order)
193
+
194
+ return True
195
+
196
+ def getWakeFrequency (self):
197
+ return pd.Timedelta(self.random_state.randint(low = 0, high = 100), unit='ns')
ABIDES/agent/etf/EtfMarketMakerAgent.py ADDED
@@ -0,0 +1,241 @@
1
+ from agent.etf.EtfArbAgent import EtfArbAgent
2
+ from agent.etf.EtfPrimaryAgent import EtfPrimaryAgent
3
+ from message.Message import Message
4
+ from util.order.etf.BasketOrder import BasketOrder
5
+ from util.order.BasketOrder import BasketOrder
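+ # NOTE: this import shadows the util.order.etf.BasketOrder import above; the later binding is the one actually used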
6
+ from util.util import log_print
7
+
8
+ import pandas as pd
9
+ import sys
10
+
11
+ class EtfMarketMakerAgent(EtfArbAgent):
12
+
13
+ def __init__(self, id, name, type, portfolio = {}, gamma = 0, starting_cash=100000, lambda_a = 0.005,
14
+ log_orders = False, random_state = None):
15
+
16
+ # Base class init.
17
+ super().__init__(id, name, type, portfolio = portfolio, gamma = gamma, starting_cash=starting_cash,
18
+ lambda_a = lambda_a, log_orders=log_orders, random_state = random_state)
19
+
20
+ # NEED TO TEST IF THERE ARE SYMBOLS IN PORTFOLIO
21
+
22
+ # Store important parameters particular to the ETF arbitrage agent.
23
+ self.inPrime = True # Determines if the agent also participates in the Primary ETF market
24
+
25
+ # We don't yet know when the primary opens or closes.
26
+ self.prime_open = None
27
+ self.prime_close = None
28
+
29
+ # Remember whether we have already passed the primary close time, as far
30
+ # as we know.
31
+ self.prime_closed = False
32
+ self.switched_mkt = False
33
+
34
+ def kernelStarting(self, startTime):
35
+ # self.kernel is set in Agent.kernelInitializing()
36
+ # self.exchangeID is set in TradingAgent.kernelStarting()
37
+
38
+ self.primeID = self.kernel.findAgentByType(EtfPrimaryAgent)
39
+
40
+ log_print ("Agent {} requested agent of type Agent.EtfPrimaryAgent. Given Agent ID: {}",
41
+ self.id, self.primeID)
42
+
43
+ super().kernelStarting(startTime)
44
+
45
+
46
+ def wakeup (self, currentTime):
47
+ # Parent class handles discovery of exchange times and market_open wakeup call.
48
+ super().wakeup(currentTime)
49
+
50
+ # Only if the superclass leaves the state as ACTIVE should we proceed with our
51
+ # trading strategy.
52
+ if self.state != 'ACTIVE': return
53
+
54
+ if not self.prime_open:
55
+ # Ask our primary when it opens and closes, exchange is handled in TradingAgent
56
+ self.sendMessage(self.primeID, Message({ "msg" : "WHEN_PRIME_OPEN", "sender": self.id }))
57
+ self.sendMessage(self.primeID, Message({ "msg" : "WHEN_PRIME_CLOSE", "sender": self.id }))
58
+
59
+
60
+ # Steady state wakeup behavior starts here.
61
+ if not self.mkt_closed and self.prime_closed:
62
+ print('The prime closed before the exchange')
63
+ sys.exit()
64
+
65
+ elif self.mkt_closed and self.prime_closed:
66
+ return
67
+
68
+ # If we've been told the market has closed for the day, we will only request
69
+ # final price information, then stop.
70
+ # If the market has closed and we haven't obtained the daily close price yet,
71
+ # do that before we cease activity for the day. Don't do any other behavior
72
+ # after market close.
73
+ elif self.mkt_closed and not self.prime_closed:
74
+ if self.switched_mkt and self.currentTime >= self.prime_open:
75
+ self.getEtfNav()
76
+ self.state = 'AWAITING_NAV'
77
+ elif not self.switched_mkt:
78
+ for i,s in enumerate(self.portfolio):
79
+ if s not in self.daily_close_price:
80
+ self.getLastTrade(s)
81
+ self.state = 'AWAITING_LAST_TRADE'
82
+ if 'ETF' not in self.daily_close_price:
83
+ self.getLastTrade('ETF')
84
+ self.state = 'AWAITING_LAST_TRADE'
85
+
86
+ print('holdings before primary: ' + str(self.holdings))
87
+
88
+ self.setWakeup(self.prime_open)
89
+ self.switched_mkt = True
90
+ else:
91
+ self.setWakeup(self.prime_open)
92
+ return
93
+
94
+ # Schedule a wakeup for the next time this agent should arrive at the market
95
+ # (following the conclusion of its current activity cycle).
96
+ # We do this early in case some of our expected message responses don't arrive.
97
+
98
+ # Agents should arrive according to a Poisson process. This is equivalent to
99
+ # each agent independently sampling its next arrival time from an exponential
100
+ # distribution with scale parameter Beta = 1 / lambda, where lambda
101
+ # is the mean arrival rate of the Poisson process.
102
+ else:
103
+ delta_time = self.random_state.exponential(scale = 1.0 / self.lambda_a)
104
+ self.setWakeup(currentTime + pd.Timedelta('{}ns'.format(int(round(delta_time)))))
105
+
106
+ # Issue cancel requests for any open orders. Don't wait for confirmation, as presently
107
+ # the only reason it could fail is that the order already executed. (But requests won't
108
+ # be generated for those, anyway, unless something strange has happened.)
109
+ self.cancelOrders()
110
+
111
+
112
+ # The ETF arb agent DOES try to maintain a zero position, so there IS need to exit positions
113
+ # as some "active trading" agents might. It might exit a position based on its order logic,
114
+ # but this will be as a natural consequence of its beliefs... but it submits marketable orders so...
115
+
116
+
117
+ # If the calling agent is a subclass, don't initiate the strategy section of wakeup(), as it
118
+ # may want to do something different.
119
+ # FIGURE OUT WHAT TO DO WITH MULTIPLE SPREADS...
120
+ for i,s in enumerate(self.portfolio):
121
+ self.getCurrentSpread(s)
122
+ self.getCurrentSpread('ETF')
123
+ self.state = 'AWAITING_SPREAD'
124
+
125
+ def placeOrder(self):
126
+ etf_mid, index_mid, etf_p, index_p, empty_mid = self.getPriceEstimates()
127
+ if empty_mid:
128
+ #print('no move because index or ETF was missing part of NBBO')
129
+ pass
130
+ elif (index_mid - etf_mid) > self.gamma:
131
+ #print('buy ETF')
132
+ for i,s in enumerate(self.portfolio):
133
+ self.placeLimitOrder(s, 1, False, index_p[s]['bid'])
134
+ self.placeLimitOrder('ETF', 1, True, etf_p['ask'])
135
+ elif (etf_mid - index_mid) > self.gamma:
136
+ #print('sell ETF')
137
+ for i,s in enumerate(self.portfolio):
138
+ self.placeLimitOrder(s, 1, True, index_p[s]['ask'])
139
+ self.placeLimitOrder('ETF', 1, False, etf_p['bid'])
140
+ else:
141
+ pass
142
+ #print('no move because abs(index - ETF mid) < gamma')
143
+
144
+ def decideBasket(self):
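+ # a creation basket delivers the underlying shares in exchange for new ETF shares, so it requires long stock and short ETF holdings;
+ # a redemption basket is the reverse and requires long ETF and short stock holdings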
145
+ print(self.portfolio)
146
+ index_est = 0
147
+ for i,s in enumerate(self.portfolio):
148
+ index_est += self.daily_close_price[s]
149
+
150
+ H = {}
151
+ for i,s in enumerate(self.portfolio):
152
+ H[s] = self.getHoldings(s)
153
+ etf_h = self.getHoldings('ETF')
154
+
155
+ self.nav_diff = self.nav - index_est
156
+ if self.nav_diff > 0:
157
+ if min(H.values()) > 0 and etf_h < 0:
158
+ print("send creation basket")
159
+ self.placeBasketOrder(min(H.values()), True)
160
+ else: print('wrong side for basket')
161
+ elif self.nav_diff < 0:
162
+ if etf_h > 0 and max(H.values()) < 0:
163
+ print("submit redemption basket")
164
+ self.placeBasketOrder(etf_h, False)
165
+ else: print('wrong side for basket')
166
+ else:
167
+ if min(H.values()) > 0 and etf_h < 0:
168
+ print("send creation basket")
169
+ self.placeBasketOrder(min(H.values()), True)
170
+ elif etf_h > 0 and max(H.values()) < 0:
171
+ print("submit redemption basket")
172
+ self.placeBasketOrder(etf_h, False)
173
+
174
+
175
+ def receiveMessage(self, currentTime, msg):
176
+ # Parent class schedules market open wakeup call once market open/close times are known.
177
+ super().receiveMessage(currentTime, msg)
178
+
179
+ # We have been awakened by something other than our scheduled wakeup.
180
+ # If our internal state indicates we were waiting for a particular event,
181
+ # check if we can transition to a new state.
182
+
183
+ # Record market open or close times.
184
+ if msg.body['msg'] == "WHEN_PRIME_OPEN":
185
+ self.prime_open = msg.body['data']
186
+
187
+ log_print ("Recorded primary open: {}", self.kernel.fmtTime(self.prime_open))
188
+
189
+ elif msg.body['msg'] == "WHEN_PRIME_CLOSE":
190
+ self.prime_close = msg.body['data']
191
+
192
+ log_print ("Recorded primary close: {}", self.kernel.fmtTime(self.prime_close))
193
+
194
+ if self.state == 'AWAITING_NAV':
195
+ if msg.body['msg'] == 'QUERY_NAV':
196
+ if msg.body['prime_closed']: self.prime_closed = True
197
+ self.queryEtfNav(msg.body['nav'])
198
+
199
+ # But if the market is now closed, don't advance to placing orders.
200
+ if self.prime_closed: return
201
+
202
+ # We now have the information needed to place a C/R basket.
203
+ self.decideBasket()
204
+
205
+ elif self.state == 'AWAITING_BASKET':
206
+ if msg.body['msg'] == 'BASKET_EXECUTED':
207
+ order = msg.body['order']
208
+ # We now have the information needed to place a C/R basket.
209
+ for i,s in enumerate(self.portfolio):
210
+ if order.is_buy_order: self.holdings[s] -= order.quantity
211
+ else: self.holdings[s] += order.quantity
212
+ if order.is_buy_order:
213
+ self.holdings['ETF'] += order.quantity
214
+ self.basket_size = order.quantity
215
+ else:
216
+ self.holdings['ETF'] -= order.quantity
217
+ self.basket_size = -1 * order.quantity
218
+
219
+ self.state = 'INACTIVE'
220
+
221
+
222
+ # Internal state and logic specific to this agent subclass.
223
+
224
+ # Used by any ETF Arb Agent subclass to query the Net Asset Value (NAV) of the ETF.
225
+ # This activity is not logged.
226
+ def getEtfNav (self):
227
+ self.sendMessage(self.primeID, Message({ "msg" : "QUERY_NAV", "sender": self.id }))
228
+
229
+ # Used by ETF Arb Agent subclass to place a basket order.
230
+ # This activity is not logged.
231
+ def placeBasketOrder (self, quantity, is_create_order):
232
+ order = BasketOrder(self.id, self.currentTime, 'ETF', quantity, is_create_order)
233
+ print('BASKET ORDER PLACED: ' + str(order))
234
+ self.sendMessage(self.primeID, Message({ "msg" : "BASKET_ORDER", "sender": self.id,
235
+ "order" : order }))
236
+ self.state = 'AWAITING_BASKET'
237
+
238
+ # Handles QUERY NAV messages from primary
239
+ def queryEtfNav(self, nav):
240
+ self.nav = nav
241
+ log_print ("Received NAV of ETF.")
ABIDES/agent/etf/EtfPrimaryAgent.py ADDED
@@ -0,0 +1,166 @@
1
+ # The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp to open and close trading,
2
+ # a list of equity symbols for which it should create order books, a frequency at which to archive snapshots
3
+ # of its order books, a pipeline delay (in ns) for order activity, the exchange computation delay (in ns),
4
+ # the levels of order stream history to maintain per symbol (maintains all orders that led to the last N trades),
5
+ # whether to log all order activity to the agent log, and a random state object (already seeded) to use
6
+ # for stochasticity.
7
+ from agent.FinancialAgent import FinancialAgent
8
+ from agent.ExchangeAgent import ExchangeAgent
9
+ from message.Message import Message
10
+ from util.util import log_print
11
+
12
+ import pandas as pd
13
+ pd.set_option('display.max_rows', 500)
14
+
15
+
16
+ class EtfPrimaryAgent(FinancialAgent):
17
+
18
+ def __init__(self, id, name, type, prime_open, prime_close, symbol, pipeline_delay = 40000,
19
+ computation_delay = 1, random_state = None):
20
+
21
+ super().__init__(id, name, type, random_state)
22
+
23
+ # Do not request repeated wakeup calls.
24
+ self.reschedule = False
25
+
26
+ # Store this exchange's open and close times.
27
+ self.prime_open = prime_open
28
+ self.prime_close = prime_close
29
+
30
+ self.mkt_close = None
31
+
32
+ self.nav = 0
33
+ self.create = 0
34
+ self.redeem = 0
35
+
36
+ self.symbol = symbol
37
+
38
+ # Right now, only the exchange agent has a parallel processing pipeline delay. This is an additional
39
+ # delay added only to order activity (placing orders, etc) and not simple inquiries (market operating
40
+ # hours, etc).
41
+ self.pipeline_delay = pipeline_delay
42
+
43
+ # Computation delay is applied on every wakeup call or message received.
44
+ self.computation_delay = computation_delay
45
+
46
+ def kernelStarting(self, startTime):
47
+ # Find an exchange with which we can place orders. It is guaranteed
48
+ # to exist by now (if there is one).
49
+ self.exchangeID = self.kernel.findAgentByType(ExchangeAgent)
50
+
51
+ log_print ("Agent {} requested agent of type Agent.ExchangeAgent. Given Agent ID: {}",
52
+ self.id, self.exchangeID)
53
+
54
+ # Request a wake-up call as in the base Agent.
55
+ super().kernelStarting(startTime)
56
+
57
+
58
+ def kernelStopping (self):
59
+ # Always call parent method to be safe.
60
+ super().kernelStopping()
61
+
62
+ print ("Final C/R baskets for {}: {} creation baskets. {} redemption baskets".format(self.name,
63
+ self.create, self.redeem))
64
+
65
+
66
+ # Simulation participation messages.
67
+
68
+ def wakeup (self, currentTime):
69
+ super().wakeup(currentTime)
70
+
71
+ if self.mkt_close is None:
72
+ # Ask our exchange when it opens and closes.
73
+ self.sendMessage(self.exchangeID, Message({ "msg" : "WHEN_MKT_CLOSE", "sender": self.id }))
74
+
75
+ else:
76
+ # Get close price of ETF/nav
77
+ self.getLastTrade(self.symbol)
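+ # the ETF's daily close price is stored as the NAV and later used as the fill price for creation/redemption baskets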
78
+
79
+ def receiveMessage (self, currentTime, msg):
80
+ super().receiveMessage(currentTime, msg)
81
+
82
+ # Unless the intent of an experiment is to examine computational issues within an Exchange,
83
+ # it will typically have either 1 ns delay (near instant but cannot process multiple orders
84
+ # in the same atomic time unit) or 0 ns delay (can process any number of orders, always in
85
+ # the atomic time unit in which they are received). This is separate from, and additional
86
+ # to, any parallel pipeline delay imposed for order book activity.
87
+
88
+ # Note that computation delay MUST be updated before any calls to sendMessage.
89
+ self.setComputationDelay(self.computation_delay)
90
+
91
+ # Is the exchange closed? (This block only affects post-close, not pre-open.)
92
+ if currentTime > self.prime_close:
93
+ # Most messages after close will receive a 'PRIME_CLOSED' message in response.
94
+ log_print ("{} received {}, discarded: prime is closed.", self.name, msg.body['msg'])
95
+ self.sendMessage(msg.body['sender'], Message({ "msg": "PRIME_CLOSED" }))
96
+ # Don't do any further processing on these messages!
97
+ return
98
+
99
+
100
+ if msg.body['msg'] == "WHEN_MKT_CLOSE":
101
+ self.mkt_close = msg.body['data']
102
+ log_print ("Recorded market close: {}", self.kernel.fmtTime(self.mkt_close))
103
+ self.setWakeup(self.mkt_close)
104
+ return
105
+
106
+ elif msg.body['msg'] == 'QUERY_LAST_TRADE':
107
+ # Call the queryLastTrade method.
108
+ self.queryLastTrade(msg.body['symbol'], msg.body['data'])
109
+ return
110
+
111
+ self.logEvent(msg.body['msg'], msg.body['sender'])
112
+
113
+ # Handle all message types understood by this exchange.
114
+ if msg.body['msg'] == "WHEN_PRIME_OPEN":
115
+ log_print ("{} received WHEN_PRIME_OPEN request from agent {}", self.name, msg.body['sender'])
116
+
117
+ # The exchange is permitted to respond to requests for simple immutable data (like "what are your
118
+ # hours?") instantly. This does NOT include anything that queries mutable data, like equity
119
+ # quotes or trades.
120
+ self.setComputationDelay(0)
121
+
122
+ self.sendMessage(msg.body['sender'], Message({ "msg": "WHEN_PRIME_OPEN", "data": self.prime_open }))
123
+
124
+ elif msg.body['msg'] == "WHEN_PRIME_CLOSE":
125
+ log_print ("{} received WHEN_PRIME_CLOSE request from agent {}", self.name, msg.body['sender'])
126
+
127
+ # The exchange is permitted to respond to requests for simple immutable data (like "what are your
128
+ # hours?") instantly. This does NOT include anything that queries mutable data, like equity
129
+ # quotes or trades.
130
+ self.setComputationDelay(0)
131
+
132
+ self.sendMessage(msg.body['sender'], Message({ "msg": "WHEN_PRIME_CLOSE", "data": self.prime_close }))
133
+
134
+ elif msg.body['msg'] == "QUERY_NAV":
135
+ log_print ("{} received QUERY_NAV ({}) request from agent {}", self.name, msg.body['sender'])
136
+
137
+ # Return the NAV for the requested symbol.
138
+ self.sendMessage(msg.body['sender'], Message({ "msg": "QUERY_NAV",
139
+ "nav": self.nav, "prime_closed": True if currentTime > self.prime_close else False }))
140
+
141
+ elif msg.body['msg'] == "BASKET_ORDER":
142
+ order = msg.body['order']
143
+ log_print ("{} received BASKET_ORDER: {}", self.name, order)
144
+ if order.is_buy_order: self.create += 1
145
+ else: self.redeem += 1
146
+ order.fill_price = self.nav
147
+ self.sendMessage(msg.body['sender'], Message({ "msg": "BASKET_EXECUTED", "order": order}))
148
+
149
+
150
+ # Handles QUERY_LAST_TRADE messages from an exchange agent.
151
+ def queryLastTrade (self, symbol, price):
152
+ self.nav = price
153
+ log_print ("Received daily close price or nav of {} for {}.", price, symbol)
154
+
155
+ # Used by any Trading Agent subclass to query the last trade price for a symbol.
156
+ # This activity is not logged.
157
+ def getLastTrade (self, symbol):
158
+ self.sendMessage(self.exchangeID, Message({ "msg" : "QUERY_LAST_TRADE", "sender": self.id,
159
+ "symbol" : symbol }))
160
+
161
+ # Simple accessor methods for the market open and close times.
162
+ def getPrimeOpen(self):
163
+ return self.prime_open  # __init__ stores self.prime_open; the name-mangled self.__prime_open was never assigned
164
+
165
+ def getPrimeClose(self):
166
+ return self.prime_close  # __init__ stores self.prime_close; the name-mangled self.__prime_close was never assigned
ABIDES/agent/etf/__init__.py ADDED
File without changes
ABIDES/agent/examples/ExampleExperimentalAgent.py ADDED
@@ -0,0 +1,146 @@
 
1
+ from agent.examples.SubscriptionAgent import SubscriptionAgent
2
+ import pandas as pd
3
+ from copy import deepcopy
4
+
5
+
6
+ class ExampleExperimentalAgentTemplate(SubscriptionAgent):
7
+ """ Minimal working template for an experimental trading agent
8
+ """
9
+ def __init__(self, id, name, type, symbol, starting_cash, levels, subscription_freq, log_orders=False, random_state=None):
10
+ """ Constructor for ExampleExperimentalAgentTemplate.
11
+
12
+ :param id: Agent's ID as set in config
13
+ :param name: Agent's human-readable name as set in config
14
+ :param type: Agent's human-readable type as set in config, useful for grouping agents semantically
15
+ :param symbol: Name of asset being traded
16
+ :param starting_cash: Dollar amount of cash agent starts with.
17
+ :param levels: Number of levels of orderbook to subscribe to
18
+ :param subscription_freq: Frequency of orderbook updates subscribed to (in nanoseconds)
19
+ :param log_orders: bool to decide if agent's individual actions logged to file.
20
+ :param random_state: numpy RandomState object from which agent derives randomness
21
+ """
22
+ super().__init__(id, name, type, symbol, starting_cash, levels, subscription_freq, log_orders=log_orders, random_state=random_state)
23
+
24
+ self.current_bids = None # subscription to market data populates this list
25
+ self.current_asks = None # subscription to market data populates this list
26
+
27
+ def wakeup(self, currentTime):
28
+ """ Action to be taken by agent at each wakeup.
29
+
30
+ :param currentTime: pd.Timestamp for current simulation time
31
+ """
32
+ super().wakeup(currentTime)
33
+ self.setWakeup(currentTime + self.getWakeFrequency())
34
+
35
+ def receiveMessage(self, currentTime, msg):
36
+ """ Action taken when agent receives a message from the exchange
37
+
38
+ :param currentTime: pd.Timestamp for current simulation time
39
+ :param msg: message from exchange
40
+ :return:
41
+ """
42
+ super().receiveMessage(currentTime, msg) # receives subscription market data
43
+
44
+ def getWakeFrequency(self):
45
+ """ Set next wakeup time for agent. """
46
+ return pd.Timedelta("1min")
47
+
48
+ def placeLimitOrder(self, quantity, is_buy_order, limit_price):
49
+ """ Place a limit order at the exchange.
50
+ :param quantity (int): order quantity
51
+ :param is_buy_order (bool): True if Buy else False
52
+ :param limit_price: price level at which to place a limit order
53
+ :return:
54
+ """
55
+ super().placeLimitOrder(self.symbol, quantity, is_buy_order, limit_price)
56
+
57
+ def placeMarketOrder(self, quantity, is_buy_order):
58
+ """ Place a market order at the exchange.
59
+ :param quantity (int): order quantity
60
+ :param is_buy_order (bool): True if Buy else False
61
+ :return:
62
+ """
63
+ super().placeMarketOrder(self.symbol, quantity, is_buy_order)
64
+
65
+ def cancelAllOrders(self):
66
+ """ Cancels all resting limit orders placed by the experimental agent.
67
+ """
68
+ for _, order in self.orders.items():
69
+ self.cancelOrder(order)
70
+
71
+
72
+ class ExampleExperimentalAgent(ExampleExperimentalAgentTemplate):
73
+
74
+ def __init__(self, *args, wake_freq, order_size, short_window, long_window, **kwargs):
75
+ """
76
+ :param args: superclass args
77
+ :param wake_freq: Frequency of wakeup -- str to be parsed by pd.Timedelta
78
+ :param order_size: size of orders to place
79
+ :param short_window: length of mid price short moving average window -- str to be parsed by pd.Timedelta
80
+ :param long_window: length of mid price long moving average window -- str to be parsed by pd.Timedelta
81
+ :param kwargs: superclass kwargs
82
+ """
83
+ super().__init__(*args, **kwargs)
84
+ self.wake_freq = wake_freq
85
+ self.order_size = order_size
86
+ self.short_window = short_window
87
+ self.long_window = long_window
88
+ self.mid_price_history = pd.DataFrame(columns=['mid_price'], index=pd.to_datetime([]))
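+ # A minimal instantiation sketch; the values below are hypothetical and not taken from any shipped config:
+ #   agent = ExampleExperimentalAgent(id=100, name="EXPERIMENTAL_AGENT", type="ExampleExperimentalAgent",
+ #                                    symbol="ABM", starting_cash=10000000, levels=5, subscription_freq=int(1e9),
+ #                                    wake_freq="30s", order_size=10, short_window="5min", long_window="20min")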
89
+
90
+ def getCurrentMidPrice(self):
91
+ """ Retrieve mid price from most recent subscription data.
92
+
93
+ :return:
94
+ """
95
+
96
+ try:
97
+ best_bid = self.current_bids[0][0]
98
+ best_ask = self.current_asks[0][0]
99
+ return round((best_ask + best_bid) / 2)
100
+ except (TypeError, IndexError):
101
+ return None
102
+
103
+ def receiveMessage(self, currentTime, msg):
104
+ """ Action taken when agent receives a message from the exchange -- action here is for agent to update internal
105
+ log of most recently observed mid-price.
106
+
107
+ :param currentTime: pd.Timestamp for current simulation time
108
+ :param msg: message from exchange
109
+ :return:
110
+ """
111
+ super().receiveMessage(currentTime, msg) # receives subscription market data
112
+ self.mid_price_history = self.mid_price_history.append(
113
+ pd.Series({'mid_price': self.getCurrentMidPrice()}, name=currentTime))
114
+ self.mid_price_history.dropna(inplace=True)
115
+
116
+ def computeMidPriceMovingAverages(self):
117
+ """ Returns the short-window and long-window moving averages of mid price.
118
+ :return:
119
+ """
120
+ try:
121
+ short_moving_avg = self.mid_price_history.rolling(self.short_window).mean().iloc[-1]['mid_price']
122
+ long_moving_avg = self.mid_price_history.rolling(self.long_window).mean().iloc[-1]['mid_price']
123
+ return short_moving_avg, long_moving_avg
124
+ except IndexError:
125
+ return None, None
126
+
127
+ def wakeup(self, currentTime):
128
+ """ Action to be taken by agent at each wakeup.
129
+
130
+ :param currentTime: pd.Timestamp for current simulation time
131
+ """
132
+ super().wakeup(currentTime)
133
+ short_moving_avg, long_moving_avg = self.computeMidPriceMovingAverages()
134
+ if short_moving_avg is not None and long_moving_avg is not None:
135
+ if short_moving_avg > long_moving_avg:
136
+ self.placeMarketOrder(self.order_size, 0)
137
+ elif short_moving_avg < long_moving_avg:
138
+ self.placeMarketOrder(self.order_size, 1)
139
+
140
+ def getWakeFrequency(self):
141
+ """ Set next wakeup time for agent. """
142
+ return pd.Timedelta(self.wake_freq)
143
+
144
+
145
+
146
+
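As a side note, the crossover rule above can be checked in isolation. The following is a minimal standalone sketch (not part of the committed file, with made-up timestamps, prices, and window lengths) of the same time-based rolling means used by computeMidPriceMovingAverages:

import pandas as pd

# Hypothetical mid-price history indexed by timestamp, mirroring self.mid_price_history.
idx = pd.date_range("2020-06-01 09:30:00", periods=6, freq="1min")
mid_price_history = pd.DataFrame({"mid_price": [100.0, 100.5, 101.0, 100.8, 101.5, 102.0]}, index=idx)

# Time-based rolling windows, as in the agent (short_window / long_window parsed by pd.Timedelta).
short_window, long_window = "2min", "5min"
short_moving_avg = mid_price_history.rolling(short_window).mean().iloc[-1]["mid_price"]
long_moving_avg = mid_price_history.rolling(long_window).mean().iloc[-1]["mid_price"]

# The agent compares the two averages at each wakeup to pick a market-order direction.
print(short_moving_avg, long_moving_avg, short_moving_avg > long_moving_avg)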
ABIDES/agent/examples/ImpactAgent.py ADDED
@@ -0,0 +1,160 @@
+ from agent.TradingAgent import TradingAgent
2
+
3
+ import pandas as pd
4
+
5
+
6
+ class ImpactAgent(TradingAgent):
7
+
8
+ def __init__(self, id, name, type, symbol = None, starting_cash = None, greed = None, within = 0.01,
9
+ impact = True, impact_time = None, random_state = None):
10
+ # Base class init.
11
+ super().__init__(id, name, type, starting_cash = starting_cash, random_state = random_state)
12
+
13
+ self.symbol = symbol # symbol to trade
14
+ self.trading = False # ready to trade
15
+ self.traded = False # has made its one trade
16
+
17
+ # The amount of available "nearby" liquidity to consume when placing its order.
18
+ self.greed = greed # trade this proportion of liquidity
19
+ self.within = within # within this range of the inside price
20
+
21
+ # When should we make the impact trade?
22
+ self.impact_time = impact_time
23
+
24
+ # The agent begins in its "complete" state, not waiting for
25
+ # any special event or condition.
26
+ self.state = 'AWAITING_WAKEUP'
27
+
28
+ # Controls whether the impact trade is actually placed.
29
+ self.impact = impact
30
+
31
+
32
+ def wakeup (self, currentTime):
33
+ # Parent class handles discovery of exchange times and market_open wakeup call.
34
+ super().wakeup(currentTime)
35
+
36
+ if not self.mkt_open or not self.mkt_close:
37
+ # TradingAgent handles discovery of exchange times.
38
+ return
39
+ else:
40
+ if not self.trading:
41
+ self.trading = True
42
+
43
+ # Time to start trading!
44
+ print ("{} is ready to start trading now.".format(self.name))
45
+
46
+
47
+ # Steady state wakeup behavior starts here.
48
+
49
+ # First, see if we have received a MKT_CLOSED message for the day. If so,
50
+ # there's nothing to do except clean-up.
51
+ if self.mkt_closed and (self.symbol in self.daily_close_price):
52
+ # Market is closed and we already got the daily close price.
53
+ return
54
+
55
+
56
+ ### Impact agent operates at a specific time.
57
+ if currentTime < self.impact_time:
58
+ print ("Impact agent waiting for impact_time {}".format(self.impact_time))
59
+ self.setWakeup(self.impact_time)
60
+ return
61
+
62
+
63
+ ### The impact agent only trades once, but we will monitor prices for
64
+ ### the sake of performance.
65
+ self.setWakeup(currentTime + pd.Timedelta('30m'))
66
+
67
+
68
+ # If the market is closed and we haven't obtained the daily close price yet,
69
+ # do that before we cease activity for the day. Don't do any other behavior
70
+ # after market close.
71
+ #
72
+ # Also, if we already made our one trade, do nothing except monitor prices.
73
+ if self.traded or (self.mkt_closed and (not self.symbol in self.daily_close_price)):
74
+ self.getLastTrade()
75
+ self.state = 'AWAITING_LAST_TRADE'
76
+ return
77
+
78
+
79
+ # The impact agent will place one order based on the current spread.
80
+ self.getCurrentSpread()
81
+ self.state = 'AWAITING_SPREAD'
82
+
83
+
84
+ def receiveMessage (self, currentTime, msg):
85
+ # Parent class schedules market open wakeup call once market open/close times are known.
86
+ super().receiveMessage(currentTime, msg)
87
+
88
+ # We have been awakened by something other than our scheduled wakeup.
89
+ # If our internal state indicates we were waiting for a particular event,
90
+ # check if we can transition to a new state.
91
+
92
+ if self.state == 'AWAITING_SPREAD':
93
+ # We were waiting for current spread information to make our trade.
94
+ # If the message we just received is QUERY_SPREAD, that means we just got it.
95
+ if msg.body['msg'] == 'QUERY_SPREAD':
96
+ # Place our one trade.
97
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
98
+ bid_liq, ask_liq = self.getKnownLiquidity(self.symbol, within=self.within)
99
+
100
+ # Buy order.
101
+ direction, shares, price = True, int(round(ask_liq * self.greed)), ask
102
+
103
+ # Sell order. This should be a parameter, but isn't yet.
104
+ #direction, shares, price = False, int(round(bid_liq * self.greed)), bid
105
+
106
+ # Compute the limit price we must offer to ensure our order executes immediately.
107
+ # This is essentially a workaround for the lack of true market orders in our
108
+ # current simulation.
109
+ price = self.computeRequiredPrice(direction, shares)
110
+
111
+ # Actually place the order only if self.impact is true.
112
+ if self.impact:
113
+ print ("Impact agent firing: {} {} @ {}".format('BUY' if direction else 'SELL', shares, self.dollarize(price)))
114
+ self.placeLimitOrder (self.symbol, shares, direction, price)
115
+ else:
116
+ print ("Impact agent would fire: {} {} @ {} (but self.impact = False)".format('BUY' if direction else 'SELL', shares, self.dollarize(price)))
117
+
118
+ self.traded = True
119
+ self.state = 'AWAITING_WAKEUP'
120
+
121
+
122
+ # Internal state and logic specific to this agent.
123
+
124
+ def placeLimitOrder (self, symbol, quantity, is_buy_order, limit_price):
125
+ super().placeLimitOrder(symbol, quantity, is_buy_order, limit_price, ignore_risk = True)
126
+
127
+ # Computes required limit price to immediately execute a trade for the specified quantity
128
+ # of shares.
129
+ def computeRequiredPrice (self, direction, shares):
130
+ book = self.known_asks[self.symbol] if direction else self.known_bids[self.symbol]
131
+
132
+ # Start at the inside and add up the shares.
133
+ t = 0
134
+
135
+ for i in range(len(book)):
136
+ p,v = book[i]
137
+ t += v
138
+
139
+ # If we have accumulated enough shares, return this price.
140
+ if t >= shares: return p
141
+
142
+ # Not enough shares. Just return worst price (highest ask, lowest bid).
143
+ return book[-1][0]
144
+
145
+
146
+ # Request the last trade price for our symbol.
147
+ def getLastTrade (self):
148
+ super().getLastTrade(self.symbol)
149
+
150
+
151
+ # Request the spread for our symbol.
152
+ def getCurrentSpread (self):
153
+ # Impact agent gets depth 10000 on each side (probably everything).
154
+ super().getCurrentSpread(self.symbol, 10000)
155
+
156
+
157
+ def getWakeFrequency (self):
158
+ return (pd.Timedelta('1ns'))
159
+
160
+
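As an aside, here is a self-contained sketch (illustrative book levels only) of the book-walking idea behind computeRequiredPrice: accumulate visible volume from the inside of the book until the requested size is covered, otherwise fall back to the worst visible price.

def compute_required_price(book, shares):
    """book: list of (price, volume) tuples ordered from the inside outward."""
    cumulative = 0
    for price, volume in book:
        cumulative += volume
        if cumulative >= shares:
            return price   # this level absorbs the remainder of the order
    return book[-1][0]     # not enough visible depth: return the worst visible price

# Hypothetical ask side: best ask first, deeper levels after (prices in cents).
asks = [(10010, 300), (10011, 500), (10012, 1000)]
print(compute_required_price(asks, 600))   # 10011 -- needs the second level
print(compute_required_price(asks, 5000))  # 10012 -- exceeds visible depth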
ABIDES/agent/examples/MarketReplayAgent.py ADDED
@@ -0,0 +1,125 @@
+ import pickle
2
+ import os.path
3
+ from datetime import datetime
4
+ import pandas as pd
5
+ #from joblib import Memory
6
+
7
+ from agent.TradingAgent import TradingAgent
8
+ from util.order.LimitOrder import LimitOrder
9
+ from util.util import log_print
10
+
11
+
12
+ class MarketReplayAgent(TradingAgent):
13
+
14
+ def __init__(self, id, name, type, symbol, date, start_time, end_time,
15
+ orders_file_path, processed_orders_folder_path,
16
+ starting_cash, log_orders=False, random_state=None):
17
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
18
+ self.symbol = symbol
19
+ self.date = date
20
+ self.log_orders = log_orders
21
+ self.executed_trades = dict()
22
+ self.state = 'AWAITING_WAKEUP'
23
+
24
+ self.historical_orders = L3OrdersProcessor(self.symbol,
25
+ self.date, start_time, end_time,
26
+ orders_file_path, processed_orders_folder_path)
27
+ self.wakeup_times = self.historical_orders.wakeup_times
28
+
29
+ def wakeup(self, currentTime):
30
+ super().wakeup(currentTime)
31
+ if not self.mkt_open or not self.mkt_close:
32
+ return
33
+ try:
34
+ self.setWakeup(self.wakeup_times[0])
35
+ self.wakeup_times.pop(0)
36
+ self.placeOrder(currentTime, self.historical_orders.orders_dict[currentTime])
37
+ except IndexError:
38
+ log_print(f"Market Replay Agent submitted all orders - last order @ {currentTime}")
39
+
40
+ def receiveMessage(self, currentTime, msg):
41
+ super().receiveMessage(currentTime, msg)
42
+ if msg.body['msg'] == 'ORDER_EXECUTED':
43
+ order = msg.body['order']
44
+ self.executed_trades[currentTime] = [order.fill_price, order.quantity]
45
+ self.last_trade[self.symbol] = order.fill_price
46
+
47
+ def placeOrder(self, currentTime, order):
48
+ if len(order) == 1:
49
+ order = order[0]
50
+ order_id = order['ORDER_ID']
51
+ existing_order = self.orders.get(order_id)
52
+ if not existing_order and order['SIZE'] > 0:
53
+ self.placeLimitOrder(self.symbol, order['SIZE'], order['BUY_SELL_FLAG'] == 'BUY', order['PRICE'],
54
+ order_id=order_id)
55
+ elif existing_order and order['SIZE'] == 0:
56
+ self.cancelOrder(existing_order)
57
+ elif existing_order:
58
+ self.modifyOrder(existing_order, LimitOrder(self.id, currentTime, self.symbol, order['SIZE'],
59
+ order['BUY_SELL_FLAG'] == 'BUY', order['PRICE'],
60
+ order_id=order_id))
61
+ else:
62
+ for ind_order in order:
63
+ self.placeOrder(currentTime, order=[ind_order])
64
+
65
+ def getWakeFrequency(self):
66
+ log_print(f"Market Replay Agent first wake up: {self.historical_orders.first_wakeup}")
67
+ return self.historical_orders.first_wakeup - self.mkt_open
68
+
69
+
70
+ #mem = Memory(cachedir='./cache', verbose=0)
71
+
72
+
73
+ class L3OrdersProcessor:
74
+ COLUMNS = ['TIMESTAMP', 'ORDER_ID', 'PRICE', 'SIZE', 'BUY_SELL_FLAG']
75
+ DIRECTION = {0: 'BUY', 1: 'SELL'}
76
+
77
+ # Class for reading historical exchange orders stream
78
+ def __init__(self, symbol, date, start_time, end_time, orders_file_path, processed_orders_folder_path):
79
+ self.symbol = symbol
80
+ self.date = date
81
+ self.start_time = start_time
82
+ self.end_time = end_time
83
+ self.orders_file_path = orders_file_path
84
+ self.processed_orders_folder_path = processed_orders_folder_path
85
+
86
+ self.orders_dict = self.processOrders()
87
+ self.wakeup_times = [*self.orders_dict]
88
+ self.first_wakeup = self.wakeup_times[0]
89
+
90
+ def processOrders(self):
91
+ def convertDate(date_str):
92
+ try:
93
+ return datetime.strptime(date_str, '%Y%m%d%H%M%S.%f')
94
+ except ValueError:
95
+ return convertDate(date_str[:-1])
96
+
97
+ #@mem.cache
98
+ def read_processed_orders_file(processed_orders_file):
99
+ with open(processed_orders_file, 'rb') as handle:
100
+ return pickle.load(handle)
101
+
102
+ processed_orders_file = f'{self.processed_orders_folder_path}marketreplay_{self.symbol}_{self.date.date()}.pkl'
103
+ if os.path.isfile(processed_orders_file):
104
+ print(f'Processed file exists for {self.symbol} and {self.date.date()}: {processed_orders_file}')
105
+ return read_processed_orders_file(processed_orders_file)
106
+ else:
107
+ print(f'Processed file does not exist for {self.symbol} and {self.date.date()}, processing...')
108
+ orders_df = pd.read_csv(self.orders_file_path).iloc[1:]
109
+ all_columns = orders_df.columns[0].split('|')
110
+ orders_df = orders_df[orders_df.columns[0]].str.split('|', 16, expand=True)
111
+ orders_df.columns = all_columns
112
+ orders_df = orders_df[L3OrdersProcessor.COLUMNS]
113
+ orders_df['BUY_SELL_FLAG'] = orders_df['BUY_SELL_FLAG'].astype(int).replace(L3OrdersProcessor.DIRECTION)
114
+ orders_df['TIMESTAMP'] = orders_df['TIMESTAMP'].astype(str).apply(convertDate)
115
+ orders_df['SIZE'] = orders_df['SIZE'].astype(int)
116
+ orders_df['PRICE'] = orders_df['PRICE'].astype(float) * 100
117
+ orders_df['PRICE'] = orders_df['PRICE'].astype(int)
118
+ orders_df = orders_df.loc[(orders_df.TIMESTAMP >= self.start_time) & (orders_df.TIMESTAMP < self.end_time)]
119
+ orders_df.set_index('TIMESTAMP', inplace=True)
120
+ log_print(f"Number of Orders: {len(orders_df)}")
121
+ orders_dict = {k: g.to_dict(orient='records') for k, g in orders_df.groupby(level=0)}
122
+ with open(processed_orders_file, 'wb') as handle:
123
+ pickle.dump(orders_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
124
+ print(f'processed file created as {processed_orders_file}')
125
+ return orders_dict
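For reference, a small standalone sketch (fabricated rows) of the structure processOrders builds: grouping the timestamp-indexed frame by its index yields a dict mapping each timestamp to a list of order records, which is exactly what placeOrder consumes.

import pandas as pd

# Hypothetical L3 rows after the type conversions above (prices already converted to cents).
orders_df = pd.DataFrame(
    {"ORDER_ID": [1, 2, 3], "PRICE": [10010, 10011, 10010], "SIZE": [100, 50, 0],
     "BUY_SELL_FLAG": ["BUY", "SELL", "BUY"]},
    index=pd.to_datetime(["2015-01-29 10:00:00.001", "2015-01-29 10:00:00.001", "2015-01-29 10:00:00.002"]),
)
orders_dict = {k: g.to_dict(orient="records") for k, g in orders_df.groupby(level=0)}
# {Timestamp('...00.001'): [{'ORDER_ID': 1, ...}, {'ORDER_ID': 2, ...}],
#  Timestamp('...00.002'): [{'ORDER_ID': 3, ...}]}
print(orders_dict)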
ABIDES/agent/examples/MomentumAgent.py ADDED
@@ -0,0 +1,80 @@
+ from agent.TradingAgent import TradingAgent
2
+ import pandas as pd
3
+ import numpy as np
4
+
5
+
6
+ class MomentumAgent(TradingAgent):
7
+ """
8
+ Simple trading agent that compares the mean of the last 20 mid-price observations with the mean of the last 50
+ and places a buy limit order when the 20-observation average is greater than or equal to the 50-observation
+ average, or a sell limit order when it is below.
11
+ """
12
+
13
+ def __init__(self, id, name, type, symbol, starting_cash,
14
+ min_size, max_size, wake_up_freq='60s',
15
+ subscribe=False, log_orders=False, random_state=None, wakeup_time=None):
16
+
17
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
18
+ self.symbol = symbol
19
+ self.wakeup_time = wakeup_time
20
+ self.min_size = min_size # Minimum order size
21
+ self.max_size = max_size # Maximum order size
22
+ self.size = self.random_state.randint(self.min_size, self.max_size)
23
+ self.wake_up_freq = wake_up_freq
24
+ self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
25
+ self.subscription_requested = False
26
+ self.mid_list, self.avg_20_list, self.avg_50_list = [], [], []
27
+ self.log_orders = log_orders
28
+ self.state = "AWAITING_WAKEUP"
29
+
30
+ def kernelStarting(self, startTime):
31
+ super().kernelStarting(startTime)
32
+
33
+ def wakeup(self, currentTime):
34
+ """ Agent wakeup is determined by self.wake_up_freq """
35
+ can_trade = super().wakeup(currentTime)
36
+ if self.subscribe and not self.subscription_requested:
37
+ super().requestDataSubscription(self.symbol, levels=1, freq=10e9)
38
+ self.subscription_requested = True
39
+ self.state = 'AWAITING_MARKET_DATA'
40
+ elif can_trade and not self.subscribe:
41
+ self.getCurrentSpread(self.symbol)
42
+ self.state = 'AWAITING_SPREAD'
43
+
44
+ def receiveMessage(self, currentTime, msg):
45
+ """ Momentum agent actions are determined after obtaining the best bid and ask in the LOB """
46
+ super().receiveMessage(currentTime, msg)
47
+ if not self.subscribe and self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':
48
+ bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
49
+ self.placeOrders(currentTime, bid, ask)
50
+ self.setWakeup(currentTime + self.getWakeFrequency())
51
+ self.state = 'AWAITING_WAKEUP'
52
+ elif currentTime < self.wakeup_time:
53
+ return
54
+ elif self.subscribe and self.state == 'AWAITING_MARKET_DATA' and msg.body['msg'] == 'MARKET_DATA':
55
+ bids, asks = self.known_bids[self.symbol], self.known_asks[self.symbol]
56
+ if bids and asks: self.placeOrders(currentTime, bids[0][0], asks[0][0])
57
+ self.state = 'AWAITING_MARKET_DATA'
58
+
59
+ def placeOrders(self, currentTime, bid, ask):
60
+ """ Momentum Agent actions logic """
61
+ #if currentTime < self.wakeup_time:
62
+ # return
63
+ if bid and ask:
64
+ self.mid_list.append((bid + ask) / 2)
65
+ if len(self.mid_list) > 20: self.avg_20_list.append(MomentumAgent.ma(self.mid_list, n=20)[-1].round(2))
66
+ if len(self.mid_list) > 50: self.avg_50_list.append(MomentumAgent.ma(self.mid_list, n=50)[-1].round(2))
67
+ if len(self.avg_20_list) > 0 and len(self.avg_50_list) > 0:
68
+ if self.avg_20_list[-1] >= self.avg_50_list[-1]:
69
+ self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=True, limit_price=ask)
70
+ else:
71
+ self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=False, limit_price=bid)
72
+
73
+ def getWakeFrequency(self):
74
+ return pd.Timedelta(self.wake_up_freq)
75
+
76
+ @staticmethod
77
+ def ma(a, n=20):
78
+ ret = np.cumsum(a, dtype=float)
79
+ ret[n:] = ret[n:] - ret[:-n]
80
+ return ret[n - 1:] / n
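A quick standalone check (toy values, smaller window) of the cumulative-sum moving average in ma above; it emits one value per fully formed window, which is why avg_20_list and avg_50_list only begin to fill after enough mid prices have accumulated.

import numpy as np

def ma(a, n=3):
    # Same cumulative-sum trick as MomentumAgent.ma, with a smaller default window.
    ret = np.cumsum(a, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n

prices = [1, 2, 3, 4, 5, 6]
print(ma(prices, n=3))  # [2. 3. 4. 5.] -- the mean of each 3-element window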
ABIDES/agent/examples/QLearningAgent.py ADDED
@@ -0,0 +1,213 @@
+ from agent.TradingAgent import TradingAgent
2
+ from message.Message import Message
3
+ from util.util import log_print
4
+
5
+ import numpy as np
6
+ import pandas as pd
7
+ import sys
8
+
9
+ class QLearningAgent(TradingAgent):
10
+
11
+ def __init__(self, id, name, type, symbol='IBM', starting_cash=100000,
12
+ qtable = None, log_orders = False, random_state = None):
13
+
14
+ # Base class init.
15
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state = random_state)
16
+
17
+ # Store important parameters particular to the QLearning agent.
18
+ self.symbol = symbol
19
+ self.qtable = qtable
20
+
21
+ # The agent uses this to track whether it has begun its strategy or is still
22
+ # handling pre-market tasks.
23
+ self.trading = False
24
+
25
+ # The agent begins in its "complete" state, not waiting for
26
+ # any special event or condition.
27
+ self.state = 'AWAITING_WAKEUP'
28
+
29
+ # The agent tracks an experience history to sample and learn from over time.
30
+ # Tuples of (s,a,s',r). This is not currently used (nor part of the saved state).
31
+ self.experience = []
32
+
33
+ # Remember prior state, action, and portfolio value (marked to market).
34
+ self.s = None
35
+ self.a = None
36
+ self.v = None
37
+
38
+
39
+
40
+ # During kernelStopping, give the Kernel our saved state for potential
41
+ # subsequent simulations during the same experiment.
42
+ def kernelStopping (self):
43
+ super().kernelStopping()
44
+ self.updateAgentState(self.qtable)
45
+
46
+
47
+ def wakeup (self, currentTime):
48
+ # Parent class handles discovery of exchange times and market_open wakeup call.
49
+ super().wakeup(currentTime)
50
+
51
+ self.state = 'INACTIVE'
52
+
53
+ if not self.mkt_open or not self.mkt_close:
54
+ # TradingAgent handles discovery of exchange times.
55
+ return
56
+ else:
57
+ if not self.trading:
58
+ self.trading = True
59
+
60
+ # Time to start trading!
61
+ log_print ("{} is ready to start trading now.", self.name)
62
+
63
+
64
+ # Steady state wakeup behavior starts here.
65
+
66
+ # If we've been told the market has closed for the day, we will only request
67
+ # final price information, then stop.
68
+ if self.mkt_closed and (self.symbol in self.daily_close_price):
69
+ # Market is closed and we already got the daily close price.
70
+ return
71
+
72
+
73
+ # Schedule a wakeup for the next time this agent should arrive at the market
74
+ # (following the conclusion of its current activity cycle).
75
+ # We do this early in case some of our expected message responses don't arrive.
76
+
77
+ # The QLearning agent is not a background agent, so it should select strategically
78
+ # appropriate times. (Maybe we should LEARN the best times or frequencies at which
79
+ # to trade, someday.) Presently it just trades once a minute.
80
+ self.setWakeup(currentTime + pd.Timedelta('1min'))
81
+
82
+ # If the market has closed and we haven't obtained the daily close price yet,
83
+ # do that before we cease activity for the day. Don't do any other behavior
84
+ # after market close.
85
+ if self.mkt_closed and (not self.symbol in self.daily_close_price):
86
+ self.getCurrentSpread(self.symbol)
87
+ self.state = 'AWAITING_SPREAD'
88
+ return
89
+
90
+ # Cancel unfilled orders (but don't exit positions).
91
+ self.cancelOrders()
92
+
93
+ # Get the order book or whatever else we need for the state.
94
+ self.getCurrentSpread(self.symbol, depth=1000)
95
+ self.state = 'AWAITING_SPREAD'
96
+
97
+
98
+
99
+ def placeOrder (self):
100
+ # Called when it is time for the agent to determine a limit price and place an order.
101
+
102
+ # Compute the order imbalance feature.
103
+ bid_vol = sum([ v[1] for v in self.known_bids[self.symbol] ])
104
+ ask_vol = sum([ v[1] for v in self.known_asks[self.symbol] ])
105
+ imba = bid_vol - ask_vol
106
+
107
+ # A unit of stock is now 100 shares instead of one.
108
+ imba = int(imba / 100)
109
+
110
+ # Get our current holdings in the stock of interest.
111
+ h = self.getHoldings(self.symbol)
112
+
113
+ # The new state will be called s_prime. This agent simply uses current
114
+ # holdings (limit: one share long or short) and offer volume imbalance.
115
+ # State: 1000s digit is 0 (short), 1 (neutral), 2 (long). Remaining digits
116
+ # are 000 (-100 imba) to 200 (+100 imba).
117
+ s_prime = ((h + 1) * 1000) + (imba + 100)
118
+
119
+ log_print ("h: {}, imba: {}, s_prime: {}", h, imba, s_prime)
120
+
121
+ # Compute our reward from last time. We estimate the change in the value
122
+ # of our portfolio by marking it to market and comparing against the last
123
+ # time we were contemplating an action.
124
+ v = self.markToMarket(self.holdings, use_midpoint=True)
125
+ r = v - self.v if self.v is not None else 0
126
+
127
+ # Store our experience tuple.
128
+ self.experience.append((self.s, self.a, s_prime, r))
129
+
130
+ # Update our q table.
131
+ old_q = self.qtable.q[self.s, self.a]
132
+ old_weighted = (1 - self.qtable.alpha) * old_q
133
+
134
+ a_prime = np.argmax(self.qtable.q[s_prime,:])
135
+ new_q = r + (self.qtable.gamma * self.qtable.q[s_prime, a_prime])
136
+ new_weighted = self.qtable.alpha * new_q
137
+
138
+ self.qtable.q[self.s, self.a] = old_weighted + new_weighted
139
+
140
+
141
+ # Decay alpha.
142
+ self.qtable.alpha *= self.qtable.alpha_decay
143
+ self.qtable.alpha = max(self.qtable.alpha, self.qtable.alpha_min)
144
+
145
+
146
+ # Compute our next action. 0 = sell one, 1 == do nothing, 2 == buy one.
147
+ if self.random_state.rand() < self.qtable.epsilon:
148
+ # Random action, and decay epsilon.
149
+ a = self.random_state.randint(0,3)
150
+ self.qtable.epsilon *= self.qtable.epsilon_decay
151
+ self.qtable.epsilon = max(self.qtable.epsilon, self.qtable.epsilon_min)
152
+ else:
153
+ # Expected best action.
154
+ a = a_prime
155
+
156
+ # Respect holding limit.
157
+ if a == 0 and h == -1: a = 1
158
+ elif a == 2 and h == 1: a = 1
159
+
160
+
161
+ # Remember s, a, and v for next time.
162
+ self.s = s_prime
163
+ self.a = a
164
+ self.v = v
165
+
166
+
167
+ # Place the order. We probably want this to be a market order, once supported,
168
+ # or use a "compute required price for guaranteed execution" function like the
169
+ # impact agent, but that requires fetching quite a bit of book depth.
170
+ if a == 0: self.placeLimitOrder(self.symbol, 1, False, 50000)
171
+ elif a == 2: self.placeLimitOrder(self.symbol, 1, True, 200000)
172
+
173
+
174
+ def receiveMessage (self, currentTime, msg):
175
+ # Parent class schedules market open wakeup call once market open/close times are known.
176
+ super().receiveMessage(currentTime, msg)
177
+
178
+ # We have been awakened by something other than our scheduled wakeup.
179
+ # If our internal state indicates we were waiting for a particular event,
180
+ # check if we can transition to a new state.
181
+
182
+ if self.state == 'AWAITING_SPREAD':
183
+ # We were waiting to receive the current spread/book. Since we don't currently
184
+ # track timestamps on retained information, we rely on actually seeing a
185
+ # QUERY_SPREAD response message.
186
+
187
+ if msg.body['msg'] == 'QUERY_SPREAD':
188
+ # This is what we were waiting for.
189
+
190
+ # But if the market is now closed, don't advance to placing orders.
191
+ if self.mkt_closed: return
192
+
193
+ # We now have the information needed to place a limit order with the eta
194
+ # strategic threshold parameter.
195
+ self.placeOrder()
196
+ self.state = 'AWAITING_WAKEUP'
197
+
198
+
199
+ # Internal state and logic specific to this agent subclass.
200
+
201
+ # Cancel all open orders.
202
+ # Return value: did we issue any cancellation requests?
203
+ def cancelOrders (self):
204
+ if not self.orders: return False
205
+
206
+ for id, order in self.orders.items():
207
+ self.cancelOrder(order)
208
+
209
+ return True
210
+
211
+ def getWakeFrequency (self):
212
+ return pd.Timedelta(self.random_state.randint(low = 0, high = 100), unit='ns')
213
+
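For reference, a compact standalone sketch (toy numbers; the table shape and hyperparameters are assumptions, not taken from this commit) of the two ingredients of placeOrder above: the holdings/imbalance state encoding and the weighted Q-value update with an epsilon-greedy action choice.

import numpy as np

rng = np.random.RandomState(0)
alpha, gamma, epsilon = 0.9, 0.98, 0.2          # illustrative hyperparameters
q = np.zeros((3000, 3))                          # states 0..2999, actions 0=sell, 1=hold, 2=buy

def encode_state(holdings, imbalance_units):
    # 1000s digit: 0 short / 1 flat / 2 long; remainder: imbalance shifted into 0..200.
    return (holdings + 1) * 1000 + (imbalance_units + 100)

s, a = encode_state(0, -5), 2                    # previous state and action
s_prime = encode_state(1, 3)                     # new state after the trade
r = 12.0                                         # mark-to-market change since the last decision

a_prime = np.argmax(q[s_prime, :])
q[s, a] = (1 - alpha) * q[s, a] + alpha * (r + gamma * q[s_prime, a_prime])

# Epsilon-greedy choice of the next action.
a_next = rng.randint(0, 3) if rng.rand() < epsilon else a_prime
print(q[s, a], a_next)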
ABIDES/agent/examples/ShockAgent.py ADDED
@@ -0,0 +1,171 @@
+ from agent.TradingAgent import TradingAgent
2
+
3
+ import pandas as pd
4
+
5
+
6
+ # Extends Impact agent to fire large order evenly over a predetermined time period
7
+ # Need to add: (1) duration, (2) number of wakeups, (3) desired execution size
8
+ class ImpactAgent(TradingAgent):
9
+
10
+ def __init__(self, id, name, type, symbol = None, starting_cash = None, within = 0.01,
11
+ impact = True, impact_time = None, impact_duration = 0, impact_trades = 0,
12
+ impact_vol = None, random_state = None):
13
+ # Base class init.
14
+ super().__init__(id, name, type, starting_cash = starting_cash, random_state = random_state)
15
+
16
+ self.symbol = symbol # symbol to trade
17
+ self.trading = False # ready to trade
18
+ self.traded = False # has made its trade(s)
19
+
20
+ # The amount of available "nearby" liquidity to consume when placing its order.
21
+ self.within = within # within this range of the inside price
22
+
23
+ self.impact_time = impact_time # When should we make the impact trade?
24
+ self.impact_duration = impact_duration # How long the agent should wait to submit the next trade
25
+ self.impact_trades = impact_trades # The number of trades to execute across
26
+ self.impact_vol = impact_vol # The total volume to execute across all trades
27
+
28
+ # The agent begins in its "complete" state, not waiting for
29
+ # any special event or condition.
30
+ self.state = 'AWAITING_WAKEUP'
31
+
32
+ # Controls whether the impact trade is actually placed.
33
+ self.impact = impact
34
+
35
+
36
+ def wakeup (self, currentTime):
37
+ # Parent class handles discovery of exchange times and market_open wakeup call.
38
+ super().wakeup(currentTime)
39
+
40
+ if not self.mkt_open or not self.mkt_close:
41
+ # TradingAgent handles discovery of exchange times.
42
+ return
43
+ else:
44
+ if not self.trading:
45
+ self.trading = True
46
+
47
+ # Time to start trading!
48
+ print ("{} is ready to start trading now.".format(self.name))
49
+
50
+
51
+ # Steady state wakeup behavior starts here.
52
+
53
+ # First, see if we have received a MKT_CLOSED message for the day. If so,
54
+ # there's nothing to do except clean-up.
55
+ if self.mkt_closed and (self.symbol in self.daily_close_price):
56
+ # Market is closed and we already got the daily close price.
57
+ return
58
+
59
+
60
+ ### Impact agent operates at a specific time.
61
+ if currentTime < self.impact_time:
62
+ print ("Impact agent waiting for impact_time {}".format(self.impact_time))
63
+ self.setWakeup(self.impact_time)
64
+ return
65
+
66
+
67
+ ### The impact agent only trades once, but we will monitor prices for
68
+ ### the sake of performance.
69
+ self.setWakeup(currentTime + pd.Timedelta('30m'))
70
+
71
+
72
+ # If the market is closed and we haven't obtained the daily close price yet,
73
+ # do that before we cease activity for the day. Don't do any other behavior
74
+ # after market close.
75
+ #
76
+ # Also, if we already made our one trade, do nothing except monitor prices.
77
+ #if self.traded >= self.impact_trades or (self.mkt_closed and (not self.symbol in self.daily_close_price)):
78
+ if self.traded or (self.mkt_closed and (not self.symbol in self.daily_close_price)):
79
+ self.getLastTrade()
80
+ self.state = 'AWAITING_LAST_TRADE'
81
+ return
82
+
83
+ #if self.traded < self.impact_trades:
84
+ #self.setWakeup(currentTime + impact_duration)
85
+
86
+ # The impact agent will place one order based on the current spread.
87
+ self.getCurrentSpread()
88
+ self.state = 'AWAITING_SPREAD'
89
+
90
+
91
+ def receiveMessage (self, currentTime, msg):
92
+ # Parent class schedules market open wakeup call once market open/close times are known.
93
+ super().receiveMessage(currentTime, msg)
94
+
95
+ # We have been awakened by something other than our scheduled wakeup.
96
+ # If our internal state indicates we were waiting for a particular event,
97
+ # check if we can transition to a new state.
98
+
99
+ if self.state == 'AWAITING_SPREAD':
100
+ # We were waiting for current spread information to make our trade.
101
+ # If the message we just received is QUERY_SPREAD, that means we just got it.
102
+ if msg.body['msg'] == 'QUERY_SPREAD':
103
+ # Place our one trade.
104
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
105
+ #bid_liq, ask_liq = self.getKnownLiquidity(self.symbol, within=self.within)
106
+ print('within: ' + str(self.within))
107
+ bid_liq, ask_liq = self.getKnownLiquidity(self.symbol, within=0.75)
108
+
109
+ # Buy order.
110
+ #direction, shares, price = True, int(round(ask_liq * self.greed)), ask
111
+
112
+ # Sell order. This should be a parameter, but isn't yet.
113
+ #direction, shares = False, int(round(bid_liq * self.greed))
114
+ direction, shares = False, int(round(bid_liq * 0.5))
115
+
116
+ # Compute the limit price we must offer to ensure our order executes immediately.
117
+ # This is essentially a workaround for the lack of true market orders in our
118
+ # current simulation.
119
+ price = self.computeRequiredPrice(direction, shares)
120
+
121
+ # Actually place the order only if self.impact is true.
122
+ if self.impact:
123
+ print ("Impact agent firing: {} {} @ {} @ {}".format('BUY' if direction else 'SELL', shares, self.dollarize(price), currentTime))
124
+ self.placeLimitOrder (self.symbol, shares, direction, price)
125
+ else:
126
+ print ("Impact agent would fire: {} {} @ {} (but self.impact = False)".format('BUY' if direction else 'SELL', shares, self.dollarize(price)))
127
+
128
+ self.traded = True
129
+ self.state = 'AWAITING_WAKEUP'
130
+
131
+
132
+ # Internal state and logic specific to this agent.
133
+
134
+ def placeLimitOrder (self, symbol, quantity, is_buy_order, limit_price):
135
+ super().placeLimitOrder(symbol, quantity, is_buy_order, limit_price, ignore_risk = True)
136
+
137
+ # Computes required limit price to immediately execute a trade for the specified quantity
138
+ # of shares.
139
+ def computeRequiredPrice (self, direction, shares):
140
+ book = self.known_asks[self.symbol] if direction else self.known_bids[self.symbol]
141
+
142
+ # Start at the inside and add up the shares.
143
+ t = 0
144
+
145
+ for i in range(len(book)):
146
+ p,v = book[i]
147
+ t += v
148
+
149
+ # If we have accumulated enough shares, return this price.
150
+ # Need to also return if greater than the number of desired shares
151
+ if t >= shares: return p
152
+
153
+ # Not enough shares. Just return worst price (highest ask, lowest bid).
154
+ return book[-1][0]
155
+
156
+
157
+ # Request the last trade price for our symbol.
158
+ def getLastTrade (self):
159
+ super().getLastTrade(self.symbol)
160
+
161
+
162
+ # Request the spread for our symbol.
163
+ def getCurrentSpread (self):
164
+ # Impact agent gets depth 10000 on each side (probably everything).
165
+ super().getCurrentSpread(self.symbol, 10000)
166
+
167
+
168
+ def getWakeFrequency (self):
169
+ return (pd.Timedelta('1ns'))
170
+
171
+
ABIDES/agent/examples/SubscriptionAgent.py ADDED
@@ -0,0 +1,52 @@
+ from agent.TradingAgent import TradingAgent
+ from util.util import log_print
+
+ import pandas as pd
+
+
+ class SubscriptionAgent(TradingAgent):
+     """
+     Simple agent to demonstrate subscription to order book market data.
+     """
+
+     def __init__(self, id, name, type, symbol, starting_cash, levels, freq, log_orders=False, random_state=None):
+         super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
+         self.symbol = symbol  # symbol traded
+         self.levels = levels  # number of price levels to subscribe to / receive updates for
+         self.freq = freq  # minimum number of nanoseconds between market data messages
+         self.subscribe = True  # flag to determine whether to subscribe to data or use polling mechanism
+         self.subscription_requested = False
+         self.last_update_ts = None  # timestamp of the last agent update.
+         # This is NOT required but only used to demonstrate how subscription works
+         self.state = 'AWAITING_MARKET_DATA'
+         self.current_bids = None
+         self.current_asks = None
+
+     def kernelStarting(self, startTime):
+         super().kernelStarting(startTime)
+
+     def wakeup(self, currentTime):
+         super().wakeup(currentTime)
+         if self.subscribe and not self.subscription_requested:
+             super().requestDataSubscription(self.symbol, levels=self.levels, freq=self.freq)
+             self.subscription_requested = True
+             self.last_update_ts = currentTime
+
+     def receiveMessage(self, currentTime, msg):
+         super().receiveMessage(currentTime, msg)
+         if self.subscribe and self.state == 'AWAITING_MARKET_DATA' and msg.body['msg'] == 'MARKET_DATA':
+             bids, asks = msg.body['bids'], msg.body['asks']
+             log_print("--------------------")
+             log_print("seconds elapsed since last update: {}", (currentTime - self.last_update_ts).delta / 1e9)
+             log_print("number of bid levels: {}", len(bids))
+             log_print("number of ask levels: {}", len(asks))
+             log_print("bids: {}, asks: {}", bids, asks)
+             log_print("Current Agent Timestamp: {}", currentTime)
+             log_print("Exchange Timestamp: {}", self.exchange_ts[self.symbol])
+             log_print("--------------------")
+             self.last_update_ts = currentTime
+             self.current_bids = bids
+             self.current_asks = asks
+
+     def getWakeFrequency(self):
+         return pd.Timedelta('1s')
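A hedged configuration sketch (not part of this commit; assumes the ABIDES repository root is on the Python path and that the agent is later attached to a Kernel by a config script) showing typical parameter choices for this agent. Note that freq is a number of nanoseconds between MARKET_DATA messages; the id, name, and symbol values here are hypothetical.

import numpy as np
from agent.examples.SubscriptionAgent import SubscriptionAgent

subscription_agent = SubscriptionAgent(
    id=100,
    name="SUBSCRIPTION_AGENT_100",
    type="SubscriptionAgent",
    symbol="ABM",
    starting_cash=10_000_000,              # in cents (ABIDES convention)
    levels=5,                              # top 5 price levels on each side
    freq=int(1e9),                         # at most one MARKET_DATA message per simulated second
    log_orders=False,
    random_state=np.random.RandomState(seed=42),
)
# The agent would then be appended to the config's agent list and run by the Kernel.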
ABIDES/agent/examples/SumClientAgent.py ADDED
@@ -0,0 +1,107 @@
+ from agent.Agent import Agent
2
+ from agent.examples.SumServiceAgent import SumServiceAgent
3
+ from message.Message import Message
4
+ from util.util import log_print
5
+
6
+ import pandas as pd
7
+
8
+
9
+ # The SumClientAgent class inherits from the base Agent class. It is intended
10
+ # to serve as an example in which a service agent performs some aggregated
11
+ # computation for multiple clients and returns the result to all clients.
12
+
13
+ class SumClientAgent(Agent):
14
+
15
+ def __init__(self, id, name, type, peer_list=None, random_state=None):
16
+ # Base class init.
17
+ super().__init__(id, name, type, random_state)
18
+
19
+ self.peer_list = peer_list
20
+ self.peer_exchange_complete = False
21
+
22
+ self.peers_received = {}
23
+ self.peer_sum = 0
24
+
25
+
26
+ ### Simulation lifecycle messages.
27
+
28
+ def kernelStarting(self, startTime):
29
+ # self.kernel is set in Agent.kernelInitializing()
30
+
31
+ # Find a SumServiceAgent which can answer our queries. It is guaranteed
32
+ # to exist by now (if there is one).
33
+ self.serviceAgentID = self.kernel.findAgentByType(SumServiceAgent)
34
+
35
+ log_print ("Agent {} requested agent of type Agent.SumServiceAgent. Given Agent ID: {}",
36
+ self.id, self.serviceAgentID)
37
+
38
+ # Request a wake-up call as in the base Agent (spread across five seconds).
39
+ super().kernelStarting(startTime + pd.Timedelta(self.random_state.randint(low = 0, high = 5000000000), unit='ns'))
40
+
41
+
42
+ ### Simulation participation messages.
43
+
44
+ def wakeup (self, currentTime):
45
+ # Allow the base Agent to do whatever it needs to.
46
+ super().wakeup(currentTime)
47
+
48
+ # This agent only needs one wakeup call at simulation start. At this time,
49
+ # each client agent will send a number to each agent in its peer list.
50
+ # Each number will be sampled independently. That is, client agent 1 will
51
+ # send n2 to agent 2, n3 to agent 3, and so forth.
52
+
53
+ # Once a client agent has received these initial random numbers from all
54
+ # agents in the peer list, it will make its first request from the sum
55
+ # service. Afterwards, it will simply request new sums when answers are
56
+ # delivered to previous queries.
57
+
58
+ # At the first wakeup, initiate peer exchange.
59
+ if not self.peer_exchange_complete:
60
+ n = [self.random_state.randint(low = 0, high = 100) for i in range(len(self.peer_list))]
61
+ log_print ("agent {} peer list: {}", self.id, self.peer_list)
62
+ log_print ("agent {} numbers to exchange: {}", self.id, n)
63
+
64
+ for idx, peer in enumerate(self.peer_list):
65
+ self.sendMessage(peer, Message({ "msg" : "PEER_EXCHANGE", "sender": self.id, "n" : n[idx] }))
66
+
67
+ else:
68
+ # For subsequent (self-induced) wakeups, place a sum query.
69
+ n1, n2 = [self.random_state.randint(low = 0, high = 100) for i in range(2)]
70
+
71
+ log_print ("agent {} transmitting numbers {} and {} with peer sum {}", self.id, n1, n2, self.peer_sum)
72
+
73
+ # Add the sum of the peer exchange values to both numbers.
74
+ n1 += self.peer_sum
75
+ n2 += self.peer_sum
76
+
77
+ self.sendMessage(self.serviceAgentID, Message({ "msg" : "SUM_QUERY", "sender": self.id,
78
+ "n1" : n1, "n2" : n2 }))
79
+
80
+ return
81
+
82
+
83
+ def receiveMessage (self, currentTime, msg):
84
+ # Allow the base Agent to do whatever it needs to.
85
+ super().receiveMessage(currentTime, msg)
86
+
87
+ if msg.body['msg'] == "PEER_EXCHANGE":
88
+
89
+ # Ensure we don't somehow record the same peer twice.
90
+ if msg.body['sender'] not in self.peers_received:
91
+ self.peers_received[msg.body['sender']] = True
92
+ self.peer_sum += msg.body['n']
93
+
94
+ if len(self.peers_received) == len(self.peer_list):
95
+ # We just heard from the final peer. Initiate our first sum request.
96
+ log_print ("agent {} heard from final peer. peers_received = {}, peer_sum = {}",
97
+ self.id, self.peers_received, self.peer_sum)
98
+
99
+ self.peer_exchange_complete = True
100
+ self.setWakeup(currentTime + pd.Timedelta('1ns'))
101
+
102
+ elif msg.body['msg'] == "SUM_QUERY_RESPONSE":
103
+ log_print("Agent {} received sum query response: {}", self.id, msg)
104
+
105
+ # Now schedule a new query.
106
+ self.setWakeup(currentTime + pd.Timedelta('1m'))
107
+
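To make the round trip concrete, here is a tiny standalone sketch (fabricated values) of the aggregation the service performs once all clients have reported: each client submits an (n1, n2) pair already offset by its peer sum, and every sender receives the same total back in a SUM_QUERY_RESPONSE.

# Hypothetical pending queries keyed by client agent id, as in SumServiceAgent.numbers.
numbers = {1: (17, 52), 2: (4, 99), 3: (60, 8)}

current_sum = sum(n1 + n2 for n1, n2 in numbers.values())
responses = {sender: {"msg": "SUM_QUERY_RESPONSE", "sum": current_sum} for sender in numbers}
print(current_sum)   # 240
print(responses[2])  # every client gets the same aggregate back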
ABIDES/agent/examples/SumServiceAgent.py ADDED
@@ -0,0 +1,77 @@
+ from agent.Agent import Agent
2
+ from message.Message import Message
3
+ from util.util import log_print
4
+
5
+
6
+ # The SumServiceAgent class inherits from the base Agent class. It is intended
7
+ # to serve as an example in which a service agent performs some aggregated
8
+ # computation for multiple clients and returns the result to all clients.
9
+
10
+ class SumServiceAgent(Agent):
11
+
12
+ def __init__(self, id, name, type, random_state=None, num_clients=10):
13
+ # Base class init.
14
+ super().__init__(id, name, type, random_state)
15
+
16
+ # How many clients should we wait for?
17
+ self.num_clients = num_clients
18
+
19
+ # A list of the numbers to sum: dictionary keyed by agentID.
20
+ self.numbers = {}
21
+
22
+ # We track the total sum for the entire day to print at the end.
23
+ self.total = 0
24
+
25
+
26
+ ### Simulation lifecycle messages.
27
+ def kernelStarting(self, startTime):
28
+ # self.kernel is set in Agent.kernelInitializing()
29
+
30
+ # This agent should have negligible computation delay.
31
+ self.setComputationDelay(1000000) # 1 ms
32
+
33
+ # Request a wake-up call as in the base Agent.
34
+ super().kernelStarting(startTime)
35
+
36
+
37
+ def kernelStopping(self):
38
+ # Print the total sum for the day, only for completed sum requests.
39
+ print("Agent {} reports total sum: {}".format(self.id, self.total))
40
+
41
+ # Allow the base class to perform stopping activities.
42
+ super().kernelStopping()
43
+
44
+
45
+ ### Simulation participation messages.
46
+
47
+ # The service agent does not require wakeup calls.
48
+
49
+ def receiveMessage (self, currentTime, msg):
50
+ # Allow the base Agent to do whatever it needs to.
51
+ super().receiveMessage(currentTime, msg)
52
+
53
+ if msg.body['msg'] == "SUM_QUERY":
54
+ log_print("Agent {} received sum query: {}", self.id, msg)
55
+ self.numbers[msg.body['sender']] = (msg.body['n1'], msg.body['n2'])
56
+
57
+ if len(self.numbers.keys()) >= self.num_clients:
58
+ # It is time to sum the numbers.
59
+ self.processSum()
60
+
61
+ # Then clear the pending queries.
62
+ self.numbers = {}
63
+
64
+
65
+ ### Sum client numbers and respond to each client.
66
+ def processSum (self):
67
+
68
+ current_sum = sum([ x[0] + x[1] for x in self.numbers.values() ])
69
+ self.total += current_sum
70
+
71
+ log_print("Agent {} computed sum: {}", self.id, current_sum)
72
+
73
+ for sender in self.numbers.keys():
74
+ self.sendMessage(sender, Message({ "msg" : "SUM_QUERY_RESPONSE", "sender": self.id,
75
+ "sum" : current_sum }))
76
+
77
+
ABIDES/agent/examples/__init__.py ADDED
File without changes
ABIDES/agent/examples/crypto/PPFL_ClientAgent.py ADDED
@@ -0,0 +1,380 @@
+ from agent.Agent import Agent
2
+ from agent.examples.crypto.PPFL_ServiceAgent import PPFL_ServiceAgent
3
+ from message.Message import Message
4
+ from util.util import log_print
5
+
6
+ from util.crypto.logReg import getWeights, reportStats
7
+ import util.crypto.diffieHellman as dh
8
+
9
+ import numpy as np
10
+ from os.path import exists
11
+ import pandas as pd
12
+ import random
13
+
14
+
15
+ # The PPFL_ClientAgent class inherits from the base Agent class. It implements
16
+ # a secure federated learning protocol with basic differential privacy plus
17
+ # secure multiparty communication.
18
+
19
+ class PPFL_ClientAgent(Agent):
20
+
21
+ def __init__(self, id, name, type, peer_list=None, iterations=4, multiplier=10000, secret_scale = 100000,
22
+ X_train = None, y_train = None, X_test = None, y_test = None, split_size = None,
23
+ learning_rate = None, clear_learning = None, num_clients = None, num_subgraphs = None,
24
+ epsilon = None, max_logreg_iterations = None, collusion = False, random_state=None):
25
+
26
+ # Base class init.
27
+ super().__init__(id, name, type, random_state)
28
+
29
+
30
+ # Store the client's peer list (subgraph, neighborhood) with which it should communicate.
31
+ self.peer_list = peer_list
32
+
33
+ # Initialize a tracking attribute for the initial peer exchange and record the subgraph size.
34
+ self.peer_exchange_complete = False
35
+ self.num_peers = len(self.peer_list)
36
+
37
+ # Record the total number of clients participating in the protocol and the number of subgraphs.
38
+ # Neither of these are part of the protocol, or necessary for real-world implementation, but do
39
+ # allow for convenient logging of progress and results in simulation.
40
+ self.num_clients = num_clients
41
+ self.num_subgraphs = num_subgraphs
42
+
43
+ # Record whether the clients should be recording information about the potential accuracy of
44
+ # peer data reconstruction via collusion among the clients.
45
+ self.collusion = collusion
46
+
47
+ # Record the number of protocol (federated learning) iterations the clients will perform.
48
+ self.no_of_iterations = iterations
49
+
50
+ # Record the multiplier that will be used to protect against floating point accuracy loss and
51
+ # the scale of the client shared secrets.
52
+ self.multiplier = multiplier
53
+ self.secret_scale = secret_scale
54
+
55
+ # Record the number of local iterations of logistic regression each client will run during
56
+ # each protocol iteration and what local learning rate will be used.
57
+ self.max_logreg_iterations = max_logreg_iterations
58
+ self.learning_rate = learning_rate
59
+
60
+ # Record whether clients will do federated learning in the clear (no privacy, no encryption)
61
+ # and, if needed, the epsilon value for differential privacy.
62
+ self.clear_learning = clear_learning
63
+ self.epsilon = epsilon
64
+
65
+ # Record the training and testing splits for the data set to be learned.
66
+ self.X_train = X_train
67
+ self.y_train = y_train
68
+
69
+ self.X_test = X_test
70
+ self.y_test = y_test
71
+
72
+ # Record the number of features in the data set.
73
+ self.no_of_weights = X_train.shape[1]
74
+
75
+ # Initialize an attribute to remember the shared weights returned from the server.
76
+ self.prevWeight = None
77
+
78
+ # Each client receives only a portion of the training data each protocol iteration.
79
+ self.split_size = split_size
80
+
81
+ # Initialize a dictionary to remember which peers we have heard from during peer exchange.
82
+ self.peers_received = {}
83
+
84
+ # Initialize a dictionary to accumulate this client's timing information by task.
85
+ self.elapsed_time = { 'DH_OFFLINE' : pd.Timedelta(0), 'DH_ONLINE' : pd.Timedelta(0),
86
+ 'TRAINING' : pd.Timedelta(0), 'ENCRYPTION' : pd.Timedelta(0) }
87
+
88
+
89
+ # Pre-generate this client's local training data for each iteration (for the sake of simulation speed).
90
+ self.trainX = []
91
+ self.trainY = []
92
+
93
+ # This is a faster PRNG than the default, for times when we must select a large quantity of randomness.
94
+ self.prng = np.random.Generator(np.random.SFC64())
95
+
96
+ ### Data randomly selected from total training set each iteration, simulating online behavior.
97
+ for i in range(iterations):
98
+ slice = self.prng.choice(range(self.X_train.shape[0]), size = split_size, replace = False)
99
+
100
+ # Pull together the current local training set.
101
+ self.trainX.append(self.X_train[slice].copy())
102
+ self.trainY.append(self.y_train[slice].copy())
103
+
104
+
105
+ # Create dictionaries to hold the public and secure keys for this client, and the public keys shared
106
+ # by its peers.
107
+ self.pubkeys = {}
108
+ self.seckeys = {}
109
+ self.peer_public_keys = {}
110
+
111
+ # Create dictionaries to hold the shared key for each peer each iteration and the seed for the
112
+ # following iteration.
113
+ self.r = {}
114
+ self.R = {}
115
+
116
+
117
+ # Specify the parameters used for generation of randomness.
118
+ self.px_reg = 1
119
+ self.px_epsilon = epsilon
120
+ self.px_min_rows = self.split_size
121
+
122
+ self.px_shape = 1 / ( self.num_peers + 1)
123
+ self.px_scale = 2 / (( self.num_peers + 1 ) * self.px_min_rows * self.px_reg * self.px_epsilon )
124
+
125
+ if self.id == 1: print (f"px_shape is {self.px_shape}")
126
+ if self.id == 1: print (f"px_scale is {self.px_scale}")
127
+
128
+ # Specify the required shape for vectorized generation of randomness.
129
+ self.px_dims = ( self.num_peers, self.no_of_iterations, self.no_of_weights )
130
+
131
+
132
+ # Iteration counter.
133
+ self.current_iteration = 0
134
+
135
+
136
+
137
+
138
+ ### Simulation lifecycle messages.
139
+
140
+ def kernelStarting(self, startTime):
141
+
142
+ # Initialize custom state properties into which we will later accumulate results.
143
+ # To avoid redundancy, we allow only the first client to handle initialization.
144
+ if self.id == 1:
145
+ self.kernel.custom_state['dh_offline'] = pd.Timedelta(0)
146
+ self.kernel.custom_state['dh_online'] = pd.Timedelta(0)
147
+ self.kernel.custom_state['training'] = pd.Timedelta(0)
148
+ self.kernel.custom_state['encryption'] = pd.Timedelta(0)
149
+
150
+ # Find the PPFL service agent, so messages can be directed there.
151
+ self.serviceAgentID = self.kernel.findAgentByType(PPFL_ServiceAgent)
152
+
153
+ # Request a wake-up call as in the base Agent. Noise is kept small because
154
+ # the overall protocol duration is so short right now. (up to one microsecond)
155
+ super().kernelStarting(startTime + pd.Timedelta(self.random_state.randint(low = 0, high = 1000), unit='ns'))
156
+
157
+
158
+ def kernelStopping(self):
159
+
160
+ # Accumulate into the Kernel's "custom state" this client's elapsed times per category.
161
+ # Note that times which should be reported in the mean per iteration are already so computed.
162
+ # These will be output to the config (experiment) file at the end of the simulation.
163
+
164
+ self.kernel.custom_state['dh_offline'] += self.elapsed_time['DH_OFFLINE']
165
+ self.kernel.custom_state['dh_online'] += (self.elapsed_time['DH_ONLINE'] / self.no_of_iterations)
166
+ self.kernel.custom_state['training'] += (self.elapsed_time['TRAINING'] / self.no_of_iterations)
167
+ self.kernel.custom_state['encryption'] += (self.elapsed_time['ENCRYPTION'] / self.no_of_iterations)
168
+
169
+ super().kernelStopping()
170
+
171
+
172
+ ### Simulation participation messages.
173
+
174
+ def wakeup (self, currentTime):
175
+ super().wakeup(currentTime)
176
+
177
+ # Record start of wakeup for real-time computation delay.
178
+ dt_wake_start = pd.Timestamp('now')
179
+
180
+ # Check if the clients are still performing the one-time peer exchange.
181
+ if not self.peer_exchange_complete:
182
+
183
+ # Generate DH keys.
184
+ if not self.clear_learning: self.pubkeys, self.seckeys = dh.dict_keygeneration( self.peer_list )
185
+
186
+ # Record elapsed wallclock for Diffie Hellman offline.
187
+ dt_wake_end = pd.Timestamp('now')
188
+ self.elapsed_time['DH_OFFLINE'] += dt_wake_end - dt_wake_start
189
+
190
+ # Set computation delay to elapsed wallclock time.
191
+ self.setComputationDelay(int((dt_wake_end - dt_wake_start).to_timedelta64()))
192
+
193
+ # Send generated values to peers.
194
+ if not self.clear_learning:
195
+ for idx, peer in enumerate(self.peer_list):
196
+ # We assume a star network configuration where all messages between peers must be forwarded
197
+ # through the server.
198
+ self.sendMessage(self.serviceAgentID, Message({ "msg" : "FWD_MSG", "msgToForward" : "PEER_EXCHANGE",
199
+ "sender": self.id, "recipient": peer, "pubkey" : self.pubkeys[peer] }))
200
+
201
+ if self.clear_learning:
202
+ self.peer_exchange_complete = True
203
+ self.setWakeup(currentTime + pd.Timedelta('1ns'))
204
+
205
+ else:
206
+
207
+ # We are waking up to start a new iteration of the protocol.
208
+ # (Peer exchange is done before all this.)
209
+
210
+ if (self.current_iteration == 0):
211
+ # During iteration 0 (only) we complete the key exchange and prepare the
212
+ # common key list, because at this point we know we have received keys
213
+ # from all peers.
214
+
215
+ # R is the common key dictionary (by peer agent id).
216
+ if not self.clear_learning: self.R = dh.dict_keyexchange(self.peer_list, self.id, self.pubkeys,
217
+ self.seckeys, self.peer_public_keys)
218
+
219
+ # Pre-generate all of this client's local differential privacy noise (for simulation speed).
220
+ # We will need one per weight per protocol iteration.
221
+ self.my_noise = np.random.laplace(scale = self.px_scale, size = (self.no_of_iterations, self.no_of_weights))
222
+
223
+
224
+ # Diffie Hellman is done in every iteration.
225
+ if not self.clear_learning:
226
+ for peer_id, common_key in self.R.items():
227
+
228
+ random.seed(common_key)
229
+ rand = random.getrandbits(512)
230
+
231
+ rand_b_raw = format(rand, '0512b')
232
+ rand_b_rawr = rand_b_raw[:256]
233
+ rand_b_rawR = rand_b_raw[256:]
234
+
235
+
236
+ # Negate the offset for peers with a higher id than this agent. This ensures each
+ # pairwise offset is added once and subtracted once across the two peers that share it.
238
+ r = int(rand_b_rawr,2) % (2**32)
239
+
240
+ log_print ("SELECTED r: {}", r)
241
+
242
+ # Update dictionary of shared secrets for this iteration.
243
+ self.r[peer_id] = r if peer_id < self.id else -r
244
+
245
+ # Store the shared seeds for the next iteration.
246
+ self.R[peer_id] = int(rand_b_rawR,2)
247
+
248
+
249
+ # Record elapsed wallclock for Diffie Hellman online.
250
+ dt_online_complete = pd.Timestamp('now')
251
+
252
+ # For convenience of things indexed by iteration...
253
+ i = self.current_iteration
254
+
255
+ # Perform the local training for this client, using only its local (private) data. The configured learning
256
+ # rate might need to be increased if there are very many clients, each with very little data, otherwise
257
+ # convergence may take a really long time.
258
+ #
259
+ # max_iter controls how many iterations of gradient descent to perform on the logistic
260
+ # regression model. previous_weight should be passed as None for the first iteration.
261
+ weight = getWeights(previous_weight = self.prevWeight, max_iter = self.max_logreg_iterations, lr = self.learning_rate,
262
+ trainX = self.trainX[i], trainY = self.trainY[i], self_id = self.id)
263
+
264
+ # If in collusion analysis mode, write out the weights we will need to evaluate reconstruction.
265
+ if self.collusion:
266
+ with open('results/collusion_weights.csv', 'a') as results_file:
267
+ results_file.write(f"{self.id},{self.current_iteration},{','.join([str(x) for x in weight])}\n")
268
+
269
+ # Record elapsed wallclock for training model.
270
+ dt_training_complete = pd.Timestamp('now')
271
+
272
+ if not self.clear_learning:
273
+ # Add a random sample from Laplace to each of the weights.
274
+ noise = self.my_noise[i]
275
+
276
+ if self.collusion:
277
+ with open('results/collusion_selected_noise.csv', 'a') as results_file:
278
+ # Write out the noise added to each weight by this client.
279
+ results_file.write(f"{self.id},{self.current_iteration},{','.join([str(x) for x in noise])}\n")
280
+
281
+ log_print ("weight {}", weight)
282
+ log_print ("noise {}", noise)
283
+
284
+ if self.clear_learning: n = np.array(weight) * self.multiplier
285
+ else: n = (np.array(weight) + noise) * self.multiplier
286
+
287
+ log_print ("n {}", n)
288
+ log_print ("r {}", self.r)
289
+
290
+ weights_to_send = n + sum(self.r.values())
291
+
292
+ log_print ("weights_to_send {}", weights_to_send)
293
+
294
+
295
+ # Record elapsed wallclock for encryption.
296
+ dt_encryption_complete = pd.Timestamp('now')
297
+
298
+ # Set computation delay to elapsed wallclock time.
299
+ self.setComputationDelay(int((dt_encryption_complete - dt_wake_start).to_timedelta64()))
300
+
301
+ # Send the message to the server.
302
+ self.sendMessage(self.serviceAgentID, Message({ "msg" : "CLIENT_WEIGHTS", "sender": self.id,
303
+ "weights" : weights_to_send }))
304
+
305
+ self.current_iteration += 1
306
+
307
+ # Store elapsed times by category.
308
+ self.elapsed_time['DH_ONLINE'] += dt_online_complete - dt_wake_start
309
+ self.elapsed_time['TRAINING'] += dt_training_complete - dt_online_complete
310
+ self.elapsed_time['ENCRYPTION'] += dt_encryption_complete - dt_training_complete
311
+
312
+
313
+
314
+ def receiveMessage (self, currentTime, msg):
315
+ super().receiveMessage(currentTime, msg)
316
+
317
+ if msg.body['msg'] == "PEER_EXCHANGE":
318
+
319
+ # Record start of message processing.
320
+ dt_rcv_start = pd.Timestamp('now')
321
+
322
+ # Ensure we don't somehow record the same peer twice. These all come from the
323
+ # service provider, relayed from other clients, but are "fixed up" to appear
324
+ # as if they come straight from the relevant peer.
325
+ if msg.body['sender'] not in self.peers_received:
326
+
327
+ # Record the content of the message and that we received it.
328
+ self.peers_received[msg.body['sender']] = True
329
+ self.peer_public_keys[msg.body['sender']] = msg.body['pubkey']
330
+
331
+ # Record end of message processing.
332
+ dt_rcv_end = pd.Timestamp('now')
333
+
334
+ # Store elapsed times by category.
335
+ self.elapsed_time['DH_OFFLINE'] += dt_rcv_end - dt_rcv_start
336
+
337
+ # Set computation delay to elapsed wallclock time.
338
+ self.setComputationDelay(int((dt_rcv_end - dt_rcv_start).to_timedelta64()))
339
+
340
+ # If this is the last peer from whom we expect to hear, move on with the protocol.
341
+ if len(self.peers_received) == self.num_peers:
342
+ self.peer_exchange_complete = True
343
+ self.setWakeup(currentTime + pd.Timedelta('1ns'))
344
+
345
+ elif msg.body['msg'] == "SHARED_WEIGHTS":
346
+ # Reset computation delay.
347
+ self.setComputationDelay(0)
348
+
349
+ # Extract the shared weights from the message.
350
+ self.prevWeight = msg.body['weights']
351
+
352
+ # Remove the multiplier that was helping guard against floating point error.
353
+ self.prevWeight /= self.multiplier
354
+
355
+ log_print ("Client weights received for iteration {} by {}: {}", self.current_iteration, self.id, self.prevWeight)
356
+
357
+ # Client number 1 (arbitrary choice) records the shared learning progress each iteration
358
+ # for later visualization and analysis.
359
+ if self.id == 1:
360
+ is_acc, is_mcc, is_f1, is_mse, is_auprc, oos_acc, oos_mcc, oos_f1, oos_mse, oos_auprc = reportStats(self.prevWeight, self.current_iteration, self.X_train, self.y_train, self.X_test, self.y_test)
361
+
362
+ if not exists("results/all_results.csv"):
363
+ with open('results/all_results.csv', 'a') as results_file:
364
+ # Write out the header.
365
+ results_file.write(f"Clients,Peers,Subgraphs,Iterations,Train Rows,Learning Rate,In The Clear?,Local Iterations,Epsilon,Iteration,IS ACC,OOS ACC,IS MCC,OOS MCC,IS MSE,OOS MSE,IS F1,OOS F1,IS AUPRC,OOS AUPRC\n")
366
+
367
+ with open('results/all_results.csv', 'a') as results_file:
368
+ # Write out the current protocol iteration weights and metadata.
369
+ results_file.write(f"{self.num_clients},{self.num_peers},{self.num_subgraphs},{self.no_of_iterations},{self.split_size},{self.learning_rate},{self.clear_learning},{self.max_logreg_iterations},{self.epsilon},{self.current_iteration},{is_acc},{oos_acc},{is_mcc},{oos_mcc},{is_mse},{oos_mse},{is_f1},{oos_f1},{is_auprc},{oos_auprc}\n")
370
+
371
+ if self.collusion:
372
+ with open('results/collusion_consensus.csv', 'a') as results_file:
373
+ # Agent 1 also writes out the consensus weights each iteration (for collusion analysis).
374
+ results_file.write(f"{self.current_iteration},{','.join([str(x) for x in self.prevWeight])}\n")
375
+
376
+
377
+ # Start a new iteration if we are not at the end of the protocol.
378
+ if self.current_iteration < self.no_of_iterations:
379
+ self.setWakeup(currentTime + pd.Timedelta('1ns'))
380
+
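The client above hides its scaled, noised weight vector by adding one shared offset per peer: positive toward peers with a lower id, negative toward peers with a higher id, so each pair contributes `+r` and `-r` exactly once. A minimal standalone sketch (toy ids, weights and offsets, not taken from the ABIDES code) of why the offsets cancel when the server averages the submissions:

```python
import numpy as np

rng = np.random.default_rng(0)
n_clients, n_weights = 4, 3
true_weights = {c: rng.normal(size=n_weights) for c in range(n_clients)}
MULTIPLIER = 10_000  # plays the same role as the agent's `multiplier` parameter

# One shared offset per unordered pair {a, b}; the higher id adds +r, the lower id
# adds -r, mirroring `self.r[peer_id] = r if peer_id < self.id else -r`.
pair_offset = {(a, b): int(rng.integers(0, 2 ** 32))
               for a in range(n_clients) for b in range(a + 1, n_clients)}

def masked_submission(c):
    n = np.round(true_weights[c] * MULTIPLIER).astype(np.int64)  # scale to integers
    mask = 0
    for (a, b), r in pair_offset.items():
        if c == b:
            mask += r  # peer a has a lower id than c
        elif c == a:
            mask -= r  # peer b has a higher id than c
    return n + mask

server_mean = np.mean([masked_submission(c) for c in range(n_clients)], axis=0) / MULTIPLIER
clear_mean = np.mean([true_weights[c] for c in range(n_clients)], axis=0)
print(np.allclose(server_mean, clear_mean, atol=1e-3))  # True: the pairwise offsets cancel
```

Any single submission looks like noise to the server, but the aggregate equals the aggregate of the unmasked (noised) weights.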
ABIDES/agent/examples/crypto/PPFL_ServiceAgent.py ADDED
@@ -0,0 +1,155 @@
1
+ from agent.Agent import Agent
2
+ from message.Message import Message
3
+ from util.util import log_print
4
+ from util.crypto.logReg import getWeights
5
+
6
+ from copy import deepcopy
7
+ import numpy as np
8
+ import pandas as pd
9
+
10
+ ### NEW MESSAGES: CLIENT_WEIGHTS : weights, SHARED_WEIGHTS : weights
11
+
12
+ # The PPFL_ServiceAgent class inherits from the base Agent class. It provides
13
+ # the simple shared service necessary for model combination under secure
14
+ # federated learning.
15
+
16
+ class PPFL_ServiceAgent(Agent):
17
+
18
+ def __init__(self, id, name, type, random_state=None, msg_fwd_delay=1000000,
19
+ iterations=4, num_clients=10):
20
+
21
+ # Base class init.
22
+ super().__init__(id, name, type, random_state)
23
+
24
+ # From how many clients do we expect to hear in each protocol iteration?
25
+ self.num_clients = num_clients
26
+
27
+ # How long does it take us to forward a peer-to-peer client relay message?
28
+ self.msg_fwd_delay = msg_fwd_delay
29
+
30
+ # Agent accumulation of elapsed times by category of task.
31
+ self.elapsed_time = { 'STORE_MODEL' : pd.Timedelta(0), 'COMBINE_MODEL' : pd.Timedelta(0) }
32
+
33
+ # How many iterations of the protocol should be run?
34
+ self.no_of_iterations = iterations
35
+
36
+ # Create a dictionary keyed by agentID to record which clients we have
37
+ # already heard from during the current protocol iteration. This can
38
+ # also guard against double accumulation from duplicate messages.
39
+ self.received = {}
40
+
41
+ # Create a list to accumulate received values. We don't need to know which came from which
42
+ # client, what the values mean, or indeed anything about them.
43
+ self.total = []
44
+
45
+ # Track the current iteration of the protocol.
46
+ self.current_iteration = 0
47
+
48
+
49
+ ### Simulation lifecycle messages.
50
+ def kernelStarting(self, startTime):
51
+ # self.kernel is set in Agent.kernelInitializing()
52
+
53
+ # Initialize custom state properties into which we will accumulate results later.
54
+ self.kernel.custom_state['srv_store_model'] = pd.Timedelta(0)
55
+ self.kernel.custom_state['srv_combine_model'] = pd.Timedelta(0)
56
+
57
+ # This agent should have negligible (or no) computation delay until otherwise specified.
58
+ self.setComputationDelay(0)
59
+
60
+ # Request a wake-up call as in the base Agent.
61
+ super().kernelStarting(startTime)
62
+
63
+
64
+ def kernelStopping(self):
65
+ # Add the server time components to the custom state in the Kernel, for output to the config.
66
+ # Note that times which should be reported in the mean per iteration are already so computed.
67
+ self.kernel.custom_state['srv_store_model'] += (self.elapsed_time['STORE_MODEL'] / self.no_of_iterations)
68
+ self.kernel.custom_state['srv_combine_model'] += (self.elapsed_time['COMBINE_MODEL'] / self.no_of_iterations)
69
+
70
+ # Allow the base class to perform stopping activities.
71
+ super().kernelStopping()
72
+
73
+
74
+ ### Simulation participation messages.
75
+
76
+ # The service agent does not require wakeup calls.
77
+
78
+ def receiveMessage (self, currentTime, msg):
79
+ # Allow the base Agent to do whatever it needs to.
80
+ super().receiveMessage(currentTime, msg)
81
+
82
+ # Logic for receiving weights from client agents. The weights are almost certainly
83
+ # noisy and encrypted, but that doesn't matter to us.
84
+ if msg.body['msg'] == "CLIENT_WEIGHTS":
85
+
86
+ # Start wallclock timing for message handling.
87
+ dt_combine_complete = None
88
+ dt_start_rcv = pd.Timestamp('now')
89
+
90
+ sender = msg.body['sender']
91
+ if sender in self.received: return
92
+
93
+ self.received[sender] = True
94
+ self.total.append(msg.body['weights'].tolist())
95
+
96
+ # Capture elapsed wallclock for model storage.
97
+ dt_store_complete = pd.Timestamp('now')
98
+
99
+ log_print ("Server received {} from {}.", msg.body['weights'], msg.body['sender'])
100
+
101
+ if len(self.received.keys()) >= self.num_clients:
102
+ # This is the last client on whom we were waiting.
103
+ self.combineWeights()
104
+
105
+ # Capture elapsed wallclock for model combination.
106
+ dt_combine_complete = pd.Timestamp('now')
107
+
108
+ # Then clear the protocol attributes for the next round.
109
+ self.received = {}
110
+ self.total = []
111
+
112
+ # Capture elapsed wallclock at end of CLIENT_WEIGHTS.
113
+ dt_end_rcv = pd.Timestamp('now')
114
+
115
+ # Compute time deltas only after all elapsed times are captured.
116
+ if dt_combine_complete is not None: self.elapsed_time['COMBINE_MODEL'] += dt_combine_complete - dt_store_complete
117
+ self.elapsed_time['STORE_MODEL'] += dt_store_complete - dt_start_rcv
118
+ elapsed_total = int((dt_end_rcv - dt_start_rcv).to_timedelta64())
119
+
120
+ # Use total elapsed wallclock as computation delay.
121
+ self.setComputationDelay(elapsed_total)
122
+
123
+ elif msg.body['msg'] == "FWD_MSG":
124
+ # In our star topology, all client messages are forwarded securely through the server.
125
+ sender = msg.body['sender']
126
+ recipient = msg.body['recipient']
127
+ msg.body['msg'] = msg.body['msgToForward']
128
+
129
+ self.setComputationDelay(self.msg_fwd_delay)
130
+
131
+ # Normally not advisable, but here we need to fix up the sender so the
132
+ # server can be a silent proxy.
133
+ self.kernel.sendMessage(sender, recipient, msg)
134
+
135
+
136
+ ### Combine client weights and respond to each client.
137
+ def combineWeights (self):
138
+
139
+ log_print ("total: {}", self.total)
140
+
141
+ # Don't respond after the final iteration.
142
+ if (self.current_iteration < self.no_of_iterations):
143
+
144
+ # Take the mean weights across the clients.
145
+ self.total = np.array(self.total)
146
+ totals = np.mean(self.total, axis=0)
147
+
148
+ # Send the combined weights back to each client who participated.
149
+ for sender in self.received.keys():
150
+ log_print ("Sending {} to {}", totals, sender)
151
+ self.sendMessage(sender, Message({ "msg" : "SHARED_WEIGHTS", "sender": self.id, "weights" : deepcopy(totals) }))
152
+
153
+ # This is the end of one round of the protocol.
154
+ self.current_iteration += 1
155
+
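The CLIENT_WEIGHTS handling above reduces to: deduplicate by sender, stash the vector, and once every expected client has reported, average element-wise and reset for the next round. A stripped-down sketch of that accumulate-then-combine pattern (toy vectors, no ABIDES kernel or message plumbing):

```python
import numpy as np

class ToyAggregator:
    def __init__(self, num_clients):
        self.num_clients = num_clients
        self.received = {}   # sender id -> True, guards against duplicate submissions
        self.total = []      # raw submitted vectors; the server never inspects them

    def on_client_weights(self, sender, weights):
        if sender in self.received:            # ignore duplicates
            return None
        self.received[sender] = True
        self.total.append(np.asarray(weights, dtype=float))
        if len(self.received) < self.num_clients:
            return None                        # still waiting on someone
        combined = np.mean(np.array(self.total), axis=0)
        self.received, self.total = {}, []     # reset protocol state for next round
        return combined                        # would be broadcast as SHARED_WEIGHTS

agg = ToyAggregator(num_clients=2)
assert agg.on_client_weights(0, [1.0, 3.0]) is None
print(agg.on_client_weights(1, [3.0, 1.0]))    # -> [2. 2.]
```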
ABIDES/agent/examples/crypto/PPFL_TemplateClientAgent.py ADDED
@@ -0,0 +1,311 @@
1
+ from agent.Agent import Agent
2
+ from agent.examples.crypto.PPFL_ServiceAgent import PPFL_ServiceAgent
3
+ from message.Message import Message
4
+ from util.util import log_print
5
+
6
+ from util.crypto.logReg import getWeights, reportStats
7
+ import util.crypto.diffieHellman as dh
8
+
9
+ import numpy as np
10
+ from os.path import exists
11
+ import pandas as pd
12
+ import random
13
+
14
+
15
+ # The PPFL_TemplateClientAgent class inherits from the base Agent class. It has the
16
+ # structure of a secure federated learning protocol with secure multiparty communication,
17
+ # but without any particular learning or noise methods. That is, this is a template in
18
+ # which the client parties simply pass around arbitrary data. Sections that would need
19
+ # to be completed are clearly marked.
20
+
21
+ class PPFL_TemplateClientAgent(Agent):
22
+
23
+ def __init__(self, id, name, type, peer_list=None, iterations=4, multiplier=10000, secret_scale = 100000,
24
+ X_train = None, y_train = None, X_test = None, y_test = None, split_size = None,
25
+ num_clients = None, num_subgraphs = None, random_state=None):
26
+
27
+ # Base class init.
28
+ super().__init__(id, name, type, random_state)
29
+
30
+
31
+ # Store the client's peer list (subgraph, neighborhood) with which it should communicate.
32
+ self.peer_list = peer_list
33
+
34
+ # Initialize a tracking attribute for the initial peer exchange and record the subgraph size.
35
+ self.peer_exchange_complete = False
36
+ self.num_peers = len(self.peer_list)
37
+
38
+ # Record the total number of clients participating in the protocol and the number of subgraphs.
39
+ # Neither of these are part of the protocol, or necessary for real-world implementation, but do
40
+ # allow for convenient logging of progress and results in simulation.
41
+ self.num_clients = num_clients
42
+ self.num_subgraphs = num_subgraphs
43
+
44
+ # Record the number of protocol (federated learning) iterations the clients will perform.
45
+ self.no_of_iterations = iterations
46
+
47
+ # Record the multiplier that will be used to protect against floating point accuracy loss and
48
+ # the scale of the client shared secrets.
49
+ self.multiplier = multiplier
50
+ self.secret_scale = secret_scale
51
+
52
+ # Record the training and testing splits for the data set to be learned.
53
+ self.X_train = X_train
54
+ self.y_train = y_train
55
+
56
+ self.X_test = X_test
57
+ self.y_test = y_test
58
+
59
+ # Record the number of features in the data set.
60
+ self.no_of_weights = X_train.shape[1]
61
+
62
+ # Initialize an attribute to remember the shared weights returned from the server.
63
+ self.prevWeight = np.zeros(self.no_of_weights)
64
+
65
+ # Each client receives only a portion of the training data each protocol iteration.
66
+ self.split_size = split_size
67
+
68
+ # Initialize a dictionary to remember which peers we have heard from during peer exchange.
69
+ self.peers_received = {}
70
+
71
+ # Initialize a dictionary to accumulate this client's timing information by task.
72
+ self.elapsed_time = { 'DH_OFFLINE' : pd.Timedelta(0), 'DH_ONLINE' : pd.Timedelta(0),
73
+ 'TRAINING' : pd.Timedelta(0), 'ENCRYPTION' : pd.Timedelta(0) }
74
+
75
+
76
+ # Pre-generate this client's local training data for each iteration (for the sake of simulation speed).
77
+ self.trainX = []
78
+ self.trainY = []
79
+
80
+ # This is a faster PRNG than the default, for times when we must select a large quantity of randomness.
81
+ self.prng = np.random.Generator(np.random.SFC64())
82
+
83
+ ### Data randomly selected from total training set each iteration, simulating online behavior.
84
+ for i in range(iterations):
85
+ slice = self.prng.choice(range(self.X_train.shape[0]), size = split_size, replace = False)
86
+
87
+ # Pull together the current local training set.
88
+ self.trainX.append(self.X_train[slice].copy())
89
+ self.trainY.append(self.y_train[slice].copy())
90
+
91
+
92
+ # Create dictionaries to hold the public and secure keys for this client, and the public keys shared
93
+ # by its peers.
94
+ self.pubkeys = {}
95
+ self.seckeys = {}
96
+ self.peer_public_keys = {}
97
+
98
+ # Create dictionaries to hold the shared key for each peer each iteration and the seed for the
99
+ # following iteration.
100
+ self.r = {}
101
+ self.R = {}
102
+
103
+
104
+ ### ADD DIFFERENTIAL PRIVACY CONSTANTS AND CONFIGURATION HERE, IF NEEDED.
105
+ #
106
+ #
107
+
108
+
109
+ # Iteration counter.
110
+ self.current_iteration = 0
111
+
112
+
113
+
114
+
115
+ ### Simulation lifecycle messages.
116
+
117
+ def kernelStarting(self, startTime):
118
+
119
+ # Initialize custom state properties into which we will later accumulate results.
120
+ # To avoid redundancy, we allow only the first client to handle initialization.
121
+ if self.id == 1:
122
+ self.kernel.custom_state['dh_offline'] = pd.Timedelta(0)
123
+ self.kernel.custom_state['dh_online'] = pd.Timedelta(0)
124
+ self.kernel.custom_state['training'] = pd.Timedelta(0)
125
+ self.kernel.custom_state['encryption'] = pd.Timedelta(0)
126
+
127
+ # Find the PPFL service agent, so messages can be directed there.
128
+ self.serviceAgentID = self.kernel.findAgentByType(PPFL_ServiceAgent)
129
+
130
+ # Request a wake-up call as in the base Agent. Noise is kept small because
131
+ # the overall protocol duration is so short right now. (up to one microsecond)
132
+ super().kernelStarting(startTime + pd.Timedelta(self.random_state.randint(low = 0, high = 1000), unit='ns'))
133
+
134
+
135
+ def kernelStopping(self):
136
+
137
+ # Accumulate into the Kernel's "custom state" this client's elapsed times per category.
138
+ # Note that times which should be reported in the mean per iteration are already so computed.
139
+ # These will be output to the config (experiment) file at the end of the simulation.
140
+
141
+ self.kernel.custom_state['dh_offline'] += self.elapsed_time['DH_OFFLINE']
142
+ self.kernel.custom_state['dh_online'] += (self.elapsed_time['DH_ONLINE'] / self.no_of_iterations)
143
+ self.kernel.custom_state['training'] += (self.elapsed_time['TRAINING'] / self.no_of_iterations)
144
+ self.kernel.custom_state['encryption'] += (self.elapsed_time['ENCRYPTION'] / self.no_of_iterations)
145
+
146
+ super().kernelStopping()
147
+
148
+
149
+ ### Simulation participation messages.
150
+
151
+ def wakeup (self, currentTime):
152
+ super().wakeup(currentTime)
153
+
154
+ # Record start of wakeup for real-time computation delay.
155
+ dt_wake_start = pd.Timestamp('now')
156
+
157
+ # Check if the clients are still performing the one-time peer exchange.
158
+ if not self.peer_exchange_complete:
159
+
160
+ # Generate DH keys.
161
+ self.pubkeys, self.seckeys = dh.dict_keygeneration( self.peer_list )
162
+
163
+ # Record elapsed wallclock for Diffie Hellman offline.
164
+ dt_wake_end = pd.Timestamp('now')
165
+ self.elapsed_time['DH_OFFLINE'] += dt_wake_end - dt_wake_start
166
+
167
+ # Set computation delay to elapsed wallclock time.
168
+ self.setComputationDelay(int((dt_wake_end - dt_wake_start).to_timedelta64()))
169
+
170
+ # Send generated values to peers.
171
+ for idx, peer in enumerate(self.peer_list):
172
+ # We assume a star network configuration where all messages between peers must be forwarded
173
+ # through the server.
174
+ self.sendMessage(self.serviceAgentID, Message({ "msg" : "FWD_MSG", "msgToForward" : "PEER_EXCHANGE",
175
+ "sender": self.id, "recipient": peer, "pubkey" : self.pubkeys[peer] }))
176
+
177
+ else:
178
+
179
+ # We are waking up to start a new iteration of the protocol.
180
+ # (Peer exchange is done before all this.)
181
+
182
+ if (self.current_iteration == 0):
183
+ # During iteration 0 (only) we complete the key exchange and prepare the
184
+ # common key list, because at this point we know we have received keys
185
+ # from all peers.
186
+
187
+ # R is the common key dictionary (by peer agent id).
188
+ self.R = dh.dict_keyexchange(self.peer_list, self.id, self.pubkeys, self.seckeys, self.peer_public_keys)
189
+
190
+
191
+ # CREATE AND CACHE LOCAL DIFFERENTIAL PRIVACY NOISE HERE, IF NEEDED.
192
+ #
193
+ #
194
+
195
+
196
+ # Diffie Hellman is done in every iteration.
197
+ for peer_id, common_key in self.R.items():
198
+
199
+ random.seed(common_key)
200
+ rand = random.getrandbits(512)
201
+
202
+ rand_b_raw = format(rand, '0512b')
203
+ rand_b_rawr = rand_b_raw[:256]
204
+ rand_b_rawR = rand_b_raw[256:]
205
+
206
+
207
+ # Negate offsets below this agent's id. This ensures each offset will be
208
+ # added once and subtracted once.
209
+ r = int(rand_b_rawr,2) % (2**32)
210
+
211
+ # Update dictionary of shared secrets for this iteration.
212
+ self.r[peer_id] = r if peer_id < self.id else -r
213
+
214
+ # Store the shared seeds for the next iteration.
215
+ self.R[peer_id] = int(rand_b_rawR,2)
216
+
217
+
218
+ # Record elapsed wallclock for Diffie Hellman online.
219
+ dt_online_complete = pd.Timestamp('now')
220
+
221
+ # For convenience of things indexed by iteration...
222
+ i = self.current_iteration
223
+
224
+
225
+ ### ADD LOCAL LEARNING METHOD HERE, IF NEEDED.
226
+ #
227
+ #
228
+
229
+ weight = np.random.normal(loc = self.prevWeight, scale = np.abs(self.prevWeight) / 10, size = self.prevWeight.shape)  # scale must be non-negative
230
+
231
+
232
+ # Record elapsed wallclock for training model.
233
+ dt_training_complete = pd.Timestamp('now')
234
+
235
+
236
+ ### ADD NOISE TO THE WEIGHTS HERE, IF NEEDED.
237
+ #
238
+ #
239
+
240
+ n = np.array(weight) * self.multiplier
241
+ weights_to_send = n + sum(self.r.values())
242
+
243
+
244
+ # Record elapsed wallclock for encryption.
245
+ dt_encryption_complete = pd.Timestamp('now')
246
+
247
+ # Set computation delay to elapsed wallclock time.
248
+ self.setComputationDelay(int((dt_encryption_complete - dt_wake_start).to_timedelta64()))
249
+
250
+ # Send the message to the server.
251
+ self.sendMessage(self.serviceAgentID, Message({ "msg" : "CLIENT_WEIGHTS", "sender": self.id,
252
+ "weights" : weights_to_send }))
253
+
254
+ self.current_iteration += 1
255
+
256
+ # Store elapsed times by category.
257
+ self.elapsed_time['DH_ONLINE'] += dt_online_complete - dt_wake_start
258
+ self.elapsed_time['TRAINING'] += dt_training_complete - dt_online_complete
259
+ self.elapsed_time['ENCRYPTION'] += dt_encryption_complete - dt_training_complete
260
+
261
+
262
+
263
+ def receiveMessage (self, currentTime, msg):
264
+ super().receiveMessage(currentTime, msg)
265
+
266
+ if msg.body['msg'] == "PEER_EXCHANGE":
267
+
268
+ # Record start of message processing.
269
+ dt_rcv_start = pd.Timestamp('now')
270
+
271
+ # Ensure we don't somehow record the same peer twice. These all come from the
272
+ # service provider, relayed from other clients, but are "fixed up" to appear
273
+ # as if they come straight from the relevant peer.
274
+ if msg.body['sender'] not in self.peers_received:
275
+
276
+ # Record the content of the message and that we received it.
277
+ self.peers_received[msg.body['sender']] = True
278
+ self.peer_public_keys[msg.body['sender']] = msg.body['pubkey']
279
+
280
+ # Record end of message processing.
281
+ dt_rcv_end = pd.Timestamp('now')
282
+
283
+ # Store elapsed times by category.
284
+ self.elapsed_time['DH_OFFLINE'] += dt_rcv_end - dt_rcv_start
285
+
286
+ # Set computation delay to elapsed wallclock time.
287
+ self.setComputationDelay(int((dt_rcv_end - dt_rcv_start).to_timedelta64()))
288
+
289
+ # If this is the last peer from whom we expect to hear, move on with the protocol.
290
+ if len(self.peers_received) == self.num_peers:
291
+ self.peer_exchange_complete = True
292
+ self.setWakeup(currentTime + pd.Timedelta('1ns'))
293
+
294
+ elif msg.body['msg'] == "SHARED_WEIGHTS":
295
+ # Reset computation delay.
296
+ self.setComputationDelay(0)
297
+
298
+ # Extract the shared weights from the message.
299
+ self.prevWeight = msg.body['weights']
300
+
301
+ # Remove the multiplier that was helping guard against floating point error.
302
+ self.prevWeight /= self.multiplier
303
+
304
+ log_print ("Client weights received for iteration {} by {}: {}", self.current_iteration, self.id, self.prevWeight)
305
+
306
+ if self.id == 1: print (f"Protocol iteration {self.current_iteration} complete.")
307
+
308
+ # Start a new iteration if we are not at the end of the protocol.
309
+ if self.current_iteration < self.no_of_iterations:
310
+ self.setWakeup(currentTime + pd.Timedelta('1ns'))
311
+
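The per-iteration loop above stretches each Diffie-Hellman common key into this round's masking offset plus next round's seed by re-seeding Python's `random` module and splitting 512 bits in half. Because both peers hold the same common key, they derive identical values independently; a small sketch with a made-up key and peer ids:

```python
import random

def derive_offset_and_next_seed(common_key):
    random.seed(common_key)                   # both peers seed with the same shared key
    bits = format(random.getrandbits(512), '0512b')
    r = int(bits[:256], 2) % (2 ** 32)        # this round's masking offset
    next_seed = int(bits[256:], 2)            # replaces the shared seed next iteration
    return r, next_seed

shared_key = 123456789                        # stand-in for the DH common key of peers (2, 5)
r_a, seed_a = derive_offset_and_next_seed(shared_key)   # computed independently by client 5
r_b, seed_b = derive_offset_and_next_seed(shared_key)   # computed independently by client 2
assert (r_a, seed_a) == (r_b, seed_b)         # identical derivations on both sides

offset_for_5 = +r_a   # peer id 2 < own id 5 -> add the offset
offset_for_2 = -r_b   # peer id 5 > own id 2 -> subtract it
assert offset_for_5 + offset_for_2 == 0       # the pair contributes nothing to the sum
```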
ABIDES/agent/execution/ExecutionAgent.py ADDED
@@ -0,0 +1,106 @@
1
+ import pandas as pd
2
+ import datetime
3
+
4
+ from agent.TradingAgent import TradingAgent
5
+ from util.util import log_print
6
+
7
+
8
+ class ExecutionAgent(TradingAgent):
9
+
10
+ def __init__(self, id, name, type, symbol, starting_cash,
11
+ direction, quantity, execution_time_horizon,
12
+ trade=True, log_orders=False, random_state=None):
13
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
14
+ self.symbol = symbol
15
+ self.direction = direction
16
+ self.quantity = quantity
17
+ self.execution_time_horizon = execution_time_horizon
18
+
19
+ self.start_time = self.execution_time_horizon[0]
20
+ self.end_time = self.execution_time_horizon[-1]
21
+ self.schedule = None
22
+
23
+ self.rem_quantity = quantity
24
+ self.arrival_price = None
25
+
26
+ self.accepted_orders = []
27
+ self.trade = trade
28
+ self.log_orders = log_orders
29
+
30
+ self.state = 'AWAITING_WAKEUP'
31
+
32
+ def kernelStopping(self):
33
+ super().kernelStopping()
34
+ if self.trade:
35
+ slippage = self.get_average_transaction_price() - self.arrival_price if self.direction == 'BUY' else \
36
+ self.arrival_price - self.get_average_transaction_price()
37
+ self.logEvent('DIRECTION', self.direction, True)
38
+ self.logEvent('TOTAL_QTY', self.quantity, True)
39
+ self.logEvent('REM_QTY', self.rem_quantity, True)
40
+ self.logEvent('ARRIVAL_MID', self.arrival_price, True)
41
+ self.logEvent('AVG_TXN_PRICE', self.get_average_transaction_price(), True)
42
+ self.logEvent('SLIPPAGE', slippage, True)
43
+
44
+ def wakeup(self, currentTime):
45
+ can_trade = super().wakeup(currentTime)
46
+ if not can_trade: return
47
+ if self.trade:
48
+ try:
49
+ self.setWakeup([time for time in self.execution_time_horizon if time > currentTime][0])
50
+ except IndexError:
51
+ pass
52
+
53
+ self.getCurrentSpread(self.symbol, depth=1000)
54
+ self.state = 'AWAITING_SPREAD'
55
+
56
+ def receiveMessage(self, currentTime, msg):
57
+ super().receiveMessage(currentTime, msg)
58
+ if msg.body['msg'] == 'ORDER_EXECUTED': self.handleOrderExecution(currentTime, msg)
59
+ elif msg.body['msg'] == 'ORDER_ACCEPTED': self.handleOrderAcceptance(currentTime, msg)
60
+ if self.rem_quantity > 0 and self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':
61
+ self.cancelOrders()
62
+ self.placeOrders(currentTime)
63
+
64
+ def handleOrderExecution(self, currentTime, msg):
65
+ executed_order = msg.body['order']
66
+ self.executed_orders.append(executed_order)
67
+ executed_qty = sum(executed_order.quantity for executed_order in self.executed_orders)
68
+ self.rem_quantity = self.quantity - executed_qty
69
+ log_print('[---- {} - {} ----]: LIMIT ORDER EXECUTED - {} @ {}'.format(self.name, currentTime,
70
+ executed_order.quantity,
71
+ executed_order.fill_price))
72
+ log_print('[---- {} - {} ----]: EXECUTED QUANTITY: {}'.format(self.name, currentTime, executed_qty))
73
+ log_print('[---- {} - {} ----]: REMAINING QUANTITY: {}'.format(self.name, currentTime, self.rem_quantity))
74
+ log_print('[---- {} - {} ----]: % EXECUTED: {} \n'.format(self.name, currentTime,
75
+ round((1 - self.rem_quantity / self.quantity) * 100, 2)))
76
+
77
+ def handleOrderAcceptance(self, currentTime, msg):
78
+ accepted_order = msg.body['order']
79
+ self.accepted_orders.append(accepted_order)
80
+ accepted_qty = sum(accepted_order.quantity for accepted_order in self.accepted_orders)
81
+ log_print('[---- {} - {} ----]: ACCEPTED QUANTITY : {}'.format(self.name, currentTime, accepted_qty))
82
+
83
+ def placeOrders(self, currentTime):
84
+ if currentTime.floor('1s') == self.execution_time_horizon[-2]:
85
+ self.placeMarketOrder(symbol=self.symbol, quantity=self.rem_quantity, is_buy_order=self.direction == 'BUY')
86
+ elif currentTime.floor('1s') in self.execution_time_horizon[:-2]:
87
+ bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
88
+
89
+ if currentTime.floor('1s') == self.start_time:
90
+ self.arrival_price = (bid + ask) / 2
91
+ log_print("[---- {} - {} ----]: Arrival Mid Price {}".format(self.name, currentTime,
92
+ self.arrival_price))
93
+
94
+ qty = self.schedule[pd.Interval(currentTime.floor('1s'),
95
+ currentTime.floor('1s')+datetime.timedelta(minutes=1))]
96
+ price = ask if self.direction == 'BUY' else bid
97
+ self.placeLimitOrder(symbol=self.symbol, quantity=qty,
98
+ is_buy_order=self.direction == 'BUY', limit_price=price)
99
+ log_print('[---- {} - {} ----]: LIMIT ORDER PLACED - {} @ {}'.format(self.name, currentTime, qty, price))
100
+
101
+ def cancelOrders(self):
102
+ for _, order in self.orders.items():
103
+ self.cancelOrder(order)
104
+
105
+ def getWakeFrequency(self):
106
+ return self.execution_time_horizon[0] - self.mkt_open
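`placeOrders` looks up its child quantity by reconstructing the pandas `Interval` key for the current bin; the lookup works because both the schedule bins and the key use the default `closed='right'` intervals and the agent wakes exactly on bin edges. A small sketch with made-up times and quantities:

```python
import pandas as pd

start = pd.Timestamp('2020-06-15 09:32:00')
end   = pd.Timestamp('2020-06-15 09:35:00')
bins  = pd.interval_range(start=start, end=end, freq='1min')   # 3 one-minute bins

schedule = {b: 6_000 for b in bins}          # e.g. 18,000 shares split evenly

current_time = pd.Timestamp('2020-06-15 09:33:00')  # wakeups land exactly on bin edges
key = pd.Interval(current_time.floor('1s'),
                  current_time.floor('1s') + pd.Timedelta('1min'))  # same as timedelta(minutes=1)
print(schedule[key])                         # -> 6000 child shares for this bin
```

The TWAP and VWAP subclasses rely on exactly this lookup when they populate `self.schedule`.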
ABIDES/agent/execution/POVExecutionAgent.py ADDED
@@ -0,0 +1,93 @@
1
+ import sys
2
+ import warnings
3
+ import pandas as pd
4
+
5
+ from agent.TradingAgent import TradingAgent
6
+ from util.util import log_print
7
+
8
+ POVExecutionWarning_msg = "Running a configuration using POVExecutionAgent requires an ExchangeAgent with " \
9
+ "attribute `stream_history` set to a large value, recommended at sys.maxsize."
10
+
11
+
12
+ class POVExecutionAgent(TradingAgent):
13
+
14
+ def __init__(self, id, name, type, symbol, starting_cash,
15
+ direction, quantity, pov, start_time, freq, lookback_period, end_time=None,
16
+ trade=True, log_orders=False, random_state=None):
17
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
18
+ self.log_events = True # save events for plotting
19
+ self.symbol = symbol
20
+ self.direction = direction
21
+ self.quantity = quantity
22
+ self.rem_quantity = quantity
23
+ self.pov = pov
24
+ self.start_time = start_time
25
+ self.end_time = end_time
26
+ self.freq = freq
27
+ self.look_back_period = lookback_period
28
+ self.trade = trade
29
+ self.accepted_orders = []
30
+ self.state = 'AWAITING_WAKEUP'
31
+
32
+ warnings.warn(POVExecutionWarning_msg, UserWarning, stacklevel=1)
33
+ self.processEndTime()
34
+
35
+ def processEndTime(self):
36
+ """ Make end time of POV order sensible, i.e. if a time is given leave it alone; else, add 24 hours to start."""
37
+ if self.end_time is None:
38
+ self.end_time = self.start_time + pd.to_timedelta('24 hours')
39
+
40
+ def wakeup(self, currentTime):
41
+ can_trade = super().wakeup(currentTime)
42
+ self.setWakeup(currentTime + self.getWakeFrequency())
43
+ if not can_trade: return
44
+ if self.trade and self.rem_quantity > 0 and self.start_time < currentTime < self.end_time:
45
+ self.cancelOrders()
46
+ self.getCurrentSpread(self.symbol, depth=sys.maxsize)
47
+ self.get_transacted_volume(self.symbol, lookback_period=self.look_back_period)
48
+ self.state = 'AWAITING_TRANSACTED_VOLUME'
49
+
50
+ def getWakeFrequency(self):
51
+ return pd.Timedelta(self.freq)
52
+
53
+ def receiveMessage(self, currentTime, msg):
54
+ super().receiveMessage(currentTime, msg)
55
+ if msg.body['msg'] == 'ORDER_EXECUTED': self.handleOrderExecution(currentTime, msg)
56
+ elif msg.body['msg'] == 'ORDER_ACCEPTED': self.handleOrderAcceptance(currentTime, msg)
57
+
58
+ if currentTime > self.end_time:
59
+ log_print(
60
+ f'[---- {self.name} - {currentTime} ----]: current time {currentTime} is after specified end time of POV order '
61
+ f'{self.end_time}. TRADING CONCLUDED. ')
62
+ return
63
+
64
+ if self.rem_quantity > 0 and \
65
+ self.state == 'AWAITING_TRANSACTED_VOLUME' \
66
+ and msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME' \
67
+ and self.transacted_volume[self.symbol] is not None\
68
+ and currentTime > self.start_time:
69
+ qty = round(self.pov * self.transacted_volume[self.symbol])
70
+ self.cancelOrders()
71
+ self.placeMarketOrder(self.symbol, qty, self.direction == 'BUY')
72
+ log_print(f'[---- {self.name} - {currentTime} ----]: TOTAL TRANSACTED VOLUME IN THE LAST {self.look_back_period} = {self.transacted_volume[self.symbol]}')
73
+ log_print(f'[---- {self.name} - {currentTime} ----]: MARKET ORDER PLACED - {qty}')
74
+
75
+ def handleOrderAcceptance(self, currentTime, msg):
76
+ accepted_order = msg.body['order']
77
+ self.accepted_orders.append(accepted_order)
78
+ accepted_qty = sum(accepted_order.quantity for accepted_order in self.accepted_orders)
79
+ log_print(f'[---- {self.name} - {currentTime} ----]: ACCEPTED QUANTITY : {accepted_qty}')
80
+
81
+ def handleOrderExecution(self, currentTime, msg):
82
+ executed_order = msg.body['order']
83
+ self.executed_orders.append(executed_order)
84
+ executed_qty = sum(executed_order.quantity for executed_order in self.executed_orders)
85
+ self.rem_quantity = self.quantity - executed_qty
86
+ log_print(f'[---- {self.name} - {currentTime} ----]: LIMIT ORDER EXECUTED - {executed_order.quantity} @ {executed_order.fill_price}')
87
+ log_print(f'[---- {self.name} - {currentTime} ----]: EXECUTED QUANTITY: {executed_qty}')
88
+ log_print(f'[---- {self.name} - {currentTime} ----]: REMAINING QUANTITY (NOT EXECUTED): {self.rem_quantity}')
89
+ log_print(f'[---- {self.name} - {currentTime} ----]: % EXECUTED: {round((1 - self.rem_quantity / self.quantity) * 100, 2)} \n')
90
+
91
+ def cancelOrders(self):
92
+ for _, order in self.orders.items():
93
+ self.cancelOrder(order)
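The sizing rule is deliberately simple: each wake-up the agent asks the exchange for the volume transacted over the look-back window and sends a market order for a fixed participation fraction of it. A toy example of the arithmetic (made-up numbers):

```python
pov = 0.1                      # participate at 10% of market volume
transacted_volume = 125_000    # shares traded in the look-back window (from QUERY_TRANSACTED_VOLUME)

child_qty = round(pov * transacted_volume)
print(child_qty)               # -> 12500 shares sent as a market order this wake-up
```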
ABIDES/agent/execution/TWAPExecutionAgent.py ADDED
@@ -0,0 +1,38 @@
1
+ import pandas as pd
2
+ from agent.execution.ExecutionAgent import ExecutionAgent
3
+ from util.util import log_print
4
+
5
+
6
+ class TWAPExecutionAgent(ExecutionAgent):
7
+ """
8
+ TWAP (Time Weighted Average Price) Execution Agent:
9
+ - Aims to execute the parent order as close as possible to the TWAP average price
10
+ - breaks up the parent order and releases dynamically smaller chunks of the order to the market using
11
+ evenly divided time slots during the execution time horizon.
12
+ -
13
+ e.g. 18,000 shares - to be executed between 09:32:00 and 09:35:00
14
+ - > 18,000 shares over 3 minutes = 100 shares per second
15
+ """
16
+
17
+ def __init__(self, id, name, type, symbol, starting_cash,
18
+ direction, quantity, execution_time_horizon, freq,
19
+ trade=True, log_orders=False, random_state=None):
20
+ super().__init__(id, name, type, symbol, starting_cash,
21
+ direction=direction, quantity=quantity, execution_time_horizon=execution_time_horizon,
22
+ trade=trade, log_orders=log_orders, random_state=random_state)
23
+
24
+ self.freq = freq
25
+ self.schedule = self.generate_schedule()
26
+
27
+ def generate_schedule(self):
28
+
29
+ schedule = {}
30
+ bins = pd.interval_range(start=self.start_time, end=self.end_time, freq=self.freq)
31
+ child_quantity = int(self.quantity / len(self.execution_time_horizon))
32
+ for b in bins:
33
+ schedule[b] = child_quantity
34
+ log_print('[---- {} {} - Schedule ----]:'.format(self.name, self.currentTime))
35
+ log_print('[---- {} {} - Total Number of Orders ----]: {}'.format(self.name, self.currentTime, len(schedule)))
36
+ for t, q in schedule.items():
37
+ log_print("From: {}, To: {}, Quantity: {}".format(t.left.time(), t.right.time(), q))
38
+ return schedule
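`generate_schedule` splits the parent quantity evenly across the bins of the execution horizon. A standalone sketch with made-up inputs (note that the divisor is the number of horizon timestamps, not the number of bins; any shortfall is swept up by the final market order placed in `ExecutionAgent.placeOrders`):

```python
import pandas as pd

quantity = 18_000
horizon = pd.date_range('2020-06-15 09:32:00', '2020-06-15 09:35:00', freq='1min')

bins = pd.interval_range(start=horizon[0], end=horizon[-1], freq='1min')  # 3 bins
child_qty = int(quantity / len(horizon))                                  # 18,000 / 4 = 4,500
schedule = {b: child_qty for b in bins}

for b, q in schedule.items():
    print(b.left.time(), '->', b.right.time(), q)
```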
ABIDES/agent/execution/VWAPExecutionAgent.py ADDED
@@ -0,0 +1,55 @@
1
+ import pandas as pd
2
+
3
+ from agent.execution.ExecutionAgent import ExecutionAgent
4
+ from util.util import log_print
5
+
6
+
7
+ class VWAPExecutionAgent(ExecutionAgent):
8
+ """
9
+ VWAP (Volume Weighted Average Price) Execution Agent:
10
+ breaks up the parent order and dynamically releases smaller chunks of the order to the market using
11
+ stock specific historical volume profiles. Aims to execute the parent order as close as possible to the
12
+ VWAP average price
13
+ """
14
+
15
+ def __init__(self, id, name, type, symbol, starting_cash,
16
+ direction, quantity, execution_time_horizon, freq, volume_profile_path,
17
+ trade=True, log_orders=False, random_state=None):
18
+ super().__init__(id, name, type, symbol, starting_cash,
19
+ direction=direction, quantity=quantity, execution_time_horizon=execution_time_horizon,
20
+ trade=trade, log_orders=log_orders, random_state=random_state)
21
+ self.freq = freq
22
+ self.volume_profile_path = volume_profile_path
23
+ self.schedule = self.generate_schedule()
24
+
25
+ def generate_schedule(self):
26
+
27
+ if self.volume_profile_path is None:
28
+ volume_profile = VWAPExecutionAgent.synthetic_volume_profile(self.start_time, self.freq)
29
+ else:
30
+ volume_profile = pd.read_pickle(self.volume_profile_path).to_dict()
31
+
32
+ schedule = {}
33
+ bins = pd.interval_range(start=self.start_time, end=self.end_time, freq=self.freq)
34
+ for b in bins:
35
+ schedule[b] = round(volume_profile[b.left] * self.quantity)
36
+ log_print('[---- {} {} - Schedule ----]:'.format(self.name, self.currentTime))
37
+ log_print('[---- {} {} - Total Number of Orders ----]: {}'.format(self.name, self.currentTime, len(schedule)))
38
+ for t, q in schedule.items():
39
+ log_print("From: {}, To: {}, Quantity: {}".format(t.left.time(), t.right.time(), q))
40
+ return schedule
41
+
42
+ @staticmethod
43
+ def synthetic_volume_profile(date, freq):
44
+ mkt_open = pd.to_datetime(date.date()) + pd.to_timedelta('09:30:00')
45
+ mkt_close = pd.to_datetime(date.date()) + pd.to_timedelta('16:00:00')
46
+ day_range = pd.date_range(mkt_open, mkt_close, freq=freq)
47
+
48
+ vol_profile = {}
49
+ for t, x in zip(day_range, range(int(-len(day_range) / 2), int(len(day_range) / 2), 1)):
50
+ vol_profile[t] = x ** 2 + 2 * x + 2
51
+
52
+ factor = 1.0 / sum(vol_profile.values())
53
+ vol_profile = {k: v * factor for k, v in vol_profile.items()}
54
+
55
+ return vol_profile
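When no historical profile is supplied, `synthetic_volume_profile` builds a U-shaped intraday curve (a quadratic in a symmetric bin index, heavier at the open and the close) and normalises it so the weights sum to one; `generate_schedule` then multiplies those weights by the parent quantity. A standalone sketch with a made-up date and a 30-minute grid:

```python
import pandas as pd

date = pd.Timestamp('2020-06-15')
freq = '30min'
mkt_open  = date + pd.to_timedelta('09:30:00')
mkt_close = date + pd.to_timedelta('16:00:00')
day_range = pd.date_range(mkt_open, mkt_close, freq=freq)

# Quadratic in a symmetric index -> heavier weight at the open and the close.
half = len(day_range) // 2
raw = {t: x ** 2 + 2 * x + 2 for t, x in zip(day_range, range(-half, half))}
total = sum(raw.values())
profile = {t: v / total for t, v in raw.items()}   # weights sum to 1.0

quantity = 18_000
schedule = {t: round(w * quantity) for t, w in profile.items()}
print(sum(schedule.values()))   # close to 18,000 (per-bin rounding)
```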
ABIDES/agent/market_makers/AdaptiveMarketMakerAgent.py ADDED
@@ -0,0 +1,298 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ import pandas as pd
3
+ from util.util import log_print
4
+ from collections import namedtuple, deque
5
+ from util.util import ignored, sigmoid
6
+ from math import floor, ceil
7
+
8
+
9
+ ANCHOR_TOP_STR = 'top'
10
+ ANCHOR_BOTTOM_STR = 'bottom'
11
+ ANCHOR_MIDDLE_STR = 'middle'
12
+
13
+ ADAPTIVE_SPREAD_STR = 'adaptive'
14
+ INITIAL_SPREAD_VALUE = 50
15
+
16
+
17
+ class AdaptiveMarketMakerAgent(TradingAgent):
18
+ """ This class implements a modification of the Chakraborty-Kearns `ladder` market-making strategy, wherein the
19
+ size of order placed at each level is set as a fraction of measured transacted volume in the previous time
20
+ period.
21
+
22
+ Can skew orders to size of current inventory using beta parameter, where beta == 0 represents inventory being
23
+ ignored and beta == infinity represents all liquidity placed on one side of book.
24
+
25
+ ADAPTIVE SPREAD: the market maker's spread can either be set as a fixed value or be adaptive.
26
+
27
+ """
28
+
29
+ def __init__(self, id, name, type, symbol, starting_cash, pov=0.05, min_order_size=20, window_size=5, anchor=ANCHOR_MIDDLE_STR,
30
+ num_ticks=20, level_spacing=0.5, wake_up_freq='1s', subscribe=False, subscribe_freq=10e9, subscribe_num_levels=1, cancel_limit_delay=50,
31
+ skew_beta=0, spread_alpha=0.85, backstop_quantity=None, log_orders=False, random_state=None, wakeup_time=None):
32
+
33
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
34
+ self.is_adaptive = False
35
+ self.symbol = symbol # Symbol traded
36
+ self.pov = pov # fraction of transacted volume placed at each price level
37
+ self.min_order_size = min_order_size # minimum size order to place at each level, if pov <= min
38
+ self.anchor = self.validateAnchor(anchor) # anchor either top of window or bottom of window to mid-price
39
+ self.window_size = self.validateWindowSize(window_size) # Size in ticks (cents) of how wide the window around mid price is. If equal to
40
+ # string 'adaptive' then ladder starts at best bid and ask
41
+ self.num_ticks = num_ticks # number of ticks on each side of window in which to place liquidity
42
+ self.level_spacing = level_spacing # level spacing as a fraction of the spread
43
+ self.wake_up_freq = wake_up_freq # Frequency of agent wake up
44
+ self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
45
+ self.subscribe_freq = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
46
+ # in subscribe mode
47
+ self.subscribe_num_levels = subscribe_num_levels # Number of orderbook levels in subscription mode
48
+ self.cancel_limit_delay = cancel_limit_delay # delay in nanoseconds between order cancellations and new limit order placements
49
+
50
+ self.skew_beta = skew_beta # parameter for determining order placement imbalance
51
+ self.spread_alpha = spread_alpha # parameter for exponentially weighted moving average of spread. 1 corresponds to ignoring old values, 0 corresponds to no updates
52
+ self.backstop_quantity = backstop_quantity # how many orders to place at outside order level, to prevent liquidity dropouts. If None then place same as at other levels.
53
+ self.log_orders = log_orders
54
+
55
+ ## Internal variables
56
+
57
+ self.subscription_requested = False
58
+ self.state = self.initialiseState()
59
+ self.buy_order_size = self.min_order_size
60
+ self.sell_order_size = self.min_order_size
61
+
62
+ self.last_mid = None # last observed mid price
63
+ self.last_spread = INITIAL_SPREAD_VALUE # last observed spread moving average
64
+ self.tick_size = None if self.is_adaptive else ceil(self.last_spread * self.level_spacing)
65
+ self.LIQUIDITY_DROPOUT_WARNING = f"Liquidity dropout for agent {self.name}."
66
+ self.wakeup_time = wakeup_time
67
+
68
+
69
+ def initialiseState(self):
70
+ """ Returns variables that keep track of whether spread and transacted volume have been observed. """
71
+ if not self.subscribe:
72
+ return {
73
+ "AWAITING_SPREAD": True,
74
+ "AWAITING_TRANSACTED_VOLUME": True
75
+ }
76
+ else:
77
+ return {
78
+ "AWAITING_MARKET_DATA": True,
79
+ "AWAITING_TRANSACTED_VOLUME": True
80
+ }
81
+
82
+ def validateAnchor(self, anchor):
83
+ """ Checks that input parameter anchor takes allowed value, raises ValueError if not.
84
+
85
+ :param anchor: str
86
+ :return:
87
+ """
88
+ if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR, ANCHOR_MIDDLE_STR]:
89
+ raise ValueError(f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}`, `{ANCHOR_MIDDLE_STR}` or "
90
+ f"`{ANCHOR_TOP_STR}`")
91
+ else:
92
+ return anchor
93
+
94
+ def validateWindowSize(self, window_size):
95
+ """ Checks that input parameter window_size takes allowed value, raises ValueError if not
96
+
97
+ :param window_size:
98
+ :return:
99
+ """
100
+ try: # fixed window size specified
101
+ return int(window_size)
102
+ except:
103
+ if window_size.lower() == 'adaptive':
104
+ self.is_adaptive = True
105
+ self.anchor = ANCHOR_MIDDLE_STR
106
+ return None
107
+ else:
108
+ raise ValueError(f"Variable window_size must be of type int or string {ADAPTIVE_SPREAD_STR}.")
109
+
110
+ def kernelStarting(self, startTime):
111
+ super().kernelStarting(startTime)
112
+
113
+ def wakeup(self, currentTime):
114
+ """ Agent wakeup is determined by self.wake_up_freq """
115
+ can_trade = super().wakeup(currentTime)
116
+ if self.subscribe and not self.subscription_requested:
117
+ super().requestDataSubscription(self.symbol, levels=self.subscribe_num_levels,
118
+ freq=pd.Timedelta(self.subscribe_freq, unit='ns'))
119
+ self.subscription_requested = True
120
+ self.get_transacted_volume(self.symbol, lookback_period=self.subscribe_freq)
121
+ self.state = self.initialiseState()
122
+
123
+ elif self.wakeup_time is not None and currentTime <= self.wakeup_time:
124
+ self.setWakeup(currentTime + self.getWakeFrequency())
125
+
126
+ elif can_trade and not self.subscribe:
127
+ self.cancelAllOrders()
128
+ self.delay(self.cancel_limit_delay)
129
+ self.getCurrentSpread(self.symbol, depth=self.subscribe_num_levels)
130
+ self.get_transacted_volume(self.symbol, lookback_period=self.wake_up_freq)
131
+ self.initialiseState()
132
+
133
+ def receiveMessage(self, currentTime, msg):
134
+ """ Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.
135
+
136
+ :param currentTime: simulation current time
137
+ :param msg: message received by self from ExchangeAgent
138
+
139
+ :type currentTime: pd.Timestamp
140
+ :type msg: Message
141
+
142
+ :return:
143
+ """
144
+
145
+ super().receiveMessage(currentTime, msg)
146
+ if self.last_mid is not None:
147
+ mid = self.last_mid
148
+
149
+ if self.last_spread is not None and self.is_adaptive:
150
+ self._adaptive_update_window_and_tick_size()
151
+
152
+ if msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME' and self.state['AWAITING_TRANSACTED_VOLUME'] is True:
153
+ self.updateOrderSize()
154
+ self.state['AWAITING_TRANSACTED_VOLUME'] = False
155
+
156
+ if not self.subscribe:
157
+ if msg.body['msg'] == 'QUERY_SPREAD' and self.state['AWAITING_SPREAD'] is True:
158
+ bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
159
+ if bid and ask:
160
+ mid = int((ask + bid) / 2)
161
+ self.last_mid = mid
162
+ if self.is_adaptive:
163
+ spread = int(ask - bid)
164
+ self._adaptive_update_spread(spread)
165
+
166
+ self.state['AWAITING_SPREAD'] = False
167
+ else:
168
+ log_print("SPREAD MISSING at time {}", currentTime)
169
+ self.state['AWAITING_SPREAD'] = False # use last mid price and spread
170
+
171
+ if self.state['AWAITING_SPREAD'] is False and self.state['AWAITING_TRANSACTED_VOLUME'] is False:
172
+ if self.last_mid is None:
173
+ self.state = self.initialiseState()
174
+ self.setWakeup(currentTime + self.getWakeFrequency())
175
+ return
176
+ self.placeOrders(mid)
177
+ self.state = self.initialiseState()
178
+ self.setWakeup(currentTime + self.getWakeFrequency())
179
+
180
+ else: # subscription mode
181
+ if msg.body['msg'] == 'MARKET_DATA' and self.state['AWAITING_MARKET_DATA'] is True:
182
+ bid = self.known_bids[self.symbol][0][0] if self.known_bids[self.symbol] else None
183
+ ask = self.known_asks[self.symbol][0][0] if self.known_asks[self.symbol] else None
184
+ if bid and ask:
185
+
186
+ mid = int((ask + bid) / 2)
187
+ self.last_mid = mid
188
+ if self.is_adaptive:
189
+ spread = int(ask - bid)
190
+ self._adaptive_update_spread(spread)
191
+
192
+ self.state['AWAITING_MARKET_DATA'] = False
193
+ else:
194
+ log_print("SPREAD MISSING at time {}", currentTime)
195
+ self.state['AWAITING_MARKET_DATA'] = False
196
+
197
+ if self.state['AWAITING_MARKET_DATA'] is False and self.state['AWAITING_TRANSACTED_VOLUME'] is False:
198
+ self.placeOrders(mid)
199
+ self.state = self.initialiseState()
200
+
201
+ def _adaptive_update_spread(self, spread):
202
+ """ Update internal spread estimate with exponentially weighted moving average
203
+ :param spread:
204
+ :return:
205
+ """
206
+ spread_ewma = self.spread_alpha * spread + (1 - self.spread_alpha) * self.last_spread
207
+ self.window_size = spread_ewma
208
+ self.last_spread = spread_ewma
209
+
210
+ def _adaptive_update_window_and_tick_size(self):
211
+ """ Update window size and tick size relative to internal spread estimate.
212
+
213
+ :return:
214
+ """
215
+ self.window_size = self.last_spread
216
+ self.tick_size = round(self.level_spacing * self.window_size)
217
+ if self.tick_size == 0:
218
+ self.tick_size = 1
219
+
220
+ def updateOrderSize(self):
221
+ """ Updates size of order to be placed. """
222
+ qty = round(self.pov * self.transacted_volume[self.symbol])
223
+ if self.skew_beta == 0: # ignore inventory
224
+ self.buy_order_size = qty if qty >= self.min_order_size else self.min_order_size
225
+ self.sell_order_size = qty if qty >= self.min_order_size else self.min_order_size
226
+ else:
227
+ holdings = self.getHoldings(self.symbol)
228
+ proportion_sell = sigmoid(holdings, self.skew_beta)
229
+ sell_size = ceil(proportion_sell * qty)
230
+ buy_size = floor((1 - proportion_sell) * qty)
231
+
232
+ self.buy_order_size = buy_size if buy_size >= self.min_order_size else self.min_order_size
233
+ self.sell_order_size = sell_size if sell_size >= self.min_order_size else self.min_order_size
234
+
235
+ def computeOrdersToPlace(self, mid):
236
+ """ Given a mid price, computes the orders that need to be removed from orderbook, and adds these orders to
237
+ bid and ask deques.
238
+
239
+ :param mid: mid-price
240
+ :type mid: int
241
+
242
+ :return:
243
+ """
244
+
245
+ if self.anchor == ANCHOR_MIDDLE_STR:
246
+ highest_bid = int(mid) - floor(0.5 * self.window_size)
247
+ lowest_ask = int(mid) + ceil(0.5 * self.window_size)
248
+ elif self.anchor == ANCHOR_BOTTOM_STR:
249
+ highest_bid = int(mid - 1)
250
+ lowest_ask = int(mid + self.window_size)
251
+ elif self.anchor == ANCHOR_TOP_STR:
252
+ highest_bid = int(mid - self.window_size)
253
+ lowest_ask = int(mid + 1)
254
+
255
+ lowest_bid = highest_bid - ((self.num_ticks - 1) * self.tick_size)
256
+ highest_ask = lowest_ask + ((self.num_ticks - 1) * self.tick_size)
257
+
258
+ bids_to_place = [price for price in range(lowest_bid, highest_bid + self.tick_size, self.tick_size)]
259
+ asks_to_place = [price for price in range(lowest_ask, highest_ask + self.tick_size, self.tick_size)]
260
+
261
+ return bids_to_place, asks_to_place
262
+
263
+ def placeOrders(self, mid):
264
+ """ Given a mid-price, compute new orders that need to be placed, then send the orders to the Exchange.
265
+
266
+ :param mid: mid-price
267
+ :type mid: int
268
+
269
+ """
270
+ bid_orders, ask_orders = self.computeOrdersToPlace(mid)
271
+
272
+ if self.backstop_quantity is not None:
273
+ bid_price = bid_orders[0]
274
+ log_print('{}: Placing BUY limit order of size {} @ price {}', self.name, self.backstop_quantity, bid_price)
275
+ self.placeLimitOrder(self.symbol, self.backstop_quantity, True, bid_price)
276
+ bid_orders = bid_orders[1:]
277
+
278
+ ask_price = ask_orders[-1]
279
+ log_print('{}: Placing SELL limit order of size {} @ price {}', self.name, self.backstop_quantity, ask_price)
280
+ self.placeLimitOrder(self.symbol, self.backstop_quantity, False, ask_price)
281
+ ask_orders = ask_orders[:-1]
282
+
283
+ for bid_price in bid_orders:
284
+ log_print('{}: Placing BUY limit order of size {} @ price {}', self.name, self.buy_order_size, bid_price)
285
+ self.placeLimitOrder(self.symbol, self.buy_order_size, True, bid_price)
286
+
287
+ for ask_price in ask_orders:
288
+ log_print('{}: Placing SELL limit order of size {} @ price {}', self.name, self.sell_order_size, ask_price)
289
+ self.placeLimitOrder(self.symbol, self.sell_order_size, False, ask_price)
290
+
291
+ def getWakeFrequency(self):
292
+ """ Get time increment corresponding to wakeup period. """
293
+ return pd.Timedelta(self.wake_up_freq)
294
+
295
+ def cancelAllOrders(self):
296
+ """ Cancels all resting limit orders placed by the market maker """
297
+ for _, order in self.orders.items():
298
+ self.cancelOrder(order)
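For the `'middle'` anchor, `computeOrdersToPlace` centres a window of width `window_size` on the mid and then steps `num_ticks` levels outward on each side in increments of `tick_size`. A worked sketch with made-up parameters (prices in cents):

```python
from math import floor, ceil

mid, window_size, tick_size, num_ticks = 10_000, 6, 2, 3

highest_bid = int(mid) - floor(0.5 * window_size)        # 9997
lowest_ask  = int(mid) + ceil(0.5 * window_size)         # 10003
lowest_bid  = highest_bid - (num_ticks - 1) * tick_size  # 9993
highest_ask = lowest_ask + (num_ticks - 1) * tick_size   # 10007

bids = list(range(lowest_bid, highest_bid + tick_size, tick_size))
asks = list(range(lowest_ask, highest_ask + tick_size, tick_size))
print(bids)   # [9993, 9995, 9997]
print(asks)   # [10003, 10005, 10007]
```

`placeOrders` then submits `buy_order_size` at every bid level and `sell_order_size` at every ask level, optionally reserving the outermost level for the backstop quantity.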
ABIDES/agent/market_makers/MarketMakerAgent.py ADDED
@@ -0,0 +1,130 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ import pandas as pd
3
+ from util.util import log_print
4
+
5
+
6
+ DEFAULT_LEVELS_QUOTE_DICT = {1: [1, 0, 0, 0, 0],
7
+ 2: [.5, .5, 0, 0, 0],
8
+ 3: [.34, .33, .33, 0, 0],
9
+ 4: [.25, .25, .25, .25, 0],
10
+ 5: [.20, .20, .20, .20, .20]}
11
+
12
+
13
+ class MarketMakerAgent(TradingAgent):
14
+ """
15
+ Simple market maker agent that attempts to provide liquidity in the orderbook by placing orders on both sides every
16
+ time it wakes up. The agent starts off by cancelling any existing orders, it then queries the current spread to
17
+ determine the trades to be placed. The order size is chosen at random between min_size and max_size which are parameters
18
+ of the agent.
19
+
20
+ If subscribe == True
21
+ The agent places orders in 1-5 price levels randomly with sizes determined by the levels_quote_dict.
22
+
23
+ Else if subscribe == False:
24
+ The agent places an order of size / 2 at both the best bid and best ask price levels
25
+
26
+ """
27
+
28
+ def __init__(self, id, name, type, symbol, starting_cash, min_size, max_size , wake_up_freq='1s',
29
+ subscribe=False, subscribe_freq=10e9, subscribe_num_levels=5, log_orders=False, random_state=None):
30
+
31
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
32
+ self.symbol = symbol # Symbol traded
33
+ self.min_size = min_size # Minimum order size
34
+ self.max_size = max_size # Maximum order size
35
+ self.size = round(self.random_state.randint(self.min_size, self.max_size) / 2) # order size per LOB side
36
+ self.wake_up_freq = wake_up_freq # Frequency of agent wake up
37
+ self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
38
+ self.subscribe_freq = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
39
+ # in subscribe mode
40
+ self.subscribe_num_levels = subscribe_num_levels # Number of orderbook levels in subscription mode
41
+ self.subscription_requested = False
42
+ self.log_orders = log_orders
43
+ self.state = "AWAITING_WAKEUP"
44
+ # Percentage of the order size to be placed at different levels is determined by levels_quote_dict
45
+ self.levels_quote_dict = DEFAULT_LEVELS_QUOTE_DICT
46
+ self.num_levels = None
47
+ self.size_split = None
48
+ self.last_spread = 10
49
+
50
+ def kernelStarting(self, startTime):
51
+ super().kernelStarting(startTime)
52
+
53
+ def wakeup(self, currentTime):
54
+ """ Agent wakeup is determined by self.wake_up_freq """
55
+ can_trade = super().wakeup(currentTime)
56
+ if self.subscribe and not self.subscription_requested:
57
+ super().requestDataSubscription(self.symbol, levels=self.subscribe_num_levels, freq=self.subscribe_freq)
58
+ self.subscription_requested = True
59
+ self.state = 'AWAITING_MARKET_DATA'
60
+ elif can_trade and not self.subscribe:
61
+ self.cancelOrders()
62
+ self.getCurrentSpread(self.symbol, depth=self.subscribe_num_levels)
63
+ self.state = 'AWAITING_SPREAD'
64
+
65
+ def receiveMessage(self, currentTime, msg):
66
+ """ Market Maker actions are determined after obtaining the bids and asks in the LOB """
67
+ super().receiveMessage(currentTime, msg)
68
+ if not self.subscribe and self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':
69
+ self.cancelOrders()
70
+ mid = self.last_trade[self.symbol]
71
+
72
+ self.num_levels = 2 * self.subscribe_num_levels # Number of price levels to place the trades in
73
+
74
+ bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
75
+
76
+ if bid and ask:
77
+ mid = int((ask + bid) / 2)
78
+ spread = int(abs(ask - bid)/2)
79
+ else:
80
+ log_print(f"SPREAD MISSING at time {currentTime}")
81
+ spread = self.last_spread
82
+
83
+ for i in range(self.num_levels):
84
+ self.size = round(self.random_state.randint(self.min_size, self.max_size) / 2)
85
+ #bids
86
+ self.placeLimitOrder(self.symbol, self.size, True, mid - spread - i)
87
+ #asks
88
+ self.placeLimitOrder(self.symbol, self.size, False, mid + spread + i)
89
+
90
+ self.setWakeup(currentTime + self.getWakeFrequency())
91
+ self.state = 'AWAITING_WAKEUP'
92
+
93
+ elif self.subscribe and self.state == 'AWAITING_MARKET_DATA' and msg.body['msg'] == 'MARKET_DATA':
94
+ self.cancelOrders()
95
+ num_levels_place = len(self.levels_quote_dict.keys())
96
+ self.num_levels = self.random_state.randint(1, num_levels_place) # Number of price levels to place the trades in
97
+ self.size_split = self.levels_quote_dict.get(self.num_levels) # % of the order size to be placed at different levels
98
+ self.placeOrders(self.known_bids[self.symbol], self.known_asks[self.symbol])
99
+ self.state = 'AWAITING_MARKET_DATA'
100
+
101
+ def placeOrders(self, bids, asks):
102
+ if bids and asks:
103
+ buy_quotes, sell_quotes = {}, {}
104
+ self.size = round(self.random_state.randint(self.min_size, self.max_size) / 2)
105
+ for i in range(self.num_levels):
106
+ vol = round(self.size_split[i] * self.size)
107
+ try:
108
+ buy_quotes[bids[i][0]] = vol
109
+ except IndexError:
110
+ # Orderbook price level i empty so create a new price level with bid price 1 CENT below
111
+ buy_quotes[bids[-1][0] - 1] = vol
112
+ try:
113
+ sell_quotes[asks[i][0]] = vol
114
+ except IndexError:
115
+ # Orderbook price level i empty so create a new price level with bid price 1 CENT above
116
+ sell_quotes[asks[-1][0] + 1] = vol
117
+
118
+ for price, vol in buy_quotes.items():
119
+ self.placeLimitOrder(self.symbol, vol, True, price)
120
+ for price, vol in sell_quotes.items():
121
+ self.placeLimitOrder(self.symbol, vol, False, price)
122
+
123
+
124
+ def cancelOrders(self):
125
+ """ cancels all resting limit orders placed by the market maker """
126
+ for _, order in self.orders.items():
127
+ self.cancelOrder(order)
128
+
129
+ def getWakeFrequency(self):
130
+ return pd.Timedelta(self.wake_up_freq)
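In subscription mode the agent draws a number of levels, looks up the corresponding split in `DEFAULT_LEVELS_QUOTE_DICT`, and spreads one half-size order across the top of the book. A standalone sketch of that split with a made-up book:

```python
DEFAULT_LEVELS_QUOTE_DICT = {1: [1, 0, 0, 0, 0],
                             2: [.5, .5, 0, 0, 0],
                             3: [.34, .33, .33, 0, 0],
                             4: [.25, .25, .25, .25, 0],
                             5: [.20, .20, .20, .20, .20]}

num_levels = 3
size = 200                                   # half of the randomly drawn order size
split = DEFAULT_LEVELS_QUOTE_DICT[num_levels]

bids = [(9998, 500), (9997, 300), (9996, 900)]   # (price, depth) as in known_bids
buy_quotes = {bids[i][0]: round(split[i] * size) for i in range(num_levels)}
print(buy_quotes)   # {9998: 68, 9997: 66, 9996: 66}
```

The ask side is built symmetrically from `known_asks`, falling back to one cent beyond the last quoted level when the book is shallower than `num_levels`.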
ABIDES/agent/market_makers/POVMarketMakerAgent.py ADDED
@@ -0,0 +1,195 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ import pandas as pd
3
+ from util.util import log_print
4
+ from collections import namedtuple, deque
5
+ from util.util import ignored
6
+
7
+
8
+ ANCHOR_TOP_STR = 'top'
9
+ ANCHOR_BOTTOM_STR = 'bottom'
10
+
11
+
12
+ class POVMarketMakerAgent(TradingAgent):
13
+ """ This class implements a modification of the Chakraborty-Kearns `ladder` market-making strategy, wherein the
14
+ size of order placed at each level is set as a fraction of measured transacted volume in the previous time
15
+ period.
16
+ """
17
+
18
+ def __init__(self, id, name, type, symbol, starting_cash, pov=0.05, min_order_size=20, window_size=5, anchor=ANCHOR_BOTTOM_STR,
19
+ num_ticks=20, wake_up_freq='1s', subscribe=False, subscribe_freq=10e9, subscribe_num_levels=1,
20
+ log_orders=False, random_state=None):
21
+
22
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
23
+ self.symbol = symbol # Symbol traded
24
+ self.pov = pov # fraction of transacted volume placed at each price level
25
+ self.min_order_size = min_order_size # minimum size order to place at each level, if pov <= min
26
+ self.window_size = window_size # Size in ticks (cents) of how wide the window around mid price is
27
+ self.anchor = self.validateAnchor(anchor) # anchor either top of window or bottom of window to mid-price
28
+ self.num_ticks = num_ticks # number of ticks on each side of window in which to place liquidity
29
+
30
+ self.wake_up_freq = wake_up_freq # Frequency of agent wake up
31
+ self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
32
+ self.subscribe_freq = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
33
+ # in subscribe mode
34
+ self.subscribe_num_levels = subscribe_num_levels # Number of orderbook levels in subscription mode
35
+ self.log_orders = log_orders
36
+
37
+ ## Internal variables
38
+
39
+ self.subscription_requested = False
40
+ self.state = self.initialiseState()
41
+ self.order_size = self.min_order_size
42
+
43
+ self.last_mid = None # last observed mid price
44
+ self.LIQUIDITY_DROPOUT_WARNING = f"Liquidity dropout for agent {self.name}."
45
+
46
+ def initialiseState(self):
47
+ """ Returns variables that keep track of whether spread and transacted volume have been observed. """
48
+ if not self.subscribe:
49
+ return {
50
+ "AWAITING_SPREAD": True,
51
+ "AWAITING_TRANSACTED_VOLUME": True
52
+ }
53
+ else:
54
+ return {
55
+ "AWAITING_MARKET_DATA": True,
56
+ "AWAITING_TRANSACTED_VOLUME": True
57
+ }
58
+
59
+ def validateAnchor(self, anchor):
60
+ """ Checks that input parameter anchor takes allowed value, raises ValueError if not.
61
+
62
+ :param anchor: str
63
+ :return:
64
+ """
65
+ if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR]:
66
+ raise ValueError(f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}` or `{ANCHOR_TOP_STR}`")
67
+ else:
68
+ return anchor
69
+
70
+ def kernelStarting(self, startTime):
71
+ super().kernelStarting(startTime)
72
+
73
+ def wakeup(self, currentTime):
74
+ """ Agent wakeup is determined by self.wake_up_freq """
75
+ can_trade = super().wakeup(currentTime)
76
+ if self.subscribe and not self.subscription_requested:
77
+ super().requestDataSubscription(self.symbol, levels=self.subscribe_num_levels,
78
+ freq=pd.Timedelta(self.subscribe_freq, unit='ns'))
79
+ self.subscription_requested = True
80
+ self.get_transacted_volume(self.symbol, lookback_period=self.subscribe_freq)
81
+ self.state = self.initialiseState()
82
+
83
+ elif can_trade and not self.subscribe:
84
+ self.getCurrentSpread(self.symbol, depth=self.subscribe_num_levels)
85
+ self.get_transacted_volume(self.symbol, lookback_period=self.wake_up_freq)
86
+ self.state = self.initialiseState()
87
+
88
+ def receiveMessage(self, currentTime, msg):
89
+ """ Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.
90
+
91
+ :param currentTime: simulation current time
92
+ :param msg: message received by self from ExchangeAgent
93
+
94
+ :type currentTime: pd.Timestamp
95
+ :type msg: Message
96
+
97
+ :return:
98
+ """
99
+
100
+ super().receiveMessage(currentTime, msg)
101
+
102
+ if self.last_mid is not None:
103
+ mid = self.last_mid
104
+
105
+ if msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME' and self.state['AWAITING_TRANSACTED_VOLUME'] is True:
106
+ self.updateOrderSize()
107
+ self.state['AWAITING_TRANSACTED_VOLUME'] = False
108
+
109
+ if not self.subscribe:
110
+ if msg.body['msg'] == 'QUERY_SPREAD' and self.state['AWAITING_SPREAD'] is True:
111
+ bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
112
+ if bid and ask:
113
+ mid = int((ask + bid) / 2)
114
+ self.last_mid = mid
115
+ self.state['AWAITING_SPREAD'] = False
116
+ else:
117
+ log_print(f"SPREAD MISSING at time {currentTime}")
118
+
119
+ if self.state['AWAITING_SPREAD'] is False and self.state['AWAITING_TRANSACTED_VOLUME'] is False:
120
+ self.cancelAllOrders()
121
+ self.placeOrders(mid)
122
+ self.state = self.initialiseState()
123
+ self.setWakeup(currentTime + self.getWakeFrequency())
124
+
125
+ else: # subscription mode
126
+ if msg.body['msg'] == 'MARKET_DATA' and self.state['AWAITING_MARKET_DATA'] is True:
127
+ bid = self.known_bids[self.symbol][0][0] if self.known_bids[self.symbol] else None
128
+ ask = self.known_asks[self.symbol][0][0] if self.known_asks[self.symbol] else None
129
+ if bid and ask:
130
+ mid = int((ask + bid) / 2)
131
+ self.last_mid = mid
132
+ self.state['AWAITING_MARKET_DATA'] = False
133
+ else:
134
+ log_print(f"SPREAD MISSING at time {currentTime}")
135
+ self.state['AWAITING_MARKET_DATA'] = False
136
+
137
+ if self.state['AWAITING_MARKET_DATA'] is False and self.state['AWAITING_TRANSACTED_VOLUME'] is False:
138
+ self.placeOrders(mid)
139
+ self.state = self.initialiseState()
140
+
141
+ def updateOrderSize(self):
142
+ """ Updates size of order to be placed. """
143
+ qty = round(self.pov * self.transacted_volume[self.symbol])
144
+ self.order_size = qty if qty >= self.min_order_size else self.min_order_size
145
+
146
+ def computeOrdersToPlace(self, mid):
147
+ """ Given a mid price, computes the bid and ask price levels at which the market maker should place
148
+ new orders.
149
+
150
+ :param mid: mid-price
151
+ :type mid: int
152
+
153
+ :return:
154
+ """
155
+
156
+ if self.anchor == ANCHOR_BOTTOM_STR:
157
+ highest_bid = int(mid - 1)
158
+ lowest_ask = int(mid + self.window_size)
159
+ elif self.anchor == ANCHOR_TOP_STR:
160
+ highest_bid = int(mid - self.window_size)
161
+ lowest_ask = int(mid + 1)
162
+
163
+ lowest_bid = highest_bid - self.num_ticks
164
+ highest_ask = lowest_ask + self.num_ticks
165
+
166
+ bids_to_place = [price for price in range(lowest_bid, highest_bid + 1)]
167
+ asks_to_place = [price for price in range(lowest_ask, highest_ask + 1)]
168
+
169
+ return bids_to_place, asks_to_place
170
+
171
+ def placeOrders(self, mid):
172
+ """ Given a mid-price, compute new orders that need to be placed, then send the orders to the Exchange.
173
+
174
+ :param mid: mid-price
175
+ :type mid: int
176
+
177
+ """
178
+
179
+ bid_orders, ask_orders = self.computeOrdersToPlace(mid)
180
+ for bid_price in bid_orders:
181
+ log_print(f'{self.name}: Placing BUY limit order of size {self.order_size} @ price {bid_price}')
182
+ self.placeLimitOrder(self.symbol, self.order_size, True, bid_price)
183
+
184
+ for ask_price in ask_orders:
185
+ log_print(f'{self.name}: Placing SELL limit order of size {self.order_size} @ price {ask_price}')
186
+ self.placeLimitOrder(self.symbol, self.order_size, False, ask_price)
187
+
188
+ def getWakeFrequency(self):
189
+ """ Get time increment corresponding to wakeup period. """
190
+ return pd.Timedelta(self.wake_up_freq)
191
+
192
+ def cancelAllOrders(self):
193
+ """ Cancels all resting limit orders placed by the market maker """
194
+ for _, order in self.orders.items():
195
+ self.cancelOrder(order)
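The participation-of-volume rule in updateOrderSize reduces to taking a fixed fraction of recently transacted volume and flooring it at min_order_size. A small sketch using the default parameter values from the constructor above:

def pov_order_size(transacted_volume, pov=0.05, min_order_size=20):
    # Size per price level: a fraction of recent transacted volume, floored at min_order_size.
    qty = round(pov * transacted_volume)
    return qty if qty >= min_order_size else min_order_size

print(pov_order_size(10_000))   # 500 shares per level
print(pov_order_size(100))      # 5 would be too small, so the 20-share floor applies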
ABIDES/agent/market_makers/SpreadBasedMarketMakerAgent.py ADDED
@@ -0,0 +1,282 @@
1
+ from agent.TradingAgent import TradingAgent
2
+ import pandas as pd
3
+ from util.util import log_print
4
+ from collections import namedtuple, deque
5
+ from util.util import ignored
6
+
7
+
8
+ ANCHOR_TOP_STR = 'top'
9
+ ANCHOR_BOTTOM_STR = 'bottom'
10
+
11
+
12
+ class SpreadBasedMarketMakerAgent(TradingAgent):
13
+ """ This class implements the Chakraborty-Kearns `ladder` market-making strategy. """
14
+
15
+ _Order = namedtuple('_Order', ['price', 'id']) # Internal data structure used to describe a placed order
16
+
17
+ def __init__(self, id, name, type, symbol, starting_cash, order_size=1, window_size=5, anchor=ANCHOR_BOTTOM_STR,
18
+ num_ticks=20, wake_up_freq='1s', subscribe=True, subscribe_freq=10e9, subscribe_num_levels=1,
19
+ log_orders=False, random_state=None):
20
+
21
+ super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
22
+ self.symbol = symbol # Symbol traded
23
+ self.order_size = order_size # order size per price level
24
+ self.window_size = window_size # Size in ticks (cents) of how wide the window around mid price is
25
+ self.anchor = self.validateAnchor(anchor) # anchor either top of window or bottom of window to mid-price
26
+ self.num_ticks = num_ticks # number of ticks on each side of window in which to place liquidity
27
+
28
+ self.wake_up_freq = wake_up_freq # Frequency of agent wake up
29
+ self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
30
+ self.subscribe_freq = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
31
+ # in subscribe mode
32
+ self.subscribe_num_levels = subscribe_num_levels # Number of orderbook levels in subscription mode
33
+ self.log_orders = log_orders
34
+
35
+ ## Internal variables
36
+
37
+ self.subscription_requested = False
38
+ self.state = "AWAITING_WAKEUP"
39
+
40
+ self.current_bids = None # double-ended queue holding bid orders in the book
41
+ self.current_asks = None # double-ended queue holding ask orders in the book
42
+ self.last_mid = None # last observed mid price
43
+ self.order_id_counter = 0 # counter for bookkeeping orders made by self
44
+ self.LIQUIDITY_DROPOUT_WARNING = f"Liquidity dropout for agent {self.name}."
45
+
46
+ def validateAnchor(self, anchor):
47
+ """ Checks that input parameter anchor takes allowed value, raises ValueError if not.
48
+
49
+ :param anchor: str
50
+ :return:
51
+ """
52
+ if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR]:
53
+ raise ValueError(f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}` or `{ANCHOR_TOP_STR}`")
54
+ else:
55
+ return anchor
56
+
57
+ def kernelStarting(self, startTime):
58
+ super().kernelStarting(startTime)
59
+
60
+ def wakeup(self, currentTime):
61
+ """ Agent wakeup is determined by self.wake_up_freq """
62
+ can_trade = super().wakeup(currentTime)
63
+ if self.subscribe and not self.subscription_requested:
64
+ super().requestDataSubscription(self.symbol, levels=self.subscribe_num_levels, freq=self.subscribe_freq)
65
+ self.subscription_requested = True
66
+ self.state = 'AWAITING_MARKET_DATA'
67
+ elif can_trade and not self.subscribe:
68
+ self.getCurrentSpread(self.symbol, depth=self.subscribe_num_levels)
69
+ self.state = 'AWAITING_SPREAD'
70
+
71
+ def receiveMessage(self, currentTime, msg):
72
+ """ Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.
73
+
74
+ :param currentTime: simulation current time
75
+ :param msg: message received by self from ExchangeAgent
76
+
77
+ :type currentTime: pd.Timestamp
78
+ :type msg: Message
79
+
80
+ :return:
81
+ """
82
+
83
+ super().receiveMessage(currentTime, msg)
84
+
85
+ if self.last_mid is not None:
86
+ mid = self.last_mid
87
+
88
+ if not self.subscribe and self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':
89
+
90
+ bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
91
+ if bid and ask:
92
+ mid = int((ask + bid) / 2)
93
+ else:
94
+ log_print(f"SPREAD MISSING at time {currentTime}")
95
+
96
+ orders_to_cancel = self.computeOrdersToCancel(mid)
97
+ self.cancelOrders(orders_to_cancel)
98
+ self.placeOrders(mid)
99
+ self.setWakeup(currentTime + self.getWakeFrequency())
100
+ self.state = 'AWAITING_WAKEUP'
101
+ self.last_mid = mid
102
+
103
+ elif self.subscribe and self.state == 'AWAITING_MARKET_DATA' and msg.body['msg'] == 'MARKET_DATA':
104
+
105
+ bid = self.known_bids[self.symbol][0][0] if self.known_bids[self.symbol] else None
106
+ ask = self.known_asks[self.symbol][0][0] if self.known_asks[self.symbol] else None
107
+ if bid and ask:
108
+ mid = int((ask + bid) / 2)
109
+ else:
110
+ log_print(f"SPREAD MISSING at time {currentTime}")
111
+ return
112
+
113
+ orders_to_cancel = self.computeOrdersToCancel(mid)
114
+ self.cancelOrders(orders_to_cancel)
115
+ self.placeOrders(mid)
116
+ self.state = 'AWAITING_MARKET_DATA'
117
+ self.last_mid = mid
118
+
119
+ def computeOrdersToCancel(self, mid):
120
+ """ Given a mid price, computes the orders that need to be removed from orderbook, and pops these orders from
121
+ bid and ask deques.
122
+
123
+ :param mid: mid-price
124
+ :type mid: int
125
+
126
+ :return:
127
+ """
128
+
129
+ orders_to_cancel = []
130
+
131
+ if (self.current_asks is None) or (self.current_bids is None):
132
+ return orders_to_cancel
133
+
134
+ num_ticks_to_increase = int(mid - self.last_mid)
135
+
136
+ if num_ticks_to_increase > 0:
137
+ for _ in range(num_ticks_to_increase):
138
+ with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
139
+ orders_to_cancel.append(self.current_bids.popleft())
140
+ with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
141
+ orders_to_cancel.append(self.current_asks.popleft())
142
+ elif num_ticks_to_increase < 0:
143
+ for _ in range(- num_ticks_to_increase):
144
+ with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
145
+ orders_to_cancel.append(self.current_bids.pop())
146
+ with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
147
+ orders_to_cancel.append(self.current_asks.pop())
148
+
149
+ return orders_to_cancel
150
+
151
+ def cancelOrders(self, orders_to_cancel):
152
+ """ Given a list of _Order objects, remove the corresponding orders from ExchangeAgent's orderbook
153
+
154
+ :param orders_to_cancel: orders to remove from orderbook
155
+ :type orders_to_cancel: list(_Order)
156
+ :return:
157
+ """
158
+ for order_tuple in orders_to_cancel:
159
+ order_id = order_tuple.id
160
+ try:
161
+ order = self.orders[order_id]
162
+ self.cancelOrder(order)
163
+ except KeyError:
164
+ continue
165
+
166
+ def computeOrdersToPlace(self, mid):
167
+ """ Given a mid price, computes the orders that need to be added to the orderbook, and appends these orders to
168
+ the bid and ask deques.
169
+
170
+ :param mid: mid-price
171
+ :type mid: int
172
+
173
+ :return:
174
+ """
175
+
176
+ bids_to_place = []
177
+ asks_to_place = []
178
+
179
+ if (not self.current_asks) or (not self.current_bids):
180
+ self.cancelAllOrders()
181
+ self.initialiseBidsAsksDeques(mid)
182
+ bids_to_place.extend([order for order in self.current_bids])
183
+ asks_to_place.extend([order for order in self.current_asks])
184
+ return bids_to_place, asks_to_place
185
+
186
+ if self.last_mid is not None:
187
+ num_ticks_to_increase = int(mid - self.last_mid)
188
+ else:
189
+ num_ticks_to_increase = 0
190
+
191
+ if num_ticks_to_increase > 0:
192
+
193
+ base_bid_price = self.current_bids[-1].price
194
+ base_ask_price = self.current_asks[-1].price
195
+
196
+ for price_increment in range(1, num_ticks_to_increase + 1):
197
+ bid_price = base_bid_price + price_increment
198
+ new_bid_order = self.generateNewOrderId(bid_price)
199
+ bids_to_place.append(new_bid_order)
200
+ self.current_bids.append(new_bid_order)
201
+
202
+ ask_price = base_ask_price + price_increment
203
+ new_ask_order = self.generateNewOrderId(ask_price)
204
+ asks_to_place.append(new_ask_order)
205
+ self.current_asks.append(new_ask_order)
206
+
207
+ elif num_ticks_to_increase < 0:
208
+
209
+ base_bid_price = self.current_bids[0].price
210
+ base_ask_price = self.current_asks[0].price
211
+
212
+ for price_increment in range(1, 1 - num_ticks_to_increase):
213
+ bid_price = base_bid_price - price_increment
214
+ new_bid_order = self.generateNewOrderId(bid_price)
215
+ bids_to_place.append(new_bid_order)
216
+ self.current_bids.appendleft(new_bid_order)
217
+
218
+ ask_price = base_ask_price - price_increment
219
+ new_ask_order = self.generateNewOrderId(ask_price)
220
+ asks_to_place.append(new_ask_order)
221
+ self.current_asks.appendleft(new_ask_order)
222
+
223
+ return bids_to_place, asks_to_place
224
+
225
+ def placeOrders(self, mid):
226
+ """ Given a mid-price, compute new orders that need to be placed, then send the orders to the Exchange.
227
+
228
+ :param mid: mid-price
229
+ :type mid: int
230
+
231
+ """
232
+
233
+ bid_orders, ask_orders = self.computeOrdersToPlace(mid)
234
+ for bid_order in bid_orders:
235
+ log_print(f'{self.name}: Placing BUY limit order of size {self.order_size} @ price {bid_order.price}')
236
+ self.placeLimitOrder(self.symbol, self.order_size, True, bid_order.price, order_id=bid_order.id)
237
+
238
+ for ask_order in ask_orders:
239
+ log_print(f'{self.name}: Placing SELL limit order of size {self.order_size} @ price {ask_order.price}')
240
+ self.placeLimitOrder(self.symbol, self.order_size, False, ask_order.price, order_id=ask_order.id)
241
+
242
+ def initialiseBidsAsksDeques(self, mid):
243
+ """ Initialise the current_bids and current_asks object attributes, which internally keep track of the limit
244
+ orders sent to the Exchange.
245
+
246
+ :param mid: mid-price
247
+ :type mid: int
248
+
249
+ """
250
+
251
+ if self.anchor == ANCHOR_BOTTOM_STR:
252
+ highest_bid = int(mid - 1)
253
+ lowest_ask = int(mid + self.window_size)
254
+ elif self.anchor == ANCHOR_TOP_STR:
255
+ highest_bid = int(mid - self.window_size)
256
+ lowest_ask = int(mid + 1)
257
+
258
+ lowest_bid = highest_bid - self.num_ticks
259
+ highest_ask = lowest_ask + self.num_ticks
260
+
261
+ self.current_bids = deque([self.generateNewOrderId(price) for price in range(lowest_bid, highest_bid + 1)])
262
+ self.current_asks = deque([self.generateNewOrderId(price) for price in range(lowest_ask, highest_ask + 1)])
263
+
264
+ def generateNewOrderId(self, price):
265
+ """ Generate a _Order object for a particular price level
266
+
267
+ :param price:
268
+ :type price: int
269
+
270
+ """
271
+ self.order_id_counter += 1
272
+ order_id = f"{self.name}_{self.id}_{self.order_id_counter}"
273
+ return self._Order(price, order_id)
274
+
275
+ def getWakeFrequency(self):
276
+ """ Get time increment corresponding to wakeup period. """
277
+ return pd.Timedelta(self.wake_up_freq)
278
+
279
+ def cancelAllOrders(self):
280
+ """ Cancels all resting limit orders placed by the market maker """
281
+ for _, order in self.orders.items():
282
+ self.cancelOrder(order)
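The ladder maintained by this agent is two deques of price levels that are shifted whenever the mid price moves. A toy illustration of a two-tick upward move, with plain integer prices standing in for the _Order namedtuples and the cancel/place passes folded into one loop for brevity:

from collections import deque

bids = deque(range(95, 100))    # 95..99, lowest bid on the left
asks = deque(range(105, 110))   # 105..109, lowest ask on the left
ticks_up = 2                    # mid price moved up by two ticks

for _ in range(ticks_up):
    bids.popleft()              # cancel the lowest bid
    asks.popleft()              # cancel the lowest ask
    bids.append(bids[-1] + 1)   # quote one tick higher at the top of the bid ladder
    asks.append(asks[-1] + 1)   # quote one tick higher at the top of the ask ladder

print(list(bids), list(asks))   # [97, 98, 99, 100, 101] [107, 108, 109, 110, 111]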
ABIDES/cli/book_plot.py ADDED
@@ -0,0 +1,259 @@
1
+ import copy
2
+ from mpl_toolkits.mplot3d import Axes3D
3
+ import matplotlib
4
+ matplotlib.use('TkAgg')
5
+ import matplotlib.pyplot as plt
6
+ import seaborn as sns
7
+ import numpy as np
8
+ import pandas as pd
9
+ import sys
10
+
11
+ from matplotlib.colors import LogNorm
12
+
13
+ from joblib import Memory
14
+
15
+ # Auto-detect terminal width.
16
+ pd.options.display.width = None
17
+ pd.options.display.max_rows = 1000
18
+ pd.options.display.max_colwidth = 200
19
+
20
+ # Initialize a persistent memcache.
21
+ mem = Memory(cachedir='./.cached_plot_book', verbose=0)
22
+ mem_hist = Memory(cachedir='./.cached_plot_book_historical', verbose=0)
23
+ mem_hist_plot = Memory(cachedir='./.cached_plot_book_historical_heatmap', verbose=0)
24
+
25
+
26
+ # Turn these into command line parameters.
27
+ SHOW_BEST = False
28
+ TIME_STEPS = False
29
+ PLOT_HISTORICAL = False
30
+
31
+
32
+ # Used to read and cache simulated quotes (best bid/ask).
33
+ # Doesn't actually pay attention to symbols yet.
34
+ #@mem.cache
35
+ def read_book_quotes (file):
36
+ print ("Simulated quotes were not cached. This will take a minute.")
37
+ df = pd.read_pickle(file, compression='bz2')
38
+
39
+ if len(df) <= 0:
40
+ print ("There appear to be no simulated quotes.")
41
+ sys.exit()
42
+
43
+ print ("Cached simulated quotes.")
44
+ return df
45
+
46
+
47
+ # Used to read historical national best bid/ask spread.
48
+ @mem_hist.cache
49
+ def read_historical_quotes (file, symbol):
50
+ print ("Historical quotes were not cached. This will take a minute.")
51
+ df = pd.read_pickle(file, compression='bz2')
52
+
53
+ if len(df) <= 0:
54
+ print ("There appear to be no historical quotes.")
55
+ sys.exit()
56
+
57
+ df = df.loc[symbol]
58
+
59
+ return df
60
+
61
+
62
+ # Used to cache the transformed historical dataframe for a symbol.
63
+ @mem_hist_plot.cache
64
+ def prepare_histogram (df_hist):
65
+ print ("Historical dataframe transformation was not cached. This will take a minute.")
66
+
67
+ min_quote = df_hist['BEST_BID'].min()
68
+ max_quote = df_hist['BEST_ASK'].max()
69
+
70
+ quote_range = pd.Series(np.arange(min_quote, max_quote + 0.01, 0.01)).round(2).map(str)
71
+ quote_range = quote_range.str.pad(6, side='right', fillchar='0')
72
+
73
+ df = pd.DataFrame(index=df_hist.index, columns=quote_range)
74
+ df[:] = 0
75
+
76
+ i = 0
77
+
78
+ for idx in df.index:
79
+ if i % 1000 == 0: print ("Caching {}".format(idx))
80
+
81
+ col = '{:0.2f}'.format(round(df_hist.loc[idx].BEST_BID, 2))
82
+ val = -df_hist.loc[idx].BEST_BIDSIZ
83
+ df.loc[idx,col] = val
84
+
85
+ col = '{:0.2f}'.format(round(df_hist.loc[idx].BEST_ASK, 2))
86
+ val = df_hist.loc[idx].BEST_ASKSIZ
87
+ df.loc[idx,col] = val
88
+
89
+ i += 1
90
+
91
+ return df
92
+
93
+
94
+ # Main program starts here.
95
+
96
+ if len(sys.argv) < 2:
97
+ print ("Usage: python book_plot.py <Simulator Order Book DataFrame file>")
98
+ sys.exit()
99
+
100
+ book_file = sys.argv[1]
101
+
102
+ print ("Visualizing order book from {}".format(book_file))
103
+
104
+ sns.set()
105
+
106
+ df_book = read_book_quotes(book_file)
107
+ #df_hist = read_historical_quotes('./data/nbbo/nbbo_2018/nbbom_20180518.bgz', 'IBM')
108
+
109
+ fig = plt.figure(dpi=300,figsize=(12,9))
110
+
111
+ # Use this to make all volume positive (ASK volume is negative in the dataframe).
112
+ #df_book.Volume = df_book.Volume.abs()
113
+
114
+ # Use this to swap the sign of BID vs ASK volume (to better fit a colormap, perhaps).
115
+ #df_book.Volume = df_book.Volume * -1
116
+
117
+ # Use this to clip volume to an upper limit.
118
+ #df_book.Volume = df_book.Volume.clip(lower=-400,upper=400)
119
+
120
+ # Use this to turn zero volume into np.nan (useful for some plot types).
121
+ #df_book.Volume[df_book.Volume == 0] = np.nan
122
+
123
+ # This section colors the best bid, best ask, and bid/ask midpoint
124
+ # differently from the rest of the heatmap below, by substituting
125
+ # special values at those indices. It is important to do this while
126
+ # the price index is still numeric. We use a value outside the
127
+ # range of actual order book volumes (selected dynamically).
128
+ min_volume = df_book.Volume.min()
129
+ max_volume = df_book.Volume.max()
130
+
131
+ best_bid_value = min_volume - 1000
132
+ best_ask_value = min_volume - 1001
133
+ midpoint_value = min_volume - 1002
134
+
135
+ # This converts the DateTimeIndex to integer nanoseconds since market open. We use
136
+ # these as our time steps for discrete time simulations (e.g. SRG config).
137
+ if TIME_STEPS:
138
+ df_book = df_book.unstack(1)
139
+ t = df_book.index.get_level_values(0) - df_book.index.get_level_values(0)[0]
140
+ df_book.index = (t / np.timedelta64(1, 'ns')).astype(np.int64)
141
+ df_book = df_book.stack()
142
+
143
+
144
+ # Use this to restrict plotting to a certain time of day. Depending on quote frequency,
145
+ # plotting could be very slow without this.
146
+ #df_book = df_book.unstack(1)
147
+ #df_book = df_book.between_time('11:50:00', '12:10:00')
148
+ #df_book = df_book.stack()
149
+
150
+
151
+
152
+ if SHOW_BEST:
153
+
154
+ df_book = df_book.unstack(1)
155
+ df_book.columns = df_book.columns.droplevel(0)
156
+
157
+ # Now row (single) index is time. Column (single) index is quote price.
158
+
159
+ # In temporary data frame, find best bid per (time) row.
160
+ # Copy bids only.
161
+ best_bid = df_book[df_book < 0].copy()
162
+
163
+ # Replace every non-zero bid volume with the column header (quote price) instead.
164
+ for col in best_bid.columns:
165
+ c = best_bid[col]
166
+ c[c < 0] = col
167
+
168
+ # Copy asks only.
169
+ best_ask = df_book[df_book > 0].copy()
170
+
171
+ # Replace every non-zero ask volume with the column header (quote price) instead.
172
+ for col in best_ask.columns:
173
+ c = best_ask[col]
174
+ c[c > 0] = col
175
+
176
+ # In a new column in each temporary data frame, compute the best bid or ask.
177
+ best_bid['best'] = best_bid.idxmax(axis=1)
178
+ best_ask['best'] = best_ask.idxmin(axis=1)
179
+
180
+ # Iterate over the index (all three DF have the same index) and set the special
181
+ # best bid/ask value in the correct column(s) per row. Also compute and include
182
+ # the midpoint where possible.
183
+ for idx in df_book.index:
184
+ bb = best_bid.loc[idx,'best']
185
+ #if bb: df_book.loc[idx,bb] = best_bid_value
186
+
187
+ ba = best_ask.loc[idx,'best']
188
+ #if ba: df_book.loc[idx,ba] = best_ask_value
189
+
190
+ if ba and bb: df_book.loc[idx,round((ba+bb)/2)] = midpoint_value
191
+
192
+
193
+ # Put the data frame indices back the way they were and ensure it is a DataFrame,
194
+ # not a Series.
195
+ df_book = df_book.stack()
196
+ df_book = pd.DataFrame(data=df_book)
197
+ df_book.columns = ['Volume']
198
+
199
+
200
+ # Change the MultiIndex to time and dollars.
201
+ df_book['Time'] = df_book.index.get_level_values(0)
202
+ df_book['Price'] = df_book.index.get_level_values(1)
203
+
204
+ # Use this to restrict plotting to a certain range of prices.
205
+ #df_book = df_book.loc[(df_book.Price > 98500) & (df_book.Price < 101500)]
206
+
207
+ # Use this to pad price strings for appearance.
208
+ #df_book.Price = df_book.Price.map(str)
209
+ #df_book.Price = df_book.Price.str.pad(6, side='right', fillchar='0')
210
+
211
+ df_book.set_index(['Time', 'Price'], inplace=True)
212
+
213
+ # This section makes a 2-D histogram (time vs price, color == volume)
214
+ unstacked = df_book.unstack(1)
215
+ if not TIME_STEPS: unstacked.index = unstacked.index.time
216
+ unstacked.columns = unstacked.columns.droplevel(0)
217
+
218
+ with sns.axes_style("white"):
219
+ ax = sns.heatmap(unstacked, cmap='seismic', mask=unstacked < min_volume, vmin=min_volume, cbar_kws={'label': 'Shares Available'}, center=0, antialiased = False)
220
+
221
+ ax.set(xlabel='Quoted Price', ylabel='Quote Time')
222
+
223
+ # Plot layers of best bid, best ask, and midpoint in special colors.
224
+ #best_bids = unstacked[unstacked == best_bid_value].copy().notnull()
225
+ midpoints = unstacked[unstacked == midpoint_value].copy().notnull()
226
+ #best_asks = unstacked[unstacked == best_ask_value].copy().notnull()
227
+
228
+ if SHOW_BEST:
229
+ #sns.heatmap(best_bids, cmap=['xkcd:hot purple'], mask=~best_bids, cbar=False, ax=ax)
230
+ #sns.heatmap(midpoints, cmap=['xkcd:hot green'], mask=~midpoints, cbar=False, ax=ax)
231
+ sns.heatmap(midpoints, cmap=['black'], mask=~midpoints, cbar=False, ax=ax)
232
+ #sns.heatmap(best_asks, cmap=['xkcd:hot pink'], mask=~best_asks, cbar=False, ax=ax)
233
+
234
+ plt.tight_layout()
235
+
236
+ # This section plots the historical order book (no depth available).
237
+ if PLOT_HISTORICAL:
238
+ fig = plt.figure(dpi=300,figsize=(12,9))
239
+
240
+ df_hist = df_hist.between_time('9:30', '16:00')
241
+ #df_hist = df_hist.between_time('10:00', '10:05')
242
+ df_hist = df_hist.resample('1S').last().ffill()
243
+
244
+ df = prepare_histogram(df_hist)
245
+ df.index = df.index.time
246
+
247
+ # There's no order book depth anyway, so make all bids the same volume
248
+ # and all asks the same volume, so they're easy to see.
249
+ df[df > 0] = 1
250
+ df[df < 0] = -1
251
+
252
+ ax = sns.heatmap(df, cmap=sns.color_palette("coolwarm", 7), cbar_kws={'label': 'Shares Available'}, center=0)
253
+ ax.set(xlabel='Quoted Price', ylabel='Quote Time')
254
+
255
+ plt.tight_layout()
256
+
257
+ # Show all the plots.
258
+ #plt.show()
259
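The heatmap section above ultimately pivots a (Time, Price) -> Volume series into a 2-D matrix with time on one axis and price on the other. A self-contained sketch of that pivot on a tiny synthetic book, with all values illustrative:

import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(['2020-01-01 09:30:00', '2020-01-01 09:30:01']), [99, 100, 101]],
    names=['Time', 'Price'])
df_book = pd.DataFrame({'Volume': [-50, 0, 40, -60, 0, 30]}, index=idx)

unstacked = df_book['Volume'].unstack('Price')   # rows = quote time, columns = price level
print(unstacked)   # negative entries are bid depth, positive entries ask depth, as in the simulator dump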
+
ABIDES/cli/dump.py ADDED
@@ -0,0 +1,23 @@
1
+ import pandas as pd
2
+ import sys
3
+
4
+ # Auto-detect terminal width.
5
+ pd.options.display.width = None
6
+ pd.options.display.max_rows = 500000
7
+ pd.options.display.max_colwidth = 200
8
+
9
+ if len(sys.argv) < 2:
10
+ print ("Usage: python dump.py <DataFrame file> [List of Event Types]")
11
+ sys.exit()
12
+
13
+ file = sys.argv[1]
14
+
15
+ df = pd.read_pickle(file, compression='bz2')
16
+
17
+ if len(sys.argv) > 2:
18
+ events = sys.argv[2:]
19
+ event = "|".join(events)
20
+ df = df[df['EventType'].str.contains(event)]
21
+
22
+ print(df)
23
+
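The optional event-type filter joins the requested types into a single regex alternation and matches it against the EventType column. A minimal sketch of that filter on a synthetic frame; the rows are illustrative, and the event names are those used by the other scripts in this upload:

import pandas as pd

df = pd.DataFrame({'EventType': ['LAST_TRADE', 'BEST_BID', 'BEST_ASK', 'LAST_TRADE'],
                   'Event': ['100,$99.50', 'ABM,$99.49,300', 'ABM,$99.51,200', '50,$99.51']})

events = ['LAST_TRADE', 'BEST_BID']
event = "|".join(events)                          # 'LAST_TRADE|BEST_BID', as in the script above
print(df[df['EventType'].str.contains(event)])    # keeps only the requested event types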
ABIDES/cli/event_midpoint.py ADDED
@@ -0,0 +1,208 @@
1
+ import ast
2
+ import matplotlib
3
+ matplotlib.use('TkAgg')
4
+ import matplotlib.pyplot as plt
5
+ import pandas as pd
6
+ import os
7
+ import re
8
+ import sys
9
+
10
+ from joblib import Memory
11
+
12
+ # Auto-detect terminal width.
13
+ pd.options.display.width = None
14
+ pd.options.display.max_rows = 1000
15
+ pd.options.display.max_colwidth = 200
16
+
17
+ # Initialize a persistent memcache.
18
+ #mem_sim = Memory(cachedir='./.cached_plot_sim', verbose=0)
19
+
20
+
21
+ # Linewidth for plots.
22
+ LW = 2
23
+
24
+ # Rolling window for smoothing.
25
+ #SIM_WINDOW = 250
26
+ SIM_WINDOW = 1
27
+
28
+
29
+ # Used to read and cache simulated quotes.
30
+ # Doesn't actually pay attention to symbols yet.
31
+ #@mem_sim.cache
32
+ def read_simulated_quotes (file, symbol):
33
+ print ("Simulated quotes were not cached. This will take a minute.")
34
+ df = pd.read_pickle(file, compression='bz2')
35
+ df['Timestamp'] = df.index
36
+
37
+ # Keep only the last bid and last ask event at each timestamp.
38
+ df = df.drop_duplicates(subset=['Timestamp','EventType'], keep='last')
39
+
40
+ del df['Timestamp']
41
+
42
+ df_bid = df[df['EventType'] == 'BEST_BID'].copy()
43
+ df_ask = df[df['EventType'] == 'BEST_ASK'].copy()
44
+
45
+ if len(df) <= 0:
46
+ print ("There appear to be no simulated quotes.")
47
+ sys.exit()
48
+
49
+ df_bid['BEST_BID'] = [b for s,b,bv in df_bid['Event'].str.split(',')]
50
+ df_bid['BEST_BID_VOL'] = [bv for s,b,bv in df_bid['Event'].str.split(',')]
51
+ df_ask['BEST_ASK'] = [a for s,a,av in df_ask['Event'].str.split(',')]
52
+ df_ask['BEST_ASK_VOL'] = [av for s,a,av in df_ask['Event'].str.split(',')]
53
+
54
+ df_bid['BEST_BID'] = df_bid['BEST_BID'].str.replace('$','').astype('float64')
55
+ df_ask['BEST_ASK'] = df_ask['BEST_ASK'].str.replace('$','').astype('float64')
56
+
57
+ df_bid['BEST_BID_VOL'] = df_bid['BEST_BID_VOL'].astype('float64')
58
+ df_ask['BEST_ASK_VOL'] = df_ask['BEST_ASK_VOL'].astype('float64')
59
+
60
+ df = df_bid.join(df_ask, how='outer', lsuffix='.bid', rsuffix='.ask')
61
+ df['BEST_BID'] = df['BEST_BID'].ffill().bfill()
62
+ df['BEST_ASK'] = df['BEST_ASK'].ffill().bfill()
63
+ df['BEST_BID_VOL'] = df['BEST_BID_VOL'].ffill().bfill()
64
+ df['BEST_ASK_VOL'] = df['BEST_ASK_VOL'].ffill().bfill()
65
+
66
+ df['MIDPOINT'] = (df['BEST_BID'] + df['BEST_ASK']) / 2.0
67
+
68
+ return df
69
+
70
+
71
+
72
+ # Main program starts here.
73
+
74
+ if len(sys.argv) < 3:
75
+ print ("Usage: python event_midpoint.py <Ticker symbol> <Simulator DataFrame file(s)>")
76
+ sys.exit()
77
+
78
+ # TODO: only really works for one symbol right now.
79
+
80
+ symbol = sys.argv[1]
81
+ sim_files = sys.argv[2:]
82
+
83
+ fig,ax = plt.subplots(figsize=(12,9), nrows=1, ncols=1)
84
+
85
+
86
+
87
+ # Plot each impact simulation with the baseline subtracted (i.e. residual effect).
88
+ i = 1
89
+ legend = []
90
+ #legend = ['baseline']
91
+
92
+ # Events is now a dictionary of event lists (key == greed parameter).
93
+ events = {}
94
+
95
+ first_date = None
96
+ impact_time = 200
97
+
98
+ for sim_file in sim_files:
99
+
100
+ # Skip baseline files.
101
+ if 'baseline' in sim_file: continue
102
+
103
+ if 'greed' in os.path.dirname(sim_file):
104
+ # Group plots by greed parameter.
105
+ m = re.search(r"greed(\d\d\d)_", sim_file)
106
+ g = m.group(1)
107
+ else:
108
+ g = 'greed'
109
+
110
+ baseline_file = os.path.join(os.path.dirname(sim_file) + '_baseline', os.path.basename(sim_file))
111
+ print ("Visualizing simulation baseline from {}".format(baseline_file))
112
+
113
+ df_baseline = read_simulated_quotes(baseline_file, symbol)
114
+
115
+ # Read the event file.
116
+ print ("Visualizing simulated {} from {}".format(symbol, sim_file))
117
+
118
+ df_sim = read_simulated_quotes(sim_file, symbol)
119
+
120
+ plt.rcParams.update({'font.size': 12})
121
+
122
+ # Given nanosecond ("time step") data, we can just force everything to
123
+ # fill out an integer index of nanoseconds.
124
+ rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')
125
+
126
+ df_baseline = df_baseline[~df_baseline.index.duplicated(keep='last')]
127
+ df_baseline = df_baseline.reindex(rng,method='ffill')
128
+ df_baseline = df_baseline.reset_index(drop=True)
129
+
130
+ df_sim = df_sim[~df_sim.index.duplicated(keep='last')]
131
+ df_sim = df_sim.reindex(rng,method='ffill')
132
+ df_sim = df_sim.reset_index(drop=True)
133
+
134
+ # Absolute price difference.
135
+ #s = df_sim['MIDPOINT'] - df_baseline['MIDPOINT']
136
+
137
+ # Relative price difference.
138
+ s = (df_sim['MIDPOINT'] / df_baseline['MIDPOINT']) - 1.0
139
+
140
+ s = s.rolling(window=SIM_WINDOW).mean()
141
+ s.name = sim_file
142
+
143
+ if g not in events: events[g] = []
144
+
145
+ events[g].append(s.copy())
146
+
147
+ i += 1
148
+
149
+
150
+ # Now have a list of series (each an event) that are time-aligned. BUT the data is
151
+ # still aperiodic, so they may not have the exact same index timestamps.
152
+
153
+ legend = []
154
+
155
+ for g in events:
156
+ df = pd.DataFrame()
157
+ legend.append("greed = " + str(g))
158
+
159
+ for s in events[g]:
160
+ print ("Joining {}".format(s.name))
161
+ df = df.join(s, how='outer')
162
+
163
+ df.dropna(how='all', inplace=True)
164
+ df = df.ffill().bfill()
165
+
166
+ # Smooth after combining means at each instant-of-trade.
167
+ #df.mean(axis=1).rolling(window=250).mean().plot(grid=True, linewidth=LW, ax=ax)
168
+
169
+ # No additional smoothing.
170
+ m = df.mean(axis=1)
171
+ s = df.std(axis=1)
172
+
173
+ # Plot mean and std.
174
+ m.plot(grid=True, linewidth=LW, ax=ax, fontsize=12, label="Relative mean mid-price")
175
+
176
+ # Fill std region?
177
+ #ax.fill_between(m.index, m-s, m+s, alpha=0.2)
178
+
179
+
180
+ # Do the rest a single time for the whole plot.
181
+
182
+ # If we need a vertical "time of event" line...
183
+ ax.axvline(x=200, color='0.5', linestyle='--', linewidth=2, label="Order placement time")
184
+
185
+ # Absolute or relative time labels...
186
+ ax.set_xticklabels(['0','10000','20000','30000','40000','50000','60000','70000'])
187
+ #ax.set_xticklabels(['T-30', 'T-20', 'T-10', 'T', 'T+10', 'T+20', 'T+30'])
188
+
189
+ ax.legend(legend)
190
+ #ax.legend()
191
+
192
+ # Force y axis limits to make multiple plots line up exactly...
193
+ #ax.set_ylim(-0.0065,0.0010)
194
+ #ax.set_ylim(-0.0010,0.0065)
195
+
196
+ # If an in-figure super title is required...
197
+ #plt.suptitle('Impact Event Study: {}'.format(symbol))
198
+
199
+ ax.set_xlabel('Relative Time (ms)', fontsize=12, fontweight='bold')
200
+ ax.set_ylabel('Baseline-Relative Price', fontsize=12, fontweight='bold')
201
+
202
+ #plt.savefig('IABS_SELL_100_multi_size.png')
203
+ #plt.savefig('abides_impact_sell.png')
204
+ #plt.savefig('abides_multi_buy.png')
205
+ #plt.savefig('abides_multi_sell.png')
206
+
207
+ #plt.show()
208
+
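The quantity plotted here is the baseline-relative mid-price: the simulated midpoint divided by the baseline midpoint, minus one, averaged across runs per greed parameter. A small numeric sketch of the per-run series, with illustrative prices:

import pandas as pd

sim_mid      = pd.Series([100.00, 100.10, 100.25])
baseline_mid = pd.Series([100.00, 100.05, 100.05])

relative = (sim_mid / baseline_mid) - 1.0   # 0.0 means no residual impact versus the baseline run
print(relative.round(6))                    # 0.0, then roughly 0.0005 and 0.002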
ABIDES/cli/event_ticker.py ADDED
@@ -0,0 +1,152 @@
1
+ import ast
2
+ import matplotlib
3
+ matplotlib.use('TkAgg')
4
+ import matplotlib.pyplot as plt
5
+ import pandas as pd
6
+ import os
7
+ import re
8
+ import sys
9
+
10
+ from joblib import Memory
11
+
12
+ # Auto-detect terminal width.
13
+ pd.options.display.width = None
14
+ pd.options.display.max_rows = 1000
15
+ pd.options.display.max_colwidth = 200
16
+
17
+ # Initialize a persistent memcache.
18
+ mem_sim = Memory(cachedir='./.cached_plot_sim', verbose=0)
19
+
20
+
21
+ # Linewidth for plots.
22
+ LW = 2
23
+
24
+ # Rolling window for smoothing.
25
+ #SIM_WINDOW = 250
26
+ SIM_WINDOW = 1
27
+
28
+
29
+ # Used to read and cache simulated trades.
30
+ # Doesn't actually pay attention to symbols yet.
31
+ #@mem_sim.cache
32
+ def read_simulated_trades (file, symbol):
33
+ #print ("Simulated trades were not cached. This will take a minute.")
34
+ df = pd.read_pickle(file, compression='bz2')
35
+ df = df[df['EventType'] == 'LAST_TRADE']
36
+
37
+ if len(df) <= 0:
38
+ print ("There appear to be no simulated trades.")
39
+ sys.exit()
40
+
41
+ df['PRICE'] = [y for x,y in df['Event'].str.split(',')]
42
+ df['SIZE'] = [x for x,y in df['Event'].str.split(',')]
43
+
44
+ df['PRICE'] = df['PRICE'].str.replace('$','').astype('float64')
45
+ df['SIZE'] = df['SIZE'].astype('float64')
46
+
47
+ return df
48
+
49
+
50
+ # Main program starts here.
51
+
52
+ if len(sys.argv) < 3:
53
+ print ("Usage: python event_ticker.py <Ticker symbol> <Simulator DataFrame file(s)>")
54
+ sys.exit()
55
+
56
+ # TODO: only really works for one symbol right now.
57
+
58
+ symbol = sys.argv[1]
59
+ sim_files = sys.argv[2:]
60
+
61
+ fig,ax = plt.subplots(figsize=(12,9), nrows=1, ncols=1)
62
+
63
+
64
+
65
+ # Plot each impact simulation with the baseline subtracted (i.e. residual effect).
66
+ i = 1
67
+ legend = []
68
+ #legend = ['baseline']
69
+
70
+ events = []
71
+
72
+ first_date = None
73
+ impact_time = 200
74
+
75
+ for sim_file in sim_files:
76
+
77
+ # Skip baseline files.
78
+ if 'baseline' in sim_file: continue
79
+
80
+ baseline_file = os.path.join(os.path.dirname(sim_file) + '_baseline', os.path.basename(sim_file))
81
+ print ("Visualizing simulation baseline from {}".format(baseline_file))
82
+
83
+ df_baseline = read_simulated_trades(baseline_file, symbol)
84
+
85
+ # Read the event file.
86
+ print ("Visualizing simulated {} from {}".format(symbol, sim_file))
87
+
88
+ df_sim = read_simulated_trades(sim_file, symbol)
89
+
90
+ plt.rcParams.update({'font.size': 12})
91
+
92
+ # Given nanosecond ("time step") data, we can just force everything to
93
+ # fill out an integer index of nanoseconds.
94
+ rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')
95
+
96
+ df_baseline = df_baseline[~df_baseline.index.duplicated(keep='last')]
97
+ df_baseline = df_baseline.reindex(rng,method='ffill')
98
+ df_baseline = df_baseline.reset_index(drop=True)
99
+
100
+ df_sim = df_sim[~df_sim.index.duplicated(keep='last')]
101
+ df_sim = df_sim.reindex(rng,method='ffill')
102
+ df_sim = df_sim.reset_index(drop=True)
103
+
104
+ s = df_sim['PRICE'] - df_baseline['PRICE']
105
+ s = s.rolling(window=SIM_WINDOW).mean()
106
+
107
+ s.name = sim_file
108
+ events.append(s.copy())
109
+
110
+ i += 1
111
+
112
+
113
+ # Now have a list of series (each an event) that are time-aligned.
114
+ df = pd.DataFrame()
115
+
116
+ for s in events:
117
+ print ("Joining {}".format(s.name))
118
+ df = df.join(s, how='outer')
119
+
120
+ df.dropna(how='all', inplace=True)
121
+ df = df.ffill().bfill()
122
+
123
+ # Smooth after combining means at each instant-of-trade.
124
+ #df.mean(axis=1).rolling(window=250).mean().plot(grid=True, linewidth=LW, ax=ax)
125
+
126
+ # No additional smoothing.
127
+ m = df.mean(axis=1)
128
+ s = df.std(axis=1)
129
+
130
+ # Plot mean and std.
131
+ m.plot(grid=True, linewidth=LW, ax=ax)
132
+
133
+ # Shade the stdev region?
134
+ ax.fill_between(m.index, m-s, m+s, alpha=0.2)
135
+
136
+ # Override prettier axis ticks...
137
+ #ax.set_xticklabels(['T-30', 'T-20', 'T-10', 'T', 'T+10', 'T+20', 'T+30'])
138
+
139
+ # Force y axis limits to match some other plot.
140
+ #ax.set_ylim(-0.1, 0.5)
141
+
142
+ # Set a super title if required.
143
+ plt.suptitle('Impact Event Study: {}'.format(symbol))
144
+
145
+ ax.set_xlabel('Relative Time')
146
+ ax.set_ylabel('Baseline-Relative Price')
147
+
148
+ #plt.savefig('background_{}.png'.format(b))
149
+
150
+ #plt.show()
151
+
152
+
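Like event_midpoint.py, this script first aligns each run to a common one-nanosecond grid with a forward-filled reindex and only then differences simulated and baseline trade prices. A compact sketch of that alignment on two tiny, unaligned series with illustrative timestamps:

import pandas as pd

sim = pd.Series([100.0, 100.5],
                index=pd.to_datetime(['2020-01-01 09:30:00.000000001',
                                      '2020-01-01 09:30:00.000000005']))
base = pd.Series([100.0, 100.2],
                 index=pd.to_datetime(['2020-01-01 09:30:00.000000001',
                                       '2020-01-01 09:30:00.000000004']))

rng = pd.date_range(start=sim.index[0], end=sim.index[-1], freq='1N')   # one-nanosecond grid
sim_aligned = sim.reindex(rng, method='ffill').reset_index(drop=True)
base_aligned = base.reindex(rng, method='ffill').reset_index(drop=True)
print(sim_aligned - base_aligned)   # baseline-relative trade price on a common integer index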
ABIDES/cli/intraday_index.py ADDED
@@ -0,0 +1,255 @@
1
+ # This file calculates the intraday index, using a method similar to the Dow,
2
+ # after the conclusion of a simulation run. It takes the average price
3
+ # of the symbols in the index at each timestep, and graphs it. The
4
+ # price refers to the mid.
5
+ # Also graphs the mid of the underlying symbols
6
+
7
+ import ast
8
+ import matplotlib
9
+ matplotlib.use('Agg')
10
+ import matplotlib.pyplot as plt
11
+ import pandas as pd
12
+ import os
13
+ import sys
14
+ import numpy as np
15
+
16
+ from joblib import Memory
17
+
18
+ # Auto-detect terminal width.
19
+ pd.options.display.width = None
20
+ pd.options.display.max_rows = 1000
21
+ pd.options.display.max_colwidth = 200
22
+
23
+ # Initialize a persistent memcache.
24
+ mem_hist = Memory(cachedir='./.cached_plot_hist', verbose=0)
25
+ mem_sim = Memory(cachedir='./.cached_plot_sim', verbose=0)
26
+
27
+
28
+ PRINT_BASELINE = False
29
+ PRINT_DELTA_ONLY = False
30
+
31
+ BETWEEN_START = pd.to_datetime('09:30').time()
32
+ BETWEEN_END = pd.to_datetime('09:30:00.000001').time()
33
+
34
+ # Linewidth for plots.
35
+ LW = 2
36
+
37
+ # Used to read and cache simulated quotes.
38
+ # Doesn't actually pay attention to symbols yet.
39
+ #@mem_sim.cache
40
+ def read_simulated_quotes (file):
41
+ print ("Simulated quotes were not cached. This will take a minute.")
42
+ df = pd.read_pickle(file, compression='bz2')
43
+ df['Timestamp'] = df.index
44
+
45
+ df_bid = df[df['EventType'] == 'BEST_BID'].copy()
46
+ df_ask = df[df['EventType'] == 'BEST_ASK'].copy()
47
+
48
+ if len(df) <= 0:
49
+ print ("There appear to be no simulated quotes.")
50
+ sys.exit()
51
+
52
+ df_bid['SYM'] = [s for s,b,bv in df_bid['Event'].str.split(',')]
53
+ df_bid['BEST_BID'] = [b for s,b,bv in df_bid['Event'].str.split(',')]
54
+ df_bid['BEST_BID_VOL'] = [bv for s,b,bv in df_bid['Event'].str.split(',')]
55
+ df_ask['SYM'] = [s for s,a,av in df_ask['Event'].str.split(',')]
56
+ df_ask['BEST_ASK'] = [a for s,a,av in df_ask['Event'].str.split(',')]
57
+ df_ask['BEST_ASK_VOL'] = [av for s,a,av in df_ask['Event'].str.split(',')]
58
+
59
+ df_bid['BEST_BID'] = df_bid['BEST_BID'].str.replace('$','').astype('float64')
60
+ df_ask['BEST_ASK'] = df_ask['BEST_ASK'].str.replace('$','').astype('float64')
61
+
62
+ df_bid['BEST_BID_VOL'] = df_bid['BEST_BID_VOL'].astype('float64')
63
+ df_ask['BEST_ASK_VOL'] = df_ask['BEST_ASK_VOL'].astype('float64')
64
+
65
+
66
+ # Keep only the last bid and last ask event at each timestamp.
67
+ df_bid = df_bid.drop_duplicates(subset=['Timestamp', 'SYM'], keep='last')
68
+ df_ask = df_ask.drop_duplicates(subset=['Timestamp', 'SYM'], keep='last')
69
+
70
+ #df = df_bid.join(df_ask, how='outer', lsuffix='.bid', rsuffix='.ask')
71
+
72
+ # THIS ISN'T TRUE, YOU CAN'T GET THE MID FROM FUTURE ORDERS!!!
73
+ #df['BEST_BID'] = df['BEST_BID'].ffill().bfill()
74
+ #df['BEST_ASK'] = df['BEST_ASK'].ffill().bfill()
75
+ #df['BEST_BID_VOL'] = df['BEST_BID_VOL'].ffill().bfill()
76
+ #df['BEST_ASK_VOL'] = df['BEST_ASK_VOL'].ffill().bfill()
77
+ df = pd.merge(df_bid, df_ask, how='left', left_on=['Timestamp','SYM'], right_on = ['Timestamp','SYM'])
78
+
79
+ df['MIDPOINT'] = (df['BEST_BID'] + df['BEST_ASK']) / 2.0
80
+
81
+ #ts = df['Timestamp.bid']
82
+ ts = df['Timestamp']
83
+ #print(df)
84
+ df['INTRADAY_INDEX'] = 0
85
+ #df['Timestamp.bid'] = df.index.to_series()
86
+ #df['Timestamp'] = df.index.to_series()
87
+ #df['SYM.bid'] = df['SYM.bid'].fillna(df['SYM.ask'])
88
+ #symbols = df['SYM.bid'].unique()
89
+ symbols = df['SYM'].unique()
90
+ #print(df)
91
+ for i,x in enumerate(symbols):
92
+ #df_one_sym = df[df['SYM.bid']==x]
93
+ df_one_sym = df[df['SYM']==x]
94
+ #df_one_sym = df_one_sym[['Timestamp.bid','MIDPOINT','BEST_BID','BEST_ASK']]
95
+ df_one_sym = df_one_sym[['Timestamp','MIDPOINT','BEST_BID','BEST_ASK']]
96
+ #md = pd.merge_asof(ts, df_one_sym, on='Timestamp.bid')
97
+ #print(ts)
98
+ #print(df_one_sym)
99
+ md = pd.merge_asof(ts, df_one_sym, on='Timestamp')
100
+ md = md.set_index(df.index)
101
+ df['MIDPOINT.' + x] = md['MIDPOINT']
102
+ df['BID.' + x] = md['BEST_BID']
103
+ df['ASK.' + x] = md['BEST_ASK']
104
+ if x != 'ETF':
105
+ df['INTRADAY_INDEX'] = df['INTRADAY_INDEX'] + md['MIDPOINT']
106
+ df['MSE'] = (df['INTRADAY_INDEX'] - df['MIDPOINT.ETF'])**2
107
+
108
+ #del df['Timestamp.bid']
109
+ #del df['Timestamp.ask']
110
+ df = df.set_index(df['Timestamp'])
111
+ del df['Timestamp']
112
+
113
+ return df
114
+
115
+
116
+
117
+ # Main program starts here.
118
+
119
+ if len(sys.argv) < 2:
120
+ print ("Usage: python intraday_index.py <Simulator DataFrame file>")
121
+ sys.exit()
122
+
123
+ # TODO: only really works for one symbol right now.
124
+
125
+ #symbols = sys.argv[1]
126
+ sim_file = sys.argv[1]
127
+
128
+ print ("Visualizing simulated {} from {}".format(12,sim_file))
129
+ df_sim = read_simulated_quotes(sim_file)
130
+
131
+ if PRINT_BASELINE:
132
+ baseline_file = os.path.join(os.path.dirname(sim_file) + '_baseline', os.path.basename(sim_file))
133
+ print (baseline_file)
134
+ df_baseline = read_simulated_quotes(baseline_file)
135
+
136
+ plt.rcParams.update({'font.size': 12})
137
+
138
+
139
+ # Use to restrict time to plot.
140
+ df_sim = df_sim.between_time(BETWEEN_START, BETWEEN_END)
141
+
142
+ if PRINT_BASELINE:
143
+ df_baseline = df_baseline.between_time(BETWEEN_START, BETWEEN_END)
144
+
145
+ fig,ax = plt.subplots(figsize=(12,9), nrows=1, ncols=1)
146
+ axes = [ax]
147
+
148
+ # For smoothing...
149
+ #hist_window = 100
150
+ #sim_window = 100
151
+
152
+ hist_window = 1
153
+ sim_window = 1
154
+
155
+ if PRINT_BASELINE:
156
+ # For nanosecond experiments, turn it into int index. Pandas gets weird if all
157
+ # the times vary only by a few nanoseconds.
158
+ rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')
159
+
160
+ df_baseline = df_baseline[~df_baseline.index.duplicated(keep='last')]
161
+ df_baseline = df_baseline.reindex(rng,method='ffill')
162
+ df_baseline = df_baseline.reset_index(drop=True)
163
+
164
+ df_sim = df_sim[~df_sim.index.duplicated(keep='last')]
165
+ df_sim = df_sim.reindex(rng,method='ffill')
166
+ df_sim = df_sim.reset_index(drop=True)
167
+
168
+ # Print both separately.
169
+ if PRINT_DELTA_ONLY:
170
+ # Print the difference as a single series.
171
+ df_diff = df_sim['MIDPOINT'] - df_baseline['MIDPOINT']
172
+
173
+ # Smoothing.
174
+ df_diff = df_diff.rolling(window=10).mean()
175
+
176
+ df_diff.plot(color='C0', grid=True, linewidth=LW, ax=axes[0])
177
+
178
+ axes[0].legend(['Bid-ask Midpoint Delta'])
179
+ else:
180
+ df_baseline['MIDPOINT'].plot(color='C0', grid=True, linewidth=LW, ax=axes[0])
181
+ df_sim['MIDPOINT'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
182
+
183
+ axes[0].legend(['Baseline', 'With Impact'])
184
+
185
+ else:
186
+ #df_sim['PRICE'] = df_sim['PRICE'].rolling(window=sim_window).mean()
187
+
188
+ # For nanosecond experiments, turn it into int index. Pandas gets weird if all
189
+ # the times vary only by a few nanoseconds.
190
+ rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')
191
+ df_sim = df_sim[~df_sim.index.duplicated(keep='last')]
192
+ df_sim = df_sim.reindex(rng,method='ffill')
193
+ df_time = df_sim.copy()
194
+ df_sim = df_sim.reset_index(drop=True)
195
+
196
+ #symbols = df_sim['SYM.bid'].unique()
197
+ symbols = df_sim['SYM'].unique()
198
+ for i,x in enumerate(symbols):
199
+ #df_sim[df_sim['SYM.bid']==x]['MIDPOINT.' + x].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
200
+ df_sim['MIDPOINT.' + x].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
201
+ #df_sim['BID.' + x].plot(color='C2', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
202
+ #df_sim['ASK.' + x].plot(color='C3', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
203
+ if x != 'ETF':
204
+ axes[0].legend(['Simulated'])
205
+
206
+ plt.suptitle('Bid-Ask Midpoint: {}'.format(x))
207
+
208
+ axes[0].set_ylabel('Quote Price')
209
+ axes[0].set_xlabel('Quote Time')
210
+
211
+ plt.savefig('graphs/background_' + str(x) + '_{}.png'.format('png'))
212
+ plt.cla()
213
+
214
+ if x == 'ETF':
215
+ df_sim['MIDPOINT.' + x].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0], label = 'ETF Mid')
216
+ i = np.argwhere(symbols=='ETF')
217
+ symbols_portfolio = np.delete(symbols, i)
218
+ df_sim['INTRADAY_INDEX'].plot(color='C4', grid=True, linewidth=LW, alpha=0.9, ax=axes[0], label = 'Index')
219
+ #axes[0].legend(['Simulated'])
220
+ plt.suptitle('65 ZI, 0 ETF Arb, gamma = 500: {}'.format(symbols_portfolio))
221
+
222
+ axes[0].set_ylabel('Quote Price')
223
+ axes[0].set_xlabel('Quote Time')
224
+ axes[0].legend()
225
+ ymin = 249000
226
+ ymax = 251000
227
+ axes[0].set_ylim([ymin,ymax])
228
+
229
+ plt.savefig('graphs/index_vs_etf_ten_arb_gamma_500'.format('png'))
230
+ plt.cla()
231
+
232
+ i = np.argwhere(symbols=='ETF')
233
+ symbols_portfolio = np.delete(symbols, i)
234
+ df_sim['INTRADAY_INDEX'].plot(color='C4', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
235
+ axes[0].legend(['Simulated'])
236
+ plt.suptitle('Intraday Index: {}'.format(symbols_portfolio))
237
+
238
+ plt.savefig('graphs/intraday_index_' + str(symbols_portfolio) + '_{}.png'.format('png'))
239
+ plt.cla()
240
+
241
+ df_sim['MSE'].plot(color='C5', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
242
+ #axes[0].legend(['Simulated'])
243
+ plt.suptitle('65 ZI, 10 ETF Arb, gamma = 500')
244
+ axes[0].set_ylabel('Mean Squared Error')
245
+ axes[0].set_xlabel('Quote Time')
246
+ ymin = -1000
247
+ ymax = 700000
248
+ axes[0].set_ylim([ymin,ymax])
249
+
250
+ plt.savefig('graphs/mse_index_etf_ten_arb_gamma_500'.format('png'))
251
+ plt.close()
252
+ #df_time.to_csv('test.csv')
253
+
254
+ ##plt.show()
255
+
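The tracking-error column computed above ('MSE') is the squared gap, at each quote time, between the sum of the constituent midpoints and the ETF midpoint. A minimal numeric sketch, with prices in cents and illustrative symbol names:

import pandas as pd

quotes = pd.DataFrame({
    'MIDPOINT.ABM':  [100000, 100010, 100020],
    'MIDPOINT.SYM2': [150000, 150005, 149995],
    'MIDPOINT.ETF':  [250010, 250000, 250020],
})

constituents = [c for c in quotes.columns if c != 'MIDPOINT.ETF']
quotes['INTRADAY_INDEX'] = quotes[constituents].sum(axis=1)
quotes['MSE'] = (quotes['INTRADAY_INDEX'] - quotes['MIDPOINT.ETF']) ** 2
print(quotes[['INTRADAY_INDEX', 'MIDPOINT.ETF', 'MSE']])   # squared tracking gap per quote time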