I'm almost certain this has to do with the port configuration; specifically, with the URL parameter I use in the driver connection:
var driver = neo4j.driver("bolt://neo4j:7687", neo4j.auth.basic("tester", "tester"));
where "7687" is Neo4j's Bolt port from docker-compose.yml, and "neo4j" is the name I assigned to the Neo4j image (see below).
I'm following (with a different JS driver) the only Node + Neo4j + Docker tutorial I could find (https://medium.com/@slavahatnuke/neo4j-node-js-docker-docker-compose-fdc1cc9cf405), and the user comments there seem to confirm that the error is in my connection URL.
"use strict"; var express = require('express'); var neo4j = require('neo4j-driver').v1; const PORT = 8080; var app = express(); //below is a test of the Neo4j connection, with which I have trouble connecting from in a Docker container router.get('/test', function (req, res) { var driver = neo4j.driver("bolt://neo4j:7687", neo4j.auth.basic("tester", "tester")); var session = driver.session(); console.log(session); var countPromise = session.readTransaction(function(transaction) { var result = transaction.run("MATCH (n) RETURN COUNT(n) AS c") .then(function(result) { console.log(result); return result.records[0].get("c"); }); return result; }); countPromise.then(function(result) { res.json(result); session.close(); driver.close(); }); }); app.listen(PORT); console.log("Running on " + PORT); module.exports = app;
The Neo4j image is the official Neo4j Docker image.
I'm using the following docker-compose.yml file:
nodeapp:
  image: node:latest
  volumes:
    - ./nodeapp:/nodeapp
  links:
    - neo4j
  ports:
    - "18080:8080"
  working_dir: /nodeapp
  entrypoint: node index.js
neo4j:
  image: neo4j
  ports:
    - "17474:7474"
    - "17687:7687"
  volumes:
    - ./db/dbms:/data/dbms
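As a sanity check (a sketch, assuming the stack is already up; the container name is taken from the "docker-compose ps" output below): the "links" entry should make the hostname "neo4j" resolvable from inside the nodeapp container, which can be verified with

docker exec -it initialnode_nodeapp_1 getent hosts neo4j

If that prints the neo4j container's address, name resolution is not the problem and the issue is further down the stack.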
When I run it (with "docker-compose up"), both containers come up (see the "docker-compose ps" output below), but the connection test still fails. I'm sure there's something simple about docker-compose that I'm missing.
Without Docker, I successfully connected the app to the database using the URL "bolt://localhost:7687". Now that it's running in Docker, I don't know what URL I should use.
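One way to keep both setups working (a minimal sketch; NEO4J_URL is a hypothetical variable name, not something from the tutorial) is to read the Bolt URL from the environment:

var neo4j = require('neo4j-driver').v1;

// Hypothetical NEO4J_URL: "bolt://neo4j:7687" inside the compose network,
// "bolt://localhost:7687" when running directly on the host.
var url = process.env.NEO4J_URL || "bolt://localhost:7687";
var driver = neo4j.driver(url, neo4j.auth.basic("tester", "tester"));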
Thank you for any guidance.
Output of "docker-compose ps":

        Name                     Command              State    Ports
-----------------------------------------------------------------------------------------------
initialnode_neo4j_1     /docker-entrypoint.sh neo4j   Up       7473/tcp, 0.0.0.0:17474->7474/tcp, 0.0.0.0:17687->7687/tcp
initialnode_nodeapp_1   node index.js                 Up       0.0.0.0:18080->8080/tcp
"NetworkMode": "default", "PortBindings": { "7474/tcp": [ { "HostIp": "", "HostPort": "17474" } ], "7687/tcp": [ { "HostIp": "", "HostPort": "17687" } ] }, ... "ExposedPorts": { "7473/tcp": {}, "7474/tcp": {}, "7687/tcp": {} }, ... "NetworkSettings": { "Bridge": "", "SandboxID": "5eb2b7805cc802391c08be8c85cdbb19fd42de6cc794e76e57f5214c6763c140", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "7473/tcp": null, "7474/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "17474" } ], "7687/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "17687" } ] }, ... "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "8ffa640b707301bb9b8b6c2a1cfb6f5deac8c1a5f5de9e9de0976697b01027ef", "EndpointID": "941911b65cf0b714fa13ac45ca967478a513fe5c282283aa0bfe20afcc0df06f", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02" } } } ...
{"log":" _writeConnectionHolder: \n","stream":"stdout","time":"2017-05-05T16:20:59.37931736Z"} {"log":" ConnectionHolder {\n","stream":"stdout","time":"2017-05-05T16:20:59.379324034Z"} {"log":" _mode: 'WRITE',\n","stream":"stdout","time":"2017-05-05T16:20:59.379330305Z"} {"log":" _connectionProvider: \n","stream":"stdout","time":"2017-05-05T16:20:59.379353139Z"} {"log":" DirectConnectionProvider {\n","stream":"stdout","time":"2017-05-05T16:20:59.379363823Z"} {"log":" _address: 'neo4j:7687',\n","stream":"stdout","time":"2017-05-05T16:20:59.3793706Z"} {"log":" _connectionPool: [Object],\n","stream":"stdout","time":"2017-05-05T16:20:59.379377162Z"} {"log":" _driverOnErrorCallback: [Function: bound _driverOnErrorCallback] },\n","stream":"stdout","time":"2017-05-05T16:20:59.379384507Z"} {"log":" _referenceCount: 0,\n","stream":"stdout","time":"2017-05-05T16:20:59.379391793Z"} {"log":" _connectionPromise: Promise { null } },\n","stream":"stdout","time":"2017-05-05T16:20:59.379398668Z"} {"log":" _open: true,\n","stream":"stdout","time":"2017-05-05T16:20:59.379405771Z"} {"log":" _hasTx: false,\n","stream":"stdout","time":"2017-05-05T16:20:59.379412739Z"} {"log":" _lastBookmark: undefined,\n","stream":"stdout","time":"2017-05-05T16:20:59.379437046Z"} {"log":" _transactionExecutor: \n","stream":"stdout","time":"2017-05-05T16:20:59.379445641Z"} {"log":" TransactionExecutor {\n","stream":"stdout","time":"2017-05-05T16:20:59.379452547Z"} {"log":" _maxRetryTimeMs: 30000,\n","stream":"stdout","time":"2017-05-05T16:20:59.379459251Z"} {"log":" _initialRetryDelayMs: 1000,\n","stream":"stdout","time":"2017-05-05T16:20:59.379466191Z"} {"log":" _multiplier: 2,\n","stream":"stdout","time":"2017-05-05T16:20:59.379472573Z"} {"log":" _jitterFactor: 0.2,\n","stream":"stdout","time":"2017-05-05T16:20:59.379478888Z"} {"log":" _inFlightTimeoutIds: [] } }\n","stream":"stdout","time":"2017-05-05T16:20:59.379485201Z"} {"log":"(node:1) UnhandledPromiseRejectionWarning: Unhandled promise rejection (rejection id: 2): Error: Connection was closed by server\n","stream":"stderr","time":"2017-05-05T16:21:32.301674097Z"}
It turned out the problem was specific to my machine's setup, which explains why nobody else seemed to run into it. In addition to the Neo4j Docker image, I was also running a local install of Neo4j, and both were using the same ports. That clash is what caused the connection to be refused.
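In hindsight, a quick check would have caught this: list which processes are listening on the Neo4j ports on the host (lsof is one option; an equivalent such as "ss -ltnp" works too):

lsof -i :7474
lsof -i :7687

If a local Neo4j shows up alongside Docker's proxy, the two are competing for the same ports.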